diff --git a/wms/bin/local-servers.sh b/wms/bin/local-servers.sh index 66cb99e1..d4b3a392 100755 --- a/wms/bin/local-servers.sh +++ b/wms/bin/local-servers.sh @@ -1,6 +1,6 @@ #!/bin/sh #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/bin/scripts/sys_shell.py b/wms/bin/scripts/sys_shell.py index 64a3c304..24a278e6 100755 --- a/wms/bin/scripts/sys_shell.py +++ b/wms/bin/scripts/sys_shell.py @@ -1,5 +1,5 @@ #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/bin/servers.sh b/wms/bin/servers.sh index 63c32067..070a2af6 100755 --- a/wms/bin/servers.sh +++ b/wms/bin/servers.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/bin/start-wms.sh b/wms/bin/start-wms.sh index 7155b488..efdab32c 100755 --- a/wms/bin/start-wms.sh +++ b/wms/bin/start-wms.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/bin/stop-wms.sh b/wms/bin/stop-wms.sh index 1d1dc9fc..693e63e7 100755 --- a/wms/bin/stop-wms.sh +++ b/wms/bin/stop-wms.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/bin/wms b/wms/bin/wms index 2934c866..4eb90adc 100755 --- a/wms/bin/wms +++ b/wms/bin/wms @@ -1,6 +1,6 @@ #! /usr/bin/env bash #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/bin/wms-config.sh b/wms/bin/wms-config.sh index fbd315fb..30ad2d37 100755 --- a/wms/bin/wms-config.sh +++ b/wms/bin/wms-config.sh @@ -1,5 +1,5 @@ #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/bin/wms-daemon.sh b/wms/bin/wms-daemon.sh index b56adae5..f6296cd7 100755 --- a/wms/bin/wms-daemon.sh +++ b/wms/bin/wms-daemon.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. 
# * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/bin/wms-daemons.sh b/wms/bin/wms-daemons.sh index 1a148671..c9dadc69 100755 --- a/wms/bin/wms-daemons.sh +++ b/wms/bin/wms-daemons.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/bin/zookeepers.sh b/wms/bin/zookeepers.sh index 335e9f70..9cc0cd61 100755 --- a/wms/bin/zookeepers.sh +++ b/wms/bin/zookeepers.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/conf/wms-env.sh b/wms/conf/wms-env.sh index d36f1e41..e17cdebe 100644 --- a/wms/conf/wms-env.sh +++ b/wms/conf/wms-env.sh @@ -1,5 +1,5 @@ #/** -# *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +# *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. diff --git a/wms/conf/wms-site.xml b/wms/conf/wms-site.xml index 7c4afa3d..a2312e40 100644 --- a/wms/conf/wms-site.xml +++ b/wms/conf/wms-site.xml @@ -2,7 +2,7 @@ - - - - - - - - - - - - - - - - - - - - - + run @@ -296,62 +267,40 @@ + maven-assembly-plugin @@ -393,6 +342,33 @@ + + + org.asciidoctor + asciidoctor-maven-plugin + ${asciidoctor.maven.plugin.version} + + + output-html + + process-asciidoc + + site + + ./images + book + + wms.css + ${project.version} + ${maven.build.timestamp} + + html5 + coderay + target/site + + + + @@ -401,15 +377,28 @@ ${maven.build.timestamp} UTF-8 1.6 - 1.0.4 - 2.0.0-cdh4.5.0 - /opt/home/tools/thrift-0.9.0/bin/thrift - + 1.0.0 ${project.artifactId}-${project.version} + + 1.6 + 3.3 + 2.9.1 + 2.6 + 2.3 + 2.3.4 + 1.0 + 2.7.2 + 1.5.2 + 2.1.2 + 1.5 + 2.5.3 + + + 2.6.0 1.2 1.4 2.1 @@ -429,22 +418,13 @@ 1.4.3 1.2.16 3.4.5 - 2.0.5-alpha - - 1.7.3 - 0.9.0 2.5.3 - 0.11.0 - 0.12.0 - 0.12.0 - 6.0.1-0 - 2.0.0-wso2v1 - - - 2.17 - false - true + + + 2.17 + false + true org.trafodion.wms.SmallTests org.trafodion.wms.MediumTests, org.trafodion.wms.LargeTests @@ -457,7 +437,7 @@ test - + com.yammer.metrics metrics-core @@ -508,87 +488,14 @@ log4j ${log4j.version} - - + - org.apache.hadoop - hadoop-core - 2.0.0-mr1-cdh4.5.0 - - org.apache.hadoop hadoop-common - ${hadoop-two.version} + ${hadoop.version} - org.apache.hadoop - hadoop-hdfs - ${hadoop-two.version} - - - org.apache.hadoop - hadoop-mapreduce-client-app - ${hadoop-two.version} - - - org.apache.hadoop - hadoop-mapreduce-client-common - ${hadoop-two.version} - - - org.apache.hadoop - hadoop-mapreduce-client-core - ${hadoop-two.version} - - - org.apache.hadoop - hadoop-mapreduce-client-hs - ${hadoop-two.version} - - - org.apache.hadoop - hadoop-mapreduce-client-jobclient - ${hadoop-two.version} - - - - - org.apache.hadoop - hadoop-yarn-server-resourcemanager - ${hadoop-two.version} - - - org.apache.avro avro ${avro.version} @@ -619,17 +526,6 @@ - - org.apache.thrift - libthrift - ${thrift.version} - - - org.slf4j - slf4j-simple - - - org.mortbay.jetty 
jetty @@ -765,52 +661,6 @@ - - - - org.apache.hive - hive-common - ${hive-common.version} - - - org.apache.hive - hive-metastore - ${hive-metastore.version} - - - org.apache.hive - hive-exec - ${hive-exec.version} - - - - - vertica - vertica-jdk5 - ${vertica.version} - - - - - org.wso2.siddhi - siddhi-api - ${siddhi.version} - - - org.wso2.siddhi - siddhi-core - ${siddhi.version} - - - org.wso2.siddhi - siddhi-query - ${siddhi.version} - - - joda-time - joda-time - 2.3 - @@ -874,12 +724,7 @@ project-team mailing-list - cim - issue-tracking - license - scm - index - + @@ -913,11 +758,6 @@ maven-jxr-plugin 2.1 - - org.apache.rat - apache-rat-plugin - 0.8 - org.apache.maven.plugins maven-surefire-report-plugin diff --git a/wms/src/assembly/all.xml b/wms/src/assembly/all.xml index 4b8e3373..24d38b1a 100644 --- a/wms/src/assembly/all.xml +++ b/wms/src/assembly/all.xml @@ -4,7 +4,7 @@ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.1 http://maven.apache.org/xsd/assembly-1.1.1.xsd"> - - - - - <link xlink:href="http://www.trafodion.org"> - The Trafodion™ Data Connectivity Services Reference Guide - </link> - - - - - - - - - 2013The Hewlett-Packard Development Company, L.P. - All Rights Reserved. Trafodion, Trafodion DCS and the Trafodion project logo are trademarks of the Hewlett-Packard Development Company, L.P.. - - - - This is the official reference guide of - Trafodion DCS (Data Connectivity Services), - a distributed, ODBC, JDBC connectivity component of Trafodion, built on top of - Apache ZooKeeper - - - - - - - - - - - - - - - - - - - - - - Architecture -
- Overview -
- DCS? - DCS, see figure , is a framework that connects - ODBC/JDBC T4 clients to Trafodion user programs (MXOSRVR servers). In a nutshell, clients connect to a listening - DcsMaster on a well-known port. The DcsMaster looks in ZooKeeper for an "available" - DcsServer user program (MXOSRVR) and returns an object reference to that server back to the client. - The client then connects directly to the MXOSRVR. After the initial startup the DcsMaster restarts any failed - DcsServers, and DcsServers restart any failed MXOSRVRs. - - DCS provides the following: - - A lightweight process management framework. - A high-performance client listener using Java NIO. - Simple configuration and startup. - A highly scalable Trafodion connectivity service. - Uses ZooKeeper as the backbone for coordination and process management. - An embedded user interface to examine state, logs, and process status. - A standalone REST server. - A 100% Java implementation. - -
- DCS Architecture - - - - - - DCS Architecture - - -
-
-
- -
- Client - The Trafodion ODBC/JDBC T4 client drivers connect to MXOSRVRs through the DCS Master. - -
- -
DcsMaster - DcsMaster is the implementation of the Master Server. The Master server - is responsible for listening for client connection requests, monitoring all DcsServer instances in the cluster - and restarting any DcsServers that fail after initial startup. - -
Startup Behavior - The DcsMaster is started via the scripts found in the /bin directory. During startup it registers itself in Zookeeper. - -
-
Runtime Impact - A common question is what happens to a DCS cluster when the DcsMaster goes down. Because the - DcsMaster doesn't affect the running DcsServers or connected clients, the cluster can still function - i.e., clients already connected to MXOSRVRs can continue to work. - However, the DcsMaster controls critical functions such as listening for clients and restarting DcsServers. - So while the cluster can still run for a time without the Master, - the Master should be restarted as soon as possible. -
-
Processes - The Master runs several background threads: - -
Listener - The listener thread is responsible for servicing client requests. It pairs - each client with a registered MXOSRVR found in ZooKeeper. A default port - is configured but this may be changed in the configuration. - See for configuring this property. -
-
- -
-
DcsServer - DcsServer is the server implementation. It is responsible for starting and keeping - its Trafodion MXOSRVR running. - -
Startup Behavior - The DcsServer is started via the scripts found in the /bin directory. During startup it registers itself in Zookeeper. - -
-
Runtime Impact - The DcsServer can continue to function if the DcsMaster goes down; the cluster can still function in a "steady - state." However, the Master controls critical functions such as restarting failed DcsServers. - So while the cluster can still run for a time without the Master, - the Master should be restarted as soon as possible. -
-
Processes - The DcsServer runs a variety of background threads: -
-
- -
- - - - - - - - FAQ - - General - - What is DCS? - - See the in the Architecture chapter. - - - - - Configuration - - How can I get started with my first cluster? - - - See . - - - - - Where can I learn about the rest of the configuration options? - - - See . - - - - - Performance and Troubleshooting - - - How can I improve DCS cluster performance? - - - - See . - - - - - - How can I troubleshoot my DCS cluster? - - - - See . - - - - - Operations - - - How do I manage my DCS cluster? - - - - See - - - - - - -
diff --git a/wms/src/docbkx/configuration.xml b/wms/src/docbkx/configuration.xml deleted file mode 100644 index d218046b..00000000 --- a/wms/src/docbkx/configuration.xml +++ /dev/null @@ -1,600 +0,0 @@ - - - - - Configuration - This chapter is the Not-So-Quick start guide to DCS configuration. - Please read this chapter carefully and ensure that all requirements have - been satisfied. Failure to do so will cause you (and us) grief debugging strange errors. - - - - To configure a deploy, edit a file of environment variables - in conf/wms-env.sh -- this configuration - is used mostly by the launcher shell scripts getting the cluster - off the ground -- and then add configuration to an XML file to - do things like override DCS defaults, tell the location of the ZooKeeper ensemble - - -Be careful editing XML. Make sure you close all elements. -Run your file through xmllint or similar -to ensure well-formedness of your document after an edit session. - - - . - - - After you make an edit to an DCS configuration, make sure you copy the - content of the conf directory to - all nodes of the cluster. DCS will not do this for you. - Use rsync. - -
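 For example, after an edit session you might check the file and push the conf directory out to another node like this (illustrative commands; the node name and install path are placeholders to replace with your own):
 $ xmllint --noout conf/wms-site.xml
 $ rsync -av conf/ node2:/opt/wms/conf/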
- Java - - DCS requires java 6 from Oracle. Usually - you'll want to use the latest version available except the problematic - u18 (u24 is the latest version as of this writing). -
-
- Operating System -
- ssh - - ssh must be installed and - sshd must be running to use DCS's scripts to - manage remote DCS daemons. You must be able to ssh to all - nodes, including your local node, using passwordless login (Google - "ssh passwordless login").
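 One common way to set this up (an illustrative sketch only; the user and node names are placeholders) is to generate a key pair and copy the public key to every node, including the local one:
 $ ssh-keygen -t rsa
 $ ssh-copy-id user@node1
 $ ssh node1    # should now log in without prompting for a password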
- -
- DNS - - Both forward and reverse DNS resolving should work. - - If your machine has multiple interfaces, DCS will use the - interface that the primary hostname resolves to. -
- -
- Loopback IP - DCS expects the loopback IP address to be 127.0.0.1. Ubuntu and some other distributions, - for example, will default to 127.0.1.1 and this will cause problems for you. - - /etc/hosts should look something like this: - - 127.0.0.1 localhost - 127.0.0.1 ubuntu.ubuntu-domain ubuntu - - -
- -
- NTP - - The clocks on cluster members should be in basic alignment. - Some skew is tolerable but wild skew could generate odd behaviors. Run - NTP - on your cluster, or an equivalent.
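 To check that a node's clock is actually synchronized you can, for example, query the local NTP daemon (illustrative commands; availability depends on your distribution):
 $ ntpq -p
 $ ntpstat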
- -
- Windows - DCS is not supported on Windows. -
- -
- -
- Run modes - -
- Single Node - - This is the default mode. Single node is what is described - in the section. In - single node mode, DCS runs all daemons and a local - ZooKeeper all on the same node. ZooKeeper binds to a well-known port. -
- -
- Multi-Node - - Multi-node is where the daemons are spread - across all nodes in the cluster. Before proceeding, ensure you have a - working Trafodion instance. - - - Below we describe the different setups, along with starting, - verifying, and exploring your install. Configuration is described in a - section that follows, . - - To set up a multi-node deploy, you will need to - configure DCS by editing files in the DCS conf - directory. - - - You may need to edit - conf/wms-env.sh to tell DCS which - java to use. In this file you set DCS environment - variables such as the heap size and other options for the - JVM, the preferred location for log files, - etc. Set JAVA_HOME to point at the root of your - java install. -
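 For example, the JAVA_HOME line in conf/wms-env.sh might end up looking like the following (the path is only an example; point it at your own JDK):
 export JAVA_HOME=/usr/lib/jvm/java-6-sun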
- <filename>servers</filename> - - In addition, a multi-node deploy requires that you - modify conf/servers. The - servers file - lists all hosts that you would have running - DcsServers, one host per line. - All servers listed in this file will be started and stopped - when DCS start or stop is run. -
- -
- ZooKeeper and DCS - See section for ZooKeeper setup for DCS. -
-
- -
- Running and Confirming Your Installation - - Make sure Trafodion is running first. Start and stop the Trafodion instance - by running sqstart.sh over in the - MY_SQROOT/sql/scripts directory. You can ensure it started - properly by testing with sqcheck. - - - If you are managing your own ZooKeeper, - start it and confirm it's running; otherwise, DCS will start up ZooKeeper - for you as part of its start process. - - - - Start DCS with the following command: - - - - bin/start-wms.sh - - Run the above from the - - DCS_HOME - - directory. - - You should now have a running DCS instance. DCS logs can be - found in the logs subdirectory. Check them out - especially if DCS had trouble starting. - - - - DCS also puts up a UI listing vital attributes and metrics. By default it's - deployed on the DcsMaster host at port 40010 (DcsServers put up an - informational http server at 40030+their instance number). If the DcsMaster were running on a host named - master.example.org on the default port, to see the - DcsMaster's homepage you'd point your browser at - http://master.example.org:40010. - - - To stop DCS after exiting the DCS shell enter - $ ./bin/stop-wms.sh -stopping wms............... Shutdown can take a moment to - complete. It can take longer if your cluster comprises many - machines. - -
-
- -
- ZooKeeper<indexterm> - <primary>ZooKeeper</primary> - </indexterm> - - DCS depends on a running ZooKeeper cluster. - All participating nodes and clients need to be able to access the - running ZooKeeper ensemble. DCS by default manages a ZooKeeper - "cluster" for you. It will start and stop the ZooKeeper ensemble - as part of the DCS start/stop process. You can also manage the - ZooKeeper ensemble independent of DCS and just point DCS at - the cluster it should use. To toggle DCS management of - ZooKeeper, use the DCS_MANAGES_ZK variable in - conf/wms-env.sh. This variable, which - defaults to true, tells DCS whether to - start/stop the ZooKeeper ensemble servers as part of DCS - start/stop. - - When DCS manages the ZooKeeper ensemble, you can specify - ZooKeeper configuration using its native - zoo.cfg file, or, the easier option is to - just specify ZooKeeper options directly in - conf/wms-site.xml. A ZooKeeper - configuration option can be set as a property in the DCS - wms-site.xml XML configuration file by - prefacing the ZooKeeper option name with - wms.zookeeper.property. For example, the - clientPort setting in ZooKeeper can be changed - by setting the - wms.zookeeper.property.clientPort property. - For all default values used by DCS, including ZooKeeper - configuration, see . Look for the - wms.zookeeper.property prefix - For the full list of ZooKeeper configurations, see - ZooKeeper's zoo.cfg. DCS does not ship - with a zoo.cfg so you will need to browse - the conf directory in an appropriate - ZooKeeper download. - - - You must at least list the ensemble servers in - wms-site.xml using the - wms.zookeeper.quorum property. This property - defaults to a single ensemble member at - localhost which is not suitable for a fully - distributed DCS. (It binds to the local machine only and remote - clients will not be able to connect). - How many ZooKeepers should I run? - - You can run a ZooKeeper ensemble that comprises 1 node - only but in production it is recommended that you run a - ZooKeeper ensemble of 3, 5 or 7 machines; the more members an - ensemble has, the more tolerant the ensemble is of host - failures. Also, run an odd number of machines. In ZooKeeper, - an even number of peers is supported, but it is normally not used - because an even sized ensemble requires, proportionally, more peers - to form a quorum than an odd sized ensemble requires. For example, an - ensemble with 4 peers requires 3 to form a quorum, while an ensemble with - 5 also requires 3 to form a quorum. Thus, an ensemble of 5 allows 2 peers to - fail, and thus is more fault tolerant than the ensemble of 4, which allows - only 1 down peer. - - Give each ZooKeeper server around 1GB of RAM, and if possible, its own - dedicated disk (A dedicated disk is the best thing you can do - to ensure a performant ZooKeeper ensemble). For very heavily - loaded clusters, run ZooKeeper servers on separate machines - from DcsServers. - - - For example, to have DCS manage a ZooKeeper quorum on - nodes host{1,2,3,4,5}.example.com, bound to - port 2222 (the default is 2181) ensure - DCS_MANAGE_ZK is commented out or set to - true in conf/wms-env.sh - and then edit conf/wms-site.xml and set - wms.zookeeper.property.clientPort and - wms.zookeeper.quorum. You should also set - wms.zookeeper.property.dataDir to other than - the default as the default has ZooKeeper persist data under - /tmp which is often cleared on system - restart. In the example below we have ZooKeeper persist to - /user/local/zookeeper. - <configuration> - ... 
- <property> - <name>wms.zookeeper.property.clientPort</name> - <value>2222</value> - <description>Property from ZooKeeper's config zoo.cfg. - The port at which the clients will connect. - </description> - </property> - <property> - <name>wms.zookeeper.quorum</name> - <value>host1.example.com,host2.example.com,host3.example.com,host4.example.com,host5.example.com</value> - <description>Comma separated list of servers in the ZooKeeper Quorum. - For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com". - By default this is set to localhost. For a multi-node setup, this should be set to a full - list of ZooKeeper quorum servers. If DCS_MANAGES_ZK=true set in wms-env.sh - this is the list of servers which we will start/stop ZooKeeper on. - </description> - </property> - <property> - <name>wms.zookeeper.property.dataDir</name> - <value>/usr/local/zookeeper</value> - <description>Property from ZooKeeper's config zoo.cfg. - The directory where the snapshot is stored. - </description> - </property> - ... - </configuration> - -
- Using existing ZooKeeper ensemble - - To point DCS at an existing ZooKeeper cluster, one that - is not managed by DCS, uncomment and set DCS_MANAGES_ZK - in conf/wms-env.sh to false - - ... - # Tell DCS whether it should manage it's own instance of Zookeeper or not. - export DCS_MANAGES_ZK=false Next set ensemble locations - and client port, if non-standard, in - wms-site.xml, or add a suitably - configured zoo.cfg to DCS's - CLASSPATH. DCS will prefer the - configuration found in zoo.cfg over any - settings in wms-site.xml. - - When DCS manages ZooKeeper, it will start/stop the - ZooKeeper servers as a part of the regular start/stop scripts. - If you would like to run ZooKeeper yourself, independent of - DCS start/stop, you would do the following - - -${DCS_HOME}/bin/wms-daemons.sh {start,stop} zookeeper - - - Note that you can use DCS in this manner to start up a - ZooKeeper cluster, unrelated to DCS. Just make sure to uncomment and set - DCS_MANAGES_ZK to false - if you want it to stay up across DCS restarts so that when - DCS shuts down, it doesn't take ZooKeeper down with it. - - For more information about running a distinct ZooKeeper - cluster, see the ZooKeeper Getting - Started Guide. Additionally, see the ZooKeeper Wiki or the - ZooKeeper documentation - for more information on ZooKeeper sizing. - -
-
- - -
- Configuration Files - -
- <filename>wms-site.xml</filename> and <filename>wms-default.xml</filename> - You add site-specific configuration - to the wms-site.xml file; - for DCS, site-specific customizations go into - the file conf/wms-site.xml. - For the list of configurable properties, see - - below or view the raw wms-default.xml - source file in the DCS source code at - src/main/resources. - - - Not all configuration options make it out to - wms-default.xml. Configuration - options that are rarely, if ever, changed can exist only - in code; the only way to turn up such configurations is - via a reading of the source code itself. - - - Currently, changes here will require a cluster restart for DCS to notice the change. - - -
- -
- <filename>wms-env.sh</filename> - Set DCS environment variables in this file. - Examples include options to pass to the JVM on start of - a DCS daemon, such as heap size and garbage collector configs. - You can also set configurations for the DCS configuration directory, log directories, - niceness, ssh options, where to locate process pid files, - etc. Open the file at - conf/wms-env.sh and peruse its content. - Each option is fairly well documented. Add your own environment - variables here if you want them read by DCS daemons on startup. - - Changes here will require a cluster restart for DCS to notice the change. -
- -
- <filename>log4j.properties</filename> - Edit this file to change the rate at which DCS log files - are rolled and to change the level at which DCS logs messages. - - - Changes here will require a cluster restart for DCS to notice the change, - though log levels can be changed for particular daemons via the DCS UI. -
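 For example, to turn up DCS logging to DEBUG you might add a line such as the following (a minimal sketch; the logger name assumes the org.trafodion.wms package used elsewhere in this project):
 log4j.logger.org.trafodion.wms=DEBUG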
- -
- -
- Example Configurations - -
- Basic Distributed DCS Install - - Here is an example basic configuration for a distributed nine - node cluster. The nodes are named example1, - example2, etc., through node - example9 in this example. The DCS Master - is running on the node example1. - DCS Servers run on nodes - example1-example9. A 3-node - ZooKeeper ensemble runs on example1, - example2, and example3 on the - default ports. ZooKeeper data is persisted to the directory - /export/zookeeper. Below we show what the main - configuration files -- wms-site.xml, - servers, and - wms-env.sh -- found in the DCS - conf directory might look like. -
- <filename>wms-site.xml</filename> - - - -<?xml version="1.0"?> -<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> -<configuration> - <property> - <name>wms.zookeeper.quorum</name> - <value>example1,example2,example3</value> - <description>The directory shared by RegionServers. - </description> - </property> - <property> - <name>wms.zookeeper.property.dataDir</name> - <value>/export/zookeeper</value> - <description>Property from ZooKeeper's config zoo.cfg. - The directory where the snapshot is stored. - </description> - </property> -</configuration> - - -
- -
- <filename>servers</filename> - - In this file you list the nodes that will run servers. - - - - example1 - example2 - example3 - example4 - example5 - example6 - example7 - example8 - example9 - -
- -
- <filename>wms-env.sh</filename> - - Below we use a diff to show the differences - from default in the wms-env.sh file. Here we - are setting the DCS heap to be 4G instead of the default - 1G. - - - -$ git diff wms-env.sh -diff --git a/conf/wms-env.sh b/conf/wms-env.sh -index e70ebc6..96f8c27 100644 ---- a/conf/wms-env.sh -+++ b/conf/wms-env.sh -@@ -31,7 +31,7 @@ export JAVA_HOME=/usr/lib//jvm/java-6-sun/ - # export DCS_CLASSPATH= - - # The maximum amount of heap to use, in MB. Default is 1000. --# export DCS_HEAPSIZE=1000 -+export DCS_HEAPSIZE=4096 - - # Extra Java runtime options. - # Below are what we set by default. May only work with SUN JVM. - - - - Use rsync to copy the content of the - conf directory to all nodes of the - cluster. -
-
-
- - -
- The Important Configurations - Below we list the important - configurations. We've divided this section into - required configuration and worth-a-look recommended configs. - - -
Required Configurations - Review the section. - -
- -
Recommended Configurations -
<varname>wms.server.user.program</varname> - The default value is true. When true, the DcsServer will always start the - user program (MXOSRVR) using the commands found in property wms.server.user.program.command. - When false, DcsServer will not execute the commands. This is useful mainly for debugging or isolating - a problem in DCS vs. the user program. - - - To change this configuration, edit wms-site.xml, - copy the changed file around the cluster and restart. -
- -
<varname>wms.server.user.program.command</varname> - The default value is cd ${wms.user.program.home};. sqenv.sh. - This is the command that the DcsServer executes to start the user program (MXOSRVR). - You may enter any additional switches. There is no validation done on the command. It's wise to - test your commands using a shell outside of DCS prior to overriding the value of this property. - - - To change this configuration, edit wms-site.xml, - copy the changed file around the cluster and restart. -
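 A minimal sketch of how these two properties might be overridden in wms-site.xml (the values shown are simply the defaults quoted above; adapt the command to your environment):
 <property>
   <name>wms.server.user.program</name>
   <value>true</value>
 </property>
 <property>
   <name>wms.server.user.program.command</name>
   <value>cd ${wms.user.program.home};. sqenv.sh</value>
 </property>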
- -
- -
- -
diff --git a/wms/src/docbkx/customization.xsl b/wms/src/docbkx/customization.xsl deleted file mode 100644 index d80a2b5a..00000000 --- a/wms/src/docbkx/customization.xsl +++ /dev/null @@ -1,34 +0,0 @@ - - - - - - - - - - - - diff --git a/wms/src/docbkx/external_apis.xml b/wms/src/docbkx/external_apis.xml deleted file mode 100644 index decbdccd..00000000 --- a/wms/src/docbkx/external_apis.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - External APIs - This chapter will cover access to DCS either through non-Java languages, or through custom protocols. - -
- REST -
-
diff --git a/wms/src/docbkx/getting_started.xml b/wms/src/docbkx/getting_started.xml deleted file mode 100644 index bbe3c1c8..00000000 --- a/wms/src/docbkx/getting_started.xml +++ /dev/null @@ -1,140 +0,0 @@ - - - - - Getting Started - -
- Introduction - - will get you up and - running on a single-node instance of DCS. - describes setup - of DCS in a multi-node configuration. -
- -
- Quick Start - - This guide describes setup of a single node DCS instance. - It leads you through creating a configuration, and then starting - up and shutting down your DCS instance. The below exercise - should take no more than ten minutes (not including download time). - -
- Download and unpack the latest release. - - Decompress and untar your download and then change into the - unpacked directory. - - $ tar xzf dcs-.tar.gz -$ cd dcs- - - - - Is <application>java</application> installed? - - The steps in the following sections presume a 1.6 version of Oracle - java is installed on your machine and - available on your path; i.e. when you type - java, you see output that describes the - options the java program takes (DCS requires java 6 or better). If this is not - the case, DCS will not start. Install java, edit - conf/dcs-env.sh, uncommenting the - JAVA_HOME line pointing it to your java install. - - - - Is <application>Trafodion</application> installed and running? - - DCS presumes a Trafodion instance - is installed and running on your machine and available on your path; i.e. the - MY_SQROOT is set and when you type - sqcheck, you see output that confirms - Trafodion is running. If this is not - the case, DCS may start but you'll see many errors in the DcsServer logs - related to user program startup. - - - - At this point, you are ready to start DCS. -
- -
- Start DCS - - Now start DCS:$ bin/start-dcs.sh -localhost: starting zookeeper, logging to /logs/dcs-user-1-zookeeper-hostname.out -localhost: running Zookeeper -starting master, logging to /logs/dcs-user-1-master-hostname.out -localhost: starting server, logging to /logs/dcs-user-1-server-hostname.out - - You should now have a running DCS instance. DCS logs can be found in the - logs subdirectory. Peruse them especially if - DCS had trouble starting. -
- -
- Stopping DCS - - Stop your DCS instance by running the stop script. - - $ ./bin/stop-dcs.sh -localhost: stopping server. -stopping master. -localhost: stopping zookeeper. - -
- -
- Where to go next - - The above described setup is good for testing and - experiments only. Next move on to where we'll go into - depth on the different requirements and critical - configurations needed when setting up a distributed DCS deploy. -
-
- -
diff --git a/wms/src/docbkx/ops_mgt.xml b/wms/src/docbkx/ops_mgt.xml deleted file mode 100644 index fd8c0a7e..00000000 --- a/wms/src/docbkx/ops_mgt.xml +++ /dev/null @@ -1,58 +0,0 @@ - - - - - Operational Management - This chapter will cover operational tools and practices required of a running DCS cluster. - The subject of operations is related to the topics of , , - and but is a distinct topic in itself. - -
- Tools and Utilities - - Here we list tools for administration, analysis, and - debugging. - -
-
diff --git a/wms/src/docbkx/performance.xml b/wms/src/docbkx/performance.xml deleted file mode 100644 index d415f9ab..00000000 --- a/wms/src/docbkx/performance.xml +++ /dev/null @@ -1,74 +0,0 @@ - - - - - Performance Tuning - -
- Operating System -
- Memory - RAM, RAM, RAM. Don't starve DCS. -
-
- 64-bit - Use a 64-bit platform (and 64-bit JVM). -
-
- Swapping - Watch out for swapping. Set swappiness to 0. -
-
- -
- Network -
- -
- ZooKeeper - See for information on configuring ZooKeeper, and see the part - about having a dedicated disk. - -
-
diff --git a/wms/src/docbkx/preface.xml b/wms/src/docbkx/preface.xml deleted file mode 100644 index 2f3139f3..00000000 --- a/wms/src/docbkx/preface.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - Preface - - This is the official reference guide for the DCS version it ships with. - This document describes DCS version . - Herein you will find either the definitive documentation on an DCS topic - as of its standing when the referenced DCS version shipped, or it - will point to the location in javadoc, where - the pertinent information can be found. - - This reference guide is a work in progress. - - diff --git a/wms/src/docbkx/troubleshooting.xml b/wms/src/docbkx/troubleshooting.xml deleted file mode 100644 index 312b664e..00000000 --- a/wms/src/docbkx/troubleshooting.xml +++ /dev/null @@ -1,193 +0,0 @@ - - - - - Troubleshooting and Debugging -
- General Guidelines -
-
- Logs - - The key process logs are as follows... (replace <user> with the user that started the service, <instance> for the server instance and <hostname> for the machine name) - - - DcsMaster: $DCS_HOME/logs/dcs-<user>-<instance>-master-<hostname>.log - - - DcsServer: $DCS_HOME/logs/dcs-<user>-<instance>-server-<hostname>.log - - - ZooKeeper: $DCS_HOME/logs/dcs-<user>-<instance>-zookeeper-<hostname>.log - -
-
- Resources -
-
- Tools -
- Builtin Tools -
- DcsMaster Web Interface - The DcsMaster starts a web-interface on port 40010 by default. - - The DcsMaster web UI lists created DcsServers (e.g., build info, zookeeper quorum, metrics, etc.). Additionally, - the available DcsServers in the cluster are listed along with selected high-level metrics (listenerRequests, listenerCompletedRequests, - totalAvailable/totalConnected/totalConnecting MXOSRVRs, totalHeap, usedHeap, maxHeap, etc). - The DcsMaster web UI allows navigation to each DcsServer's web UI. - -
-
- DcsServer Web Interface - DcsServers start a web interface on port 40030 by default. - - The DcsServer web UI lists its server metrics (build info, zookeeper quorum, usedHeap, maxHeap, etc.). - - See for more information on metric definitions. - -
-
- zkcli - zkcli is a very useful tool for investigating ZooKeeper-related issues. To invoke: - -./dcs zkcli -server host:port <cmd> <args> - - The commands (and arguments) are: - - connect host:port - get path [watch] - ls path [watch] - set path data [version] - delquota [-n|-b] path - quit - printwatches on|off - create [-s] [-e] path data acl - stat path [watch] - close - ls2 path [watch] - history - listquota path - setAcl path acl - getAcl path - sync path - redo cmdno - addauth scheme auth - delete path [version] - setquota -n|-b val path - - -
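 For example, to list the znodes under the root of the ensemble (the host and port are placeholders for one of your quorum members):
 ./dcs zkcli -server host1.example.com:2181 ls /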
-
-
- External Tools -
- tail - - tail is the command line tool that lets you look at the end of a file. Add the “-f” option and it will refresh when new data is available. It’s useful when you are wondering what’s happening, for example, when a cluster is taking a long time to shutdown or startup as you can just fire a new terminal and tail the master log (and maybe a few DcsServers). - -
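 For example, to follow the DcsMaster log using the path layout listed in the Logs section above:
 $ tail -f $DCS_HOME/logs/dcs-<user>-<instance>-master-<hostname>.log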
-
- top - - top is probably one of the most important tool when first trying to see what’s running on a machine and how the resources are consumed. Here’s an example from production system: - -top - 14:46:59 up 39 days, 11:55, 1 user, load average: 3.75, 3.57, 3.84 -Tasks: 309 total, 1 running, 308 sleeping, 0 stopped, 0 zombie -Cpu(s): 4.5%us, 1.6%sy, 0.0%ni, 91.7%id, 1.4%wa, 0.1%hi, 0.6%si, 0.0%st -Mem: 24414432k total, 24296956k used, 117476k free, 7196k buffers -Swap: 16008732k total, 14348k used, 15994384k free, 11106908k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND -15558 hadoop 18 -2 3292m 2.4g 3556 S 79 10.4 6523:52 java -13268 hadoop 18 -2 8967m 8.2g 4104 S 21 35.1 5170:30 java - 8895 hadoop 18 -2 1581m 497m 3420 S 11 2.1 4002:32 java -… - - - - Here we can see that the system load average during the last five minutes is 3.75, which very roughly means that on average 3.75 threads were waiting for CPU time during these 5 minutes. In general, the “perfect” utilization equals to the number of cores, under that number the machine is under utilized and over that the machine is over utilized. This is an important concept, see this article to understand it more: http://www.linuxjournal.com/article/9001. - - - Apart from load, we can see that the system is using almost all its available RAM but most of it is used for the OS cache (which is good). The swap only has a few KBs in it and this is wanted, high numbers would indicate swapping activity which is the nemesis of performance of Java systems. Another way to detect swapping is when the load average goes through the roof (although this could also be caused by things like a dying disk, among others). - - - The list of processes isn’t super useful by default, all we know is that 3 java processes are using about 111% of the CPUs. To know which is which, simply type “c” and each line will be expanded. Typing “1” will give you the detail of how each CPU is used instead of the average for all of them like shown here. - -
-
- jps - - jps is shipped with every JDK and gives the java process ids for the current user (if root, then it gives the ids for all users). Example: - -
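 A hypothetical run on a node hosting the master, a server and ZooKeeper might look roughly like this (the process ids, and the assumption that the daemons show up under the class names DcsMaster and DcsServer, are illustrative only):
 $ jps
 1964 QuorumPeerMain
 2019 DcsMaster
 2154 DcsServer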
-
-
- -
- ZooKeeper -
- Startup Errors -
- Could not find my address: xyz in list of ZooKeeper quorum servers - A ZooKeeper server that wasn't able to start throws that error; xyz is the name of your server. - This is a name lookup problem. DCS tries to start a ZooKeeper server on some machine but that machine isn't able to find itself in the dcs.zookeeper.quorum configuration. - - Use the hostname presented in the error message instead of the value you used. If you have a DNS server, you can set dcs.zookeeper.dns.interface and dcs.zookeeper.dns.nameserver in dcs-site.xml to make sure it resolves to the correct FQDN. - -
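 A minimal sketch of such an override in dcs-site.xml, assuming an interface named eth0 and a nameserver at ns1.example.com (both hypothetical; substitute your own values):
 <property>
   <name>dcs.zookeeper.dns.interface</name>
   <value>eth0</value>
 </property>
 <property>
   <name>dcs.zookeeper.dns.nameserver</name>
   <value>ns1.example.com</value>
 </property>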
- -
-
- ZooKeeper, The Cluster Canary - ZooKeeper is the cluster's "canary in the mineshaft". It'll be the first to notice issues if there are any, so making sure it's happy is the shortcut to a humming cluster. - - - See the ZooKeeper Operating Environment Troubleshooting page. It has suggestions and tools for checking disk and networking performance, i.e. the operating environment your ZooKeeper and DCS are running in. - - Additionally, the utility may help investigate ZooKeeper issues. - -
-
-
diff --git a/wms/src/main/c/INSTALL b/wms/src/main/c/INSTALL deleted file mode 100644 index 5458714e..00000000 --- a/wms/src/main/c/INSTALL +++ /dev/null @@ -1,234 +0,0 @@ -Installation Instructions -************************* - -Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005, -2006 Free Software Foundation, Inc. - -This file is free documentation; the Free Software Foundation gives -unlimited permission to copy, distribute and modify it. - -Basic Installation -================== - -Briefly, the shell commands `./configure; make; make install' should -configure, build, and install this package. The following -more-detailed instructions are generic; see the `README' file for -instructions specific to this package. - - The `configure' shell script attempts to guess correct values for -various system-dependent variables used during compilation. It uses -those values to create a `Makefile' in each directory of the package. -It may also create one or more `.h' files containing system-dependent -definitions. Finally, it creates a shell script `config.status' that -you can run in the future to recreate the current configuration, and a -file `config.log' containing compiler output (useful mainly for -debugging `configure'). - - It can also use an optional file (typically called `config.cache' -and enabled with `--cache-file=config.cache' or simply `-C') that saves -the results of its tests to speed up reconfiguring. Caching is -disabled by default to prevent problems with accidental use of stale -cache files. - - If you need to do unusual things to compile the package, please try -to figure out how `configure' could check whether to do them, and mail -diffs or instructions to the address given in the `README' so they can -be considered for the next release. If you are using the cache, and at -some point `config.cache' contains results you don't want to keep, you -may remove or edit it. - - The file `configure.ac' (or `configure.in') is used to create -`configure' by a program called `autoconf'. You need `configure.ac' if -you want to change it or regenerate `configure' using a newer version -of `autoconf'. - -The simplest way to compile this package is: - - 1. `cd' to the directory containing the package's source code and type - `./configure' to configure the package for your system. - - Running `configure' might take a while. While running, it prints - some messages telling which features it is checking for. - - 2. Type `make' to compile the package. - - 3. Optionally, type `make check' to run any self-tests that come with - the package. - - 4. Type `make install' to install the programs and any data files and - documentation. - - 5. You can remove the program binaries and object files from the - source code directory by typing `make clean'. To also remove the - files that `configure' created (so you can compile the package for - a different kind of computer), type `make distclean'. There is - also a `make maintainer-clean' target, but that is intended mainly - for the package's developers. If you use it, you may have to get - all sorts of other programs in order to regenerate files that came - with the distribution. - -Compilers and Options -===================== - -Some systems require unusual options for compilation or linking that the -`configure' script does not know about. Run `./configure --help' for -details on some of the pertinent environment variables. 
- - You can give `configure' initial values for configuration parameters -by setting variables in the command line or in the environment. Here -is an example: - - ./configure CC=c99 CFLAGS=-g LIBS=-lposix - - *Note Defining Variables::, for more details. - -Compiling For Multiple Architectures -==================================== - -You can compile the package for more than one kind of computer at the -same time, by placing the object files for each architecture in their -own directory. To do this, you can use GNU `make'. `cd' to the -directory where you want the object files and executables to go and run -the `configure' script. `configure' automatically checks for the -source code in the directory that `configure' is in and in `..'. - - With a non-GNU `make', it is safer to compile the package for one -architecture at a time in the source code directory. After you have -installed the package for one architecture, use `make distclean' before -reconfiguring for another architecture. - -Installation Names -================== - -By default, `make install' installs the package's commands under -`/usr/local/bin', include files under `/usr/local/include', etc. You -can specify an installation prefix other than `/usr/local' by giving -`configure' the option `--prefix=PREFIX'. - - You can specify separate installation prefixes for -architecture-specific files and architecture-independent files. If you -pass the option `--exec-prefix=PREFIX' to `configure', the package uses -PREFIX as the prefix for installing programs and libraries. -Documentation and other data files still use the regular prefix. - - In addition, if you use an unusual directory layout you can give -options like `--bindir=DIR' to specify different values for particular -kinds of files. Run `configure --help' for a list of the directories -you can set and what kinds of files go in them. - - If the package supports it, you can cause programs to be installed -with an extra prefix or suffix on their names by giving `configure' the -option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. - -Optional Features -================= - -Some packages pay attention to `--enable-FEATURE' options to -`configure', where FEATURE indicates an optional part of the package. -They may also pay attention to `--with-PACKAGE' options, where PACKAGE -is something like `gnu-as' or `x' (for the X Window System). The -`README' should mention any `--enable-' and `--with-' options that the -package recognizes. - - For packages that use the X Window System, `configure' can usually -find the X include and library files automatically, but if it doesn't, -you can use the `configure' options `--x-includes=DIR' and -`--x-libraries=DIR' to specify their locations. - -Specifying the System Type -========================== - -There may be some features `configure' cannot figure out automatically, -but needs to determine by the type of machine the package will run on. -Usually, assuming the package is built to be run on the _same_ -architectures, `configure' can figure that out, but if it prints a -message saying it cannot guess the machine type, give it the -`--build=TYPE' option. TYPE can either be a short name for the system -type, such as `sun4', or a canonical name which has the form: - - CPU-COMPANY-SYSTEM - -where SYSTEM can have one of these forms: - - OS KERNEL-OS - - See the file `config.sub' for the possible values of each field. If -`config.sub' isn't included in this package, then this package doesn't -need to know the machine type. 
- - If you are _building_ compiler tools for cross-compiling, you should -use the option `--target=TYPE' to select the type of system they will -produce code for. - - If you want to _use_ a cross compiler, that generates code for a -platform different from the build platform, you should specify the -"host" platform (i.e., that on which the generated programs will -eventually be run) with `--host=TYPE'. - -Sharing Defaults -================ - -If you want to set default values for `configure' scripts to share, you -can create a site shell script called `config.site' that gives default -values for variables like `CC', `cache_file', and `prefix'. -`configure' looks for `PREFIX/share/config.site' if it exists, then -`PREFIX/etc/config.site' if it exists. Or, you can set the -`CONFIG_SITE' environment variable to the location of the site script. -A warning: not all `configure' scripts look for a site script. - -Defining Variables -================== - -Variables not defined in a site shell script can be set in the -environment passed to `configure'. However, some packages may run -configure again during the build, and the customized values of these -variables may be lost. In order to avoid this problem, you should set -them in the `configure' command line, using `VAR=value'. For example: - - ./configure CC=/usr/local2/bin/gcc - -causes the specified `gcc' to be used as the C compiler (unless it is -overridden in the site shell script). - -Unfortunately, this technique does not work for `CONFIG_SHELL' due to -an Autoconf bug. Until the bug is fixed you can use this workaround: - - CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash - -`configure' Invocation -====================== - -`configure' recognizes the following options to control how it operates. - -`--help' -`-h' - Print a summary of the options to `configure', and exit. - -`--version' -`-V' - Print the version of Autoconf used to generate the `configure' - script, and exit. - -`--cache-file=FILE' - Enable the cache: use and save the results of the tests in FILE, - traditionally `config.cache'. FILE defaults to `/dev/null' to - disable caching. - -`--config-cache' -`-C' - Alias for `--cache-file=config.cache'. - -`--quiet' -`--silent' -`-q' - Do not print messages saying which checks are being made. To - suppress all normal output, redirect it to `/dev/null' (any error - messages will still be shown). - -`--srcdir=DIR' - Look for the package's source code in directory DIR. Usually - `configure' can determine that directory automatically. - -`configure' also accepts some other, not widely useful, options. Run -`configure --help' for more details. 
- diff --git a/wms/src/main/c/Makefile.am b/wms/src/main/c/Makefile.am deleted file mode 100644 index 3ac4831b..00000000 --- a/wms/src/main/c/Makefile.am +++ /dev/null @@ -1,48 +0,0 @@ -# need this for Doxygen integration -#include $(top_srcdir)/aminclude.am - -AM_CPPFLAGS = -I${srcdir}/include -I${srcdir}/tests -I${srcdir}/generated -AM_CFLAGS = -Wall -Werror -AM_CXXFLAGS = -Wall - -LIB_LDFLAGS = -no-undefined -version-info 2 - -pkginclude_HEADERS = include/Wms.h generated/WmsService_types.h -EXTRA_DIST=LICENSE - -COMMON_SRC = include/GlobalData.h include/GlobalHeader.h include/Wms.h include/WmsException.h include/WmsZookeeper.h \ - src/Wms.cpp src/GlobalData.cpp src/WmsException.cpp src/WmsZookeeper.cpp \ - generated/WmsService_constants.cpp generated/WmsService_constants.h generated/WmsService_types.cpp generated/WmsService_types.h \ - generated/WmsService.cpp generated/WmsService.h - -noinst_LTLIBRARIES = libwms.la -libwms_la_SOURCES = $(COMMON_SRC) -libwms_la_LIBADD = -lm - -lib_LTLIBRARIES = libwms_st.la -libwms_st_la_SOURCES = -libwms_st_la_LIBADD=libwms.la -lzookeeper_mt -lthrift -libwms_st_la_DEPENDENCIES=libwms.la -libwms_st_la_LDFLAGS = $(LIB_LDFLAGS) - -######################################################################### -# build and run unit tests - -EXTRA_DIST+=$(wildcard ${srcdir}/tests/*.cpp) $(wildcard ${srcdir}/tests/*.h) - -TEST_SOURCES = tests/wms_test.cpp - -check_PROGRAMS = wmstest-st -nodist_wmstest_st_SOURCES = $(TEST_SOURCES) -wmstest_st_LDADD = libwms_st.la -wmstest_st_CXXFLAGS = -DUSE_STATIC_LIB -wmstest_st_LDFLAGS = -static-libtool-libs - -run-check: check - ./wmstest-st $(TEST_OPTIONS) - -clean-local: clean-check - $(RM) $(DX_CLEANFILES) - -clean-check: - $(RM) $(nodist_wmstest_st_OBJECTS) $(nodist_wmstest_mt_OBJECTS) diff --git a/wms/src/main/c/configure.ac b/wms/src/main/c/configure.ac deleted file mode 100644 index ba171203..00000000 --- a/wms/src/main/c/configure.ac +++ /dev/null @@ -1,96 +0,0 @@ -# -*- Autoconf -*- -# Process this file with autoconf to produce a configure script. - -AC_PREREQ(2.59) - -AC_INIT([WMS C client],0.1.0,[mattbrown@hp.com],[wms]) -AC_CONFIG_SRCDIR([src/Wms.cpp]) - -# Save initial CFLAGS and CXXFLAGS values before AC_PROG_CC and AC_PROG_CXX -init_cflags="$CFLAGS" -init_cxxflags="$CXXFLAGS" - -# initialize automake -AM_INIT_AUTOMAKE([-Wall foreign]) -AC_CONFIG_HEADER([config.h]) - -AC_PROG_CC -AM_PROG_CC_C_O -AC_PROG_CXX -AC_PROG_INSTALL -AC_PROG_LN_S - -# AC_DISABLE_SHARED -AC_PROG_LIBTOOL - -AC_ARG_ENABLE([debug], - [AS_HELP_STRING([--enable-debug],[enable debug build [default=no]])], - [],[enable_debug=no]) - -if test "x$enable_debug" = xyes; then - if test "x$init_cflags" = x; then - CFLAGS="" - fi - CFLAGS="$CFLAGS -g -O0 -D_GNU_SOURCE" -else - if test "x$init_cflags" = x; then - CFLAGS="-g -O2 -D_GNU_SOURCE" - fi -fi - -if test "x$enable_debug" = xyes; then - if test "x$init_cxxflags" = x; then - CXXFLAGS="" - fi - CXXFLAGS="$CXXFLAGS -g -O0" -else - if test "x$init_cxxflags" = x; then - CXXFLAGS="-g -O2" - fi -fi - -# Checks for libraries. -AC_CHECK_LIB([pthread], [pthread_mutex_lock],[have_pthread=yes],[have_pthread=no]) - -# Checks for header files. -AC_HEADER_STDC -AC_CHECK_HEADERS([arpa/inet.h fcntl.h netdb.h netinet/in.h stdlib.h string.h sys/socket.h sys/time.h unistd.h sys/utsname.h]) - -# Checks for typedefs, structures, and compiler characteristics. 
-AC_C_CONST -AC_C_INLINE -AC_HEADER_TIME -AC_CHECK_TYPE([nfds_t], - [AC_DEFINE([POLL_NFDS_TYPE],[nfds_t],[poll() second argument type])], - [AC_DEFINE([POLL_NFDS_TYPE],[unsigned int],[poll() second argument type])], - [#include ]) - -AC_MSG_CHECKING([whether to enable ipv6]) - -AC_TRY_RUN([ /* is AF_INET6 available? */ -#include -#include -main() -{ - if (socket(AF_INET6, SOCK_STREAM, 0) < 0) - exit(1); - else - exit(0); -} -], AC_MSG_RESULT(yes) - ipv6=yes, - AC_MSG_RESULT(no) - ipv6=no, - AC_MSG_RESULT(no) - ipv6=no) - -if test x"$ipv6" = xyes; then - USEIPV6="-DZOO_IPV6_ENABLED" - AC_SUBST(USEIPV6) -fi - -# Checks for library functions. -AC_CHECK_FUNCS([getcwd gethostbyname gethostname getlogin getpwuid_r gettimeofday getuid memmove memset poll socket strchr strdup strerror strtol]) - -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT diff --git a/wms/src/main/c/generated/WmsAdminService.cpp b/wms/src/main/c/generated/WmsAdminService.cpp deleted file mode 100644 index 06fe7284..00000000 --- a/wms/src/main/c/generated/WmsAdminService.cpp +++ /dev/null @@ -1,2971 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -#include "WmsAdminService.h" - -namespace trafodion { namespace wms { namespace thrift { - -uint32_t WmsAdminService_ping_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->timestamp); - this->__isset.timestamp = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_ping_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_ping_args"); - - xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 1); - xfer += oprot->writeI64(this->timestamp); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_ping_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_ping_pargs"); - - xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 1); - xfer += oprot->writeI64((*(this->timestamp))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_ping_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->success); - 
this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_ping_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_ping_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_I64, 0); - xfer += oprot->writeI64(this->success); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_ping_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64((*(this->success))); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_addStream_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->stream.read(iprot); - this->__isset.stream = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_addStream_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_addStream_args"); - - xfer += oprot->writeFieldBegin("stream", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->stream.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_addStream_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_addStream_pargs"); - - xfer += oprot->writeFieldBegin("stream", 
::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->stream)).write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_addStream_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_addStream_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_addStream_result"); - - if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.ia) { - xfer += oprot->writeFieldBegin("ia", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->ia.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_addStream_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_alterStream_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->stream.read(iprot); - this->__isset.stream = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += 
iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_alterStream_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_alterStream_args"); - - xfer += oprot->writeFieldBegin("stream", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->stream.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_alterStream_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_alterStream_pargs"); - - xfer += oprot->writeFieldBegin("stream", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->stream)).write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_alterStream_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_alterStream_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_alterStream_result"); - - if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.ia) { - xfer += oprot->writeFieldBegin("ia", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->ia.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_alterStream_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t 
WmsAdminService_deleteStream_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->stream.read(iprot); - this->__isset.stream = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_deleteStream_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_deleteStream_args"); - - xfer += oprot->writeFieldBegin("stream", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->stream.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_deleteStream_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_deleteStream_pargs"); - - xfer += oprot->writeFieldBegin("stream", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->stream)).write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_deleteStream_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_deleteStream_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_deleteStream_result"); - - if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.ia) { - xfer += oprot->writeFieldBegin("ia", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->ia.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_deleteStream_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += 
iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_stream_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - xfer += iprot->skip(ftype); - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_stream_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_stream_args"); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_stream_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_stream_pargs"); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_stream_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_stream_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_stream_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t 
WmsAdminService_stream_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_addRule_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rule.read(iprot); - this->__isset.rule = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_addRule_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_addRule_args"); - - xfer += oprot->writeFieldBegin("rule", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rule.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_addRule_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_addRule_pargs"); - - xfer += oprot->writeFieldBegin("rule", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rule)).write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_addRule_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += 
iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_addRule_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_addRule_result"); - - if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.ia) { - xfer += oprot->writeFieldBegin("ia", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->ia.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_addRule_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_alterRule_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rule.read(iprot); - this->__isset.rule = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_alterRule_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_alterRule_args"); - - xfer += oprot->writeFieldBegin("rule", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rule.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_alterRule_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_alterRule_pargs"); - - xfer += oprot->writeFieldBegin("rule", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rule)).write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_alterRule_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += 
iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_alterRule_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_alterRule_result"); - - if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.ia) { - xfer += oprot->writeFieldBegin("ia", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->ia.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_alterRule_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_deleteRule_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->rule.read(iprot); - this->__isset.rule = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_deleteRule_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_deleteRule_args"); - - xfer += oprot->writeFieldBegin("rule", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->rule.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer 
+= oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_deleteRule_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_deleteRule_pargs"); - - xfer += oprot->writeFieldBegin("rule", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->rule)).write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_deleteRule_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_deleteRule_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_deleteRule_result"); - - if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.ia) { - xfer += oprot->writeFieldBegin("ia", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->ia.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_deleteRule_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_rule_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - xfer += iprot->skip(ftype); - xfer += 
iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_rule_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_rule_args"); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_rule_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_rule_pargs"); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_rule_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_rule_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_rule_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_rule_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_workload_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - 
{ - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - xfer += iprot->skip(ftype); - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_workload_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_workload_args"); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_workload_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsAdminService_workload_pargs"); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_workload_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsAdminService_workload_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsAdminService_workload_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsAdminService_workload_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -int64_t WmsAdminServiceClient::ping(const int64_t timestamp) -{ - send_ping(timestamp); - return recv_ping(); -} - -void 
WmsAdminServiceClient::send_ping(const int64_t timestamp)
-{
-  int32_t cseqid = 0;
-  oprot_->writeMessageBegin("ping", ::apache::thrift::protocol::T_CALL, cseqid);
-
-  WmsAdminService_ping_pargs args;
-  args.timestamp = &timestamp;
-  args.write(oprot_);
-
-  oprot_->writeMessageEnd();
-  oprot_->getTransport()->writeEnd();
-  oprot_->getTransport()->flush();
-}
-
-int64_t WmsAdminServiceClient::recv_ping()
-{
-
-  int32_t rseqid = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TMessageType mtype;
-
-  iprot_->readMessageBegin(fname, mtype, rseqid);
-  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
-    ::apache::thrift::TApplicationException x;
-    x.read(iprot_);
-    iprot_->readMessageEnd();
-    iprot_->getTransport()->readEnd();
-    throw x;
-  }
-  if (mtype != ::apache::thrift::protocol::T_REPLY) {
-    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
-    iprot_->readMessageEnd();
-    iprot_->getTransport()->readEnd();
-  }
-  if (fname.compare("ping") != 0) {
-    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
-    iprot_->readMessageEnd();
-    iprot_->getTransport()->readEnd();
-  }
-  int64_t _return;
-  WmsAdminService_ping_presult result;
-  result.success = &_return;
-  result.read(iprot_);
-  iprot_->readMessageEnd();
-  iprot_->getTransport()->readEnd();
-
-  if (result.__isset.success) {
-    return _return;
-  }
-  if (result.__isset.io) {
-    throw result.io;
-  }
-  throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "ping failed: unknown result");
-}
-
-void WmsAdminServiceClient::addStream(const Stream& stream)
-{
-  send_addStream(stream);
-  recv_addStream();
-}
-
-void WmsAdminServiceClient::send_addStream(const Stream& stream)
-{
-  int32_t cseqid = 0;
-  oprot_->writeMessageBegin("addStream", ::apache::thrift::protocol::T_CALL, cseqid);
-
-  WmsAdminService_addStream_pargs args;
-  args.stream = &stream;
-  args.write(oprot_);
-
-  oprot_->writeMessageEnd();
-  oprot_->getTransport()->writeEnd();
-  oprot_->getTransport()->flush();
-}
-
-void WmsAdminServiceClient::recv_addStream()
-{
-
-  int32_t rseqid = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TMessageType mtype;
-
-  iprot_->readMessageBegin(fname, mtype, rseqid);
-  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
-    ::apache::thrift::TApplicationException x;
-    x.read(iprot_);
-    iprot_->readMessageEnd();
-    iprot_->getTransport()->readEnd();
-    throw x;
-  }
-  if (mtype != ::apache::thrift::protocol::T_REPLY) {
-    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
-    iprot_->readMessageEnd();
-    iprot_->getTransport()->readEnd();
-  }
-  if (fname.compare("addStream") != 0) {
-    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
-    iprot_->readMessageEnd();
-    iprot_->getTransport()->readEnd();
-  }
-  WmsAdminService_addStream_presult result;
-  result.read(iprot_);
-  iprot_->readMessageEnd();
-  iprot_->getTransport()->readEnd();
-
-  if (result.__isset.io) {
-    throw result.io;
-  }
-  if (result.__isset.ia) {
-    throw result.ia;
-  }
-  return;
-}
-
-void WmsAdminServiceClient::alterStream(const Stream& stream)
-{
-  send_alterStream(stream);
-  recv_alterStream();
-}
-
-void WmsAdminServiceClient::send_alterStream(const Stream& stream)
-{
-  int32_t cseqid = 0;
-  oprot_->writeMessageBegin("alterStream", ::apache::thrift::protocol::T_CALL, cseqid);
-
-  WmsAdminService_alterStream_pargs args;
-  args.stream = &stream;
-  args.write(oprot_);
-
-  oprot_->writeMessageEnd();
-  oprot_->getTransport()->writeEnd();
-  oprot_->getTransport()->flush();
-}
-
-void WmsAdminServiceClient::recv_alterStream()
-{
-
-  int32_t
rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("alterStream") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - WmsAdminService_alterStream_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.io) { - throw result.io; - } - if (result.__isset.ia) { - throw result.ia; - } - return; -} - -void WmsAdminServiceClient::deleteStream(const Stream& stream) -{ - send_deleteStream(stream); - recv_deleteStream(); -} - -void WmsAdminServiceClient::send_deleteStream(const Stream& stream) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("deleteStream", ::apache::thrift::protocol::T_CALL, cseqid); - - WmsAdminService_deleteStream_pargs args; - args.stream = &stream; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} - -void WmsAdminServiceClient::recv_deleteStream() -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("deleteStream") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - WmsAdminService_deleteStream_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.io) { - throw result.io; - } - if (result.__isset.ia) { - throw result.ia; - } - return; -} - -void WmsAdminServiceClient::stream(StreamResponse& _return) -{ - send_stream(); - recv_stream(_return); -} - -void WmsAdminServiceClient::send_stream() -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("stream", ::apache::thrift::protocol::T_CALL, cseqid); - - WmsAdminService_stream_pargs args; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} - -void WmsAdminServiceClient::recv_stream(StreamResponse& _return) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("stream") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - 
iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - WmsAdminService_stream_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.io) { - throw result.io; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "stream failed: unknown result"); -} - -void WmsAdminServiceClient::addRule(const Rule& rule) -{ - send_addRule(rule); - recv_addRule(); -} - -void WmsAdminServiceClient::send_addRule(const Rule& rule) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("addRule", ::apache::thrift::protocol::T_CALL, cseqid); - - WmsAdminService_addRule_pargs args; - args.rule = &rule; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} - -void WmsAdminServiceClient::recv_addRule() -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("addRule") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - WmsAdminService_addRule_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.io) { - throw result.io; - } - if (result.__isset.ia) { - throw result.ia; - } - return; -} - -void WmsAdminServiceClient::alterRule(const Rule& rule) -{ - send_alterRule(rule); - recv_alterRule(); -} - -void WmsAdminServiceClient::send_alterRule(const Rule& rule) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("alterRule", ::apache::thrift::protocol::T_CALL, cseqid); - - WmsAdminService_alterRule_pargs args; - args.rule = &rule; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} - -void WmsAdminServiceClient::recv_alterRule() -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("alterRule") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - WmsAdminService_alterRule_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.io) { - throw result.io; - } - if (result.__isset.ia) { - throw result.ia; - } - return; -} - -void WmsAdminServiceClient::deleteRule(const Rule& rule) -{ - send_deleteRule(rule); - recv_deleteRule(); -} - -void 
WmsAdminServiceClient::send_deleteRule(const Rule& rule) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("deleteRule", ::apache::thrift::protocol::T_CALL, cseqid); - - WmsAdminService_deleteRule_pargs args; - args.rule = &rule; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} - -void WmsAdminServiceClient::recv_deleteRule() -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("deleteRule") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - WmsAdminService_deleteRule_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.io) { - throw result.io; - } - if (result.__isset.ia) { - throw result.ia; - } - return; -} - -void WmsAdminServiceClient::rule(RuleResponse& _return) -{ - send_rule(); - recv_rule(_return); -} - -void WmsAdminServiceClient::send_rule() -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("rule", ::apache::thrift::protocol::T_CALL, cseqid); - - WmsAdminService_rule_pargs args; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} - -void WmsAdminServiceClient::recv_rule(RuleResponse& _return) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("rule") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - WmsAdminService_rule_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.io) { - throw result.io; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "rule failed: unknown result"); -} - -void WmsAdminServiceClient::workload(WorkloadResponse& _return) -{ - send_workload(); - recv_workload(_return); -} - -void WmsAdminServiceClient::send_workload() -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("workload", ::apache::thrift::protocol::T_CALL, cseqid); - - WmsAdminService_workload_pargs args; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} - -void WmsAdminServiceClient::recv_workload(WorkloadResponse& _return) -{ - - int32_t rseqid = 0; - std::string fname; - 
::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("workload") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - WmsAdminService_workload_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.io) { - throw result.io; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "workload failed: unknown result"); -} - -bool WmsAdminServiceProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) { - ProcessMap::iterator pfn; - pfn = processMap_.find(fname); - if (pfn == processMap_.end()) { - iprot->skip(::apache::thrift::protocol::T_STRUCT); - iprot->readMessageEnd(); - iprot->getTransport()->readEnd(); - ::apache::thrift::TApplicationException x(::apache::thrift::TApplicationException::UNKNOWN_METHOD, "Invalid method name: '"+fname+"'"); - oprot->writeMessageBegin(fname, ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return true; - } - (this->*(pfn->second))(seqid, iprot, oprot, callContext); - return true; -} - -void WmsAdminServiceProcessor::process_ping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.ping", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.ping"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.ping"); - } - - WmsAdminService_ping_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.ping", bytes); - } - - WmsAdminService_ping_result result; - try { - result.success = iface_->ping(args.timestamp); - result.__isset.success = true; - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.ping"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("ping", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.ping"); - } - - oprot->writeMessageBegin("ping", ::apache::thrift::protocol::T_REPLY, seqid); - 
result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.ping", bytes); - } -} - -void WmsAdminServiceProcessor::process_addStream(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.addStream", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.addStream"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.addStream"); - } - - WmsAdminService_addStream_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.addStream", bytes); - } - - WmsAdminService_addStream_result result; - try { - iface_->addStream(args.stream); - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (IllegalArgument &ia) { - result.ia = ia; - result.__isset.ia = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.addStream"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("addStream", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.addStream"); - } - - oprot->writeMessageBegin("addStream", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.addStream", bytes); - } -} - -void WmsAdminServiceProcessor::process_alterStream(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.alterStream", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.alterStream"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.alterStream"); - } - - WmsAdminService_alterStream_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.alterStream", bytes); - } - - WmsAdminService_alterStream_result result; - try { - iface_->alterStream(args.stream); - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (IllegalArgument &ia) { - result.ia = ia; - result.__isset.ia = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.alterStream"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alterStream", 
::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.alterStream"); - } - - oprot->writeMessageBegin("alterStream", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.alterStream", bytes); - } -} - -void WmsAdminServiceProcessor::process_deleteStream(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.deleteStream", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.deleteStream"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.deleteStream"); - } - - WmsAdminService_deleteStream_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.deleteStream", bytes); - } - - WmsAdminService_deleteStream_result result; - try { - iface_->deleteStream(args.stream); - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (IllegalArgument &ia) { - result.ia = ia; - result.__isset.ia = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.deleteStream"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("deleteStream", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.deleteStream"); - } - - oprot->writeMessageBegin("deleteStream", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.deleteStream", bytes); - } -} - -void WmsAdminServiceProcessor::process_stream(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.stream", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.stream"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.stream"); - } - - WmsAdminService_stream_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.stream", bytes); - } - - WmsAdminService_stream_result result; - try { - iface_->stream(result.success); - result.__isset.success = 
true; - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.stream"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("stream", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.stream"); - } - - oprot->writeMessageBegin("stream", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.stream", bytes); - } -} - -void WmsAdminServiceProcessor::process_addRule(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.addRule", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.addRule"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.addRule"); - } - - WmsAdminService_addRule_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.addRule", bytes); - } - - WmsAdminService_addRule_result result; - try { - iface_->addRule(args.rule); - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (IllegalArgument &ia) { - result.ia = ia; - result.__isset.ia = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.addRule"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("addRule", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.addRule"); - } - - oprot->writeMessageBegin("addRule", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.addRule", bytes); - } -} - -void WmsAdminServiceProcessor::process_alterRule(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.alterRule", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.alterRule"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.alterRule"); - } - - WmsAdminService_alterRule_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = 
iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.alterRule", bytes); - } - - WmsAdminService_alterRule_result result; - try { - iface_->alterRule(args.rule); - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (IllegalArgument &ia) { - result.ia = ia; - result.__isset.ia = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.alterRule"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("alterRule", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.alterRule"); - } - - oprot->writeMessageBegin("alterRule", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.alterRule", bytes); - } -} - -void WmsAdminServiceProcessor::process_deleteRule(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.deleteRule", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.deleteRule"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.deleteRule"); - } - - WmsAdminService_deleteRule_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.deleteRule", bytes); - } - - WmsAdminService_deleteRule_result result; - try { - iface_->deleteRule(args.rule); - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (IllegalArgument &ia) { - result.ia = ia; - result.__isset.ia = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.deleteRule"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("deleteRule", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.deleteRule"); - } - - oprot->writeMessageBegin("deleteRule", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.deleteRule", bytes); - } -} - -void WmsAdminServiceProcessor::process_rule(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.rule", 
callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.rule"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.rule"); - } - - WmsAdminService_rule_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.rule", bytes); - } - - WmsAdminService_rule_result result; - try { - iface_->rule(result.success); - result.__isset.success = true; - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.rule"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("rule", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.rule"); - } - - oprot->writeMessageBegin("rule", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.rule", bytes); - } -} - -void WmsAdminServiceProcessor::process_workload(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsAdminService.workload", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsAdminService.workload"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsAdminService.workload"); - } - - WmsAdminService_workload_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsAdminService.workload", bytes); - } - - WmsAdminService_workload_result result; - try { - iface_->workload(result.success); - result.__isset.success = true; - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsAdminService.workload"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("workload", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsAdminService.workload"); - } - - oprot->writeMessageBegin("workload", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsAdminService.workload", bytes); - } -} - -::boost::shared_ptr< ::apache::thrift::TProcessor > WmsAdminServiceProcessorFactory::getProcessor(const 
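Each of the process_* methods above follows the same generated template: read the call's _args struct from the input protocol, invoke the handler through iface_, map the declared exceptions (IOError, IllegalArgument) into __isset flags on the _result struct, turn anything else into a TApplicationException, and write the reply. The repeated eventHandler_ checks are hook points for an optional apache::thrift::TProcessorEventHandler. A minimal sketch of such a hook, assuming only the standard Thrift C++ API (the LoggingEventHandler class itself is illustrative and not part of this patch):

    #include <thrift/TProcessor.h>
    #include <cstdio>

    // Illustrative only: logs every RPC as it passes through a generated processor.
    class LoggingEventHandler : public apache::thrift::TProcessorEventHandler {
     public:
      void preRead(void* /*ctx*/, const char* fn_name) {
        std::fprintf(stderr, "--> %s\n", fn_name);
      }
      void postWrite(void* /*ctx*/, const char* fn_name, uint32_t bytes) {
        std::fprintf(stderr, "<-- %s (%lu reply bytes)\n",
                     fn_name, static_cast<unsigned long>(bytes));
      }
      void handlerError(void* /*ctx*/, const char* fn_name) {
        std::fprintf(stderr, "!! undeclared exception in %s\n", fn_name);
      }
    };

    // Attached to the processor before serving, e.g.:
    //   processor->setEventHandler(boost::shared_ptr<apache::thrift::TProcessorEventHandler>(
    //       new LoggingEventHandler()));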
::apache::thrift::TConnectionInfo& connInfo) { - ::apache::thrift::ReleaseHandler< WmsAdminServiceIfFactory > cleanup(handlerFactory_); - ::boost::shared_ptr< WmsAdminServiceIf > handler(handlerFactory_->getHandler(connInfo), cleanup); - ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new WmsAdminServiceProcessor(handler)); - return processor; -} -}}} // namespace - diff --git a/wms/src/main/c/generated/WmsAdminService.h b/wms/src/main/c/generated/WmsAdminService.h deleted file mode 100644 index f8274fb5..00000000 --- a/wms/src/main/c/generated/WmsAdminService.h +++ /dev/null @@ -1,1441 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -#ifndef WmsAdminService_H -#define WmsAdminService_H - -#include -#include "WmsService_types.h" - -namespace trafodion { namespace wms { namespace thrift { - -class WmsAdminServiceIf { - public: - virtual ~WmsAdminServiceIf() {} - virtual int64_t ping(const int64_t timestamp) = 0; - virtual void addStream(const Stream& stream) = 0; - virtual void alterStream(const Stream& stream) = 0; - virtual void deleteStream(const Stream& stream) = 0; - virtual void stream(StreamResponse& _return) = 0; - virtual void addRule(const Rule& rule) = 0; - virtual void alterRule(const Rule& rule) = 0; - virtual void deleteRule(const Rule& rule) = 0; - virtual void rule(RuleResponse& _return) = 0; - virtual void workload(WorkloadResponse& _return) = 0; -}; - -class WmsAdminServiceIfFactory { - public: - typedef WmsAdminServiceIf Handler; - - virtual ~WmsAdminServiceIfFactory() {} - - virtual WmsAdminServiceIf* getHandler(const ::apache::thrift::TConnectionInfo& connInfo) = 0; - virtual void releaseHandler(WmsAdminServiceIf* /* handler */) = 0; -}; - -class WmsAdminServiceIfSingletonFactory : virtual public WmsAdminServiceIfFactory { - public: - WmsAdminServiceIfSingletonFactory(const boost::shared_ptr& iface) : iface_(iface) {} - virtual ~WmsAdminServiceIfSingletonFactory() {} - - virtual WmsAdminServiceIf* getHandler(const ::apache::thrift::TConnectionInfo&) { - return iface_.get(); - } - virtual void releaseHandler(WmsAdminServiceIf* /* handler */) {} - - protected: - boost::shared_ptr iface_; -}; - -class WmsAdminServiceNull : virtual public WmsAdminServiceIf { - public: - virtual ~WmsAdminServiceNull() {} - int64_t ping(const int64_t /* timestamp */) { - int64_t _return = 0; - return _return; - } - void addStream(const Stream& /* stream */) { - return; - } - void alterStream(const Stream& /* stream */) { - return; - } - void deleteStream(const Stream& /* stream */) { - return; - } - void stream(StreamResponse& /* _return */) { - return; - } - void addRule(const Rule& /* rule */) { - return; - } - void alterRule(const Rule& /* rule */) { - return; - } - void deleteRule(const Rule& /* rule */) { - return; - } - void rule(RuleResponse& /* _return */) { - return; - } - void workload(WorkloadResponse& /* _return */) { - return; - } -}; - -typedef struct _WmsAdminService_ping_args__isset { - _WmsAdminService_ping_args__isset() : timestamp(false) {} - bool timestamp; -} _WmsAdminService_ping_args__isset; - -class WmsAdminService_ping_args { - public: - - WmsAdminService_ping_args() : timestamp(0) { - } - - virtual ~WmsAdminService_ping_args() throw() {} - - int64_t timestamp; - - _WmsAdminService_ping_args__isset __isset; - - void __set_timestamp(const int64_t val) { - timestamp = val; - } - - bool operator == (const WmsAdminService_ping_args & rhs) const - { - if 
(!(timestamp == rhs.timestamp)) - return false; - return true; - } - bool operator != (const WmsAdminService_ping_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_ping_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_ping_pargs { - public: - - - virtual ~WmsAdminService_ping_pargs() throw() {} - - const int64_t* timestamp; - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_ping_result__isset { - _WmsAdminService_ping_result__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsAdminService_ping_result__isset; - -class WmsAdminService_ping_result { - public: - - WmsAdminService_ping_result() : success(0) { - } - - virtual ~WmsAdminService_ping_result() throw() {} - - int64_t success; - IOError io; - - _WmsAdminService_ping_result__isset __isset; - - void __set_success(const int64_t val) { - success = val; - } - - void __set_io(const IOError& val) { - io = val; - } - - bool operator == (const WmsAdminService_ping_result & rhs) const - { - if (!(success == rhs.success)) - return false; - if (!(io == rhs.io)) - return false; - return true; - } - bool operator != (const WmsAdminService_ping_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_ping_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_ping_presult__isset { - _WmsAdminService_ping_presult__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsAdminService_ping_presult__isset; - -class WmsAdminService_ping_presult { - public: - - - virtual ~WmsAdminService_ping_presult() throw() {} - - int64_t* success; - IOError io; - - _WmsAdminService_ping_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - -typedef struct _WmsAdminService_addStream_args__isset { - _WmsAdminService_addStream_args__isset() : stream(false) {} - bool stream; -} _WmsAdminService_addStream_args__isset; - -class WmsAdminService_addStream_args { - public: - - WmsAdminService_addStream_args() { - } - - virtual ~WmsAdminService_addStream_args() throw() {} - - Stream stream; - - _WmsAdminService_addStream_args__isset __isset; - - void __set_stream(const Stream& val) { - stream = val; - } - - bool operator == (const WmsAdminService_addStream_args & rhs) const - { - if (!(stream == rhs.stream)) - return false; - return true; - } - bool operator != (const WmsAdminService_addStream_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_addStream_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_addStream_pargs { - public: - - - virtual ~WmsAdminService_addStream_pargs() throw() {} - - const Stream* stream; - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_addStream_result__isset { - _WmsAdminService_addStream_result__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_addStream_result__isset; - -class WmsAdminService_addStream_result { - public: - - WmsAdminService_addStream_result() { - } - - virtual 
~WmsAdminService_addStream_result() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_addStream_result__isset __isset; - - void __set_io(const IOError& val) { - io = val; - } - - void __set_ia(const IllegalArgument& val) { - ia = val; - } - - bool operator == (const WmsAdminService_addStream_result & rhs) const - { - if (!(io == rhs.io)) - return false; - if (!(ia == rhs.ia)) - return false; - return true; - } - bool operator != (const WmsAdminService_addStream_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_addStream_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_addStream_presult__isset { - _WmsAdminService_addStream_presult__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_addStream_presult__isset; - -class WmsAdminService_addStream_presult { - public: - - - virtual ~WmsAdminService_addStream_presult() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_addStream_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - -typedef struct _WmsAdminService_alterStream_args__isset { - _WmsAdminService_alterStream_args__isset() : stream(false) {} - bool stream; -} _WmsAdminService_alterStream_args__isset; - -class WmsAdminService_alterStream_args { - public: - - WmsAdminService_alterStream_args() { - } - - virtual ~WmsAdminService_alterStream_args() throw() {} - - Stream stream; - - _WmsAdminService_alterStream_args__isset __isset; - - void __set_stream(const Stream& val) { - stream = val; - } - - bool operator == (const WmsAdminService_alterStream_args & rhs) const - { - if (!(stream == rhs.stream)) - return false; - return true; - } - bool operator != (const WmsAdminService_alterStream_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_alterStream_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_alterStream_pargs { - public: - - - virtual ~WmsAdminService_alterStream_pargs() throw() {} - - const Stream* stream; - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_alterStream_result__isset { - _WmsAdminService_alterStream_result__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_alterStream_result__isset; - -class WmsAdminService_alterStream_result { - public: - - WmsAdminService_alterStream_result() { - } - - virtual ~WmsAdminService_alterStream_result() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_alterStream_result__isset __isset; - - void __set_io(const IOError& val) { - io = val; - } - - void __set_ia(const IllegalArgument& val) { - ia = val; - } - - bool operator == (const WmsAdminService_alterStream_result & rhs) const - { - if (!(io == rhs.io)) - return false; - if (!(ia == rhs.ia)) - return false; - return true; - } - bool operator != (const WmsAdminService_alterStream_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_alterStream_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_alterStream_presult__isset { - 
_WmsAdminService_alterStream_presult__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_alterStream_presult__isset; - -class WmsAdminService_alterStream_presult { - public: - - - virtual ~WmsAdminService_alterStream_presult() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_alterStream_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - -typedef struct _WmsAdminService_deleteStream_args__isset { - _WmsAdminService_deleteStream_args__isset() : stream(false) {} - bool stream; -} _WmsAdminService_deleteStream_args__isset; - -class WmsAdminService_deleteStream_args { - public: - - WmsAdminService_deleteStream_args() { - } - - virtual ~WmsAdminService_deleteStream_args() throw() {} - - Stream stream; - - _WmsAdminService_deleteStream_args__isset __isset; - - void __set_stream(const Stream& val) { - stream = val; - } - - bool operator == (const WmsAdminService_deleteStream_args & rhs) const - { - if (!(stream == rhs.stream)) - return false; - return true; - } - bool operator != (const WmsAdminService_deleteStream_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_deleteStream_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_deleteStream_pargs { - public: - - - virtual ~WmsAdminService_deleteStream_pargs() throw() {} - - const Stream* stream; - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_deleteStream_result__isset { - _WmsAdminService_deleteStream_result__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_deleteStream_result__isset; - -class WmsAdminService_deleteStream_result { - public: - - WmsAdminService_deleteStream_result() { - } - - virtual ~WmsAdminService_deleteStream_result() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_deleteStream_result__isset __isset; - - void __set_io(const IOError& val) { - io = val; - } - - void __set_ia(const IllegalArgument& val) { - ia = val; - } - - bool operator == (const WmsAdminService_deleteStream_result & rhs) const - { - if (!(io == rhs.io)) - return false; - if (!(ia == rhs.ia)) - return false; - return true; - } - bool operator != (const WmsAdminService_deleteStream_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_deleteStream_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_deleteStream_presult__isset { - _WmsAdminService_deleteStream_presult__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_deleteStream_presult__isset; - -class WmsAdminService_deleteStream_presult { - public: - - - virtual ~WmsAdminService_deleteStream_presult() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_deleteStream_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - - -class WmsAdminService_stream_args { - public: - - WmsAdminService_stream_args() { - } - - virtual ~WmsAdminService_stream_args() throw() {} - - - bool operator == (const WmsAdminService_stream_args & /* rhs */) const - { - return true; - } - bool operator != (const WmsAdminService_stream_args &rhs) const { - return !(*this == rhs); - } - - bool operator < 
(const WmsAdminService_stream_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_stream_pargs { - public: - - - virtual ~WmsAdminService_stream_pargs() throw() {} - - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_stream_result__isset { - _WmsAdminService_stream_result__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsAdminService_stream_result__isset; - -class WmsAdminService_stream_result { - public: - - WmsAdminService_stream_result() { - } - - virtual ~WmsAdminService_stream_result() throw() {} - - StreamResponse success; - IOError io; - - _WmsAdminService_stream_result__isset __isset; - - void __set_success(const StreamResponse& val) { - success = val; - } - - void __set_io(const IOError& val) { - io = val; - } - - bool operator == (const WmsAdminService_stream_result & rhs) const - { - if (!(success == rhs.success)) - return false; - if (!(io == rhs.io)) - return false; - return true; - } - bool operator != (const WmsAdminService_stream_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_stream_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_stream_presult__isset { - _WmsAdminService_stream_presult__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsAdminService_stream_presult__isset; - -class WmsAdminService_stream_presult { - public: - - - virtual ~WmsAdminService_stream_presult() throw() {} - - StreamResponse* success; - IOError io; - - _WmsAdminService_stream_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - -typedef struct _WmsAdminService_addRule_args__isset { - _WmsAdminService_addRule_args__isset() : rule(false) {} - bool rule; -} _WmsAdminService_addRule_args__isset; - -class WmsAdminService_addRule_args { - public: - - WmsAdminService_addRule_args() { - } - - virtual ~WmsAdminService_addRule_args() throw() {} - - Rule rule; - - _WmsAdminService_addRule_args__isset __isset; - - void __set_rule(const Rule& val) { - rule = val; - } - - bool operator == (const WmsAdminService_addRule_args & rhs) const - { - if (!(rule == rhs.rule)) - return false; - return true; - } - bool operator != (const WmsAdminService_addRule_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_addRule_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_addRule_pargs { - public: - - - virtual ~WmsAdminService_addRule_pargs() throw() {} - - const Rule* rule; - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_addRule_result__isset { - _WmsAdminService_addRule_result__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_addRule_result__isset; - -class WmsAdminService_addRule_result { - public: - - WmsAdminService_addRule_result() { - } - - virtual ~WmsAdminService_addRule_result() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_addRule_result__isset __isset; - - void __set_io(const IOError& val) { - io = val; - } - - void __set_ia(const IllegalArgument& val) { - ia = 
val; - } - - bool operator == (const WmsAdminService_addRule_result & rhs) const - { - if (!(io == rhs.io)) - return false; - if (!(ia == rhs.ia)) - return false; - return true; - } - bool operator != (const WmsAdminService_addRule_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_addRule_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_addRule_presult__isset { - _WmsAdminService_addRule_presult__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_addRule_presult__isset; - -class WmsAdminService_addRule_presult { - public: - - - virtual ~WmsAdminService_addRule_presult() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_addRule_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - -typedef struct _WmsAdminService_alterRule_args__isset { - _WmsAdminService_alterRule_args__isset() : rule(false) {} - bool rule; -} _WmsAdminService_alterRule_args__isset; - -class WmsAdminService_alterRule_args { - public: - - WmsAdminService_alterRule_args() { - } - - virtual ~WmsAdminService_alterRule_args() throw() {} - - Rule rule; - - _WmsAdminService_alterRule_args__isset __isset; - - void __set_rule(const Rule& val) { - rule = val; - } - - bool operator == (const WmsAdminService_alterRule_args & rhs) const - { - if (!(rule == rhs.rule)) - return false; - return true; - } - bool operator != (const WmsAdminService_alterRule_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_alterRule_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_alterRule_pargs { - public: - - - virtual ~WmsAdminService_alterRule_pargs() throw() {} - - const Rule* rule; - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_alterRule_result__isset { - _WmsAdminService_alterRule_result__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_alterRule_result__isset; - -class WmsAdminService_alterRule_result { - public: - - WmsAdminService_alterRule_result() { - } - - virtual ~WmsAdminService_alterRule_result() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_alterRule_result__isset __isset; - - void __set_io(const IOError& val) { - io = val; - } - - void __set_ia(const IllegalArgument& val) { - ia = val; - } - - bool operator == (const WmsAdminService_alterRule_result & rhs) const - { - if (!(io == rhs.io)) - return false; - if (!(ia == rhs.ia)) - return false; - return true; - } - bool operator != (const WmsAdminService_alterRule_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_alterRule_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_alterRule_presult__isset { - _WmsAdminService_alterRule_presult__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_alterRule_presult__isset; - -class WmsAdminService_alterRule_presult { - public: - - - virtual ~WmsAdminService_alterRule_presult() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_alterRule_presult__isset __isset; - - 
uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - -typedef struct _WmsAdminService_deleteRule_args__isset { - _WmsAdminService_deleteRule_args__isset() : rule(false) {} - bool rule; -} _WmsAdminService_deleteRule_args__isset; - -class WmsAdminService_deleteRule_args { - public: - - WmsAdminService_deleteRule_args() { - } - - virtual ~WmsAdminService_deleteRule_args() throw() {} - - Rule rule; - - _WmsAdminService_deleteRule_args__isset __isset; - - void __set_rule(const Rule& val) { - rule = val; - } - - bool operator == (const WmsAdminService_deleteRule_args & rhs) const - { - if (!(rule == rhs.rule)) - return false; - return true; - } - bool operator != (const WmsAdminService_deleteRule_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_deleteRule_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_deleteRule_pargs { - public: - - - virtual ~WmsAdminService_deleteRule_pargs() throw() {} - - const Rule* rule; - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_deleteRule_result__isset { - _WmsAdminService_deleteRule_result__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_deleteRule_result__isset; - -class WmsAdminService_deleteRule_result { - public: - - WmsAdminService_deleteRule_result() { - } - - virtual ~WmsAdminService_deleteRule_result() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_deleteRule_result__isset __isset; - - void __set_io(const IOError& val) { - io = val; - } - - void __set_ia(const IllegalArgument& val) { - ia = val; - } - - bool operator == (const WmsAdminService_deleteRule_result & rhs) const - { - if (!(io == rhs.io)) - return false; - if (!(ia == rhs.ia)) - return false; - return true; - } - bool operator != (const WmsAdminService_deleteRule_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_deleteRule_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_deleteRule_presult__isset { - _WmsAdminService_deleteRule_presult__isset() : io(false), ia(false) {} - bool io; - bool ia; -} _WmsAdminService_deleteRule_presult__isset; - -class WmsAdminService_deleteRule_presult { - public: - - - virtual ~WmsAdminService_deleteRule_presult() throw() {} - - IOError io; - IllegalArgument ia; - - _WmsAdminService_deleteRule_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - - -class WmsAdminService_rule_args { - public: - - WmsAdminService_rule_args() { - } - - virtual ~WmsAdminService_rule_args() throw() {} - - - bool operator == (const WmsAdminService_rule_args & /* rhs */) const - { - return true; - } - bool operator != (const WmsAdminService_rule_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_rule_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_rule_pargs { - public: - - - virtual ~WmsAdminService_rule_pargs() throw() {} - - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_rule_result__isset { - 
_WmsAdminService_rule_result__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsAdminService_rule_result__isset; - -class WmsAdminService_rule_result { - public: - - WmsAdminService_rule_result() { - } - - virtual ~WmsAdminService_rule_result() throw() {} - - RuleResponse success; - IOError io; - - _WmsAdminService_rule_result__isset __isset; - - void __set_success(const RuleResponse& val) { - success = val; - } - - void __set_io(const IOError& val) { - io = val; - } - - bool operator == (const WmsAdminService_rule_result & rhs) const - { - if (!(success == rhs.success)) - return false; - if (!(io == rhs.io)) - return false; - return true; - } - bool operator != (const WmsAdminService_rule_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_rule_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_rule_presult__isset { - _WmsAdminService_rule_presult__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsAdminService_rule_presult__isset; - -class WmsAdminService_rule_presult { - public: - - - virtual ~WmsAdminService_rule_presult() throw() {} - - RuleResponse* success; - IOError io; - - _WmsAdminService_rule_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - - -class WmsAdminService_workload_args { - public: - - WmsAdminService_workload_args() { - } - - virtual ~WmsAdminService_workload_args() throw() {} - - - bool operator == (const WmsAdminService_workload_args & /* rhs */) const - { - return true; - } - bool operator != (const WmsAdminService_workload_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_workload_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsAdminService_workload_pargs { - public: - - - virtual ~WmsAdminService_workload_pargs() throw() {} - - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_workload_result__isset { - _WmsAdminService_workload_result__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsAdminService_workload_result__isset; - -class WmsAdminService_workload_result { - public: - - WmsAdminService_workload_result() { - } - - virtual ~WmsAdminService_workload_result() throw() {} - - WorkloadResponse success; - IOError io; - - _WmsAdminService_workload_result__isset __isset; - - void __set_success(const WorkloadResponse& val) { - success = val; - } - - void __set_io(const IOError& val) { - io = val; - } - - bool operator == (const WmsAdminService_workload_result & rhs) const - { - if (!(success == rhs.success)) - return false; - if (!(io == rhs.io)) - return false; - return true; - } - bool operator != (const WmsAdminService_workload_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsAdminService_workload_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsAdminService_workload_presult__isset { - _WmsAdminService_workload_presult__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsAdminService_workload_presult__isset; - -class WmsAdminService_workload_presult { - public: - - 
- virtual ~WmsAdminService_workload_presult() throw() {} - - WorkloadResponse* success; - IOError io; - - _WmsAdminService_workload_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - -class WmsAdminServiceClient : virtual public WmsAdminServiceIf { - public: - WmsAdminServiceClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : - piprot_(prot), - poprot_(prot) { - iprot_ = prot.get(); - oprot_ = prot.get(); - } - WmsAdminServiceClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : - piprot_(iprot), - poprot_(oprot) { - iprot_ = iprot.get(); - oprot_ = oprot.get(); - } - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() { - return piprot_; - } - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() { - return poprot_; - } - int64_t ping(const int64_t timestamp); - void send_ping(const int64_t timestamp); - int64_t recv_ping(); - void addStream(const Stream& stream); - void send_addStream(const Stream& stream); - void recv_addStream(); - void alterStream(const Stream& stream); - void send_alterStream(const Stream& stream); - void recv_alterStream(); - void deleteStream(const Stream& stream); - void send_deleteStream(const Stream& stream); - void recv_deleteStream(); - void stream(StreamResponse& _return); - void send_stream(); - void recv_stream(StreamResponse& _return); - void addRule(const Rule& rule); - void send_addRule(const Rule& rule); - void recv_addRule(); - void alterRule(const Rule& rule); - void send_alterRule(const Rule& rule); - void recv_alterRule(); - void deleteRule(const Rule& rule); - void send_deleteRule(const Rule& rule); - void recv_deleteRule(); - void rule(RuleResponse& _return); - void send_rule(); - void recv_rule(RuleResponse& _return); - void workload(WorkloadResponse& _return); - void send_workload(); - void recv_workload(WorkloadResponse& _return); - protected: - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> piprot_; - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> poprot_; - ::apache::thrift::protocol::TProtocol* iprot_; - ::apache::thrift::protocol::TProtocol* oprot_; -}; - -class WmsAdminServiceProcessor : public ::apache::thrift::TDispatchProcessor { - protected: - boost::shared_ptr iface_; - virtual bool dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext); - private: - typedef void (WmsAdminServiceProcessor::*ProcessFunction)(int32_t, ::apache::thrift::protocol::TProtocol*, ::apache::thrift::protocol::TProtocol*, void*); - typedef std::map ProcessMap; - ProcessMap processMap_; - void process_ping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void process_addStream(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void process_alterStream(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void process_deleteStream(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void process_stream(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void 
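WmsAdminServiceClient, declared a few lines above, is the blocking client counterpart of the processor: each RPC is a send_*/recv_* pair over a single protocol. For reference, a minimal caller looks roughly like the following (a sketch only: the host name and port are placeholders, and it assumes the generated headers and the Thrift 0.9 runtime are on the include and link paths):

    #include <boost/shared_ptr.hpp>
    #include <thrift/transport/TSocket.h>
    #include <thrift/transport/TBufferTransports.h>
    #include <thrift/protocol/TBinaryProtocol.h>
    #include "WmsAdminService.h"

    using namespace apache::thrift::transport;
    using namespace apache::thrift::protocol;
    using namespace trafodion::wms::thrift;

    int main() {
      // Placeholder endpoint; the real WMS address and port come from the deployment config.
      boost::shared_ptr<TTransport> socket(new TSocket("localhost", 9090));
      boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
      boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
      WmsAdminServiceClient client(protocol);

      transport->open();
      client.ping(0);              // ping takes and returns an int64_t timestamp
      StreamResponse streams;
      client.stream(streams);      // fills the caller-supplied response struct
      transport->close();
      return 0;
    }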
process_addRule(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void process_alterRule(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void process_deleteRule(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void process_rule(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void process_workload(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - public: - WmsAdminServiceProcessor(boost::shared_ptr iface) : - iface_(iface) { - processMap_["ping"] = &WmsAdminServiceProcessor::process_ping; - processMap_["addStream"] = &WmsAdminServiceProcessor::process_addStream; - processMap_["alterStream"] = &WmsAdminServiceProcessor::process_alterStream; - processMap_["deleteStream"] = &WmsAdminServiceProcessor::process_deleteStream; - processMap_["stream"] = &WmsAdminServiceProcessor::process_stream; - processMap_["addRule"] = &WmsAdminServiceProcessor::process_addRule; - processMap_["alterRule"] = &WmsAdminServiceProcessor::process_alterRule; - processMap_["deleteRule"] = &WmsAdminServiceProcessor::process_deleteRule; - processMap_["rule"] = &WmsAdminServiceProcessor::process_rule; - processMap_["workload"] = &WmsAdminServiceProcessor::process_workload; - } - - virtual ~WmsAdminServiceProcessor() {} -}; - -class WmsAdminServiceProcessorFactory : public ::apache::thrift::TProcessorFactory { - public: - WmsAdminServiceProcessorFactory(const ::boost::shared_ptr< WmsAdminServiceIfFactory >& handlerFactory) : - handlerFactory_(handlerFactory) {} - - ::boost::shared_ptr< ::apache::thrift::TProcessor > getProcessor(const ::apache::thrift::TConnectionInfo& connInfo); - - protected: - ::boost::shared_ptr< WmsAdminServiceIfFactory > handlerFactory_; -}; - -class WmsAdminServiceMultiface : virtual public WmsAdminServiceIf { - public: - WmsAdminServiceMultiface(std::vector >& ifaces) : ifaces_(ifaces) { - } - virtual ~WmsAdminServiceMultiface() {} - protected: - std::vector > ifaces_; - WmsAdminServiceMultiface() {} - void add(boost::shared_ptr iface) { - ifaces_.push_back(iface); - } - public: - int64_t ping(const int64_t timestamp) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->ping(timestamp); - } - return ifaces_[i]->ping(timestamp); - } - - void addStream(const Stream& stream) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->addStream(stream); - } - ifaces_[i]->addStream(stream); - } - - void alterStream(const Stream& stream) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->alterStream(stream); - } - ifaces_[i]->alterStream(stream); - } - - void deleteStream(const Stream& stream) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->deleteStream(stream); - } - ifaces_[i]->deleteStream(stream); - } - - void stream(StreamResponse& _return) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->stream(_return); - } - ifaces_[i]->stream(_return); - return; - } - - void addRule(const Rule& rule) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->addRule(rule); - } - 
ifaces_[i]->addRule(rule); - } - - void alterRule(const Rule& rule) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->alterRule(rule); - } - ifaces_[i]->alterRule(rule); - } - - void deleteRule(const Rule& rule) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->deleteRule(rule); - } - ifaces_[i]->deleteRule(rule); - } - - void rule(RuleResponse& _return) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->rule(_return); - } - ifaces_[i]->rule(_return); - return; - } - - void workload(WorkloadResponse& _return) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->workload(_return); - } - ifaces_[i]->workload(_return); - return; - } - -}; - -}}} // namespace - -#endif diff --git a/wms/src/main/c/generated/WmsAdminService_server.skeleton.cpp b/wms/src/main/c/generated/WmsAdminService_server.skeleton.cpp deleted file mode 100644 index e22a64da..00000000 --- a/wms/src/main/c/generated/WmsAdminService_server.skeleton.cpp +++ /dev/null @@ -1,89 +0,0 @@ -// This autogenerated skeleton file illustrates how to build a server. -// You should copy it to another filename to avoid overwriting it. - -#include "WmsAdminService.h" -#include -#include -#include -#include - -using namespace ::apache::thrift; -using namespace ::apache::thrift::protocol; -using namespace ::apache::thrift::transport; -using namespace ::apache::thrift::server; - -using boost::shared_ptr; - -using namespace ::trafodion::wms::thrift; - -class WmsAdminServiceHandler : virtual public WmsAdminServiceIf { - public: - WmsAdminServiceHandler() { - // Your initialization goes here - } - - int64_t ping(const int64_t timestamp) { - // Your implementation goes here - printf("ping\n"); - } - - void addStream(const Stream& stream) { - // Your implementation goes here - printf("addStream\n"); - } - - void alterStream(const Stream& stream) { - // Your implementation goes here - printf("alterStream\n"); - } - - void deleteStream(const Stream& stream) { - // Your implementation goes here - printf("deleteStream\n"); - } - - void stream(StreamResponse& _return) { - // Your implementation goes here - printf("stream\n"); - } - - void addRule(const Rule& rule) { - // Your implementation goes here - printf("addRule\n"); - } - - void alterRule(const Rule& rule) { - // Your implementation goes here - printf("alterRule\n"); - } - - void deleteRule(const Rule& rule) { - // Your implementation goes here - printf("deleteRule\n"); - } - - void rule(RuleResponse& _return) { - // Your implementation goes here - printf("rule\n"); - } - - void workload(WorkloadResponse& _return) { - // Your implementation goes here - printf("workload\n"); - } - -}; - -int main(int argc, char **argv) { - int port = 9090; - shared_ptr handler(new WmsAdminServiceHandler()); - shared_ptr processor(new WmsAdminServiceProcessor(handler)); - shared_ptr serverTransport(new TServerSocket(port)); - shared_ptr transportFactory(new TBufferedTransportFactory()); - shared_ptr protocolFactory(new TBinaryProtocolFactory()); - - TSimpleServer server(processor, serverTransport, transportFactory, protocolFactory); - server.serve(); - return 0; -} - diff --git a/wms/src/main/c/generated/WmsService.cpp b/wms/src/main/c/generated/WmsService.cpp deleted file mode 100644 index 2952e5c2..00000000 --- a/wms/src/main/c/generated/WmsService.cpp +++ /dev/null @@ -1,663 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT 
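The generated skeleton above wires a single handler into a single-threaded TSimpleServer, which serves one connection at a time; WmsAdminServiceMultiface, also above, simply fans each call out to every registered handler and returns the last result. Beyond smoke testing, the usual change is to swap the server class, for example (sketch only, reusing the processor, serverTransport, transportFactory, and protocolFactory variables from the skeleton's main()):

    #include <thrift/server/TThreadedServer.h>

    // One thread per client connection; every other object is exactly as in the skeleton.
    apache::thrift::server::TThreadedServer server(
        processor, serverTransport, transportFactory, protocolFactory);
    server.serve();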
UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -#include "WmsService.h" - -namespace trafodion { namespace wms { namespace thrift { - -uint32_t WmsService_ping_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->timestamp); - this->__isset.timestamp = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsService_ping_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsService_ping_args"); - - xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 1); - xfer += oprot->writeI64(this->timestamp); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsService_ping_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsService_ping_pargs"); - - xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 1); - xfer += oprot->writeI64((*(this->timestamp))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsService_ping_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->success); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsService_ping_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsService_ping_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_I64, 0); - xfer += oprot->writeI64(this->success); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsService_ping_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string 
fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64((*(this->success))); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsService_writeread_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->request.read(iprot); - this->__isset.request = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsService_writeread_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsService_writeread_args"); - - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->request.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsService_writeread_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WmsService_writeread_pargs"); - - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->request)).write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsService_writeread_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - 
break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WmsService_writeread_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("WmsService_writeread_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.io) { - xfer += oprot->writeFieldBegin("io", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->io.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.ia) { - xfer += oprot->writeFieldBegin("ia", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->ia.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t WmsService_writeread_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->io.read(iprot); - this->__isset.io = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->ia.read(iprot); - this->__isset.ia = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -int64_t WmsServiceClient::ping(const int64_t timestamp) -{ - send_ping(timestamp); - return recv_ping(); -} - -void WmsServiceClient::send_ping(const int64_t timestamp) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("ping", ::apache::thrift::protocol::T_CALL, cseqid); - - WmsService_ping_pargs args; - args.timestamp = ×tamp; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} - -int64_t WmsServiceClient::recv_ping() -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("ping") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - int64_t _return; - WmsService_ping_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - return _return; - } - if (result.__isset.io) { - throw 
result.io; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "ping failed: unknown result"); -} - -void WmsServiceClient::writeread(Response& _return, const Request& request) -{ - send_writeread(request); - recv_writeread(_return); -} - -void WmsServiceClient::send_writeread(const Request& request) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("writeread", ::apache::thrift::protocol::T_CALL, cseqid); - - WmsService_writeread_pargs args; - args.request = &request; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} - -void WmsServiceClient::recv_writeread(Response& _return) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("writeread") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - WmsService_writeread_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.io) { - throw result.io; - } - if (result.__isset.ia) { - throw result.ia; - } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "writeread failed: unknown result"); -} - -bool WmsServiceProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) { - ProcessMap::iterator pfn; - pfn = processMap_.find(fname); - if (pfn == processMap_.end()) { - iprot->skip(::apache::thrift::protocol::T_STRUCT); - iprot->readMessageEnd(); - iprot->getTransport()->readEnd(); - ::apache::thrift::TApplicationException x(::apache::thrift::TApplicationException::UNKNOWN_METHOD, "Invalid method name: '"+fname+"'"); - oprot->writeMessageBegin(fname, ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return true; - } - (this->*(pfn->second))(seqid, iprot, oprot, callContext); - return true; -} - -void WmsServiceProcessor::process_ping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsService.ping", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsService.ping"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsService.ping"); - } - - WmsService_ping_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsService.ping", bytes); - } - - WmsService_ping_result result; - try { - 
result.success = iface_->ping(args.timestamp); - result.__isset.success = true; - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsService.ping"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("ping", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsService.ping"); - } - - oprot->writeMessageBegin("ping", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsService.ping", bytes); - } -} - -void WmsServiceProcessor::process_writeread(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) -{ - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("WmsService.writeread", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "WmsService.writeread"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "WmsService.writeread"); - } - - WmsService_writeread_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "WmsService.writeread", bytes); - } - - WmsService_writeread_result result; - try { - iface_->writeread(result.success, args.request); - result.__isset.success = true; - } catch (IOError &io) { - result.io = io; - result.__isset.io = true; - } catch (IllegalArgument &ia) { - result.ia = ia; - result.__isset.ia = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "WmsService.writeread"); - } - - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("writeread", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "WmsService.writeread"); - } - - oprot->writeMessageBegin("writeread", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "WmsService.writeread", bytes); - } -} - -::boost::shared_ptr< ::apache::thrift::TProcessor > WmsServiceProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { - ::apache::thrift::ReleaseHandler< WmsServiceIfFactory > cleanup(handlerFactory_); - ::boost::shared_ptr< WmsServiceIf > handler(handlerFactory_->getHandler(connInfo), cleanup); - ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new WmsServiceProcessor(handler)); - return processor; -} -}}} // namespace - diff --git a/wms/src/main/c/generated/WmsService.h b/wms/src/main/c/generated/WmsService.h deleted file mode 100644 index 
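Note: the generated WmsService.cpp/WmsService.h client and processor removed above exposed two RPCs, ping(timestamp) and writeread(request). The sketch below (not part of this change) shows how a caller would typically have driven the removed WmsServiceClient over a standard Thrift 0.9 C++ transport stack; the TSocket/TBufferedTransport/TBinaryProtocol wiring, the localhost:9999 address, and the "exampleKey" payload entry are illustrative assumptions, not values taken from this repository.

    #include <boost/shared_ptr.hpp>
    #include <thrift/transport/TSocket.h>
    #include <thrift/transport/TBufferTransports.h>
    #include <thrift/protocol/TBinaryProtocol.h>
    #include "WmsService.h"          // the generated client removed by this change

    using namespace apache::thrift::transport;
    using namespace apache::thrift::protocol;
    using namespace trafodion::wms::thrift;

    int main() {
      // Placeholder address: the real WMS server host/port come from deployment configuration.
      boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9999));
      boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
      boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
      WmsServiceClient client(protocol);   // generated constructor takes a shared_ptr<TProtocol>

      transport->open();

      // ping() returns the server-side timestamp and may throw IOError.
      int64_t serverTime = client.ping(0);
      (void)serverTime;

      // writeread() carries a Header plus a map<string, KeyValue> payload and
      // may throw IOError or IllegalArgument.
      Request request;
      request.header.__set_clientUserName("trafodion");
      KeyValue kv;
      kv.__set_stringValue("example value");
      request.data.keyValues["exampleKey"] = kv;   // hypothetical key name, for illustration only

      Response response;
      client.writeread(response, request);

      transport->close();
      return 0;
    }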
43dbf8e4..00000000 --- a/wms/src/main/c/generated/WmsService.h +++ /dev/null @@ -1,403 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -#ifndef WmsService_H -#define WmsService_H - -#include -#include "WmsService_types.h" - -namespace trafodion { namespace wms { namespace thrift { - -class WmsServiceIf { - public: - virtual ~WmsServiceIf() {} - virtual int64_t ping(const int64_t timestamp) = 0; - virtual void writeread(Response& _return, const Request& request) = 0; -}; - -class WmsServiceIfFactory { - public: - typedef WmsServiceIf Handler; - - virtual ~WmsServiceIfFactory() {} - - virtual WmsServiceIf* getHandler(const ::apache::thrift::TConnectionInfo& connInfo) = 0; - virtual void releaseHandler(WmsServiceIf* /* handler */) = 0; -}; - -class WmsServiceIfSingletonFactory : virtual public WmsServiceIfFactory { - public: - WmsServiceIfSingletonFactory(const boost::shared_ptr& iface) : iface_(iface) {} - virtual ~WmsServiceIfSingletonFactory() {} - - virtual WmsServiceIf* getHandler(const ::apache::thrift::TConnectionInfo&) { - return iface_.get(); - } - virtual void releaseHandler(WmsServiceIf* /* handler */) {} - - protected: - boost::shared_ptr iface_; -}; - -class WmsServiceNull : virtual public WmsServiceIf { - public: - virtual ~WmsServiceNull() {} - int64_t ping(const int64_t /* timestamp */) { - int64_t _return = 0; - return _return; - } - void writeread(Response& /* _return */, const Request& /* request */) { - return; - } -}; - -typedef struct _WmsService_ping_args__isset { - _WmsService_ping_args__isset() : timestamp(false) {} - bool timestamp; -} _WmsService_ping_args__isset; - -class WmsService_ping_args { - public: - - WmsService_ping_args() : timestamp(0) { - } - - virtual ~WmsService_ping_args() throw() {} - - int64_t timestamp; - - _WmsService_ping_args__isset __isset; - - void __set_timestamp(const int64_t val) { - timestamp = val; - } - - bool operator == (const WmsService_ping_args & rhs) const - { - if (!(timestamp == rhs.timestamp)) - return false; - return true; - } - bool operator != (const WmsService_ping_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsService_ping_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsService_ping_pargs { - public: - - - virtual ~WmsService_ping_pargs() throw() {} - - const int64_t* timestamp; - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsService_ping_result__isset { - _WmsService_ping_result__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsService_ping_result__isset; - -class WmsService_ping_result { - public: - - WmsService_ping_result() : success(0) { - } - - virtual ~WmsService_ping_result() throw() {} - - int64_t success; - IOError io; - - _WmsService_ping_result__isset __isset; - - void __set_success(const int64_t val) { - success = val; - } - - void __set_io(const IOError& val) { - io = val; - } - - bool operator == (const WmsService_ping_result & rhs) const - { - if (!(success == rhs.success)) - return false; - if (!(io == rhs.io)) - return false; - return true; - } - bool operator != (const WmsService_ping_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsService_ping_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t 
write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsService_ping_presult__isset { - _WmsService_ping_presult__isset() : success(false), io(false) {} - bool success; - bool io; -} _WmsService_ping_presult__isset; - -class WmsService_ping_presult { - public: - - - virtual ~WmsService_ping_presult() throw() {} - - int64_t* success; - IOError io; - - _WmsService_ping_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - -typedef struct _WmsService_writeread_args__isset { - _WmsService_writeread_args__isset() : request(false) {} - bool request; -} _WmsService_writeread_args__isset; - -class WmsService_writeread_args { - public: - - WmsService_writeread_args() { - } - - virtual ~WmsService_writeread_args() throw() {} - - Request request; - - _WmsService_writeread_args__isset __isset; - - void __set_request(const Request& val) { - request = val; - } - - bool operator == (const WmsService_writeread_args & rhs) const - { - if (!(request == rhs.request)) - return false; - return true; - } - bool operator != (const WmsService_writeread_args &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsService_writeread_args & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - - -class WmsService_writeread_pargs { - public: - - - virtual ~WmsService_writeread_pargs() throw() {} - - const Request* request; - - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsService_writeread_result__isset { - _WmsService_writeread_result__isset() : success(false), io(false), ia(false) {} - bool success; - bool io; - bool ia; -} _WmsService_writeread_result__isset; - -class WmsService_writeread_result { - public: - - WmsService_writeread_result() { - } - - virtual ~WmsService_writeread_result() throw() {} - - Response success; - IOError io; - IllegalArgument ia; - - _WmsService_writeread_result__isset __isset; - - void __set_success(const Response& val) { - success = val; - } - - void __set_io(const IOError& val) { - io = val; - } - - void __set_ia(const IllegalArgument& val) { - ia = val; - } - - bool operator == (const WmsService_writeread_result & rhs) const - { - if (!(success == rhs.success)) - return false; - if (!(io == rhs.io)) - return false; - if (!(ia == rhs.ia)) - return false; - return true; - } - bool operator != (const WmsService_writeread_result &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WmsService_writeread_result & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -typedef struct _WmsService_writeread_presult__isset { - _WmsService_writeread_presult__isset() : success(false), io(false), ia(false) {} - bool success; - bool io; - bool ia; -} _WmsService_writeread_presult__isset; - -class WmsService_writeread_presult { - public: - - - virtual ~WmsService_writeread_presult() throw() {} - - Response* success; - IOError io; - IllegalArgument ia; - - _WmsService_writeread_presult__isset __isset; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - -}; - -class WmsServiceClient : virtual public WmsServiceIf { - public: - WmsServiceClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : - piprot_(prot), - poprot_(prot) { - iprot_ = prot.get(); - oprot_ = prot.get(); - } - WmsServiceClient(boost::shared_ptr< 
::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : - piprot_(iprot), - poprot_(oprot) { - iprot_ = iprot.get(); - oprot_ = oprot.get(); - } - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() { - return piprot_; - } - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() { - return poprot_; - } - int64_t ping(const int64_t timestamp); - void send_ping(const int64_t timestamp); - int64_t recv_ping(); - void writeread(Response& _return, const Request& request); - void send_writeread(const Request& request); - void recv_writeread(Response& _return); - protected: - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> piprot_; - boost::shared_ptr< ::apache::thrift::protocol::TProtocol> poprot_; - ::apache::thrift::protocol::TProtocol* iprot_; - ::apache::thrift::protocol::TProtocol* oprot_; -}; - -class WmsServiceProcessor : public ::apache::thrift::TDispatchProcessor { - protected: - boost::shared_ptr iface_; - virtual bool dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext); - private: - typedef void (WmsServiceProcessor::*ProcessFunction)(int32_t, ::apache::thrift::protocol::TProtocol*, ::apache::thrift::protocol::TProtocol*, void*); - typedef std::map ProcessMap; - ProcessMap processMap_; - void process_ping(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - void process_writeread(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); - public: - WmsServiceProcessor(boost::shared_ptr iface) : - iface_(iface) { - processMap_["ping"] = &WmsServiceProcessor::process_ping; - processMap_["writeread"] = &WmsServiceProcessor::process_writeread; - } - - virtual ~WmsServiceProcessor() {} -}; - -class WmsServiceProcessorFactory : public ::apache::thrift::TProcessorFactory { - public: - WmsServiceProcessorFactory(const ::boost::shared_ptr< WmsServiceIfFactory >& handlerFactory) : - handlerFactory_(handlerFactory) {} - - ::boost::shared_ptr< ::apache::thrift::TProcessor > getProcessor(const ::apache::thrift::TConnectionInfo& connInfo); - - protected: - ::boost::shared_ptr< WmsServiceIfFactory > handlerFactory_; -}; - -class WmsServiceMultiface : virtual public WmsServiceIf { - public: - WmsServiceMultiface(std::vector >& ifaces) : ifaces_(ifaces) { - } - virtual ~WmsServiceMultiface() {} - protected: - std::vector > ifaces_; - WmsServiceMultiface() {} - void add(boost::shared_ptr iface) { - ifaces_.push_back(iface); - } - public: - int64_t ping(const int64_t timestamp) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->ping(timestamp); - } - return ifaces_[i]->ping(timestamp); - } - - void writeread(Response& _return, const Request& request) { - size_t sz = ifaces_.size(); - size_t i = 0; - for (; i < (sz - 1); ++i) { - ifaces_[i]->writeread(_return, request); - } - ifaces_[i]->writeread(_return, request); - return; - } - -}; - -}}} // namespace - -#endif diff --git a/wms/src/main/c/generated/WmsService_constants.cpp b/wms/src/main/c/generated/WmsService_constants.cpp deleted file mode 100644 index bdda30ff..00000000 --- a/wms/src/main/c/generated/WmsService_constants.cpp +++ /dev/null @@ -1,17 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE 
SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -#include "WmsService_constants.h" - -namespace trafodion { namespace wms { namespace thrift { - -const WmsServiceConstants g_WmsService_constants; - -WmsServiceConstants::WmsServiceConstants() { -} - -}}} // namespace - diff --git a/wms/src/main/c/generated/WmsService_constants.h b/wms/src/main/c/generated/WmsService_constants.h deleted file mode 100644 index bc3bc675..00000000 --- a/wms/src/main/c/generated/WmsService_constants.h +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -#ifndef WmsService_CONSTANTS_H -#define WmsService_CONSTANTS_H - -#include "WmsService_types.h" - -namespace trafodion { namespace wms { namespace thrift { - -class WmsServiceConstants { - public: - WmsServiceConstants(); - -}; - -extern const WmsServiceConstants g_WmsService_constants; - -}}} // namespace - -#endif diff --git a/wms/src/main/c/generated/WmsService_server.skeleton.cpp b/wms/src/main/c/generated/WmsService_server.skeleton.cpp deleted file mode 100644 index d884ba24..00000000 --- a/wms/src/main/c/generated/WmsService_server.skeleton.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// This autogenerated skeleton file illustrates how to build a server. -// You should copy it to another filename to avoid overwriting it. - -#include "WmsService.h" -#include -#include -#include -#include - -using namespace ::apache::thrift; -using namespace ::apache::thrift::protocol; -using namespace ::apache::thrift::transport; -using namespace ::apache::thrift::server; - -using boost::shared_ptr; - -using namespace ::trafodion::wms::thrift; - -class WmsServiceHandler : virtual public WmsServiceIf { - public: - WmsServiceHandler() { - // Your initialization goes here - } - - int64_t ping(const int64_t timestamp) { - // Your implementation goes here - printf("ping\n"); - } - - void writeread(Response& _return, const Request& request) { - // Your implementation goes here - printf("writeread\n"); - } - -}; - -int main(int argc, char **argv) { - int port = 9090; - shared_ptr handler(new WmsServiceHandler()); - shared_ptr processor(new WmsServiceProcessor(handler)); - shared_ptr serverTransport(new TServerSocket(port)); - shared_ptr transportFactory(new TBufferedTransportFactory()); - shared_ptr protocolFactory(new TBinaryProtocolFactory()); - - TSimpleServer server(processor, serverTransport, transportFactory, protocolFactory); - server.serve(); - return 0; -} - diff --git a/wms/src/main/c/generated/WmsService_types.cpp b/wms/src/main/c/generated/WmsService_types.cpp deleted file mode 100644 index e467fe85..00000000 --- a/wms/src/main/c/generated/WmsService_types.cpp +++ /dev/null @@ -1,1134 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -#include "WmsService_types.h" - -#include - -namespace trafodion { namespace wms { namespace thrift { - -int _kOperationValues[] = { - Operation::OPERATION_BEGIN, - Operation::OPERATION_UPDATE, - Operation::OPERATION_END, - Operation::OPERATION_UPDATE_PARENT_ID, - Operation::OPERATION_CANCEL_CHILDREN -}; -const char* _kOperationNames[] = { - "OPERATION_BEGIN", - "OPERATION_UPDATE", - "OPERATION_END", - "OPERATION_UPDATE_PARENT_ID", - "OPERATION_CANCEL_CHILDREN" -}; -const std::map _Operation_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(5, _kOperationValues, _kOperationNames), ::apache::thrift::TEnumIterator(-1, NULL, 
NULL)); - -int _kActionValues[] = { - Action::ACTION_CONTINUE, - Action::ACTION_REJECT, - Action::ACTION_CANCEL, - Action::ACTION_KILL, - Action::ACTION_WARNING, - Action::ACTION_PRIORITY_LOW, - Action::ACTION_PRIORITY_MEDIUM, - Action::ACTION_PRIORITY_HIGH -}; -const char* _kActionNames[] = { - "ACTION_CONTINUE", - "ACTION_REJECT", - "ACTION_CANCEL", - "ACTION_KILL", - "ACTION_WARNING", - "ACTION_PRIORITY_LOW", - "ACTION_PRIORITY_MEDIUM", - "ACTION_PRIORITY_HIGH" -}; -const std::map _Action_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(8, _kActionValues, _kActionNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL)); - -const char* KeyValue::ascii_fingerprint = "8030FA0B1A7FF7D870C593DE44E90298"; -const uint8_t KeyValue::binary_fingerprint[16] = {0x80,0x30,0xFA,0x0B,0x1A,0x7F,0xF7,0xD8,0x70,0xC5,0x93,0xDE,0x44,0xE9,0x02,0x98}; - -uint32_t KeyValue::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->boolValue); - this->__isset.boolValue = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_BYTE) { - xfer += iprot->readByte(this->byteValue); - this->__isset.byteValue = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_I16) { - xfer += iprot->readI16(this->shortValue); - this->__isset.shortValue = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_I32) { - xfer += iprot->readI32(this->intValue); - this->__isset.intValue = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 5: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->longValue); - this->__isset.longValue = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 6: - if (ftype == ::apache::thrift::protocol::T_DOUBLE) { - xfer += iprot->readDouble(this->floatValue); - this->__isset.floatValue = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 7: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->stringValue); - this->__isset.stringValue = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t KeyValue::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("KeyValue"); - - if (this->__isset.boolValue) { - xfer += oprot->writeFieldBegin("boolValue", ::apache::thrift::protocol::T_BOOL, 1); - xfer += oprot->writeBool(this->boolValue); - xfer += oprot->writeFieldEnd(); - } - if (this->__isset.byteValue) { - xfer += oprot->writeFieldBegin("byteValue", ::apache::thrift::protocol::T_BYTE, 2); - xfer += oprot->writeByte(this->byteValue); - xfer += oprot->writeFieldEnd(); - } - if (this->__isset.shortValue) { - xfer += oprot->writeFieldBegin("shortValue", ::apache::thrift::protocol::T_I16, 3); - xfer += oprot->writeI16(this->shortValue); - xfer += 
oprot->writeFieldEnd(); - } - if (this->__isset.intValue) { - xfer += oprot->writeFieldBegin("intValue", ::apache::thrift::protocol::T_I32, 4); - xfer += oprot->writeI32(this->intValue); - xfer += oprot->writeFieldEnd(); - } - if (this->__isset.longValue) { - xfer += oprot->writeFieldBegin("longValue", ::apache::thrift::protocol::T_I64, 5); - xfer += oprot->writeI64(this->longValue); - xfer += oprot->writeFieldEnd(); - } - if (this->__isset.floatValue) { - xfer += oprot->writeFieldBegin("floatValue", ::apache::thrift::protocol::T_DOUBLE, 6); - xfer += oprot->writeDouble(this->floatValue); - xfer += oprot->writeFieldEnd(); - } - if (this->__isset.stringValue) { - xfer += oprot->writeFieldBegin("stringValue", ::apache::thrift::protocol::T_STRING, 7); - xfer += oprot->writeString(this->stringValue); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(KeyValue &a, KeyValue &b) { - using ::std::swap; - swap(a.boolValue, b.boolValue); - swap(a.byteValue, b.byteValue); - swap(a.shortValue, b.shortValue); - swap(a.intValue, b.intValue); - swap(a.longValue, b.longValue); - swap(a.floatValue, b.floatValue); - swap(a.stringValue, b.stringValue); - swap(a.__isset, b.__isset); -} - -const char* Data::ascii_fingerprint = "3744E9C024E14DD4F9ECA86B921D240A"; -const uint8_t Data::binary_fingerprint[16] = {0x37,0x44,0xE9,0xC0,0x24,0xE1,0x4D,0xD4,0xF9,0xEC,0xA8,0x6B,0x92,0x1D,0x24,0x0A}; - -uint32_t Data::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_MAP) { - { - this->keyValues.clear(); - uint32_t _size0; - ::apache::thrift::protocol::TType _ktype1; - ::apache::thrift::protocol::TType _vtype2; - xfer += iprot->readMapBegin(_ktype1, _vtype2, _size0); - uint32_t _i4; - for (_i4 = 0; _i4 < _size0; ++_i4) - { - std::string _key5; - xfer += iprot->readString(_key5); - KeyValue& _val6 = this->keyValues[_key5]; - xfer += _val6.read(iprot); - } - xfer += iprot->readMapEnd(); - } - this->__isset.keyValues = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t Data::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("Data"); - - if (this->__isset.keyValues) { - xfer += oprot->writeFieldBegin("keyValues", ::apache::thrift::protocol::T_MAP, 1); - { - xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->keyValues.size())); - std::map ::const_iterator _iter7; - for (_iter7 = this->keyValues.begin(); _iter7 != this->keyValues.end(); ++_iter7) - { - xfer += oprot->writeString(_iter7->first); - xfer += _iter7->second.write(oprot); - } - xfer += oprot->writeMapEnd(); - } - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(Data &a, Data &b) { - using ::std::swap; - swap(a.keyValues, b.keyValues); - swap(a.__isset, b.__isset); -} - -const char* 
Header::ascii_fingerprint = "CE62EB594CA7F8C2ABE78957866085E9"; -const uint8_t Header::binary_fingerprint[16] = {0xCE,0x62,0xEB,0x59,0x4C,0xA7,0xF8,0xC2,0xAB,0xE7,0x89,0x57,0x86,0x60,0x85,0xE9}; - -uint32_t Header::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->version); - this->__isset.version = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->clientIpAddress); - this->__isset.clientIpAddress = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->clientTimestamp); - this->__isset.clientTimestamp = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->clientUserName); - this->__isset.clientUserName = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 5: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->clientApplication); - this->__isset.clientApplication = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 6: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->serverLastUpdated); - this->__isset.serverLastUpdated = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t Header::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("Header"); - - xfer += oprot->writeFieldBegin("version", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->version); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("clientIpAddress", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->clientIpAddress); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("clientTimestamp", ::apache::thrift::protocol::T_I64, 3); - xfer += oprot->writeI64(this->clientTimestamp); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("clientUserName", ::apache::thrift::protocol::T_STRING, 4); - xfer += oprot->writeString(this->clientUserName); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("clientApplication", ::apache::thrift::protocol::T_STRING, 5); - xfer += oprot->writeString(this->clientApplication); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("serverLastUpdated", ::apache::thrift::protocol::T_I64, 6); - xfer += oprot->writeI64(this->serverLastUpdated); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(Header &a, Header &b) { - using ::std::swap; - swap(a.version, b.version); - swap(a.clientIpAddress, b.clientIpAddress); - swap(a.clientTimestamp, b.clientTimestamp); - swap(a.clientUserName, 
b.clientUserName); - swap(a.clientApplication, b.clientApplication); - swap(a.serverLastUpdated, b.serverLastUpdated); - swap(a.__isset, b.__isset); -} - -const char* Request::ascii_fingerprint = "252EE71CE3D737D84E12CF91BC47093B"; -const uint8_t Request::binary_fingerprint[16] = {0x25,0x2E,0xE7,0x1C,0xE3,0xD7,0x37,0xD8,0x4E,0x12,0xCF,0x91,0xBC,0x47,0x09,0x3B}; - -uint32_t Request::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->header.read(iprot); - this->__isset.header = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->data.read(iprot); - this->__isset.data = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t Request::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("Request"); - - xfer += oprot->writeFieldBegin("header", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->header.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("data", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->data.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(Request &a, Request &b) { - using ::std::swap; - swap(a.header, b.header); - swap(a.data, b.data); - swap(a.__isset, b.__isset); -} - -const char* Response::ascii_fingerprint = "252EE71CE3D737D84E12CF91BC47093B"; -const uint8_t Response::binary_fingerprint[16] = {0x25,0x2E,0xE7,0x1C,0xE3,0xD7,0x37,0xD8,0x4E,0x12,0xCF,0x91,0xBC,0x47,0x09,0x3B}; - -uint32_t Response::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->header.read(iprot); - this->__isset.header = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->data.read(iprot); - this->__isset.data = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t Response::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("Response"); - - xfer += oprot->writeFieldBegin("header", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->header.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("data", 
::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->data.write(oprot); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(Response &a, Response &b) { - using ::std::swap; - swap(a.header, b.header); - swap(a.data, b.data); - swap(a.__isset, b.__isset); -} - -const char* Stream::ascii_fingerprint = "5F9965D46A4F3845985AC0F9B81C3C69"; -const uint8_t Stream::binary_fingerprint[16] = {0x5F,0x99,0x65,0xD4,0x6A,0x4F,0x38,0x45,0x98,0x5A,0xC0,0xF9,0xB8,0x1C,0x3C,0x69}; - -uint32_t Stream::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->name); - this->__isset.name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->value); - this->__isset.value = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->comment); - this->__isset.comment = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->timestamp); - this->__isset.timestamp = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t Stream::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("Stream"); - - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->value); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("comment", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->comment); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 4); - xfer += oprot->writeI64(this->timestamp); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(Stream &a, Stream &b) { - using ::std::swap; - swap(a.name, b.name); - swap(a.value, b.value); - swap(a.comment, b.comment); - swap(a.timestamp, b.timestamp); - swap(a.__isset, b.__isset); -} - -const char* StreamResponse::ascii_fingerprint = "DDDA8F891CEFA6BAEE9C7096D0B1642F"; -const uint8_t StreamResponse::binary_fingerprint[16] = {0xDD,0xDA,0x8F,0x89,0x1C,0xEF,0xA6,0xBA,0xEE,0x9C,0x70,0x96,0xD0,0xB1,0x64,0x2F}; - -uint32_t StreamResponse::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += 
iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->streamList.clear(); - uint32_t _size8; - ::apache::thrift::protocol::TType _etype11; - xfer += iprot->readListBegin(_etype11, _size8); - this->streamList.resize(_size8); - uint32_t _i12; - for (_i12 = 0; _i12 < _size8; ++_i12) - { - xfer += this->streamList[_i12].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.streamList = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t StreamResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("StreamResponse"); - - xfer += oprot->writeFieldBegin("streamList", ::apache::thrift::protocol::T_LIST, 1); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->streamList.size())); - std::vector ::const_iterator _iter13; - for (_iter13 = this->streamList.begin(); _iter13 != this->streamList.end(); ++_iter13) - { - xfer += (*_iter13).write(oprot); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(StreamResponse &a, StreamResponse &b) { - using ::std::swap; - swap(a.streamList, b.streamList); - swap(a.__isset, b.__isset); -} - -const char* Rule::ascii_fingerprint = "5F9965D46A4F3845985AC0F9B81C3C69"; -const uint8_t Rule::binary_fingerprint[16] = {0x5F,0x99,0x65,0xD4,0x6A,0x4F,0x38,0x45,0x98,0x5A,0xC0,0xF9,0xB8,0x1C,0x3C,0x69}; - -uint32_t Rule::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->name); - this->__isset.name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->value); - this->__isset.value = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->comment); - this->__isset.comment = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_I64) { - xfer += iprot->readI64(this->timestamp); - this->__isset.timestamp = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t Rule::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("Rule"); - - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_STRING, 2); - xfer += 
oprot->writeString(this->value); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("comment", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->comment); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 4); - xfer += oprot->writeI64(this->timestamp); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(Rule &a, Rule &b) { - using ::std::swap; - swap(a.name, b.name); - swap(a.value, b.value); - swap(a.comment, b.comment); - swap(a.timestamp, b.timestamp); - swap(a.__isset, b.__isset); -} - -const char* RuleResponse::ascii_fingerprint = "DDDA8F891CEFA6BAEE9C7096D0B1642F"; -const uint8_t RuleResponse::binary_fingerprint[16] = {0xDD,0xDA,0x8F,0x89,0x1C,0xEF,0xA6,0xBA,0xEE,0x9C,0x70,0x96,0xD0,0xB1,0x64,0x2F}; - -uint32_t RuleResponse::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->ruleList.clear(); - uint32_t _size14; - ::apache::thrift::protocol::TType _etype17; - xfer += iprot->readListBegin(_etype17, _size14); - this->ruleList.resize(_size14); - uint32_t _i18; - for (_i18 = 0; _i18 < _size14; ++_i18) - { - xfer += this->ruleList[_i18].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.ruleList = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t RuleResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("RuleResponse"); - - xfer += oprot->writeFieldBegin("ruleList", ::apache::thrift::protocol::T_LIST, 1); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->ruleList.size())); - std::vector ::const_iterator _iter19; - for (_iter19 = this->ruleList.begin(); _iter19 != this->ruleList.end(); ++_iter19) - { - xfer += (*_iter19).write(oprot); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(RuleResponse &a, RuleResponse &b) { - using ::std::swap; - swap(a.ruleList, b.ruleList); - swap(a.__isset, b.__isset); -} - -const char* WorkloadResponse::ascii_fingerprint = "D0D9EE8C6C26E09B1A4E0CDB1751C878"; -const uint8_t WorkloadResponse::binary_fingerprint[16] = {0xD0,0xD9,0xEE,0x8C,0x6C,0x26,0xE0,0x9B,0x1A,0x4E,0x0C,0xDB,0x17,0x51,0xC8,0x78}; - -uint32_t WorkloadResponse::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - 
this->workloadList.clear(); - uint32_t _size20; - ::apache::thrift::protocol::TType _etype23; - xfer += iprot->readListBegin(_etype23, _size20); - this->workloadList.resize(_size20); - uint32_t _i24; - for (_i24 = 0; _i24 < _size20; ++_i24) - { - xfer += this->workloadList[_i24].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.workloadList = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t WorkloadResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("WorkloadResponse"); - - xfer += oprot->writeFieldBegin("workloadList", ::apache::thrift::protocol::T_LIST, 1); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->workloadList.size())); - std::vector ::const_iterator _iter25; - for (_iter25 = this->workloadList.begin(); _iter25 != this->workloadList.end(); ++_iter25) - { - xfer += (*_iter25).write(oprot); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(WorkloadResponse &a, WorkloadResponse &b) { - using ::std::swap; - swap(a.workloadList, b.workloadList); - swap(a.__isset, b.__isset); -} - -const char* IOError::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1"; -const uint8_t IOError::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1}; - -uint32_t IOError::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t IOError::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("IOError"); - - xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->message); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(IOError &a, IOError &b) { - using ::std::swap; - swap(a.message, b.message); - swap(a.__isset, b.__isset); -} - -const char* IllegalArgument::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1"; -const uint8_t IllegalArgument::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1}; - -uint32_t IllegalArgument::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == 
::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t IllegalArgument::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - xfer += oprot->writeStructBegin("IllegalArgument"); - - xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->message); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -void swap(IllegalArgument &a, IllegalArgument &b) { - using ::std::swap; - swap(a.message, b.message); - swap(a.__isset, b.__isset); -} - -}}} // namespace diff --git a/wms/src/main/c/generated/WmsService_types.h b/wms/src/main/c/generated/WmsService_types.h deleted file mode 100644 index d6768764..00000000 --- a/wms/src/main/c/generated/WmsService_types.h +++ /dev/null @@ -1,740 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -#ifndef WmsService_TYPES_H -#define WmsService_TYPES_H - -#include -#include -#include -#include - - - -namespace trafodion { namespace wms { namespace thrift { - -struct Operation { - enum type { - OPERATION_BEGIN = 100, - OPERATION_UPDATE = 101, - OPERATION_END = 102, - OPERATION_UPDATE_PARENT_ID = 103, - OPERATION_CANCEL_CHILDREN = 104 - }; -}; - -extern const std::map _Operation_VALUES_TO_NAMES; - -struct Action { - enum type { - ACTION_CONTINUE = 200, - ACTION_REJECT = 201, - ACTION_CANCEL = 202, - ACTION_KILL = 203, - ACTION_WARNING = 204, - ACTION_PRIORITY_LOW = 205, - ACTION_PRIORITY_MEDIUM = 206, - ACTION_PRIORITY_HIGH = 207 - }; -}; - -extern const std::map _Action_VALUES_TO_NAMES; - -typedef struct _KeyValue__isset { - _KeyValue__isset() : boolValue(false), byteValue(false), shortValue(false), intValue(false), longValue(false), floatValue(false), stringValue(false) {} - bool boolValue; - bool byteValue; - bool shortValue; - bool intValue; - bool longValue; - bool floatValue; - bool stringValue; -} _KeyValue__isset; - -class KeyValue { - public: - - static const char* ascii_fingerprint; // = "8030FA0B1A7FF7D870C593DE44E90298"; - static const uint8_t binary_fingerprint[16]; // = {0x80,0x30,0xFA,0x0B,0x1A,0x7F,0xF7,0xD8,0x70,0xC5,0x93,0xDE,0x44,0xE9,0x02,0x98}; - - KeyValue() : boolValue(0), byteValue(0), shortValue(0), intValue(0), longValue(0), floatValue(0), stringValue() { - } - - virtual ~KeyValue() throw() {} - - bool boolValue; - int8_t byteValue; - int16_t shortValue; - int32_t intValue; - int64_t longValue; - double floatValue; - std::string stringValue; - - _KeyValue__isset __isset; - - void __set_boolValue(const bool val) { - boolValue = val; - __isset.boolValue = true; - } - - void __set_byteValue(const int8_t val) { - byteValue = val; - __isset.byteValue = true; - } - - void __set_shortValue(const int16_t val) { - shortValue = val; - __isset.shortValue = true; - } - - void __set_intValue(const int32_t val) { - intValue = val; - __isset.intValue = true; - } - - void __set_longValue(const int64_t val) { - longValue = val; - __isset.longValue = true; - } - - void __set_floatValue(const double val) { - floatValue = val; - 
__isset.floatValue = true; - } - - void __set_stringValue(const std::string& val) { - stringValue = val; - __isset.stringValue = true; - } - - bool operator == (const KeyValue & rhs) const - { - if (__isset.boolValue != rhs.__isset.boolValue) - return false; - else if (__isset.boolValue && !(boolValue == rhs.boolValue)) - return false; - if (__isset.byteValue != rhs.__isset.byteValue) - return false; - else if (__isset.byteValue && !(byteValue == rhs.byteValue)) - return false; - if (__isset.shortValue != rhs.__isset.shortValue) - return false; - else if (__isset.shortValue && !(shortValue == rhs.shortValue)) - return false; - if (__isset.intValue != rhs.__isset.intValue) - return false; - else if (__isset.intValue && !(intValue == rhs.intValue)) - return false; - if (__isset.longValue != rhs.__isset.longValue) - return false; - else if (__isset.longValue && !(longValue == rhs.longValue)) - return false; - if (__isset.floatValue != rhs.__isset.floatValue) - return false; - else if (__isset.floatValue && !(floatValue == rhs.floatValue)) - return false; - if (__isset.stringValue != rhs.__isset.stringValue) - return false; - else if (__isset.stringValue && !(stringValue == rhs.stringValue)) - return false; - return true; - } - bool operator != (const KeyValue &rhs) const { - return !(*this == rhs); - } - - bool operator < (const KeyValue & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(KeyValue &a, KeyValue &b); - -typedef struct _Data__isset { - _Data__isset() : keyValues(false) {} - bool keyValues; -} _Data__isset; - -class Data { - public: - - static const char* ascii_fingerprint; // = "3744E9C024E14DD4F9ECA86B921D240A"; - static const uint8_t binary_fingerprint[16]; // = {0x37,0x44,0xE9,0xC0,0x24,0xE1,0x4D,0xD4,0xF9,0xEC,0xA8,0x6B,0x92,0x1D,0x24,0x0A}; - - Data() { - } - - virtual ~Data() throw() {} - - std::map keyValues; - - _Data__isset __isset; - - void __set_keyValues(const std::map & val) { - keyValues = val; - __isset.keyValues = true; - } - - bool operator == (const Data & rhs) const - { - if (__isset.keyValues != rhs.__isset.keyValues) - return false; - else if (__isset.keyValues && !(keyValues == rhs.keyValues)) - return false; - return true; - } - bool operator != (const Data &rhs) const { - return !(*this == rhs); - } - - bool operator < (const Data & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(Data &a, Data &b); - -typedef struct _Header__isset { - _Header__isset() : version(false), clientIpAddress(false), clientTimestamp(false), clientUserName(false), clientApplication(false), serverLastUpdated(false) {} - bool version; - bool clientIpAddress; - bool clientTimestamp; - bool clientUserName; - bool clientApplication; - bool serverLastUpdated; -} _Header__isset; - -class Header { - public: - - static const char* ascii_fingerprint; // = "CE62EB594CA7F8C2ABE78957866085E9"; - static const uint8_t binary_fingerprint[16]; // = {0xCE,0x62,0xEB,0x59,0x4C,0xA7,0xF8,0xC2,0xAB,0xE7,0x89,0x57,0x86,0x60,0x85,0xE9}; - - Header() : version(), clientIpAddress(), clientTimestamp(0), clientUserName(), clientApplication(), serverLastUpdated(0) { - } - - virtual ~Header() throw() {} - - std::string version; - std::string clientIpAddress; - int64_t clientTimestamp; - std::string clientUserName; - std::string clientApplication; - int64_t serverLastUpdated; - - 
_Header__isset __isset; - - void __set_version(const std::string& val) { - version = val; - } - - void __set_clientIpAddress(const std::string& val) { - clientIpAddress = val; - } - - void __set_clientTimestamp(const int64_t val) { - clientTimestamp = val; - } - - void __set_clientUserName(const std::string& val) { - clientUserName = val; - } - - void __set_clientApplication(const std::string& val) { - clientApplication = val; - } - - void __set_serverLastUpdated(const int64_t val) { - serverLastUpdated = val; - } - - bool operator == (const Header & rhs) const - { - if (!(version == rhs.version)) - return false; - if (!(clientIpAddress == rhs.clientIpAddress)) - return false; - if (!(clientTimestamp == rhs.clientTimestamp)) - return false; - if (!(clientUserName == rhs.clientUserName)) - return false; - if (!(clientApplication == rhs.clientApplication)) - return false; - if (!(serverLastUpdated == rhs.serverLastUpdated)) - return false; - return true; - } - bool operator != (const Header &rhs) const { - return !(*this == rhs); - } - - bool operator < (const Header & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(Header &a, Header &b); - -typedef struct _Request__isset { - _Request__isset() : header(false), data(false) {} - bool header; - bool data; -} _Request__isset; - -class Request { - public: - - static const char* ascii_fingerprint; // = "252EE71CE3D737D84E12CF91BC47093B"; - static const uint8_t binary_fingerprint[16]; // = {0x25,0x2E,0xE7,0x1C,0xE3,0xD7,0x37,0xD8,0x4E,0x12,0xCF,0x91,0xBC,0x47,0x09,0x3B}; - - Request() { - } - - virtual ~Request() throw() {} - - Header header; - Data data; - - _Request__isset __isset; - - void __set_header(const Header& val) { - header = val; - } - - void __set_data(const Data& val) { - data = val; - } - - bool operator == (const Request & rhs) const - { - if (!(header == rhs.header)) - return false; - if (!(data == rhs.data)) - return false; - return true; - } - bool operator != (const Request &rhs) const { - return !(*this == rhs); - } - - bool operator < (const Request & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(Request &a, Request &b); - -typedef struct _Response__isset { - _Response__isset() : header(false), data(false) {} - bool header; - bool data; -} _Response__isset; - -class Response { - public: - - static const char* ascii_fingerprint; // = "252EE71CE3D737D84E12CF91BC47093B"; - static const uint8_t binary_fingerprint[16]; // = {0x25,0x2E,0xE7,0x1C,0xE3,0xD7,0x37,0xD8,0x4E,0x12,0xCF,0x91,0xBC,0x47,0x09,0x3B}; - - Response() { - } - - virtual ~Response() throw() {} - - Header header; - Data data; - - _Response__isset __isset; - - void __set_header(const Header& val) { - header = val; - } - - void __set_data(const Data& val) { - data = val; - } - - bool operator == (const Response & rhs) const - { - if (!(header == rhs.header)) - return false; - if (!(data == rhs.data)) - return false; - return true; - } - bool operator != (const Response &rhs) const { - return !(*this == rhs); - } - - bool operator < (const Response & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(Response &a, Response &b); - -typedef struct _Stream__isset { - _Stream__isset() : name(false), value(false), comment(false), 
timestamp(false) {} - bool name; - bool value; - bool comment; - bool timestamp; -} _Stream__isset; - -class Stream { - public: - - static const char* ascii_fingerprint; // = "5F9965D46A4F3845985AC0F9B81C3C69"; - static const uint8_t binary_fingerprint[16]; // = {0x5F,0x99,0x65,0xD4,0x6A,0x4F,0x38,0x45,0x98,0x5A,0xC0,0xF9,0xB8,0x1C,0x3C,0x69}; - - Stream() : name(), value(), comment(), timestamp(0) { - } - - virtual ~Stream() throw() {} - - std::string name; - std::string value; - std::string comment; - int64_t timestamp; - - _Stream__isset __isset; - - void __set_name(const std::string& val) { - name = val; - } - - void __set_value(const std::string& val) { - value = val; - } - - void __set_comment(const std::string& val) { - comment = val; - } - - void __set_timestamp(const int64_t val) { - timestamp = val; - } - - bool operator == (const Stream & rhs) const - { - if (!(name == rhs.name)) - return false; - if (!(value == rhs.value)) - return false; - if (!(comment == rhs.comment)) - return false; - if (!(timestamp == rhs.timestamp)) - return false; - return true; - } - bool operator != (const Stream &rhs) const { - return !(*this == rhs); - } - - bool operator < (const Stream & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(Stream &a, Stream &b); - -typedef struct _StreamResponse__isset { - _StreamResponse__isset() : streamList(false) {} - bool streamList; -} _StreamResponse__isset; - -class StreamResponse { - public: - - static const char* ascii_fingerprint; // = "DDDA8F891CEFA6BAEE9C7096D0B1642F"; - static const uint8_t binary_fingerprint[16]; // = {0xDD,0xDA,0x8F,0x89,0x1C,0xEF,0xA6,0xBA,0xEE,0x9C,0x70,0x96,0xD0,0xB1,0x64,0x2F}; - - StreamResponse() { - } - - virtual ~StreamResponse() throw() {} - - std::vector streamList; - - _StreamResponse__isset __isset; - - void __set_streamList(const std::vector & val) { - streamList = val; - } - - bool operator == (const StreamResponse & rhs) const - { - if (!(streamList == rhs.streamList)) - return false; - return true; - } - bool operator != (const StreamResponse &rhs) const { - return !(*this == rhs); - } - - bool operator < (const StreamResponse & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(StreamResponse &a, StreamResponse &b); - -typedef struct _Rule__isset { - _Rule__isset() : name(false), value(false), comment(false), timestamp(false) {} - bool name; - bool value; - bool comment; - bool timestamp; -} _Rule__isset; - -class Rule { - public: - - static const char* ascii_fingerprint; // = "5F9965D46A4F3845985AC0F9B81C3C69"; - static const uint8_t binary_fingerprint[16]; // = {0x5F,0x99,0x65,0xD4,0x6A,0x4F,0x38,0x45,0x98,0x5A,0xC0,0xF9,0xB8,0x1C,0x3C,0x69}; - - Rule() : name(), value(), comment(), timestamp(0) { - } - - virtual ~Rule() throw() {} - - std::string name; - std::string value; - std::string comment; - int64_t timestamp; - - _Rule__isset __isset; - - void __set_name(const std::string& val) { - name = val; - } - - void __set_value(const std::string& val) { - value = val; - } - - void __set_comment(const std::string& val) { - comment = val; - } - - void __set_timestamp(const int64_t val) { - timestamp = val; - } - - bool operator == (const Rule & rhs) const - { - if (!(name == rhs.name)) - return false; - if (!(value == rhs.value)) - return false; - if (!(comment == rhs.comment)) - return false; - if 
(!(timestamp == rhs.timestamp)) - return false; - return true; - } - bool operator != (const Rule &rhs) const { - return !(*this == rhs); - } - - bool operator < (const Rule & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(Rule &a, Rule &b); - -typedef struct _RuleResponse__isset { - _RuleResponse__isset() : ruleList(false) {} - bool ruleList; -} _RuleResponse__isset; - -class RuleResponse { - public: - - static const char* ascii_fingerprint; // = "DDDA8F891CEFA6BAEE9C7096D0B1642F"; - static const uint8_t binary_fingerprint[16]; // = {0xDD,0xDA,0x8F,0x89,0x1C,0xEF,0xA6,0xBA,0xEE,0x9C,0x70,0x96,0xD0,0xB1,0x64,0x2F}; - - RuleResponse() { - } - - virtual ~RuleResponse() throw() {} - - std::vector ruleList; - - _RuleResponse__isset __isset; - - void __set_ruleList(const std::vector & val) { - ruleList = val; - } - - bool operator == (const RuleResponse & rhs) const - { - if (!(ruleList == rhs.ruleList)) - return false; - return true; - } - bool operator != (const RuleResponse &rhs) const { - return !(*this == rhs); - } - - bool operator < (const RuleResponse & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(RuleResponse &a, RuleResponse &b); - -typedef struct _WorkloadResponse__isset { - _WorkloadResponse__isset() : workloadList(false) {} - bool workloadList; -} _WorkloadResponse__isset; - -class WorkloadResponse { - public: - - static const char* ascii_fingerprint; // = "D0D9EE8C6C26E09B1A4E0CDB1751C878"; - static const uint8_t binary_fingerprint[16]; // = {0xD0,0xD9,0xEE,0x8C,0x6C,0x26,0xE0,0x9B,0x1A,0x4E,0x0C,0xDB,0x17,0x51,0xC8,0x78}; - - WorkloadResponse() { - } - - virtual ~WorkloadResponse() throw() {} - - std::vector workloadList; - - _WorkloadResponse__isset __isset; - - void __set_workloadList(const std::vector & val) { - workloadList = val; - } - - bool operator == (const WorkloadResponse & rhs) const - { - if (!(workloadList == rhs.workloadList)) - return false; - return true; - } - bool operator != (const WorkloadResponse &rhs) const { - return !(*this == rhs); - } - - bool operator < (const WorkloadResponse & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(WorkloadResponse &a, WorkloadResponse &b); - -typedef struct _IOError__isset { - _IOError__isset() : message(false) {} - bool message; -} _IOError__isset; - -class IOError : public ::apache::thrift::TException { - public: - - static const char* ascii_fingerprint; // = "EFB929595D312AC8F305D5A794CFEDA1"; - static const uint8_t binary_fingerprint[16]; // = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1}; - - IOError() : message() { - } - - virtual ~IOError() throw() {} - - std::string message; - - _IOError__isset __isset; - - void __set_message(const std::string& val) { - message = val; - } - - bool operator == (const IOError & rhs) const - { - if (!(message == rhs.message)) - return false; - return true; - } - bool operator != (const IOError &rhs) const { - return !(*this == rhs); - } - - bool operator < (const IOError & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(IOError &a, IOError &b); - -typedef struct _IllegalArgument__isset { - 
_IllegalArgument__isset() : message(false) {} - bool message; -} _IllegalArgument__isset; - -class IllegalArgument : public ::apache::thrift::TException { - public: - - static const char* ascii_fingerprint; // = "EFB929595D312AC8F305D5A794CFEDA1"; - static const uint8_t binary_fingerprint[16]; // = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1}; - - IllegalArgument() : message() { - } - - virtual ~IllegalArgument() throw() {} - - std::string message; - - _IllegalArgument__isset __isset; - - void __set_message(const std::string& val) { - message = val; - } - - bool operator == (const IllegalArgument & rhs) const - { - if (!(message == rhs.message)) - return false; - return true; - } - bool operator != (const IllegalArgument &rhs) const { - return !(*this == rhs); - } - - bool operator < (const IllegalArgument & ) const; - - uint32_t read(::apache::thrift::protocol::TProtocol* iprot); - uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; - -}; - -void swap(IllegalArgument &a, IllegalArgument &b); - -}}} // namespace - -#endif diff --git a/wms/src/main/c/include/GlobalData.h b/wms/src/main/c/include/GlobalData.h deleted file mode 100644 index eeb6a779..00000000 --- a/wms/src/main/c/include/GlobalData.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef LIBWMS_WMSGLOBALDATA_H -#define LIBWMS_WMSGLOBALDATA_H - -#define WAIT_TIME_SECONDS 15 -// -// zookeeper -// -extern zhandle_t *zh; -extern clientid_t myid; -extern stringstream zk_ip_port; -extern pthread_cond_t cond; -extern pthread_mutex_t lock; -//extern void *watcherCtx; -// -extern char *wmshost; -extern int wmsport; -extern deque wmsServers; -extern bool isOpen; - -extern pid_t pid;//Linux process id -extern char programName[128];//Linux program name from /proc -extern char userName[256]; -extern char hostName[256]; -extern char myHostName[HOST_NAME_MAX]; -extern char myIpAddr[256]; - -extern long connectionInfo; - -extern short lastErrorType; -extern long lastErrorNum; -extern stringstream lastErrorText; - -extern bool myZkHandle; - -#endif /* LIBWMS_WMSGLOBALDATA_H */ diff --git a/wms/src/main/c/include/GlobalHeader.h b/wms/src/main/c/include/GlobalHeader.h deleted file mode 100644 index 8df95a39..00000000 --- a/wms/src/main/c/include/GlobalHeader.h +++ /dev/null @@ -1,66 +0,0 @@ -/** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef GLOBAL_HEADER_H -#define GLOBAL_HEADER_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -using namespace std; -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include // for file i/o constants -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* for p-thread semaphores */ -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include "GlobalData.h" -#endif diff --git a/wms/src/main/c/include/Wms.h b/wms/src/main/c/include/Wms.h deleted file mode 100644 index 406ad468..00000000 --- a/wms/src/main/c/include/Wms.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef LIBWMS_WMS_H -#define LIBWMS_WMS_H - -using namespace std; -using namespace trafodion::wms::thrift; - -// +++ Temp fix for ZK session expiry issue. Will use zk handle if sent as input -int wmsOpen(const char* zkhost, int zkport, void *zhIn=NULL); -int wmsWriteRead(Data &,Data &); -void resetKeyValue(); -int wmsClose(); -const char* wmsGetLastErrorText(); -int getServer(); - -void setKeyValue(const string& key, const short value); -void setKeyValue(const string& key, const int value); -void setKeyValue(const string& key, const long value); -void setKeyValue(const string& key, const double value); -void setKeyValue(const string& key, const string value); -void setKeyValue(const string& key, const char *value); -// -void getKeyValue(const string& key, short& value); -void getKeyValue(const string& key, int& value); -void getKeyValue(const string& key, long& value); -void getKeyValue(const string& key, double& value); -void getKeyValue(const string& key, string& value); - -#endif /*LIBWMS_WMS_H*/ - - diff --git a/wms/src/main/c/include/WmsException.h b/wms/src/main/c/include/WmsException.h deleted file mode 100644 index c0997abf..00000000 --- a/wms/src/main/c/include/WmsException.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef LIBWMS_WMSEXCEPTION_H -#define LIBWMS_WMSEXCEPTION_H - -#define ERROR_INIT -1 -#define ERROR_MEMORY_ALLOCATION -2 -#define ERROR_CREATE_WORKLOAD -3 -#define ERROR_OPEN -4 -#define ERROR_WRITEREAD -4 -#define ERROR_CLOSE -5 -#define ERROR_WORKLOAD_NULL -7 -#define ERROR_WORKLOAD_VALIDATION -8 -#define ERROR_CONF_FILE -9 -#define ERROR_OTHER -10 - -class WmsException -{ -public: - WmsException(short error_type, long errorNum, const char *errorText); - ~WmsException(); - const char* what(); - - short m_errorType; - long m_errorNum; - const char *m_errorText; - -}; - -#endif /*LIBWMS_WMSEXCEPTION_H*/ - - diff --git a/wms/src/main/c/include/WmsZookeeper.h b/wms/src/main/c/include/WmsZookeeper.h deleted file mode 100644 index 027e9f51..00000000 --- a/wms/src/main/c/include/WmsZookeeper.h +++ /dev/null @@ -1,28 +0,0 @@ -/** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef LIBWMS_WMSZOOKEEPER_H -#define LIBWMS_WMSZOOKEEPER_H - -#define WMS_SERVERS "/wms/servers/running" -#define WMS_CLIENTS "/wms/clients" - -void watcher(zhandle_t *zzh, int type, int state, const char *path, void *watcherCtx); -void getZkServerList(); -void getHostAddr(); -void removeClientZnode(); -void closeZkSession(); - -#endif /* LIBWMS_WMSZOOKEEPER_H */ diff --git a/wms/src/main/c/src/GlobalData.cpp b/wms/src/main/c/src/GlobalData.cpp deleted file mode 100644 index 653c7412..00000000 --- a/wms/src/main/c/src/GlobalData.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/********************************************************************** -// @@@ START COPYRIGHT @@@ -// -// HP CONFIDENTIAL: NEED TO KNOW ONLY -// -// Copyright 2013 -// Hewlett-Packard Development Company, L.P. -// Protected as an unpublished work. -// -// The computer program listings, specifications and documentation -// herein are the property of Hewlett-Packard Development Company, -// L.P., or a third party supplier and shall not be reproduced, -// copied, disclosed, or used in whole or in part for any reason -// without the prior express written permission of Hewlett-Packard -// Development Company, L.P. 
-// -// @@@ END COPYRIGHT @@@ -********************************************************************/ -#include "GlobalHeader.h" -// -// zookeeper -// -zhandle_t *zh; -clientid_t myid; -stringstream zk_ip_port; -void *watcherCtx; - -pthread_cond_t cond=PTHREAD_COND_INITIALIZER; -pthread_mutex_t lock=PTHREAD_MUTEX_INITIALIZER; - -char *wmshost; -int wmsport; -deque wmsServers; -bool isOpen = false; - -pid_t pid;//Linux process id -char programName[128];//Linux program name from /proc -char userName[256]; -char hostName[256]; -char myHostName[HOST_NAME_MAX]; -char myIpAddr[256]; - -long connectionInfo; - -short lastErrorType; -long lastErrorNum; -stringstream lastErrorText; - -bool myZkHandle; - -void setProcessInfo() { - FILE *fp = NULL; - int num; - char buf[256] = {0}; - char procId[12] = {0}; - char progName[128] = {0}; - char tmp[256] = {0}; - pid = getpid(); - - sprintf(tmp,"/proc/%d/stat",pid); - - if (!fp){ - fp = (fopen(tmp, "r")); - if (fp){ - if (fgets(buf, sizeof(buf), fp)){ - num = sscanf(buf, "%s%s", procId, progName); - strncpy(programName,&progName[1],strlen(progName)-2);//don't copy parenthesis - } - } - } - - if(fp) { - fclose(fp); - } - - cuserid(userName); - gethostname(hostName, 256); - -} diff --git a/wms/src/main/c/src/Wms.cpp b/wms/src/main/c/src/Wms.cpp deleted file mode 100644 index d09ab86d..00000000 --- a/wms/src/main/c/src/Wms.cpp +++ /dev/null @@ -1,294 +0,0 @@ -/** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -using namespace std; - -#include "GlobalHeader.h" -#include "WmsException.h" -#include "WmsZookeeper.h" -#include "generated/WmsService.h" -#include "generated/WmsService_types.h" -#include "Wms.h" - -using namespace trafodion::wms::thrift; -using namespace apache::thrift; -using namespace apache::thrift::protocol; -using namespace apache::thrift::transport; - -boost::shared_ptr tsocket_ptr; -boost::shared_ptr ttransport_ptr; -boost::shared_ptr tprotocol_ptr; - -Header header; -Request request; -Response response; - -map _keyValues; - -const char* -wmsGetLastErrorText(){ - return lastErrorText.str().c_str(); -} - -int -wmsOpen(const char* zkhost, int zkport, void *zhIn){ - try { - if(isOpen==true) - return 0; - - // +++ Temp fix for ZK session expiry issue. 
Will use zk handle if sent as input - if( zhIn ) - { - zh = (zhandle_t *)zhIn; - myZkHandle = false; - } - else - { - if(zkhost == 0) - zkhost = "localhost"; - if(zkport == 0) - zkport = 2181; - - myZkHandle = true; - } - - zk_ip_port << zkhost << ":" << zkport; - - getServer(); - isOpen=true; - } catch( WmsException e) { - lastErrorType = e.m_errorType; - lastErrorNum = e.m_errorNum; - lastErrorText.str(""); - lastErrorText << e.m_errorText; - return -1; - } - return 0; -} - -int -wmsWriteRead(Data &requestData,Data &responseData){ - try { - if(isOpen==false) - throw WmsException(ERROR_WRITEREAD, 0, (char *)"Connection is not open"); - - try { - - requestData.__set_keyValues(_keyValues); - request.__set_header(header); - request.__set_data(requestData); - header.__set_clientUserName(getenv("USER")); - header.__set_clientTimestamp(connectionInfo); - - WmsServiceClient client(tprotocol_ptr); - client.writeread(response,request); - - request.header.__set_serverLastUpdated(response.header.serverLastUpdated); - - responseData = response.data; - _keyValues.clear(); - _keyValues = responseData.keyValues; - - } catch (TException &tx) { - wmsClose(); - throw WmsException(ERROR_WRITEREAD, 0, (char *)tx.what()); - } catch (WmsException e){ - wmsClose(); - throw WmsException(ERROR_WRITEREAD, 0, (char *)e.what()); - } - } catch( WmsException e) { - lastErrorType = e.m_errorType; - lastErrorNum = e.m_errorNum; - lastErrorText.str(""); - lastErrorText << e.m_errorText; - return -1; - } - return 0; -} - -void resetKeyValue() -{ - _keyValues.clear(); -} - -void setKeyValue(const string& key, const short value) -{ - KeyValue kv; - kv.__set_shortValue(value); - _keyValues.insert( std::pair(key,kv)); -} - -void setKeyValue(const string& key, const int value) -{ - KeyValue kv; - kv.__set_intValue(value); - _keyValues.insert( std::pair(key,kv)); -} - -void setKeyValue(const string& key, const long value) -{ - KeyValue kv; - kv.__set_longValue(value); - _keyValues.insert( std::pair(key,kv)); -} - -void setKeyValue(const string& key, const double value) -{ - KeyValue kv; - kv.__set_floatValue(value); - _keyValues.insert( std::pair(key,kv)); -} - -void setKeyValue(const string& key, const string value) -{ - KeyValue kv; - kv.__set_stringValue(value); - _keyValues.insert( std::pair(key,kv)); -} - -void setKeyValue(const string& key, const char *value) -{ - KeyValue kv; - - if(value) { - kv.__set_stringValue(value); - _keyValues.insert( std::pair(key,kv)); - } -} - -void getKeyValue(const string& key, short& value) -{ - KeyValue kv; - kv = _keyValues[key]; - value = kv.shortValue; -} - -void getKeyValue(const string& key, int& value) -{ - KeyValue kv; - kv = _keyValues[key]; - value = kv.intValue; -} - -void getKeyValue(const string& key, long& value) -{ - KeyValue kv; - kv = _keyValues[key]; - value = kv.longValue; -} - -void getKeyValue(const string& key, double& value) -{ - KeyValue kv; - kv = _keyValues[key]; - value = kv.floatValue; -} - -void getKeyValue(const string& key, string& value) -{ - KeyValue kv; - kv = _keyValues[key]; - value = kv.stringValue; -} - -int -wmsClose(){ - try { - if( isOpen == false ) - return 0; - - ttransport_ptr->close(); - ttransport_ptr.reset(); - tsocket_ptr.reset(); - tprotocol_ptr.reset(); - wmsServers.clear(); - closeZkSession(); - isOpen=false; - } catch( WmsException e) { - lastErrorType = e.m_errorType; - lastErrorNum = e.m_errorNum; - lastErrorText.str(""); - lastErrorText << e.m_errorText; - return -1; - } - return 0; -} - -int -getServer() { - timeval tv; - gettimeofday 
(&tv, NULL); - connectionInfo = tv.tv_sec * 1000 + tv.tv_usec/1000; -// unsigned int roundRobinIndex=0; -// unsigned int roundRobinMax; -// static bool bSetSeed = true; - -// if (bSetSeed == true){ -// bSetSeed = false; -// srand ( time(NULL) ); -// } - - getZkServerList(); - - if (wmsServers.empty()) - throw WmsException(ERROR_OPEN, 0, (char *)"No running WMS servers found"); - -// roundRobinMax=rand() % 10 + 1; - -// for(unsigned int j=0; j < roundRobinMax; j++) -// roundRobinIndex = rand() % (wmsServers.size() - 1) + 1; - - char *token,*lasts; - for(unsigned i=0; i < wmsServers.size(); ++i) { -// for(unsigned i=0; i < wmsServers.size(); ++i, ++roundRobinIndex){ -// roundRobinIndex = roundRobinIndex % wmsServers.size(); - - string host = wmsServers.front(); -// string host = wmsServers[roundRobinIndex]; - - if ((token = strtok_r((char*)host.c_str(), ":", &lasts)) != NULL)//ip addr - wmshost = token; - if ((token = strtok_r(NULL, ":", &lasts)) != NULL)//skip server instance id - if ((token = strtok_r(NULL, ":", &lasts)) != NULL)//thrift port - wmsport = atoi(token); - - cout << "wmshost:" << wmshost << ",wmsport:" << wmsport << endl; - - try { - boost::shared_ptr socket(new TSocket(wmshost, wmsport)); - //boost::shared_ptr transport(new TBufferedTransport(socket)); - boost::shared_ptr transport(new TFramedTransport(socket)); - boost::shared_ptr protocol(new TBinaryProtocol(transport)); - - WmsServiceClient client(protocol); - transport->open(); - client.ping(connectionInfo); - tsocket_ptr = socket; - ttransport_ptr = transport; - tprotocol_ptr = protocol; - return 0; - } catch (TException &tx) { - wmsServers.pop_front(); - wmsServers.push_back(host); - lastErrorType = ERROR_OPEN; - lastErrorNum = 0; - lastErrorText.str(""); - lastErrorText << tx.what(); - } - } - - throw WmsException(ERROR_OPEN, 0, (char *)"No active WMS servers found"); -} - diff --git a/wms/src/main/c/src/WmsException.cpp b/wms/src/main/c/src/WmsException.cpp deleted file mode 100644 index 21fc5cbf..00000000 --- a/wms/src/main/c/src/WmsException.cpp +++ /dev/null @@ -1,33 +0,0 @@ -/** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "GlobalHeader.h" -#include "WmsException.h" - -WmsException::WmsException(short errorType, long errorNum, const char *errorText) -{ - m_errorType = errorType; - m_errorNum = errorNum; - m_errorText = errorText; -} - -WmsException::~WmsException() -{ -} - -const char* WmsException::what() -{ - return m_errorText; -} diff --git a/wms/src/main/c/src/WmsZookeeper.cpp b/wms/src/main/c/src/WmsZookeeper.cpp deleted file mode 100644 index 4d7e59c9..00000000 --- a/wms/src/main/c/src/WmsZookeeper.cpp +++ /dev/null @@ -1,286 +0,0 @@ -/** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "GlobalHeader.h" -#include "WmsException.h" -#include "WmsZookeeper.h" - -static const char* state2String(int state){ - if (state == 0) - return "CLOSED_STATE"; - if (state == ZOO_CONNECTING_STATE) - return "CONNECTING_STATE"; - if (state == ZOO_ASSOCIATING_STATE) - return "ASSOCIATING_STATE"; - if (state == ZOO_CONNECTED_STATE) - return "CONNECTED_STATE"; - if (state == ZOO_EXPIRED_SESSION_STATE) - return "EXPIRED_SESSION_STATE"; - if (state == ZOO_AUTH_FAILED_STATE) - return "AUTH_FAILED_STATE"; - - return "INVALID_STATE"; -} - -void -ensureConnected(){ - int rc; - static struct timespec time_to_wait = {0, 0}; - - pthread_mutex_lock(&lock); - while (zoo_state(zh)!=ZOO_CONNECTED_STATE) { - time_to_wait.tv_sec = time(NULL) + WAIT_TIME_SECONDS; - rc = pthread_cond_timedwait(&cond, &lock, &time_to_wait); - if (rc == ETIMEDOUT) { - pthread_mutex_unlock(&lock); - stringstream message; - message << "ZOOKEEPER is not running [" << zk_ip_port.str().c_str() << "]"; - throw WmsException(ERROR_OPEN, 0, message.str().c_str()); - } - } - pthread_mutex_unlock(&lock); -} - -void -watcher(zhandle_t *zzh, int type, int state, const char *path, void *watcherCtx) { - /* Be careful using zh here rather than zzh - as this may be mt code - * the client lib may call the watcher before zookeeper_init returns */ - (void)watcherCtx; - - printf( "Watcher %d state = %s", type, state2String(state)); - if (path && strlen(path) > 0) { - printf(" for path %s", path); - } - printf("\n"); - - if(type == ZOO_SESSION_EVENT){ - if(state == ZOO_CONNECTED_STATE){ - pthread_mutex_lock(&lock); - pthread_cond_broadcast(&cond); - pthread_mutex_unlock(&lock); - } - } - - if (type == ZOO_SESSION_EVENT) { - if (state == ZOO_CONNECTED_STATE) { - const clientid_t *id = zoo_client_id(zzh); - if (myid.client_id == 0 || myid.client_id != id->client_id) { - myid = *id; - printf("Got a new session id: %lld\n", (long long)myid.client_id); - } - } else if (state == ZOO_AUTH_FAILED_STATE) { - printf("Authentication failure.\n"); - zookeeper_close(zzh); - zh=0; - } else if (state == ZOO_EXPIRED_SESSION_STATE) { - printf("Session expired.\n"); - zookeeper_close(zzh); - zh=0; - } - } -} - -void -getZkServerList() { - stringstream message; - String_vector servers; - stringstream path; - - getHostAddr(); - - try { - - // +++ Temp fix for ZK session expiry issue. 
- if( myZkHandle ) - { - zoo_set_debug_level(ZOO_LOG_LEVEL_WARN); - zh = zookeeper_init(zk_ip_port.str().c_str(), watcher, 30000, &myid, NULL, 0); - ensureConnected(); - } - - struct Stat stat; - path << "/" << getenv("USER") << WMS_SERVERS; - if (ZNONODE == zoo_exists(zh, path.str().c_str(), 0, &stat)) { - message << "znode " << path << " does not exist"; - throw message.str(); - } - - int rc = zoo_get_children(zh, path.str().c_str(), 0, &servers); - switch(rc) { - case ZOK: - printf("zoo_get_children succeeded\n"); - break; - case ZNONODE: - message << "The node does not exist"; - throw message.str(); - break; - case ZNOAUTH: - message << "The client does not have permission"; - throw message.str(); - break; - case ZBADARGUMENTS: - message << "Invalid input parameters"; - throw message.str(); - break; - case ZINVALIDSTATE: - message << "zhandle state is either ZOO_SESSION_EXPIRED_STATE or ZOO_AUTH_FAILED_STATE"; - throw message.str(); - break; - case ZMARSHALLINGERROR: - message << "Failed to marshall a request; possibly, out of memory"; - throw message.str(); - break; - } - - path.str(""); - path << "/" << getenv("USER") << WMS_CLIENTS << "/" << myIpAddr << ":" << connectionInfo; - rc = zoo_create(zh, path.str().c_str(), NULL, -1, &ZOO_OPEN_ACL_UNSAFE, ZOO_EPHEMERAL, 0, 0); - switch(rc) { - case ZOK: - printf("created znode successfully\n"); - break; - case ZNONODE: - message << "The node does not exist"; - throw message.str(); - break; - case ZNOAUTH: - message << "The client does not have permission"; - throw message.str(); - break; - case ZBADARGUMENTS: - message << "Invalid input parameters"; - throw message.str(); - break; - case ZINVALIDSTATE: - message << "zhandle state is either ZOO_SESSION_EXPIRED_STATE or ZOO_AUTH_FAILED_STATE"; - throw message.str(); - break; - case ZMARSHALLINGERROR: - message << "Failed to marshall a request; possibly, out of memory"; - throw message.str(); - break; - } - } catch (string str) { - closeZkSession(); - throw WmsException(ERROR_OPEN, 0, str.c_str()); - } - -// if (zh != 0) -// zookeeper_close(zh); - - wmsServers.clear(); - for (int i=0; i < servers.count; i++) { - wmsServers.push_back(servers.data[i]); - } -} - -void removeClientZnode() -{ - stringstream path; - stringstream message; - - if( !zh ) - return; - - try { - path.str(""); - path << "/" << getenv("USER") << WMS_CLIENTS << "/" << myIpAddr << ":" << connectionInfo; - int rc = zoo_delete(zh, path.str().c_str(), -1); - switch(rc) { - case ZOK: - printf("deleted znode successfully\n"); - break; - case ZNONODE: - message << "The node does not exist"; - throw message.str(); - break; - case ZNOAUTH: - message << "The client does not have permission"; - throw message.str(); - break; - case ZBADARGUMENTS: - message << "Invalid input parameters"; - throw message.str(); - break; - case ZINVALIDSTATE: - message << "zhandle state is either ZOO_SESSION_EXPIRED_STATE or ZOO_AUTH_FAILED_STATE"; - throw message.str(); - break; - case ZMARSHALLINGERROR: - message << "Failed to marshall a request; possibly, out of memory"; - throw message.str(); - break; - } - } catch (string str) { - closeZkSession(); - throw WmsException(ERROR_OPEN, 0, str.c_str()); - } -} - -void closeZkSession() -{ - removeClientZnode(); - if (myZkHandle && zh != 0) - zookeeper_close(zh); -} - -void -getHostAddr() { - struct ifaddrs *myaddrs, *ifa; - void *in_addr; - char buf[64]; - - if(getifaddrs(&myaddrs) != 0) - throw WmsException(ERROR_OTHER, 0, strerror(errno)); - - for (ifa = myaddrs; ifa != NULL; ifa = ifa->ifa_next) { - if 
(ifa->ifa_addr == NULL) - continue; - //if (!(ifa->ifa_flags & IFF_UP)) - // continue; - - switch (ifa->ifa_addr->sa_family) { - case AF_INET: - { - struct sockaddr_in *s4 = (struct sockaddr_in *)ifa->ifa_addr; - in_addr = &s4->sin_addr; - break; - } - //case AF_INET6: - //{ - // struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)ifa->ifa_addr; - // in_addr = &s6->sin6_addr; - break; - //} - default: - continue; - } - - if (!inet_ntop(ifa->ifa_addr->sa_family, in_addr, buf, sizeof(buf))){ - //printf("%s: inet_ntop failed!\n", ifa->ifa_name); - stringstream message; - message << "inet_ntop failed! " << ifa->ifa_name; - freeifaddrs(myaddrs); - throw WmsException(ERROR_OPEN, 0, message.str().c_str()); - } else { - //printf("%s: %s\n", ifa->ifa_name, buf); - strcpy(myHostName,ifa->ifa_name); - strcpy(myIpAddr,buf); - } - } - - freeifaddrs(myaddrs); -} - diff --git a/wms/src/main/c/tests/wms_test.cpp b/wms/src/main/c/tests/wms_test.cpp deleted file mode 100644 index aa0e7947..00000000 --- a/wms/src/main/c/tests/wms_test.cpp +++ /dev/null @@ -1,175 +0,0 @@ -#include -#include -using namespace std; -#include -#include -#include -#include -#include "generated/WmsService_types.h" -#include "Wms.h" - -using namespace trafodion::wms::thrift; - -string zkhost; -int zkport; - -static bool parse_arguments( int argc, char *argv[] ) -{ - zkhost = "localhost"; - zkport = 2182; - - char arguments[] = "i:p:"; - int character; - optarg = NULL; - - while ( ( character = getopt( argc, argv, arguments ) ) != -1 ){ - switch ( character ) - { - case 'p': - zkport = atoi( optarg ); - break; - case 'i': - zkhost = optarg; - break; - default : - ; - } - } - if (zkhost.length() == 0 || zkport == 0 ) - return false; - - return true; -} - -int main(int argc, char **argv) -{ - if(false == parse_arguments(argc, argv )){ - cout << "parse_arguments returned false" << endl; - } - - cout << "zkhost " << zkhost.c_str() << "," << "zkport " << zkport << endl; - - int result = wmsOpen(zkhost.c_str(), zkport); - if(result) { - cout << "wmsOpen failed=" << wmsGetLastErrorText() << endl; - exit(EXIT_FAILURE); - } - else - cout << "wmsOpen succeeded, result=" << result << endl; - - timeval tv; - gettimeofday (&tv, NULL); - long currentTimestamp = tv.tv_sec * 1000 + tv.tv_usec/1000; - - map keyValues; - KeyValue kv; - - kv.__set_intValue(Operation::OPERATION_BEGIN); - keyValues.insert ( std::pair("operation",kv)); - kv.__set_stringValue("RUNNING"); - keyValues.insert ( std::pair("state",kv)); - kv.__set_stringValue("BEGIN"); - keyValues.insert ( std::pair("subState",kv)); - kv.__set_longValue(currentTimestamp); - keyValues.insert ( std::pair("beginTimestamp",kv)); - kv.__set_longValue(currentTimestamp); - keyValues.insert ( std::pair("endTimestamp",kv)); - kv.__set_stringValue("trafodion"); - keyValues.insert ( std::pair("type",kv)); - kv.__set_stringValue("Select * from manageability.nwms_schema.services;"); - keyValues.insert ( std::pair("queryText",kv)); - kv.__set_stringValue("MXID11000001075212235857042874154000000000106U6553500_4_SQL_DATASOURCE_Q8"); - keyValues.insert ( std::pair("queryId",kv)); - kv.__set_longValue(9010203); - keyValues.insert ( std::pair("deltaNumRows",kv)); - kv.__set_longValue(1000); - keyValues.insert ( std::pair("deltaRowsRetrieved",kv)); - kv.__set_longValue(5000); - keyValues.insert ( std::pair("deltaRowsAccessed",kv)); - kv.__set_longValue(1000000); - keyValues.insert ( std::pair("aggrRowsRetrieved",kv)); - kv.__set_longValue(400); - keyValues.insert ( std::pair("aggrRowsAccessed",kv)); - 
kv.__set_stringValue("wms_test"); - keyValues.insert ( std::pair("applicationId",kv)); - kv.__set_floatValue(600000); - keyValues.insert ( std::pair("aggrEstimatedRowsUsed",kv)); - kv.__set_longValue(66600000); - keyValues.insert ( std::pair("aggrNumRowsIUD",kv)); - kv.__set_stringValue("SQL_CUR_3"); - keyValues.insert ( std::pair("sessionId",kv)); - kv.__set_stringValue("trafuser"); - keyValues.insert ( std::pair("userName",kv)); - kv.__set_floatValue(10000); - keyValues.insert ( std::pair("deltaEstimatedRowsUsed",kv)); - kv.__set_floatValue(60987653); - keyValues.insert ( std::pair("deltaEstimatedRowsAccessed",kv)); - kv.__set_floatValue(2000000); - keyValues.insert ( std::pair("aggrEstimatedRowsAccessed",kv)); - - Data request; - Data response; - - request.__set_keyValues(keyValues); - result = wmsWriteRead(request,response); - if(result) { - cout << "wmsWriteRead failed =" << wmsGetLastErrorText() << endl; - exit(EXIT_FAILURE); - } else { - cout << "wmsWriteRead succeeded, result=" << result << endl; - //map ::iterator it; - //for (it = response.begin(); it != response.end(); ++it) { - // cout << it->first << "=" << it->second << endl; - //} - } - - keyValues.clear(); -/* - request.__set_operation(Operation::OPERATION_UPDATE); - request.__set_state("EXECUTING"); - request.__set_subState("UPDATE"); - request.__set_workloadId(response.workloadId); - request.__set_beginTimestamp(currentTimestamp); - request.__set_endTimestamp(currentTimestamp); -*/ - result = wmsWriteRead(request,response); - if(result) { - cout << "wmsWriteRead failed =" << wmsGetLastErrorText() << endl; - exit(EXIT_FAILURE); - } else { - cout << "wmsWriteRead succeeded, result=" << result << endl; - //map ::iterator it; - //for (it = response.begin(); it != response.end(); ++it) { - // cout << it->first << "=" << it->second << endl; - //} - } -/* - request.__set_operation(Operation::OPERATION_END); - request.__set_state("COMPLETED"); - request.__set_subState("SUCCEEDED"); - request.__set_beginTimestamp(currentTimestamp); - request.__set_endTimestamp(currentTimestamp); -*/ - result = wmsWriteRead(request,response); - if(result) { - cout << "wmsWriteRead failed =" << wmsGetLastErrorText() << endl; - exit(EXIT_FAILURE); - } else { - cout << "wmsWriteRead succeeded, result=" << result << endl; - //map ::iterator it; - //for (it = response.begin(); it != response.end(); ++it) { - // cout << it->first << "=" << it->second << endl; - //} - } - - result = wmsClose(); - if(result) { - cout << "wmsClose failed = " << wmsGetLastErrorText() << endl; - exit(EXIT_FAILURE); - } - else - cout << "wmsClose succeeded, result=" << result << endl; - - return 0; -} - diff --git a/wms/src/main/jamon/org/trafodion/wms/tmpl/master/MasterStatusTmpl.jamon b/wms/src/main/jamon/org/trafodion/wms/tmpl/master/MasterStatusTmpl.jamon index cec46db3..2aa749a5 100644 --- a/wms/src/main/jamon/org/trafodion/wms/tmpl/master/MasterStatusTmpl.jamon +++ b/wms/src/main/jamon/org/trafodion/wms/tmpl/master/MasterStatusTmpl.jamon @@ -1,5 +1,5 @@ <%doc> -(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +(C) Copyright 2015 Hewlett-Packard Development Company, L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -37,9 +37,6 @@ String filter = "general"; String format = "html"; ArrayList servers = null; ArrayList clients = null; -ArrayList streams = null; -ArrayList rules = null; -ArrayList workloads = null; <%import> java.io.*; @@ -48,9 +45,6 @@ org.trafodion.wms.master.WmsMaster; org.trafodion.wms.util.WmsConfiguration; org.trafodion.wms.Constants; org.trafodion.wms.util.Bytes; -org.trafodion.wms.thrift.generated.Request; -org.trafodion.wms.thrift.generated.Stream; -org.trafodion.wms.thrift.generated.Rule; <%java> @@ -97,18 +91,6 @@ org.trafodion.wms.thrift.generated.Rule; <& WmsClients &> -<%if (streams != null) %> -<& WmsStreams &> - - -<%if (rules != null) %> -<& WmsRules &> - - -<%if (workloads != null) %> -<& WmsWorkloads &> - - @@ -119,7 +101,6 @@ org.trafodion.wms.thrift.generated.Rule; int totalServers = 0; String hostName; String instance; - String thriftPort; String infoPort; long startTime; @@ -143,7 +124,6 @@ org.trafodion.wms.thrift.generated.Rule; scn.useDelimiter(":"); hostName = scn.next(); instance = scn.next(); - thriftPort = scn.next(); infoPort = scn.next(); startTime = Long.parseLong(scn.next()); scn.close(); @@ -198,162 +178,4 @@ No servers found. <%else> No clients found. - - -<%def WmsStreams> -

CEP Streams

-<%if (streams != null && streams.size() > 0)%> -<%java> - int totalStreams = 0; - - - - - - - - - - -<%java> - for (Stream aStream: streams) { - String streamValue = aStream.getValue(); - streamValue = streamValue.replaceAll(",",", "); - - - - - - - -<%java> - } - - -
-<tr><th>Id</th><th>Text</th><th>Comment</th><th>Last Updated</th></tr>
-<tr><td><% aStream.getName() %></td><td><% streamValue %></td><td><% aStream.getComment() %></td><td><% new Date(aStream.getTimestamp()) %></td></tr>
-<tr><td>Total: streams: <% streams.size() %></td></tr>
-<%else> -No streams found. - - - -<%def WmsRules> -

CEP Rules

-<%if (rules != null && rules.size() > 0)%> -<%java> - int totalRules = 0; - - - - - - - - - - -<%java> - for (Rule aRule: rules) { - - - - - - - -<%java> - } - - -
-<tr><th>Id</th><th>Text</th><th>Comment</th><th>Last Updated</th></tr>
-<tr><td><% aRule.getName() %></td><td><% aRule.getValue() %></td><td><% aRule.getComment() %></td><td><% new Date(aRule.getTimestamp()) %></td></tr>
-<tr><td>Total: rules: <% rules.size() %></td></tr>
-<%else> -No rules found. - - - -<%def WmsWorkloads> -

Workloads

-<%if (workloads != null && workloads.size() > 0)%> -<%java> - int totalWorkloads = 0; - - - - - - - - - - - - -<%java> - StringBuilder sb = null; - for (Request aWorkload: workloads) { - - - - - - -<%java> - String workloadText = null; - if(aWorkload.getData().getKeyValues().get("request") != null) - workloadText = aWorkload.getData().getKeyValues().get("request").getStringValue(); - else if (aWorkload.getData().getKeyValues().get("queryText") != null) - workloadText = aWorkload.getData().getKeyValues().get("queryText").getStringValue(); - - -<%java> - sb = new StringBuilder(); - if(aWorkload.getData().getKeyValues() != null){ - boolean isFirst = true; - for (String key: aWorkload.getData().getKeyValues().keySet()) { - if(aWorkload.getData().getKeyValues().get(key).isSetByteValue()) { - if(! isFirst) sb.append(", "); - sb.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getByteValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetShortValue()) { - if(! isFirst) sb.append(", "); - sb.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getShortValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetIntValue()) { - if(! isFirst) sb.append(", "); - sb.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getIntValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetLongValue()) { - if(! isFirst) sb.append(", "); - if(key.equalsIgnoreCase("beginTimestamp")) - sb.append(key + "=" + new Date(aWorkload.getData().getKeyValues().get(Constants.BEGIN_TIMESTAMP).getLongValue())); - else if(key.equalsIgnoreCase("endTimestamp")) - sb.append(key + "=" + new Date(aWorkload.getData().getKeyValues().get(Constants.END_TIMESTAMP).getLongValue())); - else if(key.equalsIgnoreCase("lastUpdated")) - sb.append(key + "=" + new Date(aWorkload.getHeader().getServerLastUpdated())); - else - sb.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getLongValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetFloatValue()) { - if(! isFirst) sb.append(", "); - sb.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getFloatValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetStringValue()) { - if(key.equalsIgnoreCase("queryText") || key.equalsIgnoreCase("request") - || key.equalsIgnoreCase("state") || key.equalsIgnoreCase("subState") - || key.equalsIgnoreCase("type") || key.equalsIgnoreCase("workloadId")) - continue; - else { - if(! isFirst) sb.append(", "); - sb.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getStringValue()); - } - } - - isFirst = false; - } - } - - - -<%java> - } - - -
-<tr><th>Workload Id</th><th>State</th><th>Sub State</th><th>Type</th><th>Workload Text</th><th>Workload Details</th></tr>
-<tr><td><% aWorkload.getData().getKeyValues().get(Constants.WORKLOAD_ID).getStringValue() %></td><td><% aWorkload.getData().getKeyValues().get(Constants.STATE).getStringValue() %></td><td><% aWorkload.getData().getKeyValues().get(Constants.SUBSTATE).getStringValue() %></td><td><% aWorkload.getData().getKeyValues().get(Constants.TYPE).getStringValue() %></td><td><% workloadText %></td><td><% sb.toString() %></td></tr>
-<tr><td>Total: workloads: <% workloads.size() %></td></tr>
-<%else> -No workloads found. - \ No newline at end of file diff --git a/wms/src/main/jamon/org/trafodion/wms/tmpl/server/ServerStatusTmpl.jamon b/wms/src/main/jamon/org/trafodion/wms/tmpl/server/ServerStatusTmpl.jamon index 491413ee..af1d302b 100644 --- a/wms/src/main/jamon/org/trafodion/wms/tmpl/server/ServerStatusTmpl.jamon +++ b/wms/src/main/jamon/org/trafodion/wms/tmpl/server/ServerStatusTmpl.jamon @@ -1,5 +1,5 @@ <%doc> -(C) Copyright 2013 Hewlett-Packard Development Company, L.P. +(C) Copyright 2015 Hewlett-Packard Development Company, L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/Constants.java b/wms/src/main/java/org/trafodion/wms/Constants.java index a05a5e73..880dc6d6 100644 --- a/wms/src/main/java/org/trafodion/wms/Constants.java +++ b/wms/src/main/java/org/trafodion/wms/Constants.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/cep/CepObserver.java b/wms/src/main/java/org/trafodion/wms/cep/CepObserver.java deleted file mode 100644 index fadd0700..00000000 --- a/wms/src/main/java/org/trafodion/wms/cep/CepObserver.java +++ /dev/null @@ -1,8 +0,0 @@ -package org.trafodion.wms.cep; - -import java.util.*; - -public interface CepObserver { - public void update(); - public void setSubject(CepSubject sub); -} diff --git a/wms/src/main/java/org/trafodion/wms/cep/CepObserverImpl.java b/wms/src/main/java/org/trafodion/wms/cep/CepObserverImpl.java deleted file mode 100644 index bb0cbefc..00000000 --- a/wms/src/main/java/org/trafodion/wms/cep/CepObserverImpl.java +++ /dev/null @@ -1,45 +0,0 @@ -package org.trafodion.wms.cep; - -import java.util.*; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.log4j.Logger; -import org.trafodion.wms.server.rpc.thrift.ThriftRpcHandler; -import org.trafodion.wms.thrift.generated.*; -import org.trafodion.wms.Constants; - -public class CepObserverImpl implements CepObserver { - private static final Log LOG = LogFactory.getLog(CepObserverImpl.class.getName()); - private String name; - private CepSubject subject; - private ThriftRpcHandler trpch; - - public CepObserverImpl(String name,ThriftRpcHandler trpch) { - LOG.debug("CepObserverImpl " + name); - this.name = name; - this.trpch = trpch; - } - - @Override - public void update() { - LOG.debug("update "); - String message = (String) subject.getUpdate(this); - if(message == null){ - LOG.debug(name + "No new message"); - } else { - LOG.debug(name + "Consuming message [" + message + "]"); - Map rspkv = trpch.getResponse().getData().getKeyValues(); - if(message.equalsIgnoreCase("REJECT")) - rspkv.put(Constants.ACTION,new KeyValue().setIntValue(Action.ACTION_REJECT.getValue())); - else if (message.equalsIgnoreCase("CANCEL")) - rspkv.put(Constants.ACTION,new KeyValue().setIntValue(Action.ACTION_CANCEL.getValue())); - } - } - - @Override - public void setSubject(CepSubject subject) - { - LOG.debug("setSubject " + subject); - this.subject = subject; - } -} diff --git a/wms/src/main/java/org/trafodion/wms/cep/CepSubject.java b/wms/src/main/java/org/trafodion/wms/cep/CepSubject.java deleted file mode 100644 index d7f541cf..00000000 --- 
a/wms/src/main/java/org/trafodion/wms/cep/CepSubject.java +++ /dev/null @@ -1,10 +0,0 @@ -package org.trafodion.wms.cep; - -import java.util.*; - -public interface CepSubject { - public void register(CepObserver obj); - public void unregister(CepObserver obj); - public void notifyObservers(); - public Object getUpdate(CepObserver obj); -} diff --git a/wms/src/main/java/org/trafodion/wms/cep/CepSubjectImpl.java b/wms/src/main/java/org/trafodion/wms/cep/CepSubjectImpl.java deleted file mode 100644 index 61b88baf..00000000 --- a/wms/src/main/java/org/trafodion/wms/cep/CepSubjectImpl.java +++ /dev/null @@ -1,66 +0,0 @@ -package org.trafodion.wms.cep; - -import java.util.*; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.log4j.Logger; - -public class CepSubjectImpl implements CepSubject { - private static final Log LOG = LogFactory.getLog(CepSubjectImpl.class.getName()); - private List observers; - private String message; - private boolean changed; - private final Object MUTEX = new Object(); - - public CepSubjectImpl(){ - this.observers = new ArrayList(); - } - - @Override - public void register(CepObserver obj) { - LOG.debug("register " + obj); - if(obj == null) throw new NullPointerException("Null Observer"); - synchronized (MUTEX) { - if(!observers.contains(obj)) observers.add(obj); - } - } - - @Override - public void unregister(CepObserver obj) { - LOG.debug("unregister " + obj); - synchronized (MUTEX) { - observers.remove(obj); - } - } - - @Override - public void notifyObservers() - { - LOG.debug("notifyObservers"); - List observersLocal = null; - - synchronized (MUTEX) { - if (!changed) - return; - observersLocal = new ArrayList(this.observers); - this.changed=false; - } - - for (CepObserver obj : observersLocal) { - obj.update(); - } - } - - @Override - public Object getUpdate(CepObserver obj) { - LOG.debug("getUpdate " + obj); - return this.message; - } - - public void post(String value) { - LOG.debug("post " + value); - this.message = value; - this.changed = true; - notifyObservers(); - } -} diff --git a/wms/src/main/java/org/trafodion/wms/cep/ComplexEventProcessor.java b/wms/src/main/java/org/trafodion/wms/cep/ComplexEventProcessor.java deleted file mode 100644 index d83df8bb..00000000 --- a/wms/src/main/java/org/trafodion/wms/cep/ComplexEventProcessor.java +++ /dev/null @@ -1,538 +0,0 @@ -package org.trafodion.wms.cep; - -import java.util.*; - -import org.apache.commons.io.IOUtils; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.GnuParser; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.conf.Configuration; - -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.data.Stat; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.KeeperException; - -import org.apache.thrift.TBase; -import org.apache.thrift.TException; -import org.apache.thrift.TSerializer; -import org.apache.thrift.TDeserializer; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; -import org.apache.thrift.protocol.TBinaryProtocol; -import 
org.apache.thrift.protocol.TProtocol; - -import org.trafodion.wms.util.RetryCounter; -import org.trafodion.wms.util.RetryCounterFactory; -import org.trafodion.wms.util.Bytes; -import org.trafodion.wms.thrift.generated.*; -import org.trafodion.wms.Constants; -import org.trafodion.wms.util.WmsConfiguration; -import org.trafodion.wms.util.VersionInfo; -import org.trafodion.wms.zookeeper.ZkClient; -import org.trafodion.wms.server.ServerLeaderElection; - -import org.wso2.siddhi.core.SiddhiManager; -import org.wso2.siddhi.core.event.Event; -import org.wso2.siddhi.core.stream.input.InputHandler; -import org.wso2.siddhi.core.stream.output.StreamCallback; -import org.wso2.siddhi.core.util.EventPrinter; -import org.wso2.siddhi.query.compiler.exception.SiddhiPraserException; - -public class ComplexEventProcessor { - private static final Log LOG = LogFactory.getLog(ComplexEventProcessor.class); - private String[] args; - private static Configuration conf; - private ZkClient zkc = null; - private RetryCounterFactory retryCounterFactory; - private int maxWaitAttempts; - private int retryIntervalMillis; - private static String parentZnode; - private ServerLeaderElection sle = null; - private TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory()); - private TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory()); - private SiddhiManager siddhiManager = new SiddhiManager(); - private CepSubjectImpl actionSubject = new CepSubjectImpl(); - private Map streamMap = new HashMap(); - - public ComplexEventProcessor(ZkClient zkc,String parentZnode,Configuration conf,ServerLeaderElection sle) throws Exception { - this.zkc = zkc; - this.parentZnode = parentZnode; - this.conf = conf; - this.sle = sle;//WmsMaster always calls with null value - maxWaitAttempts = conf.getInt(Constants.WMS_CEP_WAIT_INIT_ATTEMPTS,Constants.DEFAULT_WMS_CEP_WAIT_INIT_ATTEMPTS); - retryIntervalMillis = conf.getInt(Constants.WMS_CEP_WAIT_INIT_RETRY_INTERVAL_MILLIS,Constants.DEFAULT_WMS_CEP_WAIT_INIT_RETRY_INTERVAL_MILLIS); - retryCounterFactory = new RetryCounterFactory(maxWaitAttempts, retryIntervalMillis); - init(); - } - - public void init() throws Exception { - LOG.debug("Init"); - checkStreams(); - checkRules(); - watchZkStreams(); - watchZkRules(); - LOG.info("Complex event processor started"); - } - - public CepSubjectImpl getActionSubject(){ - return actionSubject; - } - - void checkStreams() throws Exception { - List children = getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS,null); - if(children.isEmpty() && sle != null){ //WmsServer - LOG.info("No children found in znode" + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS); - LOG.info("Waiting for WmsMaster to initialize CEP streams/rules"); - RetryCounter retryCounter = retryCounterFactory.create(); - while(true){ - String znode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + Constants.VERTICA; - Stat stat = zkc.exists(znode,false); - if(stat != null) { - znode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + Constants.TRAFODION; - stat = zkc.exists(znode,false); - if(stat != null) { - LOG.info("Found CEP streams/rules"); - continue; - } - } - - if (! 
retryCounter.shouldRetry()) { - LOG.error("WmsMaster failed to initialize CEP streams/rules"); - throw new Exception("WmsMaster failed to initialize CEP streams/rules"); - } else { - LOG.info("Waiting for WmsMaster to initialize CEP streams/rules"); - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - } - } - } else if(children.isEmpty() && sle == null){ //WmsMaster - LOG.info("No children found in znode " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS); - LOG.info("Adding default streams"); - - //Add Default vertica stream - String streamId = Constants.VERTICA; - String keys = conf.get("wms.server.cep.vertica.keys",""); - keys = keys.replaceAll("[\\n\\r\\t]", ""); - keys = keys.replaceAll(" ", ""); - addStream(streamId,keys); - - Stream stream = new Stream(streamId,keys,"Added by administrator",System.currentTimeMillis()); - String znode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + Constants.VERTICA; - Stat stat = zkc.exists(znode,false); - if(stat == null) { - byte[] bytes = serializer.serialize(stream); - zkc.create(znode,bytes,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - LOG.info("Created znode [" + znode + "]"); - } - //default trafodion stream - streamId = Constants.TRAFODION; - keys = conf.get("wms.server.cep.trafodion.keys",""); - keys = keys.replaceAll("[\\n\\r\\t]", ""); - keys = keys.replaceAll(" ", ""); - addStream(streamId,keys); - - stream = new Stream(streamId,keys,"Added by administrator",System.currentTimeMillis()); - znode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + Constants.TRAFODION; - stat = zkc.exists(znode,false); - if(stat == null) { - byte[] bytes = serializer.serialize(stream); - zkc.create(znode,bytes,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - LOG.info("Created znode [" + znode + "]"); - } - - //default action stream - streamId = "action"; - addStream(streamId,"action,string"); - addCallback(streamId); - stream = new Stream(streamId,"action,string","Added by administrator",System.currentTimeMillis()); - znode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + Constants.ACTION; - stat = zkc.exists(znode,false); - if(stat == null) { - byte[] bytes = serializer.serialize(stream); - zkc.create(znode,bytes,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - LOG.info("Created znode [" + znode + "]"); - } - - return; - } - - children = getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS,null); - if(! 
children.isEmpty()){ - LOG.info("Found children in znode [" + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "]"); - for(String aChild : children) { - LOG.debug("child [" + aChild + "]"); - Stream stream = new Stream(); - String znode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + aChild; - Stat stat = zkc.exists(znode, false); - if (stat != null){ - byte[] bytes = zkc.getData(znode, false, stat); - try { - deserializer.deserialize(stream, bytes); - String keys = stream.getValue(); - keys = keys.replaceAll("[\\n\\r\\t]", ""); - keys = keys.replaceAll(" ", ""); - addStream(stream.getName(),keys); - if(stream.getName().equals("action")) - addCallback(stream.getName()); - } catch (TException e) { - e.printStackTrace(); - } - } - } - } else { - LOG.info("No children found in znode [" + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "]"); - } - } - - void checkRules() throws Exception { - LOG.info("Looking for CEP rules in znode " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES); - String rulesZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES; - List children = getChildren(rulesZnode,null); - if( ! children.isEmpty()) { - for(String aChild : children) { - Rule rule = new Rule(); - String znode = rulesZnode + "/" + aChild; - Stat stat = zkc.exists(znode, false); - if (stat != null){ - byte[] bytes = zkc.getData(znode, false, stat); - try { - deserializer.deserialize(rule, bytes); - addQuery(rule.getValue()); - } catch (TException e) { - e.printStackTrace(); - } - } - } - } else { - LOG.debug("No children found in znode [" + rulesZnode + "]"); - } - } - - public void workload(String streamName,HashMap keyValues) throws Exception { - LOG.debug("workload " + keyValues); - StreamContext streamContext = streamMap.get(streamName); - send(getInputHandler(streamName),streamContext.keyValuesToArray(keyValues)); - } - - public void addStream(String name,String keys) throws Exception { - StreamContext streamContext = new StreamContext(name,keys); - streamMap.put(name,streamContext); - defineStream(streamContext.getDefineStreamString()); - } - - public void alterStream(String name,String keys) throws Exception { - deleteStream(name); - addStream(name,keys); - } - - public void deleteStream(String name) throws Exception { - StreamContext streamContext = streamMap.get(name); - if(streamContext != null){ - streamMap.remove(name); - removeStream(name); - } - } - - //Siddhi specific methods - public void defineStream(String value) throws Exception { - LOG.debug("defineStream [" + value + "]"); - try { - siddhiManager.defineStream(value); - } catch (Exception e) { - LOG.error(e.getMessage()); - } - } - - public void removeStream(String value) { - LOG.debug("removeStream [" + value + "]"); - siddhiManager.removeStream(value); - } - - public void addQuery(String text) throws Exception { - LOG.debug("addQuery [" + text + "]"); - try { - siddhiManager.addQuery(text); - } catch (Exception e){ - LOG.error(e.getMessage()); - throw e; - } - } - - public void deleteQuery(String text) { - LOG.debug("deleteQuery [" + text + "]"); - siddhiManager.removeQuery(text); - } - - void addCallback(String name) { - LOG.debug("addCallback [" + name + "]"); - siddhiManager.addCallback(name, new StreamCallback() { - - @Override - public void receive(Event[] events) { - //EventPrinter.print(events); - LOG.debug("event received on stream [" + (String) events[0].getStreamId() + "], Data [" + (String) events[0].getData0() + "]"); - actionSubject.post((String) 
events[0].getData0()); - } - }); - } - - public InputHandler getInputHandler(String name) throws InterruptedException { - LOG.debug("getInputHandler [" + name + "]"); - return siddhiManager.getInputHandler(name); - } - - public void send(InputHandler handler, Object[] value) throws InterruptedException { - LOG.debug("send [" + handler + "," + value + "]"); - handler.send(value); - } - - class StreamsWatcher implements Watcher { - public void process(WatchedEvent event) { - LOG.debug("StreamsWatcher fired [" + event.getPath() + "]"); - if(event.getType() == Event.EventType.NodeChildrenChanged - || event.getType() == Event.EventType.NodeDeleted) { - LOG.debug("Streams children changed [" + event.getPath() + "]"); - try { - watchZkStreams(); - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } else if(event.getType() == Event.EventType.NodeCreated) { - LOG.debug("Streams znode created [" + event.getPath() + "]"); - try { - Stream stream = new Stream(); - String znode = event.getPath(); - Stat stat = zkc.exists(znode, false); - if (stat != null){ - byte[] bytes = zkc.getData(znode, false, stat); - try { - deserializer.deserialize(stream, bytes); - defineStream(stream.getValue()); - addCallback(stream.getName()); - } catch (TException e) { - e.printStackTrace(); - } - } - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } else if(event.getType() == Event.EventType.NodeDataChanged) { - String znodePath = event.getPath(); - LOG.debug("Streams znode data changed [" + znodePath + "]"); - try { - Stream stream = new Stream(); - String znode = event.getPath(); - Stat stat = zkc.exists(znode, false); - if (stat != null){ - byte[] bytes = zkc.getData(znode, false, stat); - try { - deserializer.deserialize(stream, bytes); - removeStream(stream.getName()); - defineStream(stream.getValue()); - addCallback(stream.getName()); - } catch (TException e) { - e.printStackTrace(); - } - } - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } else if(event.getType() == Event.EventType.NodeDeleted) { - String znodePath = event.getPath(); - LOG.debug("Streams znode deleted [" + znodePath + "]"); - try { - Stream stream = new Stream(); - String znode = event.getPath(); - Stat stat = zkc.exists(znode, false); - if (stat != null){ - byte[] bytes = zkc.getData(znode, false, stat); - try { - deserializer.deserialize(stream, bytes); - removeStream(stream.getName()); - } catch (TException e) { - e.printStackTrace(); - } - } - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } - } - } - - class RulesWatcher implements Watcher { - public void process(WatchedEvent event) { - LOG.debug("RulesWatcher fired [" + event.getPath() + "]"); - if(event.getType() == Event.EventType.NodeChildrenChanged - || event.getType() == Event.EventType.NodeDeleted) { - LOG.debug("Rules children changed [" + event.getPath() + "]"); - try { - watchZkRules(); - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } else if(event.getType() == Event.EventType.NodeCreated) { - LOG.debug("Rules znode created [" + event.getPath() + "]"); - try { - Rule rule = new Rule(); - String znode = event.getPath(); - Stat stat = zkc.exists(znode, false); - if (stat != null){ - byte[] bytes = zkc.getData(znode, false, stat); - try { - deserializer.deserialize(rule, bytes); - addQuery(rule.getValue()); - watchZkRules(); - } catch (TException e) { - e.printStackTrace(); - } - } - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } else if(event.getType() == 
Event.EventType.NodeDataChanged) { - String znodePath = event.getPath(); - LOG.debug("Rules znode data changed [" + znodePath + "]"); - try { - Rule rule = new Rule(); - String znode = event.getPath(); - Stat stat = zkc.exists(znode, false); - if (stat != null){ - byte[] bytes = zkc.getData(znode, false, stat); - try { - deserializer.deserialize(rule, bytes); - deleteQuery(rule.getValue()); - addQuery(rule.getValue()); - watchZkRules(); - } catch (TException e) { - e.printStackTrace(); - } - } - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } else if(event.getType() == Event.EventType.NodeDeleted) { - String znodePath = event.getPath(); - LOG.debug("Rules znode deleted [" + znodePath + "]"); - try { - Rule rule = new Rule(); - String znode = event.getPath(); - Stat stat = zkc.exists(znode, false); - if (stat != null){ - byte[] bytes = zkc.getData(znode, false, stat); - try { - deserializer.deserialize(rule, bytes); - deleteQuery(rule.getName()); - watchZkRules(); - } catch (TException e) { - e.printStackTrace(); - } - } - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } - } - } - - List getChildren(String znode,Watcher watcher) throws Exception { - List children=null; - children = zkc.getChildren(znode,watcher); - if( ! children.isEmpty()) - Collections.sort(children); - return children; - } - - List watchZkStreams() throws Exception { - LOG.debug("Reading " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS); - return getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS, new StreamsWatcher()); - } - - List watchZkRules() throws Exception { - LOG.debug("Reading " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES); - return getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES, new RulesWatcher()); - } - -/* - String getStreamMap(String checksum) { - String name = streamMap.get(checksum); - if(name == null){ - //LOG.debug("Stream map [" + name + "]" + " not found); - System.out.println("getStreamMap " + checksum + " not found"); - } else { - //LOG.debug("Stream map [" + name + "]" + " found"); - System.out.println("getStreamMap " + checksum + " is " + name); - } - return name; - } - - void addStreamMap(String checksum,String name) { - streamMap.put(checksum,name); - //LOG.debug("addStreamMap [" + checksum + "," + name + "]"); - System.out.println("addStreamMap [" + checksum + "," + name + "]"); - } - - String keysToString(HashMap keyValues) { - StringBuilder sb = new StringBuilder(); - if(keyValues.keySet() != null){ - for (String key: keyValues.keySet()) { - sb.append(key); - } - } - return sb.toString(); - } - - String getChecksum(String keyString) { - BigInteger bi = null; - try { - MessageDigest md = MessageDigest.getInstance( "SHA1" ); - md.update( keyString.getBytes() ); - bi = new BigInteger(1, md.digest()); - } - catch (NoSuchAlgorithmException e) { - LOG.debug(e.getMessage()); - } - - return bi.toString(16); - } - - String buildDefineStreamString(String name,HashMap keyValues) { - StringBuilder sb = new StringBuilder(); - if(keyValues.keySet() != null && name != null){ - sb.append("define stream " + name + "("); - boolean isFirst = true; - for (String key: keyValues.keySet()) { - if(isFirst) { - sb.append(key + " string"); - isFirst = false; - } else { - sb.append(", " + key + " string"); - } - } - sb.append(")"); - //LOG.debug("keyValuesToDefineStreamString [" sb.toString() + "]"); - System.out.println("keyValuesToDefineStreamString [" + sb.toString() + "]"); - } - return sb.toString(); - } -*/ - 
-}
diff --git a/wms/src/main/java/org/trafodion/wms/cep/StreamContext.java b/wms/src/main/java/org/trafodion/wms/cep/StreamContext.java
deleted file mode 100644
index c78df0d8..00000000
--- a/wms/src/main/java/org/trafodion/wms/cep/StreamContext.java
+++ /dev/null
@@ -1,87 +0,0 @@
-package org.trafodion.wms.cep;
-
-import java.util.*;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.trafodion.wms.Constants;
-import org.trafodion.wms.thrift.generated.*;
-
-public class StreamContext {
-    static final Log LOG = LogFactory.getLog(StreamContext.class);
-    String name;
-    Map keyOrderMap = new HashMap();
-    StringBuilder defineString = new StringBuilder();
-
-    public StreamContext(String name,String keys) throws Exception {
-        this.name = name;
-        buildDefineStreamString(name,keys);
-        LOG.debug("StreamContext [" + name + "], keyOrderMap [" + keyOrderMap + "]");
-    }
-
-    public String getDefineStreamString(){
-        return defineString.toString();
-    }
-
-    void buildDefineStreamString(String name,String keys) {
-        if(keys != null && name != null){
-            keys = keys.replaceAll("[\\n\\r\\t]", "");
-            defineString.append("define stream " + name + "(");
-            boolean isFirst = true;
-            Scanner scn = new Scanner(keys).useDelimiter(",");
-            Integer index = 0;
-            String key,value;
-            while(scn.hasNext()){
-                if(isFirst) {
-                    key = scn.next();
-                    key = key.replaceAll(" ", "");
-                    value = scn.next();
-                    value = value.replaceAll(" ", "");
-                    defineString.append(key + " " + value);
-                    isFirst = false;
-                } else {
-                    key = scn.next();
-                    key = key.replaceAll(" ", "");
-                    value = scn.next();
-                    value = value.replaceAll(" ", "");
-                    defineString.append(", " + key + " " + value);
-                }
-                LOG.debug("key [" + key + "],index [" + index + "]");
-                keyOrderMap.put(key,index);
-                index++;
-            }
-            scn.close();
-            defineString.append(")");
-        }
-        LOG.debug("buildDefineStreamString [" + name + "," + defineString.toString() + "]");
-    }
-
-    public Object[] keyValuesToArray(HashMap keyValues){
-        Object[] values = new Object[keyOrderMap.size()];
-        LOG.debug("keyValuesToArray");
-
-        if(keyValues.keySet() != null){
-            Integer index = 0;
-            for (String key: keyValues.keySet()) {
-                index = keyOrderMap.get(key);
-                LOG.debug("key" + "[" + key + ", index [" + index + "]");
-                if(keyValues.get(key).isSetByteValue()) {
-                    values[index] = keyValues.get(key).getByteValue();
-                } else if(keyValues.get(key).isSetShortValue()) {
-                    values[index] = keyValues.get(key).getShortValue();
-                } else if(keyValues.get(key).isSetIntValue()) {
-                    values[index] = keyValues.get(key).getIntValue();
-                } else if(keyValues.get(key).isSetLongValue()) {
-                    values[index] = keyValues.get(key).getLongValue();
-                } else if(keyValues.get(key).isSetFloatValue()) {
-                    values[index] = keyValues.get(key).getFloatValue();
-                } else if(keyValues.get(key).isSetStringValue()) {
-                    values[index] = keyValues.get(key).getStringValue();
-                }
-                index = 0;
-            }
-        }
-        return values;
-    }
-
-}
\ No newline at end of file
diff --git a/wms/src/main/java/org/trafodion/wms/client/ClientData.java b/wms/src/main/java/org/trafodion/wms/client/ClientData.java
deleted file mode 100644
index 6654c198..00000000
--- a/wms/src/main/java/org/trafodion/wms/client/ClientData.java
+++ /dev/null
@@ -1,88 +0,0 @@
-package org.trafodion.wms.client;
-
-import java.io.*;
-import java.net.*;
-import java.nio.charset.Charset;
-import java.util.List;
-import java.util.Iterator;
-import java.util.StringTokenizer;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Date;
-
-import
org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.data.Stat; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.CreateMode; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.thrift.TException; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.hadoop.conf.Configuration; - -import org.trafodion.wms.util.WmsConfiguration; -import org.trafodion.wms.Constants; -import org.trafodion.wms.zookeeper.ZkClient; -import org.trafodion.wms.thrift.generated.*; - -public class ClientData extends Data{ - private static final Log LOG = LogFactory.getLog(ClientData.class); - - public ClientData(){ - } - public ClientData(Data data){ - super(data); - } - public void putKeyValue(String key, Operation value){ - getKeyValues().put(key,new KeyValue().setIntValue(value.getValue())); - } - public void putKeyValue(String key, String value){ - getKeyValues().put(key,new KeyValue().setStringValue(value)); - } - public void putKeyValue(String key, Long value){ - getKeyValues().put(key,new KeyValue().setLongValue(value)); - } - public String getKeyValueAsString(String key) { - KeyValue kvl = getKeyValues().get(key); - if (kvl.isSetStringValue()) { - return kvl.getStringValue(); - } - else throw new IllegalStateException(); - } - public long getKeyValueAsLong(String key) { - long value = 0; - KeyValue kvl = getKeyValues().get(key); - if(kvl.isSetBoolValue()){ - if(true == kvl.boolValue) - value = 1; - } - else if(kvl.isSetByteValue()){ - value = Byte.valueOf(kvl.getByteValue()).longValue(); - } - else if(kvl.isSetShortValue()){ - value = Short.valueOf(kvl.getShortValue()).longValue(); - } - else if(kvl.isSetIntValue()){ - value = Integer.valueOf(kvl.getIntValue()).longValue(); - } - else if(kvl.isSetLongValue()){ - value = kvl.getLongValue(); - } - else if(kvl.isSetFloatValue()){ - value = Double.doubleToLongBits(kvl.getFloatValue()); - } - else throw new IllegalStateException(); - - return value; - } - - public Action getKeyValueAction(){ - return Action.findByValue(keyValues.get("action").getIntValue()); - } -} diff --git a/wms/src/main/java/org/trafodion/wms/client/WmsAdmin.java b/wms/src/main/java/org/trafodion/wms/client/WmsAdmin.java deleted file mode 100644 index 321d1e97..00000000 --- a/wms/src/main/java/org/trafodion/wms/client/WmsAdmin.java +++ /dev/null @@ -1,343 +0,0 @@ -package org.trafodion.wms.client; - -import java.io.*; -import java.net.*; -import java.nio.charset.Charset; -import java.util.*; - -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.data.Stat; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.CreateMode; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.thrift.TException; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import 
org.apache.hadoop.conf.Configuration; - -import org.trafodion.wms.MasterNotRunningException; -import org.trafodion.wms.util.WmsConfiguration; -import org.trafodion.wms.Constants; -import org.trafodion.wms.zookeeper.ZkClient; -import org.trafodion.wms.thrift.generated.WmsAdminService; -import org.trafodion.wms.thrift.generated.Stream; -import org.trafodion.wms.thrift.generated.StreamResponse; -import org.trafodion.wms.thrift.generated.Rule; -import org.trafodion.wms.thrift.generated.RuleResponse; -import org.trafodion.wms.thrift.generated.WorkloadResponse; - -public class WmsAdmin { - private static final Log LOG = LogFactory.getLog(WmsAdmin.class); - private static final Charset CHARSET = Charset.forName("UTF-8"); - private static ZkClient zkc = null; - private boolean isOpen = false; - private String serverIpAddress; - private String serverInstance; - private int thriftPort=0; - private long startTime=0L; - private String clientZnode; - private String clientIpAddress; - private long clientTimestamp=0L; - private Configuration conf; - private String parentZnode; - private JVMShutdownHook jvmShutdownHook; - private TFramedTransport transport; - private TProtocol protocol; - private WmsAdminService.Client tClient; - - public WmsAdmin(Configuration conf) { - this.conf = conf; - parentZnode = conf.get(Constants.ZOOKEEPER_ZNODE_PARENT,Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - jvmShutdownHook = new JVMShutdownHook(); - Runtime.getRuntime().addShutdownHook(jvmShutdownHook); - } - - public WmsAdmin() { - Configuration conf = new WmsConfiguration().create(); - WmsAdmin wmsClient = new WmsAdmin(conf); - } - - private class JVMShutdownHook extends Thread { - public void run() { - LOG.debug("JVM shutdown hook is running"); - try { - if(zkc != null) - zkc.close(); - } catch (InterruptedException ie) {}; - } - } - - void getServer() throws IOException, MasterNotRunningException { - serverIpAddress = null; - List servers = new ArrayList(); - - try { - if(zkc == null) - zkc = new ZkClient(); - zkc.connect(); - - Stat stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER,false); - if(stat != null) { - servers = zkc.getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER,null); - } - } catch (Exception e) { - throw new IOException(e.getMessage()); - } - - if( ! 
servers.isEmpty()) { - StringTokenizer st = new StringTokenizer(servers.get(0), ":"); - while(st.hasMoreTokens()) { - serverIpAddress=st.nextToken(); - thriftPort=Integer.parseInt(st.nextToken()); - startTime=Long.parseLong(st.nextToken()); - } - } else { - throw new MasterNotRunningException("WmsMaster " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER + " not found"); - } - } - - public synchronized void open() throws IOException, MasterNotRunningException { - LOG.debug("open()"); - - getServer(); - - try { - transport = new TFramedTransport(new TSocket(serverIpAddress, thriftPort)); - transport.open(); - tClient = new WmsAdminService.Client(new TBinaryProtocol(transport)); - } catch (Exception e) { - LOG.error("Exception thrown in open(), " + e.getMessage()); - throw new MasterNotRunningException("Exception thrown in pingServer(): " + e.getMessage()); - } - - pingServer(); - - isOpen = true; - } - - void pingServer() throws MasterNotRunningException { - LOG.debug("pingServer()" ); - - try { - LOG.debug("Pinging Thrift server " + serverIpAddress + ":" + thriftPort); - long startTs = System.currentTimeMillis(); - long endTs = tClient.ping(startTs); - LOG.debug("Thrift ping successful [" + (endTs - startTs) + " millisecond(s)]"); - } catch (Exception e) { - LOG.error("Exception thrown in pingServer(): " + e.getMessage()); - throw new MasterNotRunningException("Exception thrown in pingServer(): " + e.getMessage()); - } - } - - public synchronized void addStream(Stream stream) throws IOException { - if(isOpen == false) - throw new IllegalStateException("Connection is not open"); - - try { - tClient.addStream(stream); - } catch (Exception e) { - throw new IOException("WmsAdmin addStream error: " + e.getMessage()); - } - } - public synchronized void alterStream(Stream stream) throws IOException { - if(isOpen == false) - throw new IllegalStateException("Connection is not open"); - - try { - tClient.alterStream(stream); - } catch (Exception e) { - throw new IOException("WmsAdmin alterStream error: " + e.getMessage()); - } - } - public synchronized void deleteStream(Stream stream) throws IOException { - if(isOpen == false) - throw new IllegalStateException("Connection is not open"); - - try { - tClient.deleteStream(stream); - } catch (Exception e) { - throw new IOException("WmsAdmin deleteStream error: " + e.getMessage()); - } - } - public synchronized StreamResponse stream() throws IOException { - if(isOpen == false) - throw new IllegalStateException("Connection is not open"); - - StreamResponse response = new StreamResponse(); - - try { - response = tClient.stream(); - } catch (Exception e) { - throw new IOException("Stream error: " + e.getMessage()); - } - - LOG.debug("stream response(" + response.toString()); - - return response; - } - - public synchronized void addRule(Rule rule) throws IOException { - if(isOpen == false) - throw new IllegalStateException("Connection is not open"); - - try { - tClient.addRule(rule); - } catch (Exception e) { - throw new IOException("WmsAdmin addRule error: " + e.getMessage()); - } - } - public synchronized void alterRule(Rule rule) throws IOException { - if(isOpen == false) - throw new IllegalStateException("Connection is not open"); - - try { - tClient.alterRule(rule); - } catch (Exception e) { - throw new IOException("WmsAdmin alterRule error: " + e.getMessage()); - } - } - public synchronized void deleteRule(Rule rule) throws IOException { - if(isOpen == false) - throw new IllegalStateException("Connection is not open"); - - try { - 
tClient.deleteRule(rule); - } catch (Exception e) { - throw new IOException("WmsAdmin deleteRule error: " + e.getMessage()); - } - } - public synchronized RuleResponse rule() throws IOException { - if(isOpen == false) - throw new IllegalStateException("Connection is not open"); - - RuleResponse response = new RuleResponse(); - - try { - response = tClient.rule(); - } catch (Exception e) { - throw new IOException("Rule error: " + e.getMessage()); - } - - LOG.debug("rule response(" + response.toString()); - - return response; - } - - public synchronized WorkloadResponse workload() throws IOException { - if(isOpen == false) - throw new IllegalStateException("Connection is not open"); - - WorkloadResponse response = new WorkloadResponse(); - - try { - response = tClient.workload(); - } catch (Exception e) { - throw new IOException("Workload error: " + e.getMessage()); - } - - LOG.debug("workload response(" + response.toString()); - - return response; - } - - public synchronized boolean close() throws MasterNotRunningException, IOException { - LOG.debug("close()"); - - if(isOpen == false) - return true; - - transport.close(); - transport = null; - protocol = null; - tClient = null; - - try { - if(zkc != null) - zkc.close(); - } catch (Exception e) { - throw new IOException(e.getMessage()); - } - - isOpen = false; - zkc = null; - serverIpAddress = null; - serverInstance = null; - thriftPort=0; - clientIpAddress = null; - clientZnode = null; - clientTimestamp=0L; - - return true; - } - - public List getWorkloads() { - List workloads = new ArrayList(); - - return workloads; - } - - public void addUpdateRule() { - - } - - public void deleteRule() { - - } - - public static void main(String[] args) { - try { - Configuration conf = new WmsConfiguration().create(); - WmsAdmin wmsClient = new WmsAdmin(conf); - - wmsClient.open(); -/* - Data request = new Data(); - request.setOperation(Operation.OPERATION_BEGIN); - request.setState("BEGIN"); - request.setSubState("RUNNING"); - request.setBeginTimestamp(System.currentTimeMillis()); - request.setEndTimestamp(System.currentTimeMillis()); - Map m = new HashMap(); - request.setKeyValues(m); - m.put("type","TRAFODION"); - m.put("query_id","MXID11000001075212235857042874154000000000106U6553500_4_SQL_DATASOURCE_Q8"); - m.put("query_text","This is some query text"); - System.out.println("Request=" + request); - Data response = wmsClient.writeread(request); - System.out.println("Response=" + response); - - request.setOperation(Operation.OPERATION_UPDATE); - request.setState("EXECUTING"); - request.setSubState("UPDATE"); - request.setWorkloadId(response.getWorkloadId()); - request.setEndTimestamp(System.currentTimeMillis()); - System.out.println("Request=" + request); - response = wmsClient.writeread(request); - System.out.println("Response=" + response); - - request.setOperation(Operation.OPERATION_END); - request.setState("COMPLETED"); - request.setSubState("SUCCEEDED"); - request.setEndTimestamp(System.currentTimeMillis()); - System.out.println("Request=" + request); - response = wmsClient.writeread(request); - System.out.println("Response=" + response); - if(response.getKeyValues() != null){ - for (String key: response.getKeyValues().keySet()) { - System.out.println(key + "=" + response.getKeyValues().get(key)); - } - } -*/ - wmsClient.close(); - - } catch (Exception e) { - e.printStackTrace(); - } - } -} diff --git a/wms/src/main/java/org/trafodion/wms/client/WmsClient.java b/wms/src/main/java/org/trafodion/wms/client/WmsClient.java deleted file mode 100644 
index 03a206cf..00000000 --- a/wms/src/main/java/org/trafodion/wms/client/WmsClient.java +++ /dev/null @@ -1,321 +0,0 @@ -package org.trafodion.wms.client; - -import java.io.*; -import java.net.*; -import java.nio.charset.Charset; -import java.util.List; -import java.util.Iterator; -import java.util.StringTokenizer; -import java.util.HashMap; -import java.util.Map; -import java.util.Date; - -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.data.Stat; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.CreateMode; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.thrift.TException; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.hadoop.conf.Configuration; - -import org.trafodion.wms.util.WmsConfiguration; -import org.trafodion.wms.Constants; -import org.trafodion.wms.zookeeper.ZkClient; -import org.trafodion.wms.thrift.generated.*; - -public class WmsClient { - private static final Log LOG = LogFactory.getLog(WmsClient.class); - private static final Charset CHARSET = Charset.forName("UTF-8"); - private static ZkClient zkc = null; - private boolean isOpen = false; - private String serverIpAddress; - private String serverInstance; - private int thriftPort=0; - private long startTime=0L; - private String clientZnode; - private String clientIpAddress; - private long clientTimestamp=0L; - private Configuration config; - private String lastError; - private String parentZnode; - private JVMShutdownHook jvmShutdownHook; - private Request request = new Request(new Header(),new Data()); - private Response response = new Response(new Header(),new Data()); - private Data requestData; - - public WmsClient() { - config = WmsConfiguration.create(); - parentZnode = config.get(Constants.ZOOKEEPER_ZNODE_PARENT,Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - jvmShutdownHook = new JVMShutdownHook(); - Runtime.getRuntime().addShutdownHook(jvmShutdownHook); - } - private class JVMShutdownHook extends Thread { - public void run() { - LOG.debug("JVM shutdown hook is running"); - try { - if(zkc != null) - zkc.close(); - } catch (InterruptedException ie) {}; - } - } - - public String getLastError() { - return lastError; - } - - void setLastError(String err) { - lastError = err; - } - - void register() throws IOException { - LOG.debug("WmsClient.register() called."); - - try { - InetAddress ip = null; - try { - ip = InetAddress.getLocalHost(); - } catch (UnknownHostException e) { - LOG.error("UnknownHostException " + e.getMessage()); - throw e; - } - - //register in zookeeper /wms/clients - clientTimestamp = System.currentTimeMillis(); - clientIpAddress = ip.getHostAddress(); - clientZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS + "/" + clientIpAddress + ":" + clientTimestamp; - zkc.create(clientZnode,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL); - LOG.info("Client registered as [" + clientZnode + "]"); - - } catch (InterruptedException e) { - throw new IOException(e.getMessage()); - } catch (KeeperException e) { - throw new IOException(e.getMessage()); - } - } - - private List getServerList() throws IOException { - 
LOG.debug("WmsClient.getServerList() called."); - - List servers; - try { - if(zkc == null) - zkc = new ZkClient();//CTRL-C...set sessionTimeout,maxRetries,retryIntervalMillis - zkc.connect(); - servers = zkc.getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING,zkc); - } catch (InterruptedException e) { - throw new IOException(e.getMessage()); - } catch (KeeperException e) { - throw new IOException(e.getMessage()); - } - return servers; - } - - boolean pingServer() { - LOG.debug("WmsClient.pingServer() called. " ); - long startTs; - long endTs; - - try { - TFramedTransport transport; - TProtocol protocol; - WmsService.Client tClient; - - LOG.debug("Pinging Thrift server " + serverIpAddress + ":" + thriftPort); - //transport = new TSocket(serverIpAddress, thriftPort); - transport = new TFramedTransport(new TSocket(serverIpAddress, thriftPort)); - transport.open(); - protocol = new TBinaryProtocol(transport); - tClient = new WmsService.Client(protocol); - startTs = System.currentTimeMillis(); - endTs = tClient.ping(startTs); - LOG.debug("Thrift ping successful [" + (endTs - startTs) + " millisecond(s)]"); - transport.close(); - - } catch (Exception e) { - LOG.error("Exception thrown in WmsClient.pingServer(): " + e.getMessage()); - return false; - } - return true; - } - - void getServer() throws Exception { - List servers = getServerList(); - serverIpAddress=null; - serverInstance=null; - - boolean result=false; - int roundRobinIndex=0; - int roundRobinMax=(int)(Math.random() * 10.0) + 1; - - for(int j=0; j < roundRobinMax; j++) - roundRobinIndex = (int)(Math.random() * (float)(servers.size() - 1) + 0.5); - - for(int i=0; i m = new HashMap(); - request.setKeyValues(m); - - request.putKeyValue("operation",Operation.OPERATION_BEGIN); - request.putKeyValue("state","RUNNING"); - request.putKeyValue("subState","BEGIN"); - request.putKeyValue("beginTimestamp",System.currentTimeMillis()); - request.putKeyValue("endTimestamp",System.currentTimeMillis()); - request.putKeyValue("type","trafodion"); - request.putKeyValue("queryId","MXID11000001075212235857042874154000000000106U6553500_4_SQL_DATASOURCE_Q8"); - request.putKeyValue("queryText","This is some query text"); - System.out.println("Request=" + request); - response = wmsClient.writeread(request); - System.out.println("Response=" + response); - - request.putKeyValue("operation",Operation.OPERATION_UPDATE); - request.putKeyValue("state","RUNNING"); - request.putKeyValue("subState","UPDATE"); - request.putKeyValue("workloadId",response.getKeyValueAsString("workloadId")); - request.putKeyValue("endTimestamp",System.currentTimeMillis()); - System.out.println("Request=" + request); - response = wmsClient.writeread(request); - System.out.println("Response=" + response); - - request.putKeyValue("operation",Operation.OPERATION_END); - request.putKeyValue("state","COMPLETED"); - request.putKeyValue("subState","SUCCEEDED"); - request.putKeyValue("workloadId",response.getKeyValueAsString("workloadId")); - request.putKeyValue("endTimestamp",System.currentTimeMillis()); - System.out.println("Request=" + request); - request = wmsClient.writeread(request); - System.out.println("Response=" + response); - if(response.getKeyValues() != null){ - for (String key: response.getKeyValues().keySet()) { - System.out.println(key + "=" + response.getKeyValues().get(key)); - } - } - - wmsClient.close(); - - - } catch (Exception e) { - e.printStackTrace(); - } - } -} diff --git a/wms/src/main/java/org/trafodion/wms/client/WmsClientTest.java 
b/wms/src/main/java/org/trafodion/wms/client/WmsClientTest.java deleted file mode 100644 index 15baf58e..00000000 --- a/wms/src/main/java/org/trafodion/wms/client/WmsClientTest.java +++ /dev/null @@ -1,261 +0,0 @@ -package org.trafodion.wms.client; - -import java.io.*; -import java.net.*; -import java.util.*; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.GnuParser; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.trafodion.wms.thrift.generated.*; -import org.trafodion.wms.client.WmsClient; - -public class WmsClientTest { - String[] args; - - private static void test() { - try { - ClientData request = new ClientData(); - ClientData response = new ClientData(); - Map m = new HashMap(); - request.setKeyValues(m); - request.putKeyValue("operation",Operation.OPERATION_BEGIN); - request.putKeyValue("state","RUNNING"); - request.putKeyValue("subState","BEGIN"); - request.putKeyValue("type","trafodion"); - request.putKeyValue("queryId","MXID11000001075212235857042874154000000000106U6553500_4_SQL_DATASOURCE_Q8"); - request.putKeyValue("queryText","This is some query text"); - request.putKeyValue("beginTimestamp",System.currentTimeMillis()); - request.putKeyValue("endTimestamp",System.currentTimeMillis()); - - //Begin a workload - WmsClient wmsClient = new WmsClient(); - wmsClient.open(); - response = wmsClient.writeread(request); - - //Update a workload - request.putKeyValue("operation",Operation.OPERATION_UPDATE); - request.putKeyValue("state","RUNNING"); - request.putKeyValue("subState","UPDATE"); - request.putKeyValue("endTimestamp",System.currentTimeMillis()); - request.putKeyValue("workloadId",response.getKeyValueAsString("workloadId")); - response = wmsClient.writeread(request); - - //End a workload - request.putKeyValue("operation",Operation.OPERATION_UPDATE); - request.putKeyValue("state","COMPLETED"); - request.putKeyValue("subState","SUCCEEDED"); - request.putKeyValue("endTimestamp",System.currentTimeMillis()); - request.putKeyValue("workloadId",response.getKeyValueAsString("workloadId")); - response = wmsClient.writeread(request); - - wmsClient.close(); - - } catch (Exception e) { - e.printStackTrace(); - System.exit(-1); - } - -/* - try { - WmsClient conn = new WmsClient(); - conn.open(); - WmsWorkloadFactory factory = new WmsWorkloadFactory(); - WmsWorkload request = WmsWorkloadFactory.getWorkload("org.trafodion.wms.HadoopWorkload"); - if(request != null) { - //Lets begin a workload - request.setOperation(OperationType.BEGIN); - request.setWorkloadId(""); - request.setParentId(""); - request.setParentKey(""); - request.setJobType(JobType.HADOOP); - request.setJobInfo("Some job ID"); - request.setJobText("This is some text"); - request.setUserInfo("Administrator"); - request.setJobInfo("some job info"); - request.setJobState("RUNNING"); - request.setJobSubState("BEGIN"); - request.setStartTime(System.currentTimeMillis()); - request.setEndTime(System.currentTimeMillis()); - request.setMapPct(0); - request.setReducePct(0); - request.setDuration(request.getEndTime() - request.getStartTime()); - //WorkloadResponse response = new WorkloadResponse(); - WorkloadResponse response = conn.writeread(request); - //Lets update the workload - request.setOperation(OperationType.UPDATE); - request.setWorkloadId(response.getWorkloadId().toString()); - request.setJobType(JobType.HADOOP); - request.setJobText("This is my test text"); - request.setJobState("RUNNING"); - request.setJobSubState("UPDATE"); - 
request.setEndTime(System.currentTimeMillis()); - request.setMapPct(10); - request.setReducePct(100); - request.setDuration(request.getEndTime() - request.getStartTime()); - response = conn.writeread(request); - //Lets end the workload - request.setOperation(OperationType.END); - request.setWorkloadId(response.getWorkloadId().toString()); - request.setJobType(JobType.HADOOP); - request.setJobText("This is my test text"); - request.setJobState("COMPLETED"); - request.setJobSubState("SUCCESSFUL"); - request.setEndTime(System.currentTimeMillis()); - request.setMapPct(10); - request.setReducePct(100); - request.setDuration(request.getEndTime() - request.getStartTime()); - response = conn.writeread(request); - } - conn.close(); - conn = null; - } catch (Exception e) { - System.out.print(e); - e.printStackTrace(); - System.exit(-1); - } -*/ - } - - private static void test2() { - int TIMEOUT = 30000; - - -/* - try { - WmsClient conn = new WmsClient(); - conn.open(); - WmsWorkloadFactory factory = new WmsWorkloadFactory(); - WmsWorkload request = WmsWorkloadFactory.getWorkload("org.trafodion.wms.HadoopWorkload"); - if(request != null) { - //Lets begin a workload - request.setOperation(OperationType.BEGIN); - request.setWorkloadId(""); - request.setParentId(""); - request.setParentKey(""); - request.setJobType(JobType.HADOOP); - request.setJobInfo("Some job ID"); - request.setJobText("This is some text"); - request.setUserInfo("Administrator"); - request.setJobInfo("some job info"); - request.setJobState("RUNNING"); - request.setJobSubState("BEGIN"); - request.setStartTime(System.currentTimeMillis()); - request.setEndTime(System.currentTimeMillis()); - request.setMapPct(0); - request.setReducePct(0); - request.setDuration(request.getEndTime() - request.getStartTime()); - WorkloadResponse response = conn.writeread(request); - System.out.println("Response:[" + response.getAction() + "]"); - //Lets update the workload - request.setOperation(OperationType.UPDATE); - request.setWorkloadId(response.getWorkloadId().toString()); - request.setJobType(JobType.HADOOP); - request.setJobText("This is my test text"); - request.setJobState("RUNNING"); - request.setJobSubState("UPDATE"); - request.setEndTime(System.currentTimeMillis()); - request.setMapPct(10); - request.setReducePct(100); - request.setDuration(request.getEndTime() - request.getStartTime()); - response = conn.writeread(request); - - System.out.println("Sleeping for " + TIMEOUT/1000 + " seconds"); - try { - Thread.sleep(TIMEOUT); - } catch (InterruptedException e) { - } - - System.out.println("Awake !"); - conn.close(); - conn = null; - } - } catch (Exception e) { - System.out.print(e); - System.exit(-1); - } -*/ - } - - private static void test3() { - int TIMEOUT = 600000; -/* - try { - WmsClient conn = new WmsClient(); - conn.open(); - WmsWorkloadFactory factory = new WmsWorkloadFactory(); - WmsWorkload request = WmsWorkloadFactory.getWorkload("org.trafodion.wms.HadoopWorkload"); - if(request != null) { - //Lets begin a workload - request.setOperation(OperationType.BEGIN); - request.setWorkloadId(""); - request.setParentId(""); - request.setParentKey(""); - request.setJobType(JobType.TRAFODION); - request.setJobInfo("MXID11000001075212235857042874154000000000106U6553500_4_SQL_DATASOURCE_Q8"); - request.setJobText("This is some text"); - request.setUserInfo("Administrator"); - request.setJobState("RUNNING"); - request.setJobSubState("BEGIN"); - request.setStartTime(System.currentTimeMillis()); - request.setEndTime(System.currentTimeMillis()); - 
request.setMapPct(0); - request.setReducePct(0); - request.setDuration(request.getEndTime() - request.getStartTime()); - WorkloadResponse response = conn.writeread(request); - System.out.println("Response:[" + response.getAction() + "]"); - System.out.println("Sleeping for " + TIMEOUT/1000 + " seconds"); - try { - Thread.sleep(TIMEOUT); - } catch (InterruptedException e) { - } - - System.out.println("Awake !"); - conn.close(); - conn = null; - } - } catch (Exception e) { - System.out.print(e); - System.exit(-1); - } -*/ - } - - public static void main(String [] args) { - args = args; - boolean done=false; - - Options opt = new Options(); - CommandLine cmd; - try { - cmd = new GnuParser().parse(opt, args); - } catch (ParseException e) { - System.out.print("Could not parse: " + e); - return; - } - - BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); - while(done==false) { - System.out.print("\nwms shell>"); - String line = null; - - try { - line = br.readLine(); - if(line.equalsIgnoreCase("exit") || line.equalsIgnoreCase("quit")) - done=true; - else if(line.equalsIgnoreCase("test")) - test(); - else if(line.equalsIgnoreCase("test2")) - test2(); - else if(line.equalsIgnoreCase("test3")) - test3(); - else - System.out.print("Unknown or invalid command\n"); - } catch (IOException e) { - System.out.print("Error reading your command..." + e); - done=true; - } - } - System.exit(0); - } -} diff --git a/wms/src/main/java/org/trafodion/wms/hive/ClientPublisher.java b/wms/src/main/java/org/trafodion/wms/hive/ClientPublisher.java deleted file mode 100644 index c8da6ca9..00000000 --- a/wms/src/main/java/org/trafodion/wms/hive/ClientPublisher.java +++ /dev/null @@ -1,18 +0,0 @@ -package org.trafodion.wms.hive; - -import java.io.*; -import java.util.*; -import org.apache.hadoop.hive.ql.stats.*; - -public class ClientPublisher implements ClientStatsPublisher { - - @Override - public void run(Map counterValues, String jobID) { - this.run_(counterValues,jobID); - } - - public void run_(Map counterValues, String jobID){ - //PreExecute.printUpdateWms(counterValues, jobID); - } -} - diff --git a/wms/src/main/java/org/trafodion/wms/hive/PostExecute.java b/wms/src/main/java/org/trafodion/wms/hive/PostExecute.java deleted file mode 100644 index a4f4117a..00000000 --- a/wms/src/main/java/org/trafodion/wms/hive/PostExecute.java +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.trafodion.wms.hive; - -import java.util.Collections; -import java.util.Comparator; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.hive.ql.hooks.*; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.ql.hooks.HookContext.HookType; -import org.apache.hadoop.hive.ql.hooks.LineageInfo.BaseColumnInfo; -import org.apache.hadoop.hive.ql.hooks.LineageInfo.Dependency; -import org.apache.hadoop.hive.ql.hooks.LineageInfo.DependencyKey; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; -import org.apache.hadoop.security.UserGroupInformation; - -/** - * Implementation of a post execute hook that simply prints out its parameters - * to standard output. - */ -public class PostExecute implements ExecuteWithHookContext { - - public class DependencyKeyComp implements - Comparator> { - - @Override - public int compare(Map.Entry o1, - Map.Entry o2) { - if (o1 == null && o2 == null) { - return 0; - } - else if (o1 == null && o2 != null) { - return -1; - } - else if (o1 != null && o2 == null) { - return 1; - } - else { - // Both are non null. - // First compare the table names. - int ret = o1.getKey().getDataContainer().getTable().getTableName() - .compareTo(o2.getKey().getDataContainer().getTable().getTableName()); - - if (ret != 0) { - return ret; - } - - // The table names match, so check on the partitions - if (!o1.getKey().getDataContainer().isPartition() && - o2.getKey().getDataContainer().isPartition()) { - return -1; - } - else if (o1.getKey().getDataContainer().isPartition() && - !o2.getKey().getDataContainer().isPartition()) { - return 1; - } - - if (o1.getKey().getDataContainer().isPartition() && - o2.getKey().getDataContainer().isPartition()) { - // Both are partitioned tables. 
- ret = o1.getKey().getDataContainer().getPartition().toString() - .compareTo(o2.getKey().getDataContainer().getPartition().toString()); - - if (ret != 0) { - return ret; - } - } - - // The partitons are also the same so check the fieldschema - return (o1.getKey().getFieldSchema().getName().compareTo( - o2.getKey().getFieldSchema().getName())); - } - } - } - - @Override - public void run(HookContext hookContext) throws Exception { - assert(hookContext.getHookType() == HookType.POST_EXEC_HOOK); - SessionState ss = SessionState.get(); - Set inputs = hookContext.getInputs(); - Set outputs = hookContext.getOutputs(); - LineageInfo linfo = hookContext.getLinfo(); - UserGroupInformation ugi = hookContext.getUgi(); - HiveConf hconf = hookContext.getConf(); - this.run(ss,inputs,outputs,linfo,ugi,hconf); - } - - public void run(SessionState sess, Set inputs, - Set outputs, LineageInfo linfo, - UserGroupInformation ugi, HiveConf hconf) throws Exception { - - LogHelper console = SessionState.getConsole(); - - if (console == null) { - return; - } - - if (sess != null) { - console.printError("POSTHOOK: query: " + sess.getCmd().trim()); - console.printError("POSTHOOK: type: " + sess.getCommandType()); - console.printError("POSTHOOK: queryId: " + sess.getQueryId()); - console.printError("POSTHOOK: sessionId: " + sess.getSessionId()); - //PreExecute.printWms(sess,hconf,console); - } - - //PreExecute.printEntities(console, inputs, "POSTHOOK: Input: "); - //PreExecute.printEntities(console, outputs, "POSTHOOK: Output: "); - - // Also print out the generic lineage information if there is any - if (linfo != null) { - LinkedList> entry_list = - new LinkedList>(linfo.entrySet()); - Collections.sort(entry_list, new DependencyKeyComp()); - Iterator> iter = entry_list.iterator(); - while(iter.hasNext()) { - Map.Entry it = iter.next(); - Dependency dep = it.getValue(); - DependencyKey depK = it.getKey(); - - if(dep == null) { - continue; - } - - StringBuilder sb = new StringBuilder(); - sb.append("POSTHOOK: Lineage: "); - if (depK.getDataContainer().isPartition()) { - Partition part = depK.getDataContainer().getPartition(); - sb.append(part.getTableName()); - sb.append(" PARTITION("); - int i = 0; - for (FieldSchema fs : depK.getDataContainer().getTable().getPartitionKeys()) { - if (i != 0) { - sb.append(","); - } - sb.append(fs.getName() + "=" + part.getValues().get(i++)); - } - sb.append(")"); - } - else { - sb.append(depK.getDataContainer().getTable().getTableName()); - } - sb.append("." + depK.getFieldSchema().getName() + " " + - dep.getType() + " "); - - sb.append("["); - for(BaseColumnInfo col: dep.getBaseCols()) { - sb.append("("+col.getTabAlias().getTable().getTableName() + ")" - + col.getTabAlias().getAlias() + "." - + col.getColumn() + ", "); - } - sb.append("]"); - - console.printError(sb.toString()); - } - } - } -} - - diff --git a/wms/src/main/java/org/trafodion/wms/hive/PreExecute.java b/wms/src/main/java/org/trafodion/wms/hive/PreExecute.java deleted file mode 100644 index 6e60d084..00000000 --- a/wms/src/main/java/org/trafodion/wms/hive/PreExecute.java +++ /dev/null @@ -1,313 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/* -package org.trafodion.wms.hive; - -import java.io.*; -import java.util.*; - -import org.apache.hadoop.hive.ql.hooks.*; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; -import org.apache.hadoop.security.UserGroupInformation; -import org.trafodion.wms.util.WmsConfiguration; -import org.trafodion.wms.server.WorkloadsQueue; -import org.trafodion.wms.thrift.generated.*; -import org.trafodion.wms.client.WmsClient; -// -// Implementation of a pre execute hook that simply prints out its parameters to -// standard output. -// -public class PreExecute implements ExecuteWithHookContext { - - final static long ONE_SECOND = 1000; - - private static void setQueryId(String value){ - queryId = value; - } - private static void setJobID(String value){ - jobID = value; - } - private static void setConsole(LogHelper value){ - console = value; - } - private static void setLastUpdateTimestamp(long value){ - lastUpdateTimestamp = value; - } - private static String getQueryId(){ - return queryId; - } - private static String getJobID(){ - return jobID; - } - private static LogHelper getConsole(){ - return console; - } - private static long getLastUpdateTimestamp(){ - return lastUpdateTimestamp; - } - - private static HashMap< String, JobStore> workloadMap = new HashMap(); - private static WmsClient conn = null; - // private static WmsWorkloadFactory factory = null; - // private static WmsWorkload w = null; - private static String queryId = ""; - private static String jobID = ""; - private static LogHelper console = null; - private static long lastUpdateTimestamp = 0; - - @Override - public void run(HookContext hookContext) throws Exception { - SessionState ss = SessionState.get(); - Set inputs = hookContext.getInputs(); - Set outputs = hookContext.getOutputs(); - UserGroupInformation ugi = hookContext.getUgi(); - HiveConf hconf = hookContext.getConf(); - this.run(ss,inputs,outputs,ugi,hconf); - } - - public void run(SessionState sess, Set inputs, - Set outputs, UserGroupInformation ugi, HiveConf hconf) - throws Exception { - - LogHelper console = SessionState.getConsole(); - - if (console == null) { - return; - } - setConsole(console); - - setQueryId(""); - setJobID(""); - - if (sess != null) { - console.printError("PREHOOK: query: " + sess.getCmd().trim()); - console.printError("PREHOOK: type: " + sess.getCommandType()); - console.printError("PREHOOK: queryId: " + sess.getQueryId()); - console.printError("PREHOOK: sessionId: " + sess.getSessionId()); - printWms(sess,hconf,console); - } - - printEntities(console, inputs, "PREHOOK: Input: "); - printEntities(console, outputs, "PREHOOK: Output: "); - } - - static void printEntities(LogHelper console, Set entities, String prefix) { - List strings = new ArrayList(); - for (Object o : entities) { - strings.add(o.toString()); - } - Collections.sort(strings); - for (String s : strings) { - console.printError(prefix + s); - } - } - private static class JobStore { - private long startTime; - private String workloadId; - // private 
ActionType action; - - JobStore() { - this.startTime = 0; - this.workloadId = ""; - //action = ActionType.CONTINUE; - } - } - - static void printWms(SessionState sess, HiveConf hconf, LogHelper console){ - String workloadId; - String queryId = sess.getQueryId(); - - try { - if (conn == null){ - conn = new WmsClient(); - conn.open(); - } - //if (factory == null){ - // factory = new WmsWorkloadFactory(); - // w = WmsWorkloadFactory.getWorkload("org.trafodion.wms.HadoopWorkload"); - //} - } catch (Exception e) { - console.printError("HOOK: printWms:Exception " + e); - } - - try { - long startTime; - long endTime; - long duration; - - if (!workloadMap.containsKey(sess.getQueryId())){ - console.printError("HOOK: printWms:BEGIN "); - setQueryId(sess.getQueryId()); - JobStore job = new JobStore(); - if (job != null ){ - startTime = System.currentTimeMillis(); - endTime = startTime; - duration = endTime - startTime; - job.startTime = startTime; - - if (conn != null && w != null) { - - w.setOperation(OperationType.BEGIN); - w.setJobType(JobType.HIVE); - w.setJobText(sess.getCmd().trim()); - w.setUserInfo(hconf.getUser()); - w.setJobInfo(sess.getCommandType()); - w.setJobState("RUNNING"); - w.setJobSubState("BEGIN"); - // w.setUserInfo(sess.conf.getUser()); - w.setStartTime(startTime); - w.setEndTime(endTime); - w.setDuration(duration); - w.setParentId(""); - w.setParentKey(""); - console.printError("HOOK: printWms:BEGIN " + w); - WorkloadResponse response = conn.writeread(w); - job.workloadId = response.getWorkloadId().toString(); - job.action = response.getAction(); - } - workloadMap.put(getQueryId(), job); - } - } else { - JobStore job = workloadMap.get(sess.getQueryId()); - startTime = job.startTime; - endTime = System.currentTimeMillis(); - duration = endTime - startTime; - if (conn != null && w != null) { - w.setOperation(OperationType.END); - w.setWorkloadId(job.workloadId); - w.setJobType(JobType.HIVE); - w.setJobText(sess.getCmd().trim()); - w.setUserInfo(hconf.getUser()); - w.setJobInfo(sess.getCommandType()); - w.setJobState("COMPLETED"); - w.setJobSubState("SUCCEEDED"); - w.setStartTime(startTime); - w.setEndTime(endTime); - w.setDuration(duration); - WorkloadResponse response = conn.writeread(w); - } - workloadMap.remove(sess.getQueryId()); - } - } catch (IOException e) { - console.printError("HOOK: printWms:IOException " + e); - } catch (IllegalStateException e){ - //no connection open - return back - console.printError("HOOK: printWms:IllegalStateException " + e); - } - } - - static void printUpdateWms(Map counterValues, String jobID){ - LogHelper console = getConsole(); - if (getJobID().length() == 0 || false == getJobID().equals(jobID)){ - setJobID(jobID); - if (workloadMap.containsKey(getQueryId())){ - JobStore job = workloadMap.get(getQueryId()); - console.printError("PUBLISHER UPDATE_PARENT_ID: jobID:" + jobID + " queryId: " + getQueryId() ); - if (conn != null && w != null) { - w.setOperation(OperationType.UPDATE_PARENT_ID); - w.setParentId(job.workloadId); - w.setParentKey(jobID); - WorkloadResponse response = conn.writeread(w); - if(response == null) - console.printError("PUBLISHER: printUpdateWms:Error " + conn.getLastError()); - } - } - } - if (workloadMap.containsKey(getQueryId())){ - long currentTimestamp = System.currentTimeMillis(); - if ( currentTimestamp > getLastUpdateTimestamp() + 5 * ONE_SECOND){ - setLastUpdateTimestamp(currentTimestamp); - JobStore job = workloadMap.get(getQueryId()); - if (job.action == ActionType.CONTINUE){ - if (conn != null && w != null){ - 
console.printError("PUBLISHER UPDATE: jobID:" + jobID + " queryId: " + getQueryId() ); - long startTime = job.startTime; - long endTime = getLastUpdateTimestamp(); - long duration = endTime - startTime; - - w.setOperation(OperationType.UPDATE); - w.setWorkloadId(job.workloadId); - w.setJobType(JobType.HIVE); - w.setJobState("RUNNING"); - w.setJobSubState("UPDATE"); - w.setStartTime(startTime); - w.setEndTime(endTime); - w.setDuration(duration); - - WorkloadResponse response = conn.writeread(w); - if(response != null) { - job.action = response.getAction(); - workloadMap.put(getQueryId(), job); - - switch(job.action){ - case REJECT: - case CANCEL: - console.printError("PUBLISHER CANCEL: jobID:" + jobID + " queryId: " + getQueryId() ); - w.setOperation(OperationType.CANCEL_CHILDREN); - w.setParentId(job.workloadId); - w.setParentKey(jobID); - response = conn.writeread(w); - if(response != null) - console.printError("PUBLISHER: printUpdateWms:Error " + conn.getLastError()); - break; - default: - break; - } - - } else { - console.printError("PUBLISHER: printUpdateWms:Error " + conn.getLastError()); - } - - } - } else { - if (conn != null && w != null){ - console.printError("PUBLISHER UPDATE: jobID:" + jobID + " queryId: " + getQueryId() ); - long startTime = job.startTime; - long endTime = getLastUpdateTimestamp(); - long duration = endTime - startTime; - w.setOperation(OperationType.UPDATE); - w.setWorkloadId(job.workloadId); - w.setJobType(JobType.HIVE); - w.setJobState("COMPLETED"); - w.setJobSubState("KILLED"); - w.setStartTime(startTime); - w.setEndTime(endTime); - w.setDuration(duration); - - WorkloadResponse response = conn.writeread(w); - if(response != null) { - job.action = response.getAction(); - workloadMap.put(getQueryId(), job); - console.printError("PUBLISHER: printUpdateWms:Error " + conn.getLastError()); - } - } - workloadMap.remove(getQueryId()); - } - } - } - - console.printError("jobID: " + jobID + " queryId: " + queryId); - Set keys = counterValues.keySet(); - for(String key : keys){ - console.printError(key + ": " + counterValues.get(key)); - } - -} -*/ \ No newline at end of file diff --git a/wms/src/main/java/org/trafodion/wms/http/FilterContainer.java b/wms/src/main/java/org/trafodion/wms/http/FilterContainer.java index f15348f7..b2d519e0 100644 --- a/wms/src/main/java/org/trafodion/wms/http/FilterContainer.java +++ b/wms/src/main/java/org/trafodion/wms/http/FilterContainer.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/http/HtmlQuoting.java b/wms/src/main/java/org/trafodion/wms/http/HtmlQuoting.java index f1df4ce9..08f3ee4d 100644 --- a/wms/src/main/java/org/trafodion/wms/http/HtmlQuoting.java +++ b/wms/src/main/java/org/trafodion/wms/http/HtmlQuoting.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
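Background for the PreExecute.java hunk above: the entire Hive pre-execute hook is now wrapped in a block comment because it depends on the removed Thrift client (WmsClient) and the generated workload types. The hook contract itself is unchanged: Hive instantiates any class listed in hive.exec.pre.hooks that implements ExecuteWithHookContext and calls run(HookContext) before each query runs. The sketch below is illustrative only and not part of this patch; the class name LoggingPreExecute is hypothetical, and it keeps just the logging side of the disabled hook.

package org.trafodion.wms.hive;

import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;

// Minimal pre-execute hook: logs the same session fields the disabled
// WMS hook reported, but performs no workload registration.
public class LoggingPreExecute implements ExecuteWithHookContext {

    @Override
    public void run(HookContext hookContext) throws Exception {
        SessionState ss = SessionState.get();
        LogHelper console = SessionState.getConsole();
        if (ss == null || console == null) {
            return; // nothing to log outside a Hive session
        }
        console.printError("PREHOOK: query: " + ss.getCmd().trim());
        console.printError("PREHOOK: type: " + ss.getCommandType());
        console.printError("PREHOOK: queryId: " + ss.getQueryId());
        console.printError("PREHOOK: sessionId: " + ss.getSessionId());
    }
}

Enabled, for example, by setting hive.exec.pre.hooks=org.trafodion.wms.hive.LoggingPreExecute in hive-site.xml.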
diff --git a/wms/src/main/java/org/trafodion/wms/http/HttpServer.java b/wms/src/main/java/org/trafodion/wms/http/HttpServer.java
index 43c791eb..596add9f 100644
--- a/wms/src/main/java/org/trafodion/wms/http/HttpServer.java
+++ b/wms/src/main/java/org/trafodion/wms/http/HttpServer.java
@@ -1,5 +1,5 @@
 /**
- *(C) Copyright 2013 Hewlett-Packard Development Company, L.P.
+ *(C) Copyright 2015 Hewlett-Packard Development Company, L.P.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/wms/src/main/java/org/trafodion/wms/master/MasterStatusServlet.java b/wms/src/main/java/org/trafodion/wms/master/MasterStatusServlet.java
index 325e59d7..534f381b 100644
--- a/wms/src/main/java/org/trafodion/wms/master/MasterStatusServlet.java
+++ b/wms/src/main/java/org/trafodion/wms/master/MasterStatusServlet.java
@@ -1,5 +1,5 @@
 /**
- *(C) Copyright 2013 Hewlett-Packard Development Company, L.P.
+ *(C) Copyright 2015 Hewlett-Packard Development Company, L.P.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -46,44 +46,41 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.trafodion.wms.thrift.generated.Request;
-import org.trafodion.wms.thrift.generated.Stream;
-import org.trafodion.wms.thrift.generated.Rule;
 import org.trafodion.wms.tmpl.master.MasterStatusTmpl;

 /**
- * The servlet responsible for rendering the index page of the
- * master.
+ * The servlet responsible for rendering the index page of the master.
  */
 public class MasterStatusServlet extends HttpServlet {
-    private static final Log LOG = LogFactory.getLog(MasterStatusServlet.class);
-    private static final long serialVersionUID = 1L;
-
-    @Override
-    public void doGet(HttpServletRequest request, HttpServletResponse response)
-    throws IOException
-    {
-        WmsMaster master = (WmsMaster) getServletContext().getAttribute(WmsMaster.MASTER);
-        assert master != null : "No Master in context!";
-
-        Configuration conf = master.getConfiguration();
-        ArrayList servers = master.getServerManager().getServersList();
-        ArrayList clients = master.getServerManager().getClientsList();
-        ArrayList workloads = master.getServerManager().getWorkloadsList();
-        ArrayList rules = master.getServerManager().getRulesList();
-        ArrayList streams = master.getServerManager().getStreamsList();
+    private static final Log LOG = LogFactory.getLog(MasterStatusServlet.class);
+    private static final long serialVersionUID = 1L;

-        response.setContentType("text/html");
-        MasterStatusTmpl tmpl = new MasterStatusTmpl()
-            .setServers(servers)
-            .setClients(clients)
-            .setStreams(streams)
-            .setRules(rules)
-            .setWorkloads(workloads);
-        if (request.getParameter("filter") != null)
-            tmpl.setFilter(request.getParameter("filter"));
-        if (request.getParameter("format") != null)
-            tmpl.setFormat(request.getParameter("format"));
-        tmpl.render(response.getWriter(), master);
-    }
+    @Override
+    public void doGet(HttpServletRequest request, HttpServletResponse response)
+            throws IOException {
+        WmsMaster master = (WmsMaster) getServletContext().getAttribute(
+                WmsMaster.MASTER);
+        assert master != null : "No Master in context!";
+
+        Configuration conf = master.getConfiguration();
+        ArrayList servers = master.getServerManager().getServersList();
+        ArrayList clients = master.getServerManager().getClientsList();
+        /*
+         * ArrayList workloads = master.getServerManager()
+         * .getWorkloadsList(); ArrayList rules =
+         * master.getServerManager().getRulesList(); ArrayList streams =
+         * master.getServerManager().getStreamsList();
+         *
+         * response.setContentType("text/html"); MasterStatusTmpl tmpl = new
+         * MasterStatusTmpl().setServers(servers)
+         * .setClients(clients).setStreams(streams).setRules(rules)
+         * .setWorkloads(workloads);
+         *
+         * if (request.getParameter("filter") != null)
+         * tmpl.setFilter(request.getParameter("filter")); if
+         * (request.getParameter("format") != null)
+         * tmpl.setFormat(request.getParameter("format"));
+         * tmpl.render(response.getWriter(), master);
+         */
+    }
 }
diff --git a/wms/src/main/java/org/trafodion/wms/master/Metrics.java b/wms/src/main/java/org/trafodion/wms/master/Metrics.java
index 2a1ec73f..ee3d9931 100644
--- a/wms/src/main/java/org/trafodion/wms/master/Metrics.java
+++ b/wms/src/main/java/org/trafodion/wms/master/Metrics.java
@@ -1,5 +1,5 @@
 /**
- *(C) Copyright 2013 Hewlett-Packard Development Company, L.P.
+ *(C) Copyright 2015 Hewlett-Packard Development Company, L.P.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/wms/src/main/java/org/trafodion/wms/master/ServerManager.java b/wms/src/main/java/org/trafodion/wms/master/ServerManager.java
index 8f83e0ea..3f113945 100644
--- a/wms/src/main/java/org/trafodion/wms/master/ServerManager.java
+++ b/wms/src/main/java/org/trafodion/wms/master/ServerManager.java
@@ -1,5 +1,5 @@
 /**
- *(C) Copyright 2013 Hewlett-Packard Development Company, L.P.
+ *(C) Copyright 2015 Hewlett-Packard Development Company, L.P.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
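Note on the MasterStatusServlet hunk above: with the Thrift-generated Request, Stream, and Rule types removed, the rewritten doGet() now gathers only the server and client lists; the workload/rule/stream lookups and the template call are parked inside the block comment, so the servlet currently writes no response body. A possible follow-up, sketched here purely as an illustration and under the assumption that MasterStatusTmpl is regenerated so that servers and clients are its only required attributes, would render the remaining data from inside doGet(), right after the clients lookup:

        // Render just the sections that still have data behind them.
        response.setContentType("text/html");
        MasterStatusTmpl tmpl = new MasterStatusTmpl()
                .setServers(servers)
                .setClients(clients);
        if (request.getParameter("filter") != null)
            tmpl.setFilter(request.getParameter("filter"));
        if (request.getParameter("format") != null)
            tmpl.setFormat(request.getParameter("format"));
        tmpl.render(response.getWriter(), master);

All of the builder calls shown (setServers, setClients, setFilter, setFormat, render) appear in the commented-out code above; nothing is assumed beyond the reduced template.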
@@ -27,19 +27,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.thrift.TBase; -import org.apache.thrift.TException; -import org.apache.thrift.TDeserializer; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; import org.trafodion.wms.master.Metrics; -import org.trafodion.wms.master.rpc.thrift.ThriftRpcServer; import org.trafodion.wms.script.ScriptManager; import org.trafodion.wms.script.ScriptContext; import org.trafodion.wms.Constants; @@ -47,22 +36,10 @@ import org.trafodion.wms.util.WmsConfiguration; import org.trafodion.wms.util.RetryCounter; import org.trafodion.wms.util.RetryCounterFactory; -import org.trafodion.wms.cep.ComplexEventProcessor; -import org.trafodion.wms.thrift.generated.Action; -import org.trafodion.wms.thrift.generated.Data; -import org.trafodion.wms.thrift.generated.Header; -import org.trafodion.wms.thrift.generated.IllegalArgument; -import org.trafodion.wms.thrift.generated.IOError; -import org.trafodion.wms.thrift.generated.Operation; -import org.trafodion.wms.thrift.generated.Request; -import org.trafodion.wms.thrift.generated.Response; -import org.trafodion.wms.thrift.generated.WmsService; -import org.trafodion.wms.thrift.generated.Rule; -import org.trafodion.wms.thrift.generated.Stream; public class ServerManager implements Callable { - private static final Log LOG = LogFactory.getLog(ServerManager.class); - private WmsMaster wmsMaster; + private static final Log LOG = LogFactory.getLog(ServerManager.class); + private WmsMaster wmsMaster; private Configuration conf; private ZkClient zkc = null; private InetAddress ia; @@ -72,388 +49,397 @@ public class ServerManager implements Callable { private ExecutorService pool = null; private Metrics metrics; private String parentZnode; - private RetryCounterFactory retryCounterFactory; - private ComplexEventProcessor cep; - private ThriftRpcServer trpcs; - private final ArrayList configuredServers = new ArrayList(); - private final ArrayList runningServers = new ArrayList(); - private final Queue restartQueue = new LinkedList(); - TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory()); - - public ServerManager(WmsMaster wmsMaster) throws Exception { - try { - this.wmsMaster = wmsMaster; - this.conf = wmsMaster.getConfiguration(); - this.zkc = wmsMaster.getZkClient(); - this.ia = wmsMaster.getInetAddress(); - this.startupTimestamp = wmsMaster.getStartTime(); - this.metrics = wmsMaster.getMetrics(); - maxRestartAttempts = conf.getInt(Constants.WMS_MASTER_SERVER_RESTART_HANDLER_ATTEMPTS,Constants.DEFAULT_WMS_MASTER_SERVER_RESTART_HANDLER_ATTEMPTS); - retryIntervalMillis = conf.getInt(Constants.WMS_MASTER_SERVER_RESTART_HANDLER_RETRY_INTERVAL_MILLIS,Constants.DEFAULT_WMS_MASTER_SERVER_RESTART_HANDLER_RETRY_INTERVAL_MILLIS); - retryCounterFactory = new RetryCounterFactory(maxRestartAttempts, retryIntervalMillis); - parentZnode = conf.get(Constants.ZOOKEEPER_ZNODE_PARENT,Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - pool = Executors.newSingleThreadExecutor(); - } catch ( Exception e ) { - e.printStackTrace(); - LOG.error(e); - throw e; - } - } - - class RestartHandler 
implements Callable { - private ScriptContext scriptContext = new ScriptContext(); - private String znodePath; - - public RestartHandler(String znodePath) { - this.znodePath = znodePath; - } - - @Override - public ScriptContext call() throws Exception { - try { - Scanner scn = new Scanner(znodePath); - scn.useDelimiter(":"); - String hostName = scn.next();//host name - String instance = scn.next();//instance - int thriftPort = Integer.parseInt(scn.next());//thriftPort - int infoPort = Integer.parseInt(scn.next());//UI info port - long serverStartTimestamp = Long.parseLong(scn.next()); - scn.close(); - - //Get the --config property from classpath...it's always first in the classpath - String cp = System.getProperty("java.class.path"); - scn = new Scanner(cp); - scn.useDelimiter(":"); - String confDir=scn.next(); - scn.close(); - LOG.debug("conf dir [" + confDir + "]"); - - //Get -Dwms.home.dir - String wmsHome = System.getProperty("wms.home.dir"); - - //If stop-wms.sh is executed and WMS_MANAGES_ZK then zookeeper is stopped abruptly. - //Second scenario is when ZooKeeper fails for some reason regardless of whether WMS - //manages it. When either happens the WmsServer running znodes still exist in ZooKeeper - //and we see them at next startup. When they eventually timeout - //we get node deleted events for a server that no longer exists. So, only recognize - //WmsServer running znodes that have timestamps after last WmsMaster startup. - if(serverStartTimestamp > startupTimestamp){ - scriptContext.setHostName(hostName); - scriptContext.setScriptName("sys_shell.py"); - if(hostName.equalsIgnoreCase(ia.getCanonicalHostName())) - scriptContext.setCommand("bin/wms-daemon.sh --config " + confDir + " start server " + instance); - else - scriptContext.setCommand("pdsh -w " + hostName + " \"cd " + wmsHome + ";bin/wms-daemon.sh --config " + confDir + " start server " + instance + "\""); - - RetryCounter retryCounter = retryCounterFactory.create(); - while(true) { - if(scriptContext.getStdOut().length() > 0) - scriptContext.getStdOut().delete(0,scriptContext.getStdOut().length()); - if(scriptContext.getStdErr().length() > 0) - scriptContext.getStdErr().delete(0,scriptContext.getStdErr().length()); - LOG.info("Restarting WmsServer [" + hostName + ":" + instance + "], script [ " + scriptContext.toString() + " ]"); - ScriptManager.getInstance().runScript(scriptContext); - - if(scriptContext.getExitCode() == 0) { - LOG.info("WmsServer [" + hostName + ":" + instance + "] restarted"); - break; - } else { - StringBuilder sb = new StringBuilder(); - sb.append("exit code [" + scriptContext.getExitCode() + "]"); - if(! scriptContext.getStdOut().toString().isEmpty()) - sb.append(", stdout [" + scriptContext.getStdOut().toString() + "]"); - if(! scriptContext.getStdErr().toString().isEmpty()) - sb.append(", stderr [" + scriptContext.getStdErr().toString() + "]"); - LOG.error(sb.toString()); - - if (! 
retryCounter.shouldRetry()) { - LOG.error("WmsServer [" + hostName + ":" + instance + "] restart failed after " - + retryCounter.getMaxRetries() + " retries"); - break; - } else { - retryCounter.sleepUntilNextRetry(); - retryCounter.useRetry(); - } - } - } - } else { - LOG.debug("No restart for " + znodePath + "\nbecause WmsServer start time [" + DateFormat.getDateTimeInstance().format(new Date(serverStartTimestamp)) + "] was before WmsMaster start time [" + DateFormat.getDateTimeInstance().format(new Date(startupTimestamp)) + "]"); - } - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - - return scriptContext; - } - } - - class RunningWatcher implements Watcher { - public void process(WatchedEvent event) { - if(event.getType() == Event.EventType.NodeChildrenChanged) { - LOG.debug("Running children changed [" + event.getPath() + "]"); - try { - getZkRunning(); - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } else if(event.getType() == Event.EventType.NodeDeleted) { - String znodePath = event.getPath(); - LOG.debug("Running znode deleted [" + znodePath + "]"); - try { - restartServer(znodePath); - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } - } - } - - @Override - public Boolean call() throws Exception { - - long timeoutMillis=5000; - - try { - getServersFile(); - getZkRunning(); - cep = new ComplexEventProcessor(zkc,parentZnode,conf,null); - trpcs = new ThriftRpcServer(wmsMaster); - - while (true) { - while (! restartQueue.isEmpty()) { - LOG.debug("Restart queue size [" + restartQueue.size() + "]"); - RestartHandler handler = restartQueue.poll(); - Future runner = pool.submit(handler); - ScriptContext scriptContext = runner.get();//blocking call - if(scriptContext.getExitCode() != 0) - restartQueue.add(handler); - } - - try { - Thread.sleep(timeoutMillis); - } catch (InterruptedException e) { } - } - - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - pool.shutdown(); - throw e; - } - } - + private RetryCounterFactory retryCounterFactory; + private final ArrayList configuredServers = new ArrayList(); + private final ArrayList runningServers = new ArrayList(); + private final Queue restartQueue = new LinkedList(); + + public ServerManager(WmsMaster wmsMaster) throws Exception { + try { + this.wmsMaster = wmsMaster; + this.conf = wmsMaster.getConfiguration(); + this.zkc = wmsMaster.getZkClient(); + this.ia = wmsMaster.getInetAddress(); + this.startupTimestamp = wmsMaster.getStartTime(); + this.metrics = wmsMaster.getMetrics(); + maxRestartAttempts = conf + .getInt(Constants.WMS_MASTER_SERVER_RESTART_HANDLER_ATTEMPTS, + Constants.DEFAULT_WMS_MASTER_SERVER_RESTART_HANDLER_ATTEMPTS); + retryIntervalMillis = conf + .getInt(Constants.WMS_MASTER_SERVER_RESTART_HANDLER_RETRY_INTERVAL_MILLIS, + Constants.DEFAULT_WMS_MASTER_SERVER_RESTART_HANDLER_RETRY_INTERVAL_MILLIS); + retryCounterFactory = new RetryCounterFactory(maxRestartAttempts, + retryIntervalMillis); + parentZnode = conf.get(Constants.ZOOKEEPER_ZNODE_PARENT, + Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + pool = Executors.newSingleThreadExecutor(); + } catch (Exception e) { + e.printStackTrace(); + LOG.error(e); + throw e; + } + } + + class RestartHandler implements Callable { + private ScriptContext scriptContext = new ScriptContext(); + private String znodePath; + + public RestartHandler(String znodePath) { + this.znodePath = znodePath; + } + + @Override + public ScriptContext call() throws Exception { + try { + Scanner scn = new Scanner(znodePath); + 
scn.useDelimiter(":"); + String hostName = scn.next();// host name + String instance = scn.next();// instance + int infoPort = Integer.parseInt(scn.next());// UI info port + long serverStartTimestamp = Long.parseLong(scn.next()); + scn.close(); + + // Get the --config property from classpath...it's always first + // in the classpath + String cp = System.getProperty("java.class.path"); + scn = new Scanner(cp); + scn.useDelimiter(":"); + String confDir = scn.next(); + scn.close(); + LOG.debug("conf dir [" + confDir + "]"); + + // Get -Dwms.home.dir + String wmsHome = System.getProperty("wms.home.dir"); + + // If stop-wms.sh is executed and WMS_MANAGES_ZK then zookeeper + // is stopped abruptly. + // Second scenario is when ZooKeeper fails for some reason + // regardless of whether WMS + // manages it. When either happens the WmsServer running znodes + // still exist in ZooKeeper + // and we see them at next startup. When they eventually timeout + // we get node deleted events for a server that no longer + // exists. So, only recognize + // WmsServer running znodes that have timestamps after last + // WmsMaster startup. + if (serverStartTimestamp > startupTimestamp) { + scriptContext.setHostName(hostName); + scriptContext.setScriptName("sys_shell.py"); + if (hostName.equalsIgnoreCase(ia.getCanonicalHostName())) + scriptContext.setCommand("bin/wms-daemon.sh --config " + + confDir + " start server " + instance); + else + scriptContext.setCommand("pdsh -w " + hostName + + " \"cd " + wmsHome + + ";bin/wms-daemon.sh --config " + confDir + + " start server " + instance + "\""); + + RetryCounter retryCounter = retryCounterFactory.create(); + while (true) { + if (scriptContext.getStdOut().length() > 0) + scriptContext.getStdOut().delete(0, + scriptContext.getStdOut().length()); + if (scriptContext.getStdErr().length() > 0) + scriptContext.getStdErr().delete(0, + scriptContext.getStdErr().length()); + LOG.info("Restarting WmsServer [" + hostName + ":" + + instance + "], script [ " + + scriptContext.toString() + " ]"); + ScriptManager.getInstance().runScript(scriptContext); + + if (scriptContext.getExitCode() == 0) { + LOG.info("WmsServer [" + hostName + ":" + instance + + "] restarted"); + break; + } else { + StringBuilder sb = new StringBuilder(); + sb.append("exit code [" + + scriptContext.getExitCode() + "]"); + if (!scriptContext.getStdOut().toString().isEmpty()) + sb.append(", stdout [" + + scriptContext.getStdOut().toString() + + "]"); + if (!scriptContext.getStdErr().toString().isEmpty()) + sb.append(", stderr [" + + scriptContext.getStdErr().toString() + + "]"); + LOG.error(sb.toString()); + + if (!retryCounter.shouldRetry()) { + LOG.error("WmsServer [" + hostName + ":" + + instance + "] restart failed after " + + retryCounter.getMaxRetries() + + " retries"); + break; + } else { + retryCounter.sleepUntilNextRetry(); + retryCounter.useRetry(); + } + } + } + } else { + LOG.debug("No restart for " + + znodePath + + "\nbecause WmsServer start time [" + + DateFormat.getDateTimeInstance().format( + new Date(serverStartTimestamp)) + + "] was before WmsMaster start time [" + + DateFormat.getDateTimeInstance().format( + new Date(startupTimestamp)) + "]"); + } + } catch (Exception e) { + e.printStackTrace(); + LOG.error(e); + } + + return scriptContext; + } + } + + class RunningWatcher implements Watcher { + public void process(WatchedEvent event) { + if (event.getType() == Event.EventType.NodeChildrenChanged) { + LOG.debug("Running children changed [" + event.getPath() + "]"); + try { + getZkRunning(); 
+ } catch (Exception e) { + e.printStackTrace(); + LOG.error(e); + } + } else if (event.getType() == Event.EventType.NodeDeleted) { + String znodePath = event.getPath(); + LOG.debug("Running znode deleted [" + znodePath + "]"); + try { + restartServer(znodePath); + } catch (Exception e) { + e.printStackTrace(); + LOG.error(e); + } + } + } + } + + @Override + public Boolean call() throws Exception { + + long timeoutMillis = 5000; + + try { + getServersFile(); + getZkRunning(); + + while (true) { + while (!restartQueue.isEmpty()) { + LOG.debug("Restart queue size [" + restartQueue.size() + + "]"); + RestartHandler handler = restartQueue.poll(); + Future runner = pool.submit(handler); + ScriptContext scriptContext = runner.get();// blocking call + if (scriptContext.getExitCode() != 0) + restartQueue.add(handler); + } + + try { + Thread.sleep(timeoutMillis); + } catch (InterruptedException e) { + } + } + + } catch (Exception e) { + e.printStackTrace(); + LOG.error(e); + pool.shutdown(); + throw e; + } + } + private void getServersFile() throws Exception { - InputStream is = this.getClass().getResourceAsStream("/servers"); - if(is == null) - throw new IOException("Cannot find servers file"); - - BufferedReader br = new BufferedReader(new InputStreamReader(is)); - configuredServers.clear(); - String line; - while((line = br.readLine()) != null) { - configuredServers.add(line); - } - - Collections.sort(configuredServers); - - if(configuredServers.size() < 1) - throw new IOException("No entries found in servers file"); - - int lnum=1; - for(int i=0; i < configuredServers.size(); i++) { - LOG.debug("servers file line " + lnum + " [" + configuredServers.get(i) + "]"); - lnum++; - } + InputStream is = this.getClass().getResourceAsStream("/servers"); + if (is == null) + throw new IOException("Cannot find servers file"); + + BufferedReader br = new BufferedReader(new InputStreamReader(is)); + configuredServers.clear(); + String line; + while ((line = br.readLine()) != null) { + configuredServers.add(line); + } + + Collections.sort(configuredServers); + + if (configuredServers.size() < 1) + throw new IOException("No entries found in servers file"); + + int lnum = 1; + for (int i = 0; i < configuredServers.size(); i++) { + LOG.debug("servers file line " + lnum + " [" + + configuredServers.get(i) + "]"); + lnum++; + } } - - private List getChildren(String znode,Watcher watcher) throws Exception { - List children=null; - children = zkc.getChildren(znode,watcher); - if( ! children.isEmpty()) - Collections.sort(children); - return children; - } - + + private List getChildren(String znode, Watcher watcher) + throws Exception { + List children = null; + children = zkc.getChildren(znode, watcher); + if (!children.isEmpty()) + Collections.sort(children); + return children; + } + private synchronized void getZkRunning() throws Exception { - LOG.debug("Reading " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING); - List children = getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING, new RunningWatcher()); - - if( ! children.isEmpty()) { - for(String child : children) { - //If stop-wms.sh is executed and WMS_MANAGES_ZK then zookeeper is stopped abruptly. - //Second scenario is when ZooKeeper fails for some reason regardless of whether WMS - //manages it. When either happens the WmsServer running znodes still exist in ZooKeeper - //and we see them at next startup. When they eventually timeout - //we get node deleted events for a server that no longer exists. 
So, only recognize - //WmsServer running znodes that have timestamps after last WmsMaster startup. - Scanner scn = new Scanner(child); - scn.useDelimiter(":"); - String hostName = scn.next(); - String instance = scn.next(); - int thriftPort = Integer.parseInt(scn.next()); - int infoPort = Integer.parseInt(scn.next()); - long serverStartTimestamp = Long.parseLong(scn.next()); - scn.close(); - - if(serverStartTimestamp < startupTimestamp) - continue; - - if(! runningServers.contains(child)) { - LOG.debug("Watching running [" + child + "]"); - zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING + "/" + child, new RunningWatcher()); - runningServers.add(child); - } - } - metrics.setTotalRunning(runningServers.size()); - } else { - metrics.setTotalRunning(0); - } + LOG.debug("Reading " + parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING); + List children = getChildren(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING, + new RunningWatcher()); + + if (!children.isEmpty()) { + for (String child : children) { + // If stop-wms.sh is executed and WMS_MANAGES_ZK then zookeeper + // is stopped abruptly. + // Second scenario is when ZooKeeper fails for some reason + // regardless of whether WMS + // manages it. When either happens the WmsServer running znodes + // still exist in ZooKeeper + // and we see them at next startup. When they eventually timeout + // we get node deleted events for a server that no longer + // exists. So, only recognize + // WmsServer running znodes that have timestamps after last + // WmsMaster startup. + Scanner scn = new Scanner(child); + scn.useDelimiter(":"); + String hostName = scn.next(); + String instance = scn.next(); + int infoPort = Integer.parseInt(scn.next()); + long serverStartTimestamp = Long.parseLong(scn.next()); + scn.close(); + + if (serverStartTimestamp < startupTimestamp) + continue; + + if (!runningServers.contains(child)) { + LOG.debug("Watching running [" + child + "]"); + zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING + + "/" + child, new RunningWatcher()); + runningServers.add(child); + } + } + metrics.setTotalRunning(runningServers.size()); + } else { + metrics.setTotalRunning(0); + } } - - private synchronized void restartServer(String znodePath) throws Exception { - String child = znodePath.replace(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING + "/",""); - Scanner scn = new Scanner(child); - scn.useDelimiter(":"); - String hostName = scn.next(); - String instance = scn.next(); - int thriftPort = Integer.parseInt(scn.next()); - int infoPort = Integer.parseInt(scn.next()); - long serverStartTimestamp = Long.parseLong(scn.next()); - scn.close(); - - LOG.error("WmsServer [" + hostName + ":" + instance + "] failed."); - - if(runningServers.contains(child)) { - LOG.debug("Found [" + child + "], deleting from running servers list"); - runningServers.remove(child); - metrics.setTotalRunning(runningServers.size()); - } - - RestartHandler handler = new RestartHandler(child); - restartQueue.add(handler); - } - - public synchronized ArrayList getServersList() { - return runningServers; - } - - public synchronized ArrayList getWorkloadsList() { - ArrayList workloads = new ArrayList(); - - LOG.debug("Reading " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS); - - try { - List children = getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS, null); - - if( ! 
children.isEmpty()) { - for(String child : children) { - Request request = new Request(); - String workloadZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS + "/" + child; - Stat stat = zkc.exists(workloadZnode, false); - if (stat != null){ - byte[] bytes = zkc.getData(workloadZnode, false, stat); - try { - deserializer.deserialize(request, bytes); - workloads.add(request); - } catch (TException e) { - e.printStackTrace(); - } - } - } - } - }catch (Exception e){ - e.printStackTrace(); - } - - return workloads; - } - - public synchronized ArrayList getClientsList() { - ArrayList clients = new ArrayList(); - - LOG.debug("Reading " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS); - - try { - List children = getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS, null); - if( ! children.isEmpty()) { - for(String child : children) { - clients.add(child); - } - } - }catch (Exception e){ - e.printStackTrace(); - } - - return clients; - } - - public synchronized ArrayList getStreamsList() { - ArrayList streams = new ArrayList(); - - LOG.debug("Reading " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES); - - try { - List children = getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS, null); - - if( ! children.isEmpty()) { - for(String child : children) { - Stream stream = new Stream(); - String streamZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + child; - Stat stat = zkc.exists(streamZnode, false); - if (stat != null){ - byte[] bytes = zkc.getData(streamZnode, false, stat); - try { - deserializer.deserialize(stream, bytes); - streams.add(stream); - } catch (TException e) { - e.printStackTrace(); - } - } - } - } - }catch (Exception e){ - e.printStackTrace(); - } - - return streams; - } - - public synchronized ArrayList getRulesList() { - ArrayList rules = new ArrayList(); - - LOG.debug("Reading " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES); - - try { - List children = getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES, null); - - if( ! 
children.isEmpty()) { - for(String child : children) { - Rule rule = new Rule(); - String ruleZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES + "/" + child; - Stat stat = zkc.exists(ruleZnode, false); - if (stat != null){ - byte[] bytes = zkc.getData(ruleZnode, false, stat); - try { - deserializer.deserialize(rule, bytes); - rules.add(rule); - } catch (TException e) { - e.printStackTrace(); - } - } - } - } - }catch (Exception e){ - e.printStackTrace(); - } - - return rules; - } - - public ComplexEventProcessor getComplexEventProcessor(){ - return cep; - } + private synchronized void restartServer(String znodePath) throws Exception { + String child = znodePath.replace(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING + "/", ""); + Scanner scn = new Scanner(child); + scn.useDelimiter(":"); + String hostName = scn.next(); + String instance = scn.next(); + int infoPort = Integer.parseInt(scn.next()); + long serverStartTimestamp = Long.parseLong(scn.next()); + scn.close(); + + LOG.error("WmsServer [" + hostName + ":" + instance + "] failed."); + + if (runningServers.contains(child)) { + LOG.debug("Found [" + child + + "], deleting from running servers list"); + runningServers.remove(child); + metrics.setTotalRunning(runningServers.size()); + } + + RestartHandler handler = new RestartHandler(child); + restartQueue.add(handler); + } + + public synchronized ArrayList getServersList() { + return runningServers; + } + + /* + * public synchronized ArrayList getWorkloadsList() { + * ArrayList workloads = new ArrayList(); + * + * LOG.debug("Reading " + parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS); + * + * try { List children = getChildren(parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS, null); + * + * if (!children.isEmpty()) { for (String child : children) { Request + * request = new Request(); String workloadZnode = parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS + "/" + child; Stat stat = + * zkc.exists(workloadZnode, false); if (stat != null) { byte[] bytes = + * zkc.getData(workloadZnode, false, stat); try { + * deserializer.deserialize(request, bytes); workloads.add(request); } catch + * (TException e) { e.printStackTrace(); } } } } } catch (Exception e) { + * e.printStackTrace(); } + * + * return workloads; } + */ + public synchronized ArrayList getClientsList() { + ArrayList clients = new ArrayList(); + + LOG.debug("Reading " + parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS); + + try { + List children = getChildren(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS, null); + if (!children.isEmpty()) { + for (String child : children) { + clients.add(child); + } + } + } catch (Exception e) { + e.printStackTrace(); + } + + return clients; + } + /* + * public synchronized ArrayList getStreamsList() { + * ArrayList streams = new ArrayList(); + * + * LOG.debug("Reading " + parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES); + * + * try { List children = getChildren(parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS, null); + * + * if (!children.isEmpty()) { for (String child : children) { Stream stream + * = new Stream(); String streamZnode = parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + child; Stat stat = + * zkc.exists(streamZnode, false); if (stat != null) { byte[] bytes = + * zkc.getData(streamZnode, false, stat); try { + * deserializer.deserialize(stream, bytes); streams.add(stream); } catch + * (TException e) { e.printStackTrace(); } } } } } catch (Exception e) { 
+ * e.printStackTrace(); } + * + * return streams; } + */ + /* + * public synchronized ArrayList getRulesList() { ArrayList + * rules = new ArrayList(); + * + * LOG.debug("Reading " + parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES); + * + * try { List children = getChildren(parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES, null); + * + * if (!children.isEmpty()) { for (String child : children) { Rule rule = + * new Rule(); String ruleZnode = parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES + "/" + child; Stat stat = + * zkc.exists(ruleZnode, false); if (stat != null) { byte[] bytes = + * zkc.getData(ruleZnode, false, stat); try { deserializer.deserialize(rule, + * bytes); rules.add(rule); } catch (TException e) { e.printStackTrace(); } + * } } } } catch (Exception e) { e.printStackTrace(); } + * + * return rules; } + */ } \ No newline at end of file diff --git a/wms/src/main/java/org/trafodion/wms/master/WmsMaster.java b/wms/src/main/java/org/trafodion/wms/master/WmsMaster.java index 888571a0..e9fdd46f 100644 --- a/wms/src/main/java/org/trafodion/wms/master/WmsMaster.java +++ b/wms/src/main/java/org/trafodion/wms/master/WmsMaster.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -54,255 +54,315 @@ import org.trafodion.wms.rest.WmsRest; public class WmsMaster implements Runnable { - private static final Log LOG = LogFactory.getLog(WmsMaster.class); - private Thread thrd; - private ZkClient zkc=null; + private static final Log LOG = LogFactory.getLog(WmsMaster.class); + private Thread thrd; + private ZkClient zkc = null; private Configuration conf; - private String[] args; - private InetAddress ia; - private String instance=null; - private InfoServer infoServer; - private WmsRest restServer; - private String serverName; - private int thriftPort; - private int infoPort; - private long startTime; - private ServerManager serverManager; + private String[] args; + private InetAddress ia; + private String instance = null; + private InfoServer infoServer; + private WmsRest restServer; + private String serverName; + private int infoPort; + private long startTime; + private ServerManager serverManager; public static final String MASTER = "master"; private Metrics metrics; private String parentZnode; - private ExecutorService pool=null; + private ExecutorService pool = null; private JVMShutdownHook jvmShutdownHook; - + private class JVMShutdownHook extends Thread { - public void run() { - LOG.debug("JVM shutdown hook is running"); - try { - zkc.close(); - } catch (InterruptedException ie) {}; - } + public void run() { + LOG.debug("JVM shutdown hook is running"); + try { + zkc.close(); + } catch (InterruptedException ie) { + } + ; + } + } + + public WmsMaster(String[] args) { + this.args = args; + conf = WmsConfiguration.create(); + parentZnode = conf.get(Constants.ZOOKEEPER_ZNODE_PARENT, + Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + jvmShutdownHook = new JVMShutdownHook(); + Runtime.getRuntime().addShutdownHook(jvmShutdownHook); + thrd = new Thread(this); + thrd.start(); + } + + private static int findFreePort() throws IOException { + ServerSocket server = new ServerSocket(0); + int port = server.getLocalPort(); + server.close(); + return port; + } + + public void run() { + VersionInfo.logVersion(); + + Options opt = new Options(); + 
CommandLine cmd; + try { + cmd = new GnuParser().parse(opt, args); + instance = cmd.getArgList().get(0).toString(); + } catch (NullPointerException e) { + LOG.error("No args found: ", e); + System.exit(1); + } catch (ParseException e) { + LOG.error("Could not parse: ", e); + System.exit(1); + } + + try { + zkc = new ZkClient(); + zkc.connect(); + LOG.info("Connected to ZooKeeper"); + } catch (Exception e) { + LOG.error(e); + System.exit(1); + } + + try { + // Create the persistent WMS znodes + Stat stat = zkc.exists(parentZnode, false); + if (stat == null) { + zkc.create(parentZnode, new byte[0], + ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT, + new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER, + new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS, + new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING, + new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_LEADER, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_LEADER, + new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS, + new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS, + new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS, + new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES, new byte[0], + ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + } + + String schema = IOUtils.toString(getClass().getResourceAsStream( + Constants.PLATFORM_STATS_SCHEMA_FILENAME)); + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS, + schema.getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION, + new byte[0], 
ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + + schema = IOUtils.toString(getClass().getResourceAsStream( + Constants.TRAFODION_RMS_SCHEMA_FILENAME)); + stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS, false); + if (stat == null) { + zkc.create(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS, + schema.getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.PERSISTENT); + } + + } catch (KeeperException.NodeExistsException e) { + // do nothing...some other server has created znodes + } catch (Exception e) { + LOG.error(e); + System.exit(0); + } + + metrics = new Metrics(); + startTime = System.currentTimeMillis(); + + try { + + ia = InetAddress.getLocalHost(); + + String interfaceName = conf.get(Constants.WMS_DNS_INTERFACE, + Constants.DEFAULT_WMS_DNS_INTERFACE); + if (interfaceName.equalsIgnoreCase("default")) { + LOG.info("Using local host [" + ia.getCanonicalHostName() + "," + + ia.getHostAddress() + "]"); + } else { + // For all nics get all hostnames and IPs + // and try to match against dcs.dns.interface property + Enumeration nics = NetworkInterface + .getNetworkInterfaces(); + while (nics.hasMoreElements()) { + NetworkInterface ni = nics.nextElement(); + Enumeration rawAdrs = ni.getInetAddresses(); + while (rawAdrs.hasMoreElements()) { + InetAddress inet = rawAdrs.nextElement(); + LOG.info("Found interface [" + ni.getDisplayName() + + "," + inet.getCanonicalHostName() + "," + + inet.getHostAddress() + "]"); + if (interfaceName.equalsIgnoreCase(ni.getDisplayName()) + && inet.getCanonicalHostName().contains(".")) { + LOG.info("Using interface [" + ni.getDisplayName() + + "," + inet.getCanonicalHostName() + "," + + inet.getHostAddress() + "]"); + ia = inet; + break; + } + } + } + } + + serverName = ia.getCanonicalHostName(); + + // Register in zookeeper + String path = parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER + "/" + + ia.getCanonicalHostName() + ":" + startTime; + zkc.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL); + LOG.info("Created znode [" + path + "]"); + + // Start the Web UI info server. 
+ infoPort = conf.getInt(Constants.WMS_MASTER_INFO_PORT, + Constants.DEFAULT_WMS_MASTER_INFO_PORT); + if (infoPort >= 0) { + String a = conf.get(Constants.WMS_MASTER_INFO_BIND_ADDRESS, + Constants.DEFAULT_WMS_MASTER_INFO_BIND_ADDRESS); + infoServer = new InfoServer(MASTER, a, infoPort, false, conf); + infoServer.addServlet("status", "/master-status", + MasterStatusServlet.class); + infoServer.setAttribute(MASTER, this); + infoServer.start(); + } + + // Start the REST service + restServer = new WmsRest(conf); + LOG.info("REST service listening on port [" + + conf.getInt("wms.rest.port", 8080) + "]"); + + // Start the server manager + pool = Executors.newSingleThreadExecutor(); + serverManager = new ServerManager(this); + Future future = pool.submit(serverManager); + future.get();// blocking call + + } catch (Exception e) { + LOG.error(e); + e.printStackTrace(); + pool.shutdown(); + System.exit(0); + } + } + + public InetAddress getInetAddress() { + return ia; + } + + public String getServerName() { + return serverName; + } + + public int getInfoPort() { + return infoPort; + } + + public Configuration getConfiguration() { + return conf; + } + + public ServerManager getServerManager() { + return serverManager; + } + + public long getStartTime() { + return startTime; + } + + public ZkClient getZkClient() { + return zkc; + } + + public String getZKQuorumServersString() { + return ZKConfig.getZKQuorumServersString(conf); + } + + public String getZKParentZnode() { + return parentZnode; + } + + public Metrics getMetrics() { + return metrics; + } + + public static void main(String[] args) { + WmsMaster server = new WmsMaster(args); } - - public WmsMaster(String[] args) { - this.args = args; - conf = WmsConfiguration.create(); - parentZnode = conf.get(Constants.ZOOKEEPER_ZNODE_PARENT,Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - jvmShutdownHook = new JVMShutdownHook(); - Runtime.getRuntime().addShutdownHook(jvmShutdownHook); - thrd = new Thread(this); - thrd.start(); - } - - private static int findFreePort()throws IOException { - ServerSocket server = new ServerSocket(0); - int port = server.getLocalPort(); - server.close(); - return port; - } - - public void run () { - VersionInfo.logVersion(); - - Options opt = new Options(); - CommandLine cmd; - try { - cmd = new GnuParser().parse(opt, args); - instance = cmd.getArgList().get(0).toString(); - } catch (NullPointerException e) { - LOG.error("No args found: ", e); - System.exit(1); - } catch (ParseException e) { - LOG.error("Could not parse: ", e); - System.exit(1); - } - - try { - zkc = new ZkClient(); - zkc.connect(); - LOG.info("Connected to ZooKeeper"); - } catch (Exception e) { - LOG.error(e); - System.exit(1); - } - - try { - //Create the persistent WMS znodes - Stat stat = zkc.exists(parentZnode,false); - if(stat == null) { - zkc.create(parentZnode,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS,false); - if(stat == null) { - zkc.create(parentZnode + 
Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_LEADER,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_LEADER,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - - String schema = IOUtils.toString(getClass().getResourceAsStream(Constants.PLATFORM_STATS_SCHEMA_FILENAME)); - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS,schema.getBytes(),ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - - schema = IOUtils.toString(getClass().getResourceAsStream(Constants.TRAFODION_RMS_SCHEMA_FILENAME)); - stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS,false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS,schema.getBytes(),ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - - } catch (KeeperException.NodeExistsException e) { - //do nothing...some other server has created znodes - } catch (Exception e) { - LOG.error(e); - System.exit(0); - } - - metrics = new Metrics(); - startTime = System.currentTimeMillis(); - - try { - - ia = InetAddress.getLocalHost(); - - String interfaceName = conf.get(Constants.WMS_DNS_INTERFACE, Constants.DEFAULT_WMS_DNS_INTERFACE); - if(interfaceName.equalsIgnoreCase("default")) { - LOG.info("Using local host [" + ia.getCanonicalHostName() + "," + ia.getHostAddress() + "]"); - } else { - // For all nics get all hostnames and IPs - // and try to match against dcs.dns.interface property - Enumeration nics = NetworkInterface.getNetworkInterfaces(); - while(nics.hasMoreElements()) { - NetworkInterface ni = nics.nextElement(); - Enumeration rawAdrs = ni.getInetAddresses(); - while(rawAdrs.hasMoreElements()) { - InetAddress inet = rawAdrs.nextElement(); - LOG.info("Found interface [" + ni.getDisplayName() + "," + 
inet.getCanonicalHostName() + "," + inet.getHostAddress() + "]"); - if( interfaceName.equalsIgnoreCase(ni.getDisplayName()) && inet.getCanonicalHostName().contains(".") ) { - LOG.info("Using interface [" + ni.getDisplayName() + "," + inet.getCanonicalHostName() + "," + inet.getHostAddress() + "]"); - ia = inet; - break; - } - } - } - } - - serverName = ia.getCanonicalHostName(); - - //Setup RPC services - thriftPort = findFreePort(); - - //Register in zookeeper - String path = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER + "/" + ia.getCanonicalHostName() + ":" + thriftPort + ":" + startTime; - zkc.create(path,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL); - LOG.info("Created znode [" + path + "]"); - - // Start the Web UI info server. - infoPort = conf.getInt(Constants.WMS_MASTER_INFO_PORT, Constants.DEFAULT_WMS_MASTER_INFO_PORT); - if (infoPort >= 0) { - String a = conf.get(Constants.WMS_MASTER_INFO_BIND_ADDRESS, Constants.DEFAULT_WMS_MASTER_INFO_BIND_ADDRESS); - infoServer = new InfoServer(MASTER, a, infoPort, false, conf); - infoServer.addServlet("status", "/master-status", MasterStatusServlet.class); - infoServer.setAttribute(MASTER, this); - infoServer.start(); - } - - //Start the REST service - restServer = new WmsRest(conf); - LOG.info("REST service listening on port [" + conf.getInt("wms.rest.port", 8080) + "]"); - - //Start the server manager - pool = Executors.newSingleThreadExecutor(); - serverManager = new ServerManager(this); - Future future = pool.submit(serverManager); - future.get();//blocking call - - } catch (Exception e) { - LOG.error(e); - e.printStackTrace(); - pool.shutdown(); - System.exit(0); - } - } - - public InetAddress getInetAddress(){ - return ia; - } - - public int getThriftPort(){ - return thriftPort; - } - - public String getServerName(){ - return serverName; - } - - public int getInfoPort(){ - return infoPort; - } - - public Configuration getConfiguration(){ - return conf; - } - - public ServerManager getServerManager(){ - return serverManager; - } - - public long getStartTime(){ - return startTime; - } - - public ZkClient getZkClient() { - return zkc; - } - - public String getZKQuorumServersString() { - return ZKConfig.getZKQuorumServersString(conf); - } - - public String getZKParentZnode() { - return parentZnode; - } - - public Metrics getMetrics(){ - return metrics; - } - - public static void main(String [] args) { - WmsMaster server = new WmsMaster(args); - } } diff --git a/wms/src/main/java/org/trafodion/wms/master/rpc/thrift/ThriftRpcHandler.java b/wms/src/main/java/org/trafodion/wms/master/rpc/thrift/ThriftRpcHandler.java deleted file mode 100644 index b97a1e1c..00000000 --- a/wms/src/main/java/org/trafodion/wms/master/rpc/thrift/ThriftRpcHandler.java +++ /dev/null @@ -1,244 +0,0 @@ -package org.trafodion.wms.master.rpc.thrift; - -import java.net.*; -import java.util.*; -import java.io.Writer; -import java.io.StringWriter; -import java.io.PrintWriter; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.log4j.Logger; - -import org.apache.thrift.TSerializer; -import org.apache.thrift.TDeserializer; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TIOStreamTransport; -import org.apache.thrift.transport.TTransport; - -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.data.Stat; 
-import org.apache.zookeeper.KeeperException; - -import org.trafodion.wms.zookeeper.ZkClient; -import org.trafodion.wms.master.WmsMaster; -import org.trafodion.wms.master.ServerManager; -import org.trafodion.wms.script.ScriptManager; -import org.trafodion.wms.Constants; -import org.trafodion.wms.cep.ComplexEventProcessor; -import org.trafodion.wms.thrift.generated.WmsAdminService; -import org.trafodion.wms.thrift.generated.IOError; -import org.trafodion.wms.thrift.generated.IllegalArgument; -import org.trafodion.wms.thrift.generated.Stream; -import org.trafodion.wms.thrift.generated.StreamResponse; -import org.trafodion.wms.thrift.generated.Rule; -import org.trafodion.wms.thrift.generated.RuleResponse; -import org.trafodion.wms.thrift.generated.WorkloadResponse; -import org.trafodion.wms.thrift.generated.Request; - -public class ThriftRpcHandler implements WmsAdminService.Iface { - private static final Log LOG = LogFactory.getLog(ThriftRpcHandler.class.getName()); - private WmsMaster wmsMaster; - private long beginTs; - private long endTs; - private long totalTime; - private ZkClient zkc; - private String parentZnode; - private ComplexEventProcessor cep; - private TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory()); - private TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory()); - - public ThriftRpcHandler(WmsMaster wmsMaster){ - this.wmsMaster = wmsMaster; - this.zkc = wmsMaster.getZkClient(); - this.parentZnode = wmsMaster.getZKParentZnode(); - this.cep = wmsMaster.getServerManager().getComplexEventProcessor(); - } - - public long ping(long timestamp) throws IOError { - LOG.debug("ping [" + timestamp + "]"); - return System.currentTimeMillis(); - } - - public void addStream(Stream stream) throws IOError, IllegalArgument { - LOG.debug("addStream [" + stream.toString() + "]"); - - String streamZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + stream.getName(); - try { - cep.addStream(stream.getName(),stream.getValue()); - byte[] bytes = serializer.serialize(stream); - Stat stat = zkc.exists(streamZnode, false); - if (stat != null){ - zkc.setData(streamZnode,bytes,-1); - } else { - zkc.create(streamZnode,bytes,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - } catch (Exception e) { - IOError ioe = new IOError(); - ioe.setMessage("Error adding " + streamZnode + ", " + e.getMessage()); - throw ioe; - } - - } - public void alterStream(Stream stream) throws IOError, IllegalArgument { - LOG.debug("alterStream [" + stream.toString() + "]"); - - String streamZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + stream.getName(); - try { - cep.alterStream(stream.getName(),stream.getValue()); - byte[] bytes = serializer.serialize(stream); - Stat stat = zkc.exists(streamZnode, false); - if (stat != null){ - zkc.setData(streamZnode,bytes,-1); - } else { - IOError ioe = new IOError(); - ioe.setMessage("Stream " + streamZnode + " not found"); - throw ioe; - } - } catch (Exception e) { - IOError ioe = new IOError(); - ioe.setMessage("Error altering " + streamZnode + ", " + e.getMessage()); - throw ioe; - } - } - public void deleteStream(Stream stream) throws IOError, IllegalArgument { - LOG.debug("deleteStream [" + stream.toString() + "]"); - - String streamZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STREAMS + "/" + stream.getName(); - try { - LOG.debug("znode=" + streamZnode); - cep.deleteStream(stream.getName()); - Stat stat = zkc.exists(streamZnode, false); - if (stat != null){ - 
zkc.delete(streamZnode,-1); - } else { - IOError ioe = new IOError(); - ioe.setMessage("Stream " + streamZnode + " not found"); - throw ioe; - } - } catch (Exception e) { - IOError ioe = new IOError(); - ioe.setMessage("Error deleting " + streamZnode + ", " + e.getMessage()); - throw ioe; - } - } - public StreamResponse stream() throws IOError { - LOG.debug("stream"); - - StreamResponse response = new StreamResponse(); - ArrayList streams = wmsMaster.getServerManager().getStreamsList(); - - if( ! streams.isEmpty()) { - for(Stream aStream : streams) { - response.addToStreamList(aStream); - } - } - - return response; - } - - public void addRule(Rule rule) throws IOError, IllegalArgument { - LOG.debug("addRule [" + rule.toString() + "]"); - - String ruleZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES + "/" + rule.getName(); - try { - cep.addQuery(rule.getValue()); - byte[] bytes = serializer.serialize(rule); - Stat stat = zkc.exists(ruleZnode, false); - if (stat != null){ - zkc.setData(ruleZnode,bytes,-1); - } else { - zkc.create(ruleZnode,bytes,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); - } - } catch (Exception e) { - IOError ioe = new IOError(); - ioe.setMessage("Error adding " + ruleZnode + ", " + e.getMessage()); - throw ioe; - } - - } - public void alterRule(Rule rule) throws IOError, IllegalArgument { - LOG.debug("alterRule [" + rule.toString() + "]"); - - String ruleZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES + "/" + rule.getName(); - try { - cep.deleteQuery(rule.getName()); - cep.addQuery(rule.getValue()); - byte[] bytes = serializer.serialize(rule); - Stat stat = zkc.exists(ruleZnode, false); - if (stat != null){ - zkc.setData(ruleZnode,bytes,-1); - } else { - IOError ioe = new IOError(); - ioe.setMessage("Rule " + ruleZnode + " not found"); - throw ioe; - } - } catch (Exception e) { - IOError ioe = new IOError(); - ioe.setMessage("Error altering " + ruleZnode + ", " + e.getMessage()); - throw ioe; - } - } - public void deleteRule(Rule rule) throws IOError, IllegalArgument { - LOG.debug("deleteRule [" + rule.toString() + "]"); - - String ruleZnode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_RULES + "/" + rule.getName(); - try { - LOG.debug("znode=" + ruleZnode); - cep.deleteQuery(rule.getName()); - Stat stat = zkc.exists(ruleZnode, false); - if (stat != null){ - zkc.delete(ruleZnode,-1); - } else { - IOError ioe = new IOError(); - ioe.setMessage("Rule " + ruleZnode + " not found"); - throw ioe; - } - } catch (Exception e) { - IOError ioe = new IOError(); - ioe.setMessage("Error deleting " + ruleZnode + ", " + e.getMessage()); - throw ioe; - } - } - public RuleResponse rule() throws IOError { - LOG.debug("rule"); - - RuleResponse response = new RuleResponse(); - ArrayList rules = wmsMaster.getServerManager().getRulesList(); - - if( ! rules.isEmpty()) { - for(Rule aRule : rules) { - response.addToRuleList(aRule); - } - } - - return response; - } - - public WorkloadResponse workload() throws IOError { - LOG.debug("workload"); - - WorkloadResponse response = new WorkloadResponse(); - ArrayList workloads = wmsMaster.getServerManager().getWorkloadsList(); - - if( ! 
workloads.isEmpty()) { - for(Request aWorkload : workloads) { - response.addToWorkloadList(aWorkload); - } - } - - return response; - } - - String stackTraceToString(Exception e) { - Writer writer = new StringWriter(); - PrintWriter printWriter = new PrintWriter(writer); - e.printStackTrace(printWriter); - String s = writer.toString(); - return s; - } - -} diff --git a/wms/src/main/java/org/trafodion/wms/master/rpc/thrift/ThriftRpcServer.java b/wms/src/main/java/org/trafodion/wms/master/rpc/thrift/ThriftRpcServer.java deleted file mode 100644 index dc39cabe..00000000 --- a/wms/src/main/java/org/trafodion/wms/master/rpc/thrift/ThriftRpcServer.java +++ /dev/null @@ -1,73 +0,0 @@ -package org.trafodion.wms.master.rpc.thrift; - -import java.net.*; -import java.io.IOException; -import java.nio.charset.Charset; - -import org.apache.thrift.server.TServer; -import org.apache.thrift.server.TServer.Args; -import org.apache.thrift.server.TSimpleServer; -import org.apache.thrift.server.TThreadPoolServer; -import org.apache.thrift.server.TNonblockingServer; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TServerSocket; -import org.apache.thrift.transport.TServerTransport; -import org.apache.thrift.transport.TNonblockingTransport; -import org.apache.thrift.transport.TNonblockingSocket; -import org.apache.thrift.transport.TNonblockingServerTransport; -import org.apache.thrift.transport.TNonblockingServerSocket; -import org.apache.thrift.transport.TTransportException; -import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; - -import org.apache.hadoop.conf.Configuration; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.trafodion.wms.zookeeper.ZkClient; -import org.trafodion.wms.util.WmsConfiguration; -import org.trafodion.wms.Constants; -import org.trafodion.wms.master.WmsMaster; -import org.trafodion.wms.master.rpc.thrift.ThriftRpcHandler; -import org.trafodion.wms.thrift.generated.*; - -public class ThriftRpcServer implements Runnable { - private static final Log LOG = LogFactory.getLog(ThriftRpcServer.class.getName()); - private static final Charset CHARSET = Charset.forName("UTF-8"); - private WmsMaster wmsMaster; - private Thread thrd; - private ThriftRpcHandler trpch = null; -// private WmsService.Processor processor=null; - - public ThriftRpcServer(WmsMaster wmsMaster){ - this.wmsMaster = wmsMaster; - trpch = new ThriftRpcHandler(wmsMaster); - //processor = new WmsService.Processor(trpch); - thrd = new Thread(this); - thrd.start(); - } - - public ThriftRpcHandler getHandler() { - return this.trpch; - } - - public void run() { - TServer server=null; - int port = wmsMaster.getThriftPort(); - - try { - //TServerTransport serverTransport = new TServerSocket(new InetSocketAddress(port)); - //server = new TSimpleServer(new Args(serverTransport).processor(new WmsService.Processor(trpch))); - // Use this for a multithreaded server - //server = new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport).processor(new WmsService.Processor(trpch))); - // Use this for non blocking server - TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(new InetSocketAddress(port)); - server = new TNonblockingServer(new TNonblockingServer.Args(serverTransport).processor(new WmsAdminService.Processor(trpch))); - LOG.info("Thrift RPC listening to [" + wmsMaster.getServerName() + ":" + port + "]"); - } catch (TTransportException e) { - LOG.error("TTransportException " + e); - 
System.exit(-1); - } - - server.serve(); - } -} diff --git a/wms/src/main/java/org/trafodion/wms/rest/RESTServlet.java b/wms/src/main/java/org/trafodion/wms/rest/RESTServlet.java index e71794e3..05e5b603 100644 --- a/wms/src/main/java/org/trafodion/wms/rest/RESTServlet.java +++ b/wms/src/main/java/org/trafodion/wms/rest/RESTServlet.java @@ -8,72 +8,69 @@ import org.trafodion.wms.rest.RestConstants; import org.trafodion.wms.rest.model.WorkloadModel; import org.trafodion.wms.rest.model.WorkloadListModel; -import org.trafodion.wms.thrift.generated.*; -import org.trafodion.wms.client.WmsAdmin; import org.apache.hadoop.conf.Configuration; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; + /** * Singleton class encapsulating global REST servlet state and functions. */ public class RESTServlet implements RestConstants { - private static final Log LOG = LogFactory.getLog(RESTServlet.class); - private static RESTServlet INSTANCE; - private final Configuration conf; - private final WmsAdmin admin; + private static final Log LOG = LogFactory.getLog(RESTServlet.class); + private static RESTServlet INSTANCE; + private final Configuration conf; - /** - * @return the RESTServlet singleton instance - * @throws IOException - */ - public synchronized static RESTServlet getInstance() - throws IOException { - assert(INSTANCE != null); - return INSTANCE; - } + /** + * @return the RESTServlet singleton instance + * @throws IOException + */ + public synchronized static RESTServlet getInstance() throws IOException { + assert (INSTANCE != null); + return INSTANCE; + } - /** - * @param conf Existing configuration to use in rest servlet - * @return the RESTServlet singleton instance - * @throws IOException - */ - public synchronized static RESTServlet getInstance(Configuration conf) - throws IOException { - if (INSTANCE == null) { - INSTANCE = new RESTServlet(conf); - } - return INSTANCE; - } + /** + * @param conf + * Existing configuration to use in rest servlet + * @return the RESTServlet singleton instance + * @throws IOException + */ + public synchronized static RESTServlet getInstance(Configuration conf) + throws IOException { + if (INSTANCE == null) { + INSTANCE = new RESTServlet(conf); + } + return INSTANCE; + } - public synchronized static void stop() { - if (INSTANCE != null) INSTANCE = null; - } + public synchronized static void stop() { + if (INSTANCE != null) + INSTANCE = null; + } - /** - * Constructor with existing configuration - * @param conf existing configuration - * @throws IOException. - */ - RESTServlet(Configuration conf) throws IOException { - this.conf = conf; - this.admin = new WmsAdmin(conf); - } + /** + * Constructor with existing configuration + * + * @param conf + * existing configuration + * @throws IOException. + */ + RESTServlet(Configuration conf) throws IOException { + this.conf = conf; + } - Configuration getConfiguration() { - return conf; - } - - WmsAdmin getAdmin() { - return admin; - } + Configuration getConfiguration() { + return conf; + } - /** - * Helper method to determine if server should - * only respond to GET HTTP method requests. - * @return boolean for server read-only state - */ - boolean isReadOnly() { - return getConfiguration().getBoolean("wms.rest.readonly", false); - } + /** + * Helper method to determine if server should only respond to GET HTTP + * method requests. 
+ * + * @return boolean for server read-only state + */ + boolean isReadOnly() { + return getConfiguration().getBoolean("wms.rest.readonly", false); + } } diff --git a/wms/src/main/java/org/trafodion/wms/rest/RestHandler.java b/wms/src/main/java/org/trafodion/wms/rest/RestHandler.java index 1e7b6196..505cd9fe 100644 --- a/wms/src/main/java/org/trafodion/wms/rest/RestHandler.java +++ b/wms/src/main/java/org/trafodion/wms/rest/RestHandler.java @@ -1,92 +1,88 @@ /* - -package org.trafodion.wms.rest; - -import java.io.IOException; -import java.util.*; -import java.text.DateFormat; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.mortbay.jetty.Handler; -import org.mortbay.jetty.Request; -import org.mortbay.jetty.handler.AbstractHandler; -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.ServletException; -import org.trafodion.wms.server.WorkloadsQueue; -import org.trafodion.wms.server.WorkloadItem; -import org.trafodion.wms.thrift.generated.*; - -public class RestHandler extends AbstractHandler { - private static final Log LOG = LogFactory.getLog(RestHandler.class.getName()); - WorkloadsQueue wlq; - - public RestHandler(WorkloadsQueue wlq) { - super(); - this.wlq = wlq; - } - - public void handle(String target, HttpServletRequest request, HttpServletResponse response, int dispatch) - throws IOException, ServletException { - response.setContentType("text/html"); - response.setStatus(HttpServletResponse.SC_OK); - ((Request)request).setHandled(true); - - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println("WMS (Hadoop Workload Management Services)"); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - - Iterator it = wlq.getWorkloads().iterator(); - if(it.hasNext()) { - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - - while(it.hasNext()) { - WorkloadItem workload = it.next(); - - if(workload.getRequest().getJobState().toString().equalsIgnoreCase("Running")) - response.getWriter().println(""); - else if(workload.getRequest().getJobState().toString().equalsIgnoreCase("Completed")) - response.getWriter().println(""); - else - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - response.getWriter().println(""); - Date date = new Date(workload.getRequest().getStartTimestamp()); - response.getWriter().println(""); - response.getWriter().println(""); - date = new Date(workload.getRequest().getEndTimestamp()); - response.getWriter().println(""); - String ts = String.format("%1$tH%2$s%1$tM%2$s%1$tS",workload.getRequest().getDuration(),":"); - response.getWriter().println(""); - response.getWriter().println(""); - } - response.getWriter().println("
WMS Id | Parent WMS Id | User | Type/Id | Text | State | Sub-State | Start Time | Stats | Last Update | Duration
" + workload.getRequest().getWorkloadId().toString() + "" + workload.getRequest().getParentId().toString() + "" + workload.getRequest().getUserName().toString() + "" + "[" + workload.getRequest().getJobType().toString() + "]" + workload.getRequest().getJobId().toString() + "" + workload.getRequest().getJobText().toString() + "" + workload.getRequest().getJobState().toString() + "" + workload.getRequest().getJobSubState().toString() + "" + DateFormat.getDateTimeInstance().format(date) + "" + "MapRed[" + workload.getRequest().getMapPct().toString() + "][" + workload.getRequest().getReducePct().toString() + "]" + "" + DateFormat.getDateTimeInstance().format(date) + "" + ts + "
"); - } else { - response.getWriter().println("

No workloads found

"); - } - response.getWriter().println(""); - response.getWriter().println(""); - } -} -*/ \ No newline at end of file + * + * package org.trafodion.wms.rest; + * + * import java.io.IOException; import java.util.*; import java.text.DateFormat; + * import org.apache.commons.logging.Log; import + * org.apache.commons.logging.LogFactory; import org.mortbay.jetty.Handler; + * import org.mortbay.jetty.Request; import + * org.mortbay.jetty.handler.AbstractHandler; import + * javax.servlet.http.HttpServletResponse; import + * javax.servlet.http.HttpServletRequest; import javax.servlet.ServletException; + * import org.trafodion.wms.server.WorkloadsQueue; import + * org.trafodion.wms.server.WorkloadItem; + * + * public class RestHandler extends AbstractHandler { private static final Log + * LOG = LogFactory.getLog(RestHandler.class.getName()); WorkloadsQueue wlq; + * + * public RestHandler(WorkloadsQueue wlq) { super(); this.wlq = wlq; } + * + * public void handle(String target, HttpServletRequest request, + * HttpServletResponse response, int dispatch) throws IOException, + * ServletException { response.setContentType("text/html"); + * response.setStatus(HttpServletResponse.SC_OK); + * ((Request)request).setHandled(true); + * + * response.getWriter().println(""); + * response.getWriter().println(""); response.getWriter().println( + * "WMS (Hadoop Workload Management Services)"); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * + * Iterator it = wlq.getWorkloads().iterator(); if(it.hasNext()) { + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * + * while(it.hasNext()) { WorkloadItem workload = it.next(); + * + * if(workload.getRequest().getJobState().toString().equalsIgnoreCase("Running")) + * response.getWriter().println(""); else + * if(workload. + * getRequest().getJobState().toString().equalsIgnoreCase("Completed")) + * response.getWriter().println(""); else + * response.getWriter().println(""); response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); + * response.getWriter().println(""); Date date = new + * Date(workload.getRequest().getStartTimestamp()); + * response.getWriter().println(""); + * response.getWriter().println(""); date = new + * Date(workload.getRequest().getEndTimestamp()); + * response.getWriter().println(""); String ts = + * String.format + * ("%1$tH%2$s%1$tM%2$s%1$tS",workload.getRequest().getDuration(),":"); + * response.getWriter().println(""); + * response.getWriter().println(""); } + * response.getWriter().println("
WMS Id | Parent WMS Id | User | Type/Id | Text | State | Sub-State | Start Time | Stats | Last Update | Duration
" + + * workload.getRequest().getWorkloadId().toString() + "" + + * workload.getRequest().getParentId().toString() + "" + + * workload.getRequest().getUserName().toString() + "" + "[" + + * workload.getRequest().getJobType().toString() + "]" + + * workload.getRequest().getJobId().toString() + "" + + * workload.getRequest().getJobText().toString() + "" + + * workload.getRequest().getJobState().toString() + "" + + * workload.getRequest().getJobSubState().toString() + "" + + * DateFormat.getDateTimeInstance().format(date) + "" + "MapRed[" + + * workload.getRequest().getMapPct().toString() + "][" + + * workload.getRequest().getReducePct().toString() + "]" + "" + + * DateFormat.getDateTimeInstance().format(date) + "" + ts + "
"); } else { + * response.getWriter().println("

No workloads found

"); } + * response.getWriter().println(""); + * response.getWriter().println(""); } } + */ \ No newline at end of file diff --git a/wms/src/main/java/org/trafodion/wms/rest/RootResource.java b/wms/src/main/java/org/trafodion/wms/rest/RootResource.java index 19e7df2b..db07a6fc 100644 --- a/wms/src/main/java/org/trafodion/wms/rest/RootResource.java +++ b/wms/src/main/java/org/trafodion/wms/rest/RootResource.java @@ -1,4 +1,4 @@ - package org.trafodion.wms.rest; +package org.trafodion.wms.rest; import java.io.*; import java.util.List; @@ -19,70 +19,37 @@ import org.trafodion.wms.rest.model.WorkloadModel; import org.trafodion.wms.rest.model.WorkloadListModel; import org.trafodion.wms.rest.model.ServerListModel; -import org.trafodion.wms.thrift.generated.*; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @Path("/") public class RootResource extends ResourceBase { - private static final Log LOG = LogFactory.getLog(RootResource.class); - - static CacheControl cacheControl; - static { - cacheControl = new CacheControl(); - cacheControl.setNoCache(true); - cacheControl.setNoTransform(false); - } - - public RootResource() throws IOException { - super(); - } - - @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) - public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); - } - - try { - return new WorkloadResource().get(uriInfo); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); - } - - } - - @Path("servers") - public ServerListResource getServerListResource() throws IOException { - return new ServerListResource(); - } - - @Path("version") - public VersionResource getVersionResource() throws IOException { - return new VersionResource(); - } - - @Path("rule") - public RuleResource getRuleResource() throws IOException { - return new RuleResource(); - } - - @Path("stream") - public StreamResource getStreamResource() throws IOException { - return new StreamResource(); - } - - @Path("service") - public ServiceResource getServiceResource() throws IOException { - return new ServiceResource(); - } - - @Path("workload") - public WorkloadResource getWorkloadResource() throws IOException { - return new WorkloadResource(); - } + private static final Log LOG = LogFactory.getLog(RootResource.class); + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + public RootResource() throws IOException { + super(); + } + + @Path("servers") + public ServerListResource getServerListResource() throws IOException { + return new ServerListResource(); + } + + @Path("version") + public VersionResource getVersionResource() throws IOException { + return new VersionResource(); + } + + @Path("service") + public ServiceResource getServiceResource() throws IOException { + return new ServiceResource(); + } } diff --git a/wms/src/main/java/org/trafodion/wms/rest/RuleResource.java b/wms/src/main/java/org/trafodion/wms/rest/RuleResource.java index bb787983..c1f786fe 100644 --- a/wms/src/main/java/org/trafodion/wms/rest/RuleResource.java +++ b/wms/src/main/java/org/trafodion/wms/rest/RuleResource.java @@ -40,170 +40,169 @@ import org.trafodion.wms.rest.model.RuleListModel; import org.trafodion.wms.rest.model.RuleModel; -import org.trafodion.wms.thrift.generated.Rule; -import 
org.trafodion.wms.thrift.generated.RuleResponse; -import org.trafodion.wms.thrift.generated.WorkloadResponse; public class RuleResource extends ResourceBase { - private static final Log LOG = LogFactory.getLog(RuleResource.class); - - static CacheControl cacheControl; - static { - cacheControl = new CacheControl(); - cacheControl.setNoCache(true); - cacheControl.setNoTransform(false); - } - /** - * Constructor - * @throws IOException - */ - public RuleResource() throws IOException { - super(); - } - - @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) - public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); - } - - RuleListModel ruleListModel = new RuleListModel(); - RuleResponse ruleResponse = new RuleResponse(); - - try { - servlet.getAdmin().open(); - ruleResponse = servlet.getAdmin().rule(); - servlet.getAdmin().close(); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); - } - - //To test: - //curl -v -X GET -H "Accept: application/json" http://sqws123.houston.hp.com:50030/rule - // - //Should see something like this: - //{"rules":[{"comment":"Added by Matt","name":"trafodion","text":"select * from table T1","timestamp":1398215824738}]} - // - if(ruleResponse.getRuleList() != null){ - for(Rule aRule: ruleResponse.getRuleList()){ - ruleListModel.add(new RuleModel(aRule.getName(),aRule.getValue(),aRule.getComment(),aRule.getTimestamp())); - } - } - - ResponseBuilder response; - if(ruleResponse.getRuleList() == null){ - response = Response.ok("[]"); - } else { - response = Response.ok(ruleListModel); - } - - response.cacheControl(cacheControl); - return response.build(); - - } - - @PUT - @Consumes({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) - public Response put(final RuleModel model, - final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); - } - - if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); - } - //To test: - - //Vertica - //curl -v -X PUT -H "Accept: application/json" -H "Content-type: application/json" -d '{"name":"vertica","text":"from vertica [ operation == 102 and duration >= 1000 ] insert into action \u0027CANCEL\u0027 as action","comment":"Added by Administrator"}' http://sqws123.houston.hp.com:50030/rule - - //Trafodion - //curl -v -X PUT -H "Accept: application/json" -H "Content-type: application/json" -d '{"name":"trafodion","text":"from trafodion [ operation == 100 and beginTimestamp >= 123456 ] insert into action \u0027REJECT\u0027 as action","comment":"Added by Administrator"}' http://sqws123.houston.hp.com:50030/rule - Rule rule = new Rule(model.getName(),model.getText(),model.getComment(),System.currentTimeMillis()); - try { - servlet.getAdmin().open(); - servlet.getAdmin().alterRule(rule); - servlet.getAdmin().close(); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable " + e.getMessage() + CRLF) - .build(); - } - ResponseBuilder response = Response.ok(); - return response.build(); - } - - @POST - @Consumes({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) - public Response post(final RuleModel model, - final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - 
LOG.debug("POST " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); - } - - if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); - } - - //To test: - - //Vertica - //curl -v -X POST -H "Accept: application/json" -H "Content-type: application/json" -d '{"name":"vertica","text":"from vertica [ operation == 102 and duration >= 1000 ] insert into action \u0027CANCEL\u0027 as action","comment":"Added by Administrator"}' http://sqws123.houston.hp.com:50030/rule - - //Trafodion - //curl -v -X POST -H "Accept: application/json" -H "Content-type: application/json" -d '{"name":"trafodion","text":"from trafodion [ operation == 100 and beginTimestamp >= 123456 ] insert into action \u0027REJECT\u0027 as action","comment":"Added by Administrator"}' http://sqws123.houston.hp.com:50030/rule - Rule rule = new Rule(model.getName(),model.getText(),model.getComment(),System.currentTimeMillis()); - try { - servlet.getAdmin().open(); - servlet.getAdmin().addRule(rule); - servlet.getAdmin().close(); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable " + e.getMessage() + CRLF) - .build(); - } - ResponseBuilder response = Response.ok(); - return response.build(); - } - - @DELETE - public Response delete(final RuleModel model, - final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("DELETE " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); - } - - if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); - } - - //To test: - //curl -v -X DELETE -H "Accept: application/json" -H "Content-type: application/json" -d '{"name":"trafodion"}' http://sqws123.houston.hp.com:50030/rule - LOG.debug("Delete rule " + model.toString()); - Rule rule = new Rule(model.getName(),model.getText(),model.getComment(),System.currentTimeMillis()); - try { - servlet.getAdmin().open(); - servlet.getAdmin().deleteRule(rule); - servlet.getAdmin().close(); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); - } - return Response.ok().build(); - } + private static final Log LOG = LogFactory.getLog(RuleResource.class); + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + /** + * Constructor + * + * @throws IOException + */ + public RuleResource() throws IOException { + super(); + } + + @GET + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON }) + public Response get(final @Context UriInfo uriInfo) { + if (LOG.isDebugEnabled()) { + LOG.debug("GET " + uriInfo.getAbsolutePath() + " " + + uriInfo.getQueryParameters()); + } + /* + * RuleListModel ruleListModel = new RuleListModel(); RuleResponse + * ruleResponse = new RuleResponse(); + * + * try { servlet.getAdmin().open(); ruleResponse = + * servlet.getAdmin().rule(); servlet.getAdmin().close(); } catch + * (IOException e) { return + * Response.status(Response.Status.SERVICE_UNAVAILABLE) + * .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF).build(); } + * + * // To test: // curl -v -X GET -H "Accept: application/json" // + * http://sqws123.houston.hp.com:50030/rule // // Should see something + * like this: // + * {"rules":[{"comment":"Added by Matt","name":"trafodion", + * 
"text":"select * from table T1","timestamp":1398215824738}]} // /* if + * (ruleResponse.getRuleList() != null) { for (Rule aRule : + * ruleResponse.getRuleList()) { ruleListModel.add(new + * RuleModel(aRule.getName(), aRule .getValue(), aRule.getComment(), + * aRule.getTimestamp())); } } + */ + ResponseBuilder response = null; + /* + * if (ruleResponse.getRuleList() == null) { response = + * Response.ok("[]"); } else { response = Response.ok(ruleListModel); } + */ + response.cacheControl(cacheControl); + return response.build(); + + } + + @PUT + @Consumes({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON }) + public Response put(final RuleModel model, final @Context UriInfo uriInfo) { + if (LOG.isDebugEnabled()) { + LOG.debug("PUT " + uriInfo.getAbsolutePath() + " " + + uriInfo.getQueryParameters()); + } + + if (servlet.isReadOnly()) { + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF).build(); + } + // To test: + + // Vertica + // curl -v -X PUT -H "Accept: application/json" -H + // "Content-type: application/json" -d '{"name":"vertica","text":"from + // vertica [ operation == 102 and duration >= 1000 ] insert into action + // \u0027CANCEL\u0027 as action","comment":"Added by Administrator"}' + // http://sqws123.houston.hp.com:50030/rule + + // Trafodion + // curl -v -X PUT -H "Accept: application/json" -H + // "Content-type: application/json" -d '{"name":"trafodion","text":"from + // trafodion [ operation == 100 and beginTimestamp >= 123456 ] insert + // into action \u0027REJECT\u0027 as action","comment":"Added by + // Administrator"}' http://sqws123.houston.hp.com:50030/rule + /* + * Rule rule = new Rule(model.getName(), model.getText(), + * model.getComment(), System.currentTimeMillis()); try { + * servlet.getAdmin().open(); servlet.getAdmin().alterRule(rule); + * servlet.getAdmin().close(); } catch (IOException e) { return + * Response.status(Response.Status.SERVICE_UNAVAILABLE) + * .type(MIMETYPE_TEXT) .entity("Unavailable " + e.getMessage() + + * CRLF).build(); } + */ + ResponseBuilder response = Response.ok(); + return response.build(); + } + + @POST + @Consumes({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON }) + public Response post(final RuleModel model, final @Context UriInfo uriInfo) { + if (LOG.isDebugEnabled()) { + LOG.debug("POST " + uriInfo.getAbsolutePath() + " " + + uriInfo.getQueryParameters()); + } + + if (servlet.isReadOnly()) { + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF).build(); + } + + // To test: + + // Vertica + // curl -v -X POST -H "Accept: application/json" -H + // "Content-type: application/json" -d '{"name":"vertica","text":"from + // vertica [ operation == 102 and duration >= 1000 ] insert into action + // \u0027CANCEL\u0027 as action","comment":"Added by Administrator"}' + // http://sqws123.houston.hp.com:50030/rule + + // Trafodion + // curl -v -X POST -H "Accept: application/json" -H + // "Content-type: application/json" -d '{"name":"trafodion","text":"from + // trafodion [ operation == 100 and beginTimestamp >= 123456 ] insert + // into action \u0027REJECT\u0027 as action","comment":"Added by + // Administrator"}' http://sqws123.houston.hp.com:50030/rule + /* + * Rule rule = new Rule(model.getName(), model.getText(), + * model.getComment(), System.currentTimeMillis()); try { + * servlet.getAdmin().open(); servlet.getAdmin().addRule(rule); + * servlet.getAdmin().close(); } catch (IOException e) { return + * 
Response.status(Response.Status.SERVICE_UNAVAILABLE) + * .type(MIMETYPE_TEXT) .entity("Unavailable " + e.getMessage() + + * CRLF).build(); } + */ + ResponseBuilder response = Response.ok(); + return response.build(); + } + + @DELETE + public Response delete(final RuleModel model, final @Context UriInfo uriInfo) { + if (LOG.isDebugEnabled()) { + LOG.debug("DELETE " + uriInfo.getAbsolutePath() + " " + + uriInfo.getQueryParameters()); + } + + if (servlet.isReadOnly()) { + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF).build(); + } + + // To test: + // curl -v -X DELETE -H "Accept: application/json" -H + // "Content-type: application/json" -d '{"name":"trafodion"}' + // http://sqws123.houston.hp.com:50030/rule + /* + * LOG.debug("Delete rule " + model.toString()); Rule rule = new + * Rule(model.getName(), model.getText(), model.getComment(), + * System.currentTimeMillis()); try { servlet.getAdmin().open(); + * servlet.getAdmin().deleteRule(rule); servlet.getAdmin().close(); } + * catch (IOException e) { return + * Response.status(Response.Status.SERVICE_UNAVAILABLE) + * .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF).build(); } + */ + return Response.ok().build(); + } } diff --git a/wms/src/main/java/org/trafodion/wms/rest/StreamResource.java b/wms/src/main/java/org/trafodion/wms/rest/StreamResource.java deleted file mode 100644 index 09b71646..00000000 --- a/wms/src/main/java/org/trafodion/wms/rest/StreamResource.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright 2010 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.trafodion.wms.rest; - -import java.io.IOException; - -import javax.ws.rs.DELETE; -import javax.ws.rs.GET; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Produces; -import javax.ws.rs.Consumes; -import javax.ws.rs.core.CacheControl; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.Response.ResponseBuilder; -import javax.ws.rs.core.UriInfo; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.trafodion.wms.rest.model.StreamListModel; -import org.trafodion.wms.rest.model.StreamModel; -import org.trafodion.wms.thrift.generated.Stream; -import org.trafodion.wms.thrift.generated.StreamResponse; - -public class StreamResource extends ResourceBase { - private static final Log LOG = LogFactory.getLog(StreamResource.class); - - static CacheControl cacheControl; - static { - cacheControl = new CacheControl(); - cacheControl.setNoCache(true); - cacheControl.setNoTransform(false); - } - /** - * Constructor - * @throws IOException - */ - public StreamResource() throws IOException { - super(); - } - - @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) - public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); - } - - StreamListModel streamListModel = new StreamListModel(); - StreamResponse streamResponse = new StreamResponse(); - - try { - servlet.getAdmin().open(); - streamResponse = servlet.getAdmin().stream(); - servlet.getAdmin().close(); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); - } - - //To test: - //curl -v -X GET -H "Accept: application/json" http://sqws123.houston.hp.com:50030/stream - // - //Should see something like this: - //{"streams":[{"comment":"Added by Administrator","name":"vertica","text":"user,string,application,string,transactionId,string,poolName,string,requestId,string,memoryAllocated,string,rowsSent,string,requestType,string,type,string,duration,string,sessionId,string,request,string,userName,string,statementId,string","timestamp":1398215824738}]} - // - if(streamResponse.getStreamList() != null){ - for(Stream aStream: streamResponse.getStreamList()){ - streamListModel.add(new StreamModel(aStream.getName(),aStream.getValue(),aStream.getComment(),aStream.getTimestamp())); - } - } - - ResponseBuilder response; - if(streamResponse.getStreamList() == null){ - response = Response.ok("[]"); - } else { - response = Response.ok(streamListModel); - } - - response.cacheControl(cacheControl); - return response.build(); - - } - - @PUT - @Consumes({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) - public Response put(final StreamModel model, - final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("PUT " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); - } - - if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); - } - //To test: - //Vertica - //curl -v -X PUT -H "Accept: application/json" -H "Content-type: application/json" -d 
'{"name":"vertica","text":"user,string,application,string,transactionId,string,poolName,string,requestId,string,memoryAllocated,string,rowsSent,string,requestType,string,type,string,duration,string,sessionId,string,request,string,userName,string,statementId,string"}' http://sqws123.houston.hp.com:50030/stream - - //Trafodion - //curl -v -X PUT -H "Accept: application/json" -H "Content-type: application/json" -d '{"name":"trafodion","text":"type,string,userName,string,sessionId,string,aggrEstimatedRowsUsed,float,deltaEstimatedRowsUsed,float,deltaEstimatedRowsAccessed,float,aggrEstimatedRowsAccessed,float,deltaNumRows,long,deltaRowsRetrieved,long,deltaRowsAccessed,long,aggrRowsRetrieved,long,aggrRowsAccessed,long,aggrNumRowsIUD,long"}' http://sqws123.houston.hp.com:50030/stream - LOG.debug("Alter stream " + model.toString()); - Stream stream = new Stream(model.getName(),model.getText(),model.getComment(),System.currentTimeMillis()); - try { - servlet.getAdmin().open(); - servlet.getAdmin().alterStream(stream); - servlet.getAdmin().close(); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); - } - ResponseBuilder response = Response.ok(); - return response.build(); - } - - @POST - @Consumes({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) - public Response post(final StreamModel model, - final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("POST " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); - } - - if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); - } - - //To test: - - //Vertica - //curl -v -X POST -H "Accept: application/json" -H "Content-type: application/json" -d '{"name":"vertica","text":"user,string,application,string,transactionId,string,poolName,string,requestId,string,memoryAllocated,string,rowsSent,string,requestType,string,type,string,duration,string,sessionId,string,request,string,userName,string,statementId,string"}' http://sqws123.houston.hp.com:50030/stream - - //Trafodion - //curl -v -X POST -H "Accept: application/json" -H "Content-type: application/json" -d '{"name":"trafodion","text":"type,string,userName,string,sessionId,string,aggrEstimatedRowsUsed,float,deltaEstimatedRowsUsed,float,deltaEstimatedRowsAccessed,float,aggrEstimatedRowsAccessed,float,deltaNumRows,long,deltaRowsRetrieved,long,deltaRowsAccessed,long,aggrRowsRetrieved,long,aggrRowsAccessed,long,aggrNumRowsIUD,long"}' http://sqws123.houston.hp.com:50030/stream - LOG.debug("Add stream " + model.toString()); - Stream stream = new Stream(model.getName(),model.getText(),model.getComment(),System.currentTimeMillis()); - try { - servlet.getAdmin().open(); - servlet.getAdmin().addStream(stream); - servlet.getAdmin().close(); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); - } - ResponseBuilder response = Response.ok(); - return response.build(); - } - - @DELETE - public Response delete(final StreamModel model, - final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("DELETE " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); - } - - if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); - } - - //To test: - - //Vertica - //curl -v -X DELETE -H "Accept: 
application/json" -H "Content-type: application/json" -d '{"name":"vertica"}' http://sqws123.houston.hp.com:50030/stream - - //Trafodion - //curl -v -X DELETE -H "Accept: application/json" -H "Content-type: application/json" -d '{"name":"trafodion"}' http://sqws123.houston.hp.com:50030/stream - LOG.debug("Delete stream " + model.toString()); - Stream stream = new Stream(model.getName(),model.getText(),model.getComment(),System.currentTimeMillis()); - try { - servlet.getAdmin().open(); - servlet.getAdmin().deleteStream(stream); - servlet.getAdmin().close(); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); - } - return Response.ok().build(); - } -} diff --git a/wms/src/main/java/org/trafodion/wms/rest/WorkloadResource.java b/wms/src/main/java/org/trafodion/wms/rest/WorkloadResource.java deleted file mode 100644 index d982c117..00000000 --- a/wms/src/main/java/org/trafodion/wms/rest/WorkloadResource.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright 2010 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.trafodion.wms.rest; - -import java.util.*; -import java.io.IOException; - -import javax.ws.rs.GET; -import javax.ws.rs.Produces; -import javax.ws.rs.core.CacheControl; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.Response.ResponseBuilder; -import javax.ws.rs.core.UriInfo; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.joda.time.*; -import org.joda.time.format.*; - -import org.trafodion.wms.Constants; -import org.trafodion.wms.thrift.generated.*; -import org.trafodion.wms.rest.model.WorkloadListModel; -import org.trafodion.wms.rest.model.WorkloadModel; -import org.trafodion.wms.thrift.generated.WorkloadResponse; - -public class WorkloadResource extends ResourceBase { - private static final Log LOG = - LogFactory.getLog(WorkloadResource.class); - - static CacheControl cacheControl; - static { - cacheControl = new CacheControl(); - cacheControl.setNoCache(true); - cacheControl.setNoTransform(false); - } - - /** - * Constructor - * @throws IOException - */ - public WorkloadResource() throws IOException { - super(); - - } - - public static String getDateTimeFromJulian(long aJulianTimestamp) { - DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss"); - long NANO_SECOND_FROM_1970 = 621355968000000000L; - long millis_till_1970 = 210866760000009L; - long milliTime = (aJulianTimestamp / 1000) - millis_till_1970; - - long secondsSinceEpoch = milliTime / 1000; - //DateTime theDateTime = new DateTime((secondsSinceEpoch * 10000000) + NANO_SECOND_FROM_1970); - DateTime theDateTime = new DateTime(milliTime); - return theDateTime.toString(fmt); -} - - @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) - public Response get(final @Context UriInfo uriInfo) { - if (LOG.isDebugEnabled()) { - LOG.debug("GET " + uriInfo.getAbsolutePath()); - } - - WorkloadListModel workloadListModel = new WorkloadListModel(); - WorkloadResponse workloadResponse = new WorkloadResponse(); - - try { - servlet.getAdmin().open(); - workloadResponse = servlet.getAdmin().workload(); - servlet.getAdmin().close(); - } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); - } - - //To test: - //curl -v -X GET -H "Accept: application/json" http://sqws123.houston.hp.com:50030/workload - // - if(workloadResponse.getWorkloadList() != null){ - for(Request aWorkload: workloadResponse.getWorkloadList()){ - //Fixed fields - String workloadId = aWorkload.getData().getKeyValues().get(Constants.WORKLOAD_ID).getStringValue(); - String state = aWorkload.getData().getKeyValues().get(Constants.STATE).getStringValue(); - String subState = aWorkload.getData().getKeyValues().get(Constants.SUBSTATE).getStringValue(); - String type = aWorkload.getData().getKeyValues().get(Constants.TYPE).getStringValue(); - String workloadText = null; - if(aWorkload.getData().getKeyValues().get("request") != null) - workloadText = aWorkload.getData().getKeyValues().get("request").getStringValue(); - else if (aWorkload.getData().getKeyValues().get("queryText") != null) - workloadText = aWorkload.getData().getKeyValues().get("queryText").getStringValue(); - if(workloadText != null) - workloadText = workloadText.replaceAll("[\\r\\t]",""); - //Key value pairs - StringBuilder workloadDetails = new StringBuilder(); - if(aWorkload.getData().getKeyValues() != null){ - boolean isFirst = true; - for (String key: 
aWorkload.getData().getKeyValues().keySet()) { - if(aWorkload.getData().getKeyValues().get(key).isSetByteValue()) { - if(! isFirst) workloadDetails.append(", "); - workloadDetails.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getByteValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetShortValue()) { - if(! isFirst) workloadDetails.append(", "); - workloadDetails.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getShortValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetIntValue()) { - if(! isFirst) workloadDetails.append(", "); - workloadDetails.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getIntValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetLongValue()) { - long time; - if(! isFirst) workloadDetails.append(", "); - if(key.equalsIgnoreCase("beginTimestamp")) - workloadDetails.append(key + "=" + new Date(aWorkload.getData().getKeyValues().get(Constants.BEGIN_TIMESTAMP).getLongValue())); - else if(key.equalsIgnoreCase("endTimestamp")) - workloadDetails.append(key + "=" + new Date(aWorkload.getData().getKeyValues().get(Constants.END_TIMESTAMP).getLongValue())); - else if(key.equalsIgnoreCase("lastUpdated")) - workloadDetails.append(key + "=" + new Date(aWorkload.getHeader().getServerLastUpdated())); - else if(key.equalsIgnoreCase("queryStartTime")) { - time = aWorkload.getData().getKeyValues().get("queryStartTime").getLongValue(); - if(time != 0L) - workloadDetails.append(key + "=" + getDateTimeFromJulian(time)); - else - workloadDetails.append(key + "=" + time); - } else if(key.equalsIgnoreCase("queryEndTime")) { - time = aWorkload.getData().getKeyValues().get("queryEndTime").getLongValue(); - if(time != 0L) - workloadDetails.append(key + "=" + getDateTimeFromJulian(time)); - else - workloadDetails.append(key + "=" + time); - } else if(key.equalsIgnoreCase("compileStartTime")) { - time = aWorkload.getData().getKeyValues().get("compileStartTime").getLongValue(); - if(time != 0L) - workloadDetails.append(key + "=" + getDateTimeFromJulian(time)); - else - workloadDetails.append(key + "=" + time); - } else if(key.equalsIgnoreCase("compileEndTime")) { - time = aWorkload.getData().getKeyValues().get("compileEndTime").getLongValue(); - if(time != 0L) - workloadDetails.append(key + "=" + getDateTimeFromJulian(time)); - else - workloadDetails.append(key + "=" + time); - } else - workloadDetails.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getLongValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetFloatValue()) { - if(! isFirst) workloadDetails.append(", "); - workloadDetails.append(key + "=" + aWorkload.getData().getKeyValues().get(key).getFloatValue()); - } else if(aWorkload.getData().getKeyValues().get(key).isSetStringValue()) { - if(key.equalsIgnoreCase("queryText") || key.equalsIgnoreCase("request") - || key.equalsIgnoreCase("state") || key.equalsIgnoreCase("subState") - || key.equalsIgnoreCase("type") || key.equalsIgnoreCase("workloadId")) { - continue; - } else { - String s = aWorkload.getData().getKeyValues().get(key).getStringValue(); - if(! 
isFirst) workloadDetails.append(", "); - workloadDetails.append(key + "=" + s); - } - } - - isFirst = false; - } - } - - workloadListModel.add(new WorkloadModel(workloadId,state,subState,type,workloadText,workloadDetails.toString())); - } - } - - ResponseBuilder response; - if(workloadResponse.getWorkloadList() == null){ - response = Response.ok("[]"); - } else { - response = Response.ok(workloadListModel); - } - response.cacheControl(cacheControl); - return response.build(); - } -} diff --git a/wms/src/main/java/org/trafodion/wms/rest/model/ServerModel.java b/wms/src/main/java/org/trafodion/wms/rest/model/ServerModel.java index 1795517c..652bab15 100644 --- a/wms/src/main/java/org/trafodion/wms/rest/model/ServerModel.java +++ b/wms/src/main/java/org/trafodion/wms/rest/model/ServerModel.java @@ -10,119 +10,107 @@ /** * Simple representation of a server. */ -@XmlRootElement(name="server") +@XmlRootElement(name = "server") public class ServerModel implements Serializable { - private static final long serialVersionUID = 1L; - - private String name; - private String instance; - private String leader; - private String thriftPort; - private String timestamp; - - /** - * Default constructor - */ - public ServerModel() {} - - /** - * Constructor - * @param name - */ - public ServerModel(String name,String instance,String leader,String thriftPort,String ts) { - super(); - this.name = name; - this.instance = instance; - this.leader = leader; - this.thriftPort = thriftPort; - this.timestamp = ts; - } - - /** - * @return the name - */ - @XmlAttribute - public String getName() { - return name; - } - - /** - * @param name the name to set - */ - public void setName(String value) { - this.name = value; - } - - /** - * @return the instance - */ - @XmlAttribute - public String getInstance() { - return instance; - } - - /** - * @param instance the instance to set - */ - public void setInstance(String value) { - this.instance = value; - } - - /** - * @return the leader - */ - @XmlAttribute - public String getLeader() { - return leader; - } - - /** - * @param leader the leader to set - */ - public void setLeader(String value) { - this.leader = value; - } - - /** - * @return the thrift port - */ - @XmlAttribute - public String getthriftPort() { - return thriftPort; - } - - /** - * @param thriftPort - */ - public void setthriftPort(String value) { - this.thriftPort = value; - } - - /** - * @return the timestamp - */ - @XmlAttribute - public String getTimestamp() { - return timestamp; - } - - /** - * @param timestamp the timestamp - */ - public void setTimestamp(String value) { - this.timestamp = value; - } + private static final long serialVersionUID = 1L; + + private String name; + private String instance; + private String leader; + private String timestamp; + + /** + * Default constructor + */ + public ServerModel() { + } + + /** + * Constructor + * + * @param name + */ + public ServerModel(String name, String instance, String leader, String ts) { + super(); + this.name = name; + this.instance = instance; + this.leader = leader; + this.timestamp = ts; + } + + /** + * @return the name + */ + @XmlAttribute + public String getName() { + return name; + } + + /** + * @param name + * the name to set + */ + public void setName(String value) { + this.name = value; + } + + /** + * @return the instance + */ + @XmlAttribute + public String getInstance() { + return instance; + } + + /** + * @param instance + * the instance to set + */ + public void setInstance(String value) { + this.instance = value; + } + + /** + * 
@return the leader + */ + @XmlAttribute + public String getLeader() { + return leader; + } + + /** + * @param leader + * the leader to set + */ + public void setLeader(String value) { + this.leader = value; + } + + /** + * @return the timestamp + */ + @XmlAttribute + public String getTimestamp() { + return timestamp; + } + + /** + * @param timestamp + * the timestamp + */ + public void setTimestamp(String value) { + this.timestamp = value; + } @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("" + name + "\n"); - sb.append("" + instance + "\n"); - sb.append("" + leader + "\n"); - sb.append("" + thriftPort + "\n"); - sb.append("" + new Date(timestamp) + "\n"); - sb.append("\n"); - return sb.toString(); - } + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("" + name + "\n"); + sb.append("" + instance + "\n"); + sb.append("" + leader + "\n"); + sb.append("" + new Date(timestamp) + "\n"); + sb.append("\n"); + return sb.toString(); + } } diff --git a/wms/src/main/java/org/trafodion/wms/rest/model/WorkloadModel.java b/wms/src/main/java/org/trafodion/wms/rest/model/WorkloadModel.java index a1d0221d..a29da34c 100644 --- a/wms/src/main/java/org/trafodion/wms/rest/model/WorkloadModel.java +++ b/wms/src/main/java/org/trafodion/wms/rest/model/WorkloadModel.java @@ -7,140 +7,146 @@ import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; -import org.trafodion.wms.thrift.generated.*; - /** * Simple representation of a workload. */ -@XmlRootElement(name="workload") +@XmlRootElement(name = "workload") public class WorkloadModel implements Serializable { - private static final long serialVersionUID = 1L; - private String workloadId; - private String state; - private String subState; - private String type; - private String workloadText; - private String workloadDetails; - - /** - * Default constructor - */ - public WorkloadModel() {} - - /** - * Constructor - * @param name - */ - public WorkloadModel(String workloadId,String state,String subState,String type,String workloadText,String workloadDetails) { - super(); - - this.workloadId = workloadId; - this.state = state; - this.subState = subState; - this.type = type; - this.workloadText = workloadText; - this.workloadDetails = workloadDetails; - } - - /** - * @return the workoad Id - */ - @XmlAttribute - public String getWorkloadId() { - return workloadId; - } - - /** - * @param value the workload Id to set - */ - public void setWorkloadId(String value) { - this.workloadId = value; - } - - /** - * @return the state - */ - @XmlAttribute - public String getState() { - return state; - } - - /** - * @param value the state to set - */ - public void setState(String value) { - this.state = value; - } - - /** - * @return the subState - */ - @XmlAttribute - public String getSubState() { - return subState; - } - - /** - * @param name the subState to set - */ - public void setSubState(String value) { - this.subState = value; - } - - /** - * @return the type - */ - @XmlAttribute - public String getType() { - return type; - } - - /** - * @param value the type to set - */ - public void setType(String value) { - this.type = value; - } - - /** - * @return the workloadText - */ - @XmlAttribute - public String getWorkloadText() { - return workloadText; - } - - /** - * @param value the workloadText to set - */ - public void setWorkloadText(String value) { - this.workloadText = value; - } - - /** - * @return the workload details - */ - @XmlAttribute - public String 
getWorkloadDetails() { - return workloadDetails; - } - - /** - * @param value the workload details to set - */ - public void setWorkloadDetails(String value) { - this.workloadDetails = value; - } - - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("" + workloadId + "\n"); - sb.append("" + state + "\n"); - sb.append("" + subState + "\n"); - sb.append("" + workloadDetails + "\n"); - sb.append("\n"); - return sb.toString(); - } + private static final long serialVersionUID = 1L; + private String workloadId; + private String state; + private String subState; + private String type; + private String workloadText; + private String workloadDetails; + + /** + * Default constructor + */ + public WorkloadModel() { + } + + /** + * Constructor + * + * @param name + */ + public WorkloadModel(String workloadId, String state, String subState, + String type, String workloadText, String workloadDetails) { + super(); + + this.workloadId = workloadId; + this.state = state; + this.subState = subState; + this.type = type; + this.workloadText = workloadText; + this.workloadDetails = workloadDetails; + } + + /** + * @return the workoad Id + */ + @XmlAttribute + public String getWorkloadId() { + return workloadId; + } + + /** + * @param value + * the workload Id to set + */ + public void setWorkloadId(String value) { + this.workloadId = value; + } + + /** + * @return the state + */ + @XmlAttribute + public String getState() { + return state; + } + + /** + * @param value + * the state to set + */ + public void setState(String value) { + this.state = value; + } + + /** + * @return the subState + */ + @XmlAttribute + public String getSubState() { + return subState; + } + + /** + * @param name + * the subState to set + */ + public void setSubState(String value) { + this.subState = value; + } + + /** + * @return the type + */ + @XmlAttribute + public String getType() { + return type; + } + + /** + * @param value + * the type to set + */ + public void setType(String value) { + this.type = value; + } + + /** + * @return the workloadText + */ + @XmlAttribute + public String getWorkloadText() { + return workloadText; + } + + /** + * @param value + * the workloadText to set + */ + public void setWorkloadText(String value) { + this.workloadText = value; + } + + /** + * @return the workload details + */ + @XmlAttribute + public String getWorkloadDetails() { + return workloadDetails; + } + + /** + * @param value + * the workload details to set + */ + public void setWorkloadDetails(String value) { + this.workloadDetails = value; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("" + workloadId + "\n"); + sb.append("" + state + "\n"); + sb.append("" + subState + "\n"); + sb.append("" + workloadDetails + "\n"); + sb.append("\n"); + return sb.toString(); + } } diff --git a/wms/src/main/java/org/trafodion/wms/script/ScriptContext.java b/wms/src/main/java/org/trafodion/wms/script/ScriptContext.java index 5614d702..46e73cee 100644 --- a/wms/src/main/java/org/trafodion/wms/script/ScriptContext.java +++ b/wms/src/main/java/org/trafodion/wms/script/ScriptContext.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
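The WorkloadModel above is a plain JAXB bean: every getter carries @XmlAttribute and the class is marked @XmlRootElement, so the REST layer can marshal it straight to XML (or JSON via a JAXB-aware provider). Below is a minimal sketch of that round trip, assuming only the model class and the javax.xml.bind API bundled with the Java 6 runtime this project builds against; the class name WorkloadModelXmlSketch and the sample field values are illustrative, not part of the patch.

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.JAXBException;
    import javax.xml.bind.Marshaller;

    import org.trafodion.wms.rest.model.WorkloadModel;

    // Sketch only: marshal a WorkloadModel the way a JAXB-based REST layer could.
    public class WorkloadModelXmlSketch {
        public static void main(String[] args) throws JAXBException {
            // Sample values are made up for illustration.
            WorkloadModel model = new WorkloadModel("wl-1", "RUNNING", "BEGIN",
                    "trafodion", "SELECT 1", "details");
            JAXBContext ctx = JAXBContext.newInstance(WorkloadModel.class);
            Marshaller m = ctx.createMarshaller();
            m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
            // Each @XmlAttribute getter becomes an attribute on the <workload/> element.
            m.marshal(model, System.out);
        }
    }

With formatted output enabled this prints a single <workload .../> element whose attributes mirror the annotated getters, which is why the model needs nothing beyond the default constructor and the setters shown in the diff.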
diff --git a/wms/src/main/java/org/trafodion/wms/script/ScriptManager.java b/wms/src/main/java/org/trafodion/wms/script/ScriptManager.java index e11f1ae8..fcdf02f2 100644 --- a/wms/src/main/java/org/trafodion/wms/script/ScriptManager.java +++ b/wms/src/main/java/org/trafodion/wms/script/ScriptManager.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,167 +16,169 @@ package org.trafodion.wms.script; import java.util.List; -import java.util.HashMap; +import java.util.HashMap; import java.util.Map; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; import java.io.FilenameFilter; -import javax.script.ScriptEngine; -import javax.script.ScriptEngineFactory; -import javax.script.ScriptEngineManager; -import javax.script.ScriptException; +import javax.script.ScriptEngine; +import javax.script.ScriptEngineFactory; +import javax.script.ScriptEngineManager; +import javax.script.ScriptException; import javax.script.CompiledScript; import javax.script.Compilable; import javax.script.Bindings; -import org.trafodion.wms.thrift.generated.*; import org.trafodion.wms.server.store.WorkloadThresholds; import org.apache.log4j.Logger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; public final class ScriptManager { - private static ScriptManager instance = null; - private static final Log LOG = LogFactory.getLog(ScriptManager.class.getName()); - private ScriptEngineManager manager = new ScriptEngineManager(); - private Map m = new HashMap(); - private ScriptManagerWatcher watcherWorker = null; - private static final String PYTHON_SUFFIX = ".py"; - private static final String DEFAULT_SCRIPT_NAME = "hp_default_service" + PYTHON_SUFFIX; - private static String wmsHome = null; - - public synchronized static ScriptManager getInstance() { - if(instance == null) { - instance = new ScriptManager(); - } - return instance; - } - - private ScriptManager() { - StringBuffer sb = new StringBuffer(); - - List engines = manager.getEngineFactories(); - if (engines.isEmpty()) { - LOG.warn("No scripting engines were found"); - return; - } - - sb.append("\nThe following " + engines.size() + " scripting engine(s) were found"); - - for (ScriptEngineFactory engine : engines) { - sb.append("\nEngine name: " + engine.getEngineName() + "\nVersion: " + engine.getEngineVersion()+ "\nLanguage: " + engine.getLanguageName()); - List extensions = engine.getExtensions(); - if (extensions.size() > 0) { - sb.append("\n\tEngine supports the following extensions:"); - for (String e : extensions) { - sb.append("\n\t\t" + e); - } - } - List shortNames = engine.getNames(); - if (shortNames.size() > 0) { - sb.append("\n\tEngine has the following short names:"); - for (String n : engine.getNames()) { - sb.append("\n\t\t" + n); - } - } - sb.append("\n========================="); - } - LOG.debug(sb.toString()); - - //Get -Dwms.home.dir - wmsHome = System.getProperty("wms.home.dir"); - - //Start the scripts directory watcher - watcherWorker = new ScriptManagerWatcher ("ScriptManagerWatcher",wmsHome + "/bin/scripts"); - } - - public synchronized void runScript(Request request, Response response) { - String scriptName; - -// if(name.length() == 0) - 
scriptName = DEFAULT_SCRIPT_NAME; -// else if(! name.endsWith(".py")) -// scriptName = name + PYTHON_SUFFIX; -// else -// scriptName = name; - - try { - ScriptEngine engine = manager.getEngineByName("python"); - Bindings bindings = engine.createBindings(); - bindings.put("request", request); - bindings.put("response",response); - if(engine instanceof Compilable) { - CompiledScript script = m.get(scriptName); - if(script == null) { - LOG.info("Compiling script " + scriptName); - Compilable compilingEngine = (Compilable)engine; - try { - script = compilingEngine.compile(new FileReader("bin/scripts/" + scriptName)); - } catch (Exception e) { - LOG.warn(e.getMessage()); - } - m.put(scriptName, script); - } - script.eval(bindings); - } else { - try { - engine.eval(new FileReader("bin/scripts/" + scriptName), bindings); - } catch (Exception e) { - LOG.warn(e.getMessage()); - } - } - } catch (javax.script.ScriptException se) { - LOG.warn(se.getMessage()); - } - } - - public synchronized void runScript(ScriptContext ctx) { - String scriptName; - - if(ctx.getScriptName().length() == 0) - scriptName = DEFAULT_SCRIPT_NAME; - else if(! ctx.getScriptName().endsWith(".py")) - scriptName = ctx.getScriptName() + PYTHON_SUFFIX; - else - scriptName = ctx.getScriptName(); - - try { - ScriptEngine engine = manager.getEngineByName("python"); - Bindings bindings = engine.createBindings(); - bindings.put("scriptcontext", ctx); - if(engine instanceof Compilable) { - CompiledScript script = m.get(scriptName); - if(script == null) { - LOG.info("Compiling script " + scriptName); - Compilable compilingEngine = (Compilable)engine; - try { - script = compilingEngine.compile(new FileReader("bin/scripts/" + scriptName)); - } catch (Exception e) { - LOG.warn(e.getMessage()); - } - m.put(scriptName, script); - } - script.eval(bindings); - } else { - try { - engine.eval(new FileReader("bin/scripts/" + scriptName), bindings); - } catch (Exception e) { - LOG.warn(e.getMessage()); - } - } - } catch (javax.script.ScriptException se) { - LOG.warn(se.getMessage()); - } - } - - public synchronized void removeScript(String name) { - m.remove(name); - } -} + private static ScriptManager instance = null; + private static final Log LOG = LogFactory.getLog(ScriptManager.class + .getName()); + private ScriptEngineManager manager = new ScriptEngineManager(); + private Map m = new HashMap(); + private ScriptManagerWatcher watcherWorker = null; + private static final String PYTHON_SUFFIX = ".py"; + private static final String DEFAULT_SCRIPT_NAME = "hp_default_service" + + PYTHON_SUFFIX; + private static String wmsHome = null; + + public synchronized static ScriptManager getInstance() { + if (instance == null) { + instance = new ScriptManager(); + } + return instance; + } + + private ScriptManager() { + StringBuffer sb = new StringBuffer(); + + List engines = manager.getEngineFactories(); + if (engines.isEmpty()) { + LOG.warn("No scripting engines were found"); + return; + } + + sb.append("\nThe following " + engines.size() + + " scripting engine(s) were found"); + for (ScriptEngineFactory engine : engines) { + sb.append("\nEngine name: " + engine.getEngineName() + + "\nVersion: " + engine.getEngineVersion() + + "\nLanguage: " + engine.getLanguageName()); + List extensions = engine.getExtensions(); + if (extensions.size() > 0) { + sb.append("\n\tEngine supports the following extensions:"); + for (String e : extensions) { + sb.append("\n\t\t" + e); + } + } + List shortNames = engine.getNames(); + if (shortNames.size() > 0) { + 
sb.append("\n\tEngine has the following short names:"); + for (String n : engine.getNames()) { + sb.append("\n\t\t" + n); + } + } + sb.append("\n========================="); + } + LOG.debug(sb.toString()); + // Get -Dwms.home.dir + wmsHome = System.getProperty("wms.home.dir"); + // Start the scripts directory watcher + watcherWorker = new ScriptManagerWatcher("ScriptManagerWatcher", + wmsHome + "/bin/scripts"); + } + public synchronized void runScript() { + String scriptName; + // if(name.length() == 0) + scriptName = DEFAULT_SCRIPT_NAME; + // else if(! name.endsWith(".py")) + // scriptName = name + PYTHON_SUFFIX; + // else + // scriptName = name; - + try { + ScriptEngine engine = manager.getEngineByName("python"); + Bindings bindings = engine.createBindings(); + // bindings.put("request", request); + // bindings.put("response", response); + if (engine instanceof Compilable) { + CompiledScript script = m.get(scriptName); + if (script == null) { + LOG.info("Compiling script " + scriptName); + Compilable compilingEngine = (Compilable) engine; + try { + script = compilingEngine.compile(new FileReader( + "bin/scripts/" + scriptName)); + } catch (Exception e) { + LOG.warn(e.getMessage()); + } + m.put(scriptName, script); + } + script.eval(bindings); + } else { + try { + engine.eval(new FileReader("bin/scripts/" + scriptName), + bindings); + } catch (Exception e) { + LOG.warn(e.getMessage()); + } + } + } catch (javax.script.ScriptException se) { + LOG.warn(se.getMessage()); + } + } + + public synchronized void runScript(ScriptContext ctx) { + String scriptName; + + if (ctx.getScriptName().length() == 0) + scriptName = DEFAULT_SCRIPT_NAME; + else if (!ctx.getScriptName().endsWith(".py")) + scriptName = ctx.getScriptName() + PYTHON_SUFFIX; + else + scriptName = ctx.getScriptName(); + + try { + ScriptEngine engine = manager.getEngineByName("python"); + Bindings bindings = engine.createBindings(); + bindings.put("scriptcontext", ctx); + if (engine instanceof Compilable) { + CompiledScript script = m.get(scriptName); + if (script == null) { + LOG.info("Compiling script " + scriptName); + Compilable compilingEngine = (Compilable) engine; + try { + script = compilingEngine.compile(new FileReader( + "bin/scripts/" + scriptName)); + } catch (Exception e) { + LOG.warn(e.getMessage()); + } + m.put(scriptName, script); + } + script.eval(bindings); + } else { + try { + engine.eval(new FileReader("bin/scripts/" + scriptName), + bindings); + } catch (Exception e) { + LOG.warn(e.getMessage()); + } + } + } catch (javax.script.ScriptException se) { + LOG.warn(se.getMessage()); + } + } + + public synchronized void removeScript(String name) { + m.remove(name); + } +} diff --git a/wms/src/main/java/org/trafodion/wms/script/ScriptManagerWatcher.java b/wms/src/main/java/org/trafodion/wms/script/ScriptManagerWatcher.java index 277c62f5..b12e5b3c 100644 --- a/wms/src/main/java/org/trafodion/wms/script/ScriptManagerWatcher.java +++ b/wms/src/main/java/org/trafodion/wms/script/ScriptManagerWatcher.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
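The reworked ScriptManager keeps the same JSR-223 pattern as before: look up an engine by name, compile the script once when the engine is Compilable, cache the CompiledScript in a map keyed by script name, and re-evaluate it with fresh Bindings on every call. A minimal sketch of that compile-once/eval-many pattern follows, using the JDK's built-in JavaScript engine purely for illustration; the patch itself targets a Jython engine registered as "python", which has to be on the classpath.

    import javax.script.Bindings;
    import javax.script.Compilable;
    import javax.script.CompiledScript;
    import javax.script.ScriptEngine;
    import javax.script.ScriptEngineManager;
    import javax.script.ScriptException;

    // Sketch only: the compile-once/eval-many pattern used by ScriptManager.
    public class CompiledScriptSketch {
        public static void main(String[] args) throws ScriptException {
            // Assumes a JSR-223 JavaScript engine is present (Rhino/Nashorn on the
            // JDKs this project targets); ScriptManager asks for "python" instead.
            ScriptEngine engine = new ScriptEngineManager().getEngineByName("javascript");
            // ScriptManager would cache this CompiledScript in its Map by script name.
            CompiledScript script = ((Compilable) engine).compile("1 + x");
            Bindings bindings = engine.createBindings();
            bindings.put("x", 41);   // analogous to bindings.put("scriptcontext", ctx)
            Object result = script.eval(bindings);
            System.out.println(result);   // prints 42 (numeric type depends on the engine)
        }
    }

Caching the CompiledScript is what keeps runScript(ScriptContext) cheap on every workload event: only the first call for a given script name pays the compilation cost, and removeScript(name) simply drops the cached entry when the watcher sees the file change.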
diff --git a/wms/src/main/java/org/trafodion/wms/server/Metrics.java b/wms/src/main/java/org/trafodion/wms/server/Metrics.java index 7a72f646..dce808a8 100644 --- a/wms/src/main/java/org/trafodion/wms/server/Metrics.java +++ b/wms/src/main/java/org/trafodion/wms/server/Metrics.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/server/ServerManager.java b/wms/src/main/java/org/trafodion/wms/server/ServerManager.java index e975d8ed..398c1548 100644 --- a/wms/src/main/java/org/trafodion/wms/server/ServerManager.java +++ b/wms/src/main/java/org/trafodion/wms/server/ServerManager.java @@ -22,209 +22,177 @@ import org.apache.hadoop.conf.Configuration; -import org.apache.thrift.TBase; -import org.apache.thrift.TException; -import org.apache.thrift.TDeserializer; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; - -import org.trafodion.wms.thrift.generated.Action; -import org.trafodion.wms.thrift.generated.Data; -import org.trafodion.wms.thrift.generated.Header; -import org.trafodion.wms.thrift.generated.IllegalArgument; -import org.trafodion.wms.thrift.generated.IOError; -import org.trafodion.wms.thrift.generated.Operation; -import org.trafodion.wms.thrift.generated.Request; -import org.trafodion.wms.thrift.generated.Response; -import org.trafodion.wms.thrift.generated.WmsService; -import org.trafodion.wms.thrift.generated.Rule; -import org.trafodion.wms.thrift.generated.Stream; - import org.trafodion.wms.Constants; import org.trafodion.wms.util.Bytes; import org.trafodion.wms.util.WmsConfiguration; import org.trafodion.wms.zookeeper.ZkClient; import org.trafodion.wms.server.stats.PStats; -import org.trafodion.wms.server.rpc.thrift.ThriftRpcServer; import org.trafodion.wms.server.workload.WorkloadStore; -import org.trafodion.wms.cep.ComplexEventProcessor; public final class ServerManager implements Callable { - private static final Log LOG = LogFactory.getLog(ServerManager.class); - private WmsServer wmsServer; - private static Configuration conf; - private String instance; + private static final Log LOG = LogFactory.getLog(ServerManager.class); + private WmsServer wmsServer; + private static Configuration conf; + private String instance; private ZkClient zkc = null; private InetAddress ia; - private int thriftPort; private int infoPort; - private static String serverName; - private static String masterHostName; - private static int masterThriftPort; - private static long masterStartTime; - private ThriftRpcServer trpcs; - private static String parentZnode; + private static String serverName; + private static String masterHostName; + private static int masterThriftPort; + private static long masterStartTime; + // private ThriftRpcServer trpcs; + private static String parentZnode; private ServerLeaderElection sle = null; private PStats pstats = null; private WorkloadStore workloadStore; - private ComplexEventProcessor cep; - private TDeserializer deserializer = new TDeserializer(new 
TBinaryProtocol.Factory()); - - public ServerManager(WmsServer wmsServer) throws Exception { - this.wmsServer = wmsServer; - this.conf = wmsServer.getConfiguration(); - this.instance = wmsServer.getInstance(); - this.zkc = wmsServer.getZkClient(); - this.ia = wmsServer.getInetAddress(); - this.thriftPort = wmsServer.getThriftPort(); - this.infoPort = wmsServer.getInfoPort(); - this.serverName = wmsServer.getServerName(); - this.parentZnode = wmsServer.getZKParentZnode(); + + // private ComplexEventProcessor cep; + // private TDeserializer deserializer = new TDeserializer(new + // TBinaryProtocol.Factory()); + + public ServerManager(WmsServer wmsServer) throws Exception { + this.wmsServer = wmsServer; + this.conf = wmsServer.getConfiguration(); + this.instance = wmsServer.getInstance(); + this.zkc = wmsServer.getZkClient(); + this.ia = wmsServer.getInetAddress(); + this.infoPort = wmsServer.getInfoPort(); + this.serverName = wmsServer.getServerName(); + this.parentZnode = wmsServer.getZKParentZnode(); + } + + @Override + public Boolean call() throws Exception { + ExecutorService pool = Executors.newSingleThreadExecutor(); + + try { + getMaster(); + registerInRunning(); + sle = new ServerLeaderElection(this); + workloadStore = new WorkloadStore(conf, zkc, parentZnode, sle); + // cep = new ComplexEventProcessor(zkc,parentZnode,conf,sle); + pstats = new PStats(conf, instance); + // trpcs = new ThriftRpcServer(wmsServer); + + // Callable serverMonitor = new ServerMonitor(); + // Callable serverRunner = new ServerRunner(); + + long timeoutMillis = 5000; + + while (true) { + /* + * Future monitor = pool.submit(serverMonitor); + * if(false == monitor.get().booleanValue()) { //blocking call + * LOG.info("User program is not running"); + * Future runner = pool.submit(serverRunner); + * ScriptContext scriptContext = runner.get();//blocking call + * + * StringBuilder sb = new StringBuilder(); + * sb.append("exit code [" + scriptContext.getExitCode() + "]"); + * if(! scriptContext.getStdOut().toString().isEmpty()) + * sb.append(", stdout [" + scriptContext.getStdOut().toString() + * + "]"); if(! 
scriptContext.getStdErr().toString().isEmpty()) + * sb.append(", stderr [" + scriptContext.getStdErr().toString() + * + "]"); LOG.info(sb.toString()); + * + * switch(scriptContext.getExitCode()) { case 3: + * LOG.error("Trafodion is not running"); timeoutMillis=60000; + * break; case 127: + * LOG.error("Cannot find user program executable"); + * timeoutMillis=60000; break; default: timeoutMillis=5000; } + * + * } else { timeoutMillis=5000; } + */ + try { + Thread.sleep(timeoutMillis); + } catch (InterruptedException e) { + } + } + + } catch (Exception e) { + e.printStackTrace(); + LOG.error(e); + pool.shutdown(); + throw e; + } + } + + private void getMaster() { + boolean found = false; + + while (!found) { + try { + Stat stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER, false); + if (stat != null) { + List nodes = zkc.getChildren(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER, null); + StringTokenizer st = new StringTokenizer(nodes.get(0), ":"); + while (st.hasMoreTokens()) { + masterHostName = st.nextToken(); + masterThriftPort = Integer.parseInt(st.nextToken()); + masterStartTime = Long.parseLong(st.nextToken()); + } + found = true; + } else { + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + } + } + } catch (Exception e) { + e.printStackTrace(); + LOG.error(e); + } + } } - - @Override - public Boolean call() throws Exception { - ExecutorService pool = Executors.newSingleThreadExecutor(); - - try { - getMaster(); - registerInRunning(); - sle = new ServerLeaderElection(this); - workloadStore = new WorkloadStore(conf,zkc,parentZnode,sle); - cep = new ComplexEventProcessor(zkc,parentZnode,conf,sle); - pstats = new PStats(conf,instance); - trpcs = new ThriftRpcServer(wmsServer); - - //Callable serverMonitor = new ServerMonitor(); - //Callable serverRunner = new ServerRunner(); - - long timeoutMillis=5000; - - while(true) { -/* - Future monitor = pool.submit(serverMonitor); - if(false == monitor.get().booleanValue()) { //blocking call - LOG.info("User program is not running"); - Future runner = pool.submit(serverRunner); - ScriptContext scriptContext = runner.get();//blocking call - - StringBuilder sb = new StringBuilder(); - sb.append("exit code [" + scriptContext.getExitCode() + "]"); - if(! scriptContext.getStdOut().toString().isEmpty()) - sb.append(", stdout [" + scriptContext.getStdOut().toString() + "]"); - if(! scriptContext.getStdErr().toString().isEmpty()) - sb.append(", stderr [" + scriptContext.getStdErr().toString() + "]"); - LOG.info(sb.toString()); - - switch(scriptContext.getExitCode()) { - case 3: - LOG.error("Trafodion is not running"); - timeoutMillis=60000; - break; - case 127: - LOG.error("Cannot find user program executable"); - timeoutMillis=60000; - break; - default: - timeoutMillis=5000; - } - - } else { - timeoutMillis=5000; - } -*/ - try { - Thread.sleep(timeoutMillis); - } catch (InterruptedException e) { } - } - - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - pool.shutdown(); - throw e; - } - } - - private void getMaster(){ - boolean found=false; - - while(! 
found){ - try { - Stat stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER,false); - if(stat != null) { - List nodes = zkc.getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_MASTER,null); - StringTokenizer st = new StringTokenizer(nodes.get(0), ":"); - while(st.hasMoreTokens()) { - masterHostName=st.nextToken(); - masterThriftPort=Integer.parseInt(st.nextToken()); - masterStartTime=Long.parseLong(st.nextToken()); - } - found=true; - } else { - try { - Thread.sleep(5000); - } catch (InterruptedException e) { } - } - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } - } - private void registerInRunning() { - String znode = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING + "/" + serverName + ":" + instance + ":" + thriftPort + ":" + infoPort + ":" + System.currentTimeMillis(); - try { - Stat stat = zkc.exists(znode,false); - if(stat == null) { - zkc.create(znode,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL); - LOG.info("Created znode [" + znode + "]"); - } - } catch (KeeperException.NodeExistsException e) { - //do nothing...leftover from previous shutdown - } catch (Exception e) { - e.printStackTrace(); - LOG.error(e); - } - } - - public String getInstance(){ - return instance; + + private void registerInRunning() { + String znode = parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING + "/" + + serverName + ":" + instance + ":" + infoPort + ":" + + System.currentTimeMillis(); + try { + Stat stat = zkc.exists(znode, false); + if (stat == null) { + zkc.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL); + LOG.info("Created znode [" + znode + "]"); + } + } catch (KeeperException.NodeExistsException e) { + // do nothing...leftover from previous shutdown + } catch (Exception e) { + e.printStackTrace(); + LOG.error(e); + } } - - public String getHostName(){ - return ia.getHostName(); + + public String getInstance() { + return instance; } - - public int getThriftPort(){ - return thriftPort; + + public String getHostName() { + return ia.getHostName(); } - - public ZkClient getZkClient(){ - return zkc; + + public ZkClient getZkClient() { + return zkc; + } + + public String getMasterHostName() { + return masterHostName; } - - public String getMasterHostName(){ - return masterHostName; - } - - public int getMasterThriftPort(){ - return masterThriftPort; - } - - public String getZKParentZnode(){ - return parentZnode; - } - public WorkloadStore getWorkloadStore(){ - return workloadStore; - } - public ComplexEventProcessor getComplexEventProcessor(){ - return cep; - } -} + public int getMasterThriftPort() { + return masterThriftPort; + } + + public String getZKParentZnode() { + return parentZnode; + } + + public WorkloadStore getWorkloadStore() { + return workloadStore; + } +} diff --git a/wms/src/main/java/org/trafodion/wms/server/ServerStatusServlet.java b/wms/src/main/java/org/trafodion/wms/server/ServerStatusServlet.java index a1a45d45..d0620fde 100644 --- a/wms/src/main/java/org/trafodion/wms/server/ServerStatusServlet.java +++ b/wms/src/main/java/org/trafodion/wms/server/ServerStatusServlet.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
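registerInRunning() now publishes hostname:instance:infoPort:startTime as an ephemeral child under the parent znode's servers/running path; the thriftPort component was dropped along with the Thrift RPC server. The sketch below shows the same ephemeral-node registration against a bare ZooKeeper client; the connection string, znode path, and port value are illustrative assumptions, and the real code goes through the project's ZkClient wrapper and the Constants path fragments instead.

    import java.util.concurrent.CountDownLatch;

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch only: ephemeral "running" registration as done in registerInRunning().
    public class RunningZnodeSketch {
        public static void main(String[] args) throws Exception {
            final CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper("localhost:2181", 30000, new Watcher() {
                public void process(WatchedEvent event) {
                    if (event.getState() == Event.KeeperState.SyncConnected) {
                        connected.countDown();
                    }
                }
            });
            connected.await();
            // hostname:instance:infoPort:startTime -- thriftPort no longer appears.
            // Path and values are made up; assumes the parent path already exists.
            String znode = "/wms/servers/running/host1:1:40030:" + System.currentTimeMillis();
            if (zk.exists(znode, false) == null) {
                zk.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
                        CreateMode.EPHEMERAL);
            }
            // The ephemeral node disappears automatically when this session closes.
            zk.close();
        }
    }

Because the node is EPHEMERAL, it vanishes when the server's ZooKeeper session ends, which is what lets the master treat the servers/running znode as a registry of live servers without any explicit deregistration step.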
diff --git a/wms/src/main/java/org/trafodion/wms/server/WmsServer.java b/wms/src/main/java/org/trafodion/wms/server/WmsServer.java index b30f73fc..32dff891 100644 --- a/wms/src/main/java/org/trafodion/wms/server/WmsServer.java +++ b/wms/src/main/java/org/trafodion/wms/server/WmsServer.java @@ -42,7 +42,6 @@ import org.trafodion.wms.script.ScriptManager; ; import org.trafodion.wms.server.store.*; -import org.trafodion.wms.server.connectors.*; public class WmsServer extends Thread { private static final Log LOG = LogFactory.getLog(WmsServer.class); @@ -59,7 +58,6 @@ public class WmsServer extends Thread { private long startTime; private String parentZnode; private int infoPort; - private int thriftPort; private String[] args; private String instance=null; private ExecutorService pool=null; @@ -124,7 +122,6 @@ public void run () { //Setup RPC services ia = InetAddress.getLocalHost(); serverName = ia.getCanonicalHostName(); - thriftPort = findFreePort(); //Setup script manager ScriptManager.getInstance(); @@ -157,25 +154,25 @@ public void run () { } //Setup Hadoop Job Tracker client and Vertica Client - if(conf.getBoolean("hadoop.workloads.enabled",false)) { - LOG.info("Hadoop workloads enabled"); + //if(conf.getBoolean("hadoop.workloads.enabled",false)) { + // LOG.info("Hadoop workloads enabled"); //JTClient jtc = new JTClient(); - } else { - LOG.info("Hadoop workloads disabled"); - } - if(conf.getBoolean("vertica.workloads.enabled",false)) { - LOG.info("Vertica workloads enabled"); - VerticaClient vc = new VerticaClient(); - } else { - LOG.info("Vertica workloads disabled"); - } - if(conf.getBoolean(Constants.YARN_REST_ENABLED,false)) { - LOG.info("Yarn workloads enabled"); + //} else { + // LOG.info("Hadoop workloads disabled"); + //} + //if(conf.getBoolean("vertica.workloads.enabled",false)) { + // LOG.info("Vertica workloads enabled"); + // VerticaClient vc = new VerticaClient(); + //} else { + // LOG.info("Vertica workloads disabled"); + //} + //if(conf.getBoolean(Constants.YARN_REST_ENABLED,false)) { + // LOG.info("Yarn workloads enabled"); //final ExecutorService exService = Executors.newSingleThreadExecutor(); //final Future callFuture = exService.submit(new YarnClient(new YarnClientExecute()); - } else { - LOG.info("Yarn workloads disabled"); - } + //} else { + // LOG.info("Yarn workloads disabled"); + //} pool = Executors.newSingleThreadExecutor(); serverManager = new ServerManager(this); @@ -199,10 +196,6 @@ public InetAddress getInetAddress(){ return ia; } - public int getThriftPort(){ - return thriftPort; - } - public String getInstance(){ return instance; } diff --git a/wms/src/main/java/org/trafodion/wms/server/connectors/JTClient.java b/wms/src/main/java/org/trafodion/wms/server/connectors/JTClient.java deleted file mode 100644 index edffa29b..00000000 --- a/wms/src/main/java/org/trafodion/wms/server/connectors/JTClient.java +++ /dev/null @@ -1,255 +0,0 @@ -/* -package org.trafodion.wms.server.connectors; - -import java.io.IOException; -import java.util.*; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.*; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.JobStatus; -import org.apache.hadoop.mapred.RunningJob; -import org.apache.hadoop.mapred.JobID; -import org.apache.hadoop.util.*; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.trafodion.wms.util.WmsConfiguration; -import org.trafodion.wms.thrift.generated.*; -import 
org.trafodion.wms.rpc.thrift.RpcHandler; - -public class JTClient implements Runnable { - private static final Log LOG = LogFactory.getLog(JTClient.class.getName()); - private Configuration conf = WmsConfiguration.create(); - private RpcHandler rpch; - private Thread thrd; - - public JTClient(RpcHandler rpch){ - this.rpch = rpch; - thrd = new Thread(this); - thrd.setDaemon(true); - thrd.start(); - } - - public void run() { - - class JobStore { - private String workloadId; - private long globalCounter; - private long jobStartTime; - private JobID jobID; - private WorkloadRequest request = null; - - JobStore(JobID jobID) { - this.workloadId = ""; - this.globalCounter = 0L; - this.jobStartTime = 0L; - this.jobID = jobID; - } - } - - HashMap< String, JobStore> progressMap = new HashMap(); - - JobClient client = null; - long globalCounter = 0; - -// conf.set("mapred.job.tracker", "sq084.houston.hp.com:9077"); -// long jobTimeoutMins = conf.getLong("wms.job.timeout.min", 0); -// LOG.info("Job timeout is " + jobTimeoutMins + " minutes"); - - String jobTracker = conf.get("hadoop.mapred.job.tracker", "sq084.houston.hp.com"); - String jobTrackerPort = conf.get("hadoop.mapred.job.tracker.port", "9077"); - jobTracker = jobTracker + ":" + jobTrackerPort; - LOG.info("Conf JobTracker is: " + jobTracker); - conf.set("mapred.job.tracker", jobTracker); - - while (true) { - try { - client = new JobClient(new JobConf(conf)); - LOG.info("JobClient started"); - break; - } catch (IOException ioe) { - LOG.error("Ignoring exception JobClient " + ioe); - } - try { - Thread.sleep(3000); - } catch (Exception e) { - LOG.info("Unable to sleep until next JobClient " + e); - } - } - - while (true){ - JobStatus[] jobStatuses = null; - RunningJob runningJob = null; - - try { - jobStatuses = client.jobsToComplete(); - } catch (Exception e) { - LOG.debug("Ignoring exception jobStatuses " + e); - } - - if (jobStatuses == null || jobStatuses.length <= 0) { - LOG.debug("There are no active jobs"); - try { - Thread.sleep(3000); - continue; - } catch (Exception e) { - LOG.info("Unable to sleep until next JobClient " + e); - } - } - - LOG.debug("There are " + + jobStatuses.length + " active jobs" ); - globalCounter++; - - for (JobStatus jobStatus : jobStatuses) { - try { - long jobEndTime = 0L; - long duration = 0L; - - String jobState; - int runState = jobStatus.getRunState(); - - runningJob = client.getJob(jobStatus.getJobID()); - //LOG.info("JobID=" + runningJob + ",JobState=" + runState); - TaskReport[] mapReports = client.getMapTaskReports(jobStatus.getJobID()); - for (TaskReport r : mapReports) { - if (lastTaskEndTime < r.getFinishTime()) { - lastTaskEndTime = r.getFinishTime(); - } - } - - TaskReport[] reduceReports = client.getReduceTaskReports(jobStatus.getJobID()); - for (TaskReport r : reduceReports) { - if (lastTaskEndTime < r.getFinishTime()) { - lastTaskEndTime = r.getFinishTime(); - } - } - - synchronized(progressMap){ - if (!progressMap.containsKey(jobStatus.getJobID().toString())){ - jobEndTime = System.currentTimeMillis(); - duration = jobEndTime - jobStatus.getStartTime(); - JobStore job = new JobStore(jobStatus.getJobID()); - if (job != null ){ - job.globalCounter = globalCounter; - job.jobStartTime = jobStatus.getStartTime(); - progressMap.put(jobStatus.getJobID().toString(), job); - LOG.debug("HADOOP BEGIN: " + jobStatus.getJobID().toString()); - job.request = WorkloadRequest.newBuilder() - .setWorkloadId("") - .setOperation(OperationType.BEGIN) - .setJobId(jobStatus.getJobID().toString()) - 
.setJobType(JobType.HADOOP) - .setJobText(runningJob.getJobName()) - .setJobState(JobStatus.getJobRunState(jobStatus.getRunState())) - .setJobSubState("BEGIN") - .setUserName(jobStatus.getUsername()) - .setStartTimestamp(jobStatus.getStartTime()) - .setEndTimestamp(jobEndTime) - .setMapPct((int)(100.0 * jobStatus.mapProgress())) - .setReducePct((int)(100.0 * jobStatus.reduceProgress())) - .setDuration(duration) - .setParentId("") - .setParentKey(jobStatus.getJobID().toString()) - .build(); - LOG.debug("Begin workload..." + job.request); - WorkloadResponse response = rpch.send(job.request); - switch(response.getAction()){ - case REJECT: - case CANCEL: - LOG.debug("killJob..." + job.request); - runningJob.killJob(); - break; - default: - break; - } - } - } else { - JobStore job = progressMap.get(jobStatus.getJobID().toString()); - LOG.debug("HADOOP UPDATE: " + jobStatus.getJobID().toString()); - job.globalCounter = globalCounter; - jobEndTime = System.currentTimeMillis(); - duration = jobEndTime - jobStatus.getStartTime(); - job.request.setOperation(OperationType.UPDATE); - job.request.setStartTimestamp(jobStatus.getStartTime()); - job.request.setEndTimestamp(jobEndTime); - job.request.setDuration(duration); - job.request.setUserName(jobStatus.getUsername()); - job.request.setJobId(jobStatus.getJobID().toString()); - job.request.setJobText(runningJob.getJobName()); - job.request.setJobState(JobStatus.getJobRunState(jobStatus.getRunState())); - job.request.setJobSubState("UPDATE"); - job.request.setMapPct((int)(100.0 * jobStatus.mapProgress())); - job.request.setReducePct((int)(100.0 * jobStatus.reduceProgress())); - LOG.debug("Update workload..." + job.request); - WorkloadResponse response = rpch.send(job.request); - switch(response.getAction()){ - case REJECT: - case CANCEL: - LOG.debug("killJob..." + job.request); - runningJob.killJob(); - break; - default: - break; - } - } //else - } //synchronized - client.getSetupTaskReports(jobStatus.getJobID()); - client.getCleanupTaskReports(jobStatus.getJobID()); - // - } catch (Exception e){ - LOG.error(e); - break; - } - } // for - - synchronized(progressMap){ - if (!progressMap.isEmpty()){ - - for (Iterator i = progressMap.keySet().iterator(); i.hasNext(); ) - { - String key = i.next(); - JobStore job = progressMap.get(key); - JobID jobID = job.jobID; - if (job.globalCounter != globalCounter){ - long jobStartTime = job.jobStartTime; - long jobEndTime = System.currentTimeMillis(); - long duration = jobEndTime - jobStartTime; - try { - LOG.debug("HADOOP END: " + jobID); - runningJob = client.getJob(jobID); - //String subState = JobStatus.getJobRunState(runningJob.getJobState()); - job.request.setOperation(OperationType.END); - job.request.setJobId(key); - job.request.setJobText(runningJob.getJobName()); - job.request.setJobState(JobStatus.getJobRunState(runningJob.getJobState())); - job.request.setJobSubState(job.request.getJobState()); - //job.request.setJobSubState(subState); - job.request.setStartTimestamp(jobStartTime); - job.request.setEndTimestamp(jobEndTime); - job.request.setDuration(duration); - job.request.setMapPct((int)(100.0 * runningJob.mapProgress())); - job.request.setReducePct((int)(100.0 * runningJob.reduceProgress())); - LOG.debug("End workload..." 
+ job.request); - rpch.send(job.request); - i.remove(); - } catch (IOException ioe) { - LOG.error("Ignoring exception "+ ioe); - } //catch - }//if - }//for - }//if - } //synchronized - - try { - Thread.sleep(1000); - } catch (Exception e) { - LOG.error("Unable to sleep until next cycle" + e); - break; - } - } - } -} -*/ \ No newline at end of file diff --git a/wms/src/main/java/org/trafodion/wms/server/connectors/VerticaClient.java b/wms/src/main/java/org/trafodion/wms/server/connectors/VerticaClient.java deleted file mode 100644 index 55ac1b03..00000000 --- a/wms/src/main/java/org/trafodion/wms/server/connectors/VerticaClient.java +++ /dev/null @@ -1,438 +0,0 @@ -package org.trafodion.wms.server.connectors; - -import java.io.IOException; -import java.util.*; -import java.sql.*; -import java.text.*; - -import org.apache.hadoop.conf.Configuration; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.apache.thrift.TException; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.trafodion.wms.thrift.generated.*; -import org.trafodion.wms.thrift.generated.Action; -import org.trafodion.wms.client.WmsClient; -import org.trafodion.wms.client.ClientData; -import org.trafodion.wms.util.WmsConfiguration; - -import com.vertica.jdbc.Driver; - -public class VerticaClient implements Runnable { - private static final Log LOG = LogFactory.getLog(VerticaClient.class.getName()); - private Configuration conf = WmsConfiguration.create(); - private Thread thrd; - - private static final String USER_NAME_KEY = "vertica.jdbc.url.user"; - private static final String DEFAULT_USER_NAME = "vertica"; - - private static final String USER_PASSWORD_KEY = "vertica.jdbc.url.password"; - private static final String DEFAULT_USER_PASSWORD = "redhat06"; - - private static final String JDBC_URL_KEY = "vertica.jdbc.url"; - private static final String DEFAULT_JDBC_URL = "jdbc:vertica://sq151.houston.hp.com:5433/vmartdb"; - - private static final String COLLECT_DELAY_KEY = "vertica.jdbc.collect.delay"; //delay in seconds when connector starts to collect data measured from start of the query - private static final int DEFAULT_COLLECT_DELAY = 10; - - private static final String COLLECT_TIMEOUT_KEY = "vertica.jdbc.collect.timeout"; //timeout in seconds between collecting cicles - private static final int DEFAULT_COLLECT_TIMEOUT = 10; - - public VerticaClient(){ - thrd = new Thread(this); - thrd.setDaemon(true); - thrd.start(); - } - public VerticaClient(String args[]){ - thrd = new Thread(this); - thrd.setDaemon(true); - thrd.start(); - } - public void run() { - LOG.info("run..."); - class JobStore { - ClientData request; - ClientData response; - Map m; - - Timestamp timestamp; - - JobStore(Timestamp timestamp) { - request = new ClientData(); - response = new ClientData(); - m = new HashMap(); - request.setKeyValues(m); - this.timestamp = timestamp; - } - } - LOG.info("VerticaClient started."); -// Load JDBC driver - try { - Class.forName("com.vertica.jdbc.Driver"); - } catch (ClassNotFoundException e) { -// Could not find the driver class. Likely an issue -// with finding the .jar file. - LOG.info("Could not find the JDBC driver class." 
+ e); - return; - } -// Create property object to hold username & password - Properties myProp = new Properties(); - - myProp.put("user", conf.get(USER_NAME_KEY, DEFAULT_USER_NAME)); - myProp.put("password", conf.get(USER_PASSWORD_KEY, DEFAULT_USER_PASSWORD)); - int collectDelay = conf.getInt(COLLECT_DELAY_KEY, DEFAULT_COLLECT_DELAY); - int collectTimeout = conf.getInt(COLLECT_TIMEOUT_KEY, DEFAULT_COLLECT_TIMEOUT); - Connection conn; - Statement stmt; - PreparedStatement pstmt; - PreparedStatement pstmtErrorMessage; - ResultSet rs; - ResultSet rsErrorMessage; - ResultSetMetaData md; - - String sessionPool = "SET SESSION RESOURCE_POOL = sysquery"; - - String queryErrorMessage = - "SELECT Error_Code, Message " - + "FROM error_messages " - + "WHERE Session_ID = ? and Transaction_ID = ? and Statement_ID = ?"; - - String query = "AT EPOCH LATEST SELECT /* aaaa */ " - + "t1.Session_ID, " - + "t1.Transaction_ID, " - + "t1.Statement_ID, " - + "t1.Request_Type, " - + "t1.Request, " - + "t1.Start_Timestamp, " - + "t1.user_name, " - + "case when t2.pool_name is null then '' else t2.pool_name end, " - + "t3.counter_name, " - + "sum(t3.counter_value) " - + "FROM query_requests as t1 " - + "left join v_monitor.resource_acquisitions as t2 " - + "on t2.Transaction_ID = t1.Transaction_ID and t2.Statement_ID = t1.Statement_ID " - + "left join execution_engine_profiles as t3 " - + "on t3.Session_ID = t1.Session_ID and t3.Transaction_ID = t1.Transaction_ID and " - + "t3.Statement_ID = t1.Statement_ID " - + "WHERE t3.counter_name in ('rows sent', 'memory allocated (bytes)') and " - + "substr(t1.request,0,34) <> 'AT EPOCH LATEST SELECT /* aaaa */' and " - + "t3.counter_name is not null and " - + "t1.is_executing is true " - + "group by " - + "t1.Session_ID, " - + "t1.Transaction_ID, " - + "t1.Statement_ID, " - + "t1.Request_Type, " - + "t1.Request, " - + "t1.Start_Timestamp, " - + "t1.user_name, " - + "t2.pool_name, " - + "t3.counter_name " - + "order by t1.Session_ID, " - + "t1.Transaction_ID, " - + "t1.Statement_ID, " - + "t3.counter_name "; - - LOG.debug("query: |" + query + "|"); - - String jdbcUrl = conf.get(JDBC_URL_KEY, DEFAULT_JDBC_URL); - - try { - conn = DriverManager.getConnection(jdbcUrl, myProp); - } catch (SQLException e) { - LOG.info("Could not connect to database." + e); - return; - } - try { - stmt = conn.createStatement(); - } catch (SQLException e) { - LOG.info("Could not create Statement." + e); - return; - } - LOG.debug("VerticaClient JDBC Driver started."); -//========================================================== - try { - stmt.execute(sessionPool); - stmt.close(); - } catch (SQLException e) { - LOG.info("Could not execute sessionPool." + e); - return; - } -//========================================================== - try { - pstmtErrorMessage = conn.prepareStatement(queryErrorMessage); - } catch (SQLException e) { - LOG.info("Could not prepare Query." + e); - return; - } -//========================================================== - try { - pstmt = conn.prepareStatement(query); - } catch (SQLException e) { - LOG.info("Could not prepare Query." 
+ e); - return; - } - - HashMap< String, JobStore> progressMap = new HashMap(); - - String key; - String sessionId; - long transactionId; - long statementId; - Timestamp timestamp; - Timestamp startTimestamp; - Timestamp endTimestamp; - long duration = 0L; - boolean isExecuting = false; - String counterName = ""; - long counterSum = 0L; - - long startTime = 0L; - long endTime = 0L; - - JobStore job = null; - WmsClient wmsConn = null; - - wmsConn = new WmsClient(); - wmsConn.open(); - - while (true){ - long jobTimeoutMins = 0; - - synchronized(progressMap){ - - try { - startTime = System.currentTimeMillis(); - LOG.debug("***********************************Query started."); - rs = pstmt.executeQuery(); - endTime = System.currentTimeMillis(); - LOG.debug("***********************************Query elapse time: " + (endTime - startTime)); - - timestamp = new Timestamp(System.currentTimeMillis()); - - while (rs.next()) { - - sessionId = rs.getString("Session_Id").trim(); - transactionId = rs.getLong("Transaction_Id"); - statementId = rs.getLong("Statement_Id"); - key = sessionId + "_" + Long.toString(transactionId) + "_" + Long.toString(statementId); - - startTimestamp = rs.getTimestamp("Start_Timestamp"); - - LOG.debug("startTimestamp: " + startTimestamp); - - if (!progressMap.containsKey(key)){ - LOG.debug("NOT in MAP BEGIN startTimestamp: " + startTimestamp); - - endTimestamp = new Timestamp(System.currentTimeMillis()); - duration = endTimestamp.getTime() - startTimestamp.getTime(); - - job = new JobStore(timestamp); - job.request.putKeyValue("operation", Operation.OPERATION_BEGIN); - job.request.putKeyValue("state","RUNNING"); - job.request.putKeyValue("subState","BEGIN"); - job.request.putKeyValue("beginTimestamp",startTimestamp.getTime()); - job.request.putKeyValue("endTimestamp",endTimestamp.getTime()); - - job.request.putKeyValue("type","vertica"); - job.request.putKeyValue("userName", rs.getString("USER_NAME").trim()); - job.request.putKeyValue("applicationName","verticaConnector"); - job.request.putKeyValue("sessionId", sessionId); - job.request.putKeyValue("transactionId", transactionId); - job.request.putKeyValue("statementId", statementId); - job.request.putKeyValue("requestType", rs.getString("REQUEST_TYPE").trim()); - job.request.putKeyValue("request", rs.getString("REQUEST").trim()); - job.request.putKeyValue("poolName", rs.getString("POOL_NAME").trim()); - counterName = rs.getString("COUNTER_NAME").trim(); - counterSum = rs.getLong("SUM"); - if (counterName.equals("rows sent")){ - job.request.putKeyValue("rowsSent", counterSum); - } else if (counterName.equals("memory allocated (bytes)")){ - job.request.putKeyValue("memoryAllocated", counterSum); - } - progressMap.put(key, job); - job.response = wmsConn.writeread(job.request); - executeAction(job.response.getKeyValueAction(), transactionId, sessionId, statementId); - LOG.debug("Begin workload request..." + job.request); - LOG.debug("Begin workload response..." 
+ job.response); - - } else { - LOG.debug("In MAP UPDATE startTimestamp: " + startTimestamp); - endTimestamp = new Timestamp(System.currentTimeMillis()); - duration = endTimestamp.getTime() - startTimestamp.getTime(); - - job = progressMap.get(key); - if (timestamp != job.timestamp){ - job.timestamp = timestamp; - job.request.putKeyValue("state","RUNNING"); - job.request.putKeyValue("subState","UPDATE"); - - } - job.request.putKeyValue("operation",Operation.OPERATION_UPDATE); - job.request.putKeyValue("workloadId",job.response.getKeyValueAsString("workloadId")); - job.request.putKeyValue("beginTimestamp",startTimestamp.getTime()); - job.request.putKeyValue("endTimestamp",endTimestamp.getTime()); - job.request.putKeyValue("duration", duration); - counterName = rs.getString("COUNTER_NAME").trim(); - counterSum = rs.getLong("SUM"); - if (counterName.equals("rows sent")){ - job.request.putKeyValue("rowsSent", counterSum); - } else if (counterName.equals("memory allocated (bytes)")){ - job.request.putKeyValue("memoryAllocated", counterSum); - } - job.response = wmsConn.writeread(job.request); - executeAction(job.response.getKeyValueAction(), transactionId, sessionId, statementId); - LOG.debug("Update workload request..." + job.request); - LOG.debug("Update workload response..." + job.response); - } - } // while rs - rs.close(); - - for (Iterator i = progressMap.keySet().iterator(); i.hasNext(); ) - { - key = i.next(); - job = progressMap.get(key); - - LOG.debug("progressMap timestamp: " + timestamp + ", job.timestamp: " + job.timestamp); - - if (job.timestamp != timestamp){ - - LOG.debug("In MAP DELETE startTimestamp: "); - - job = progressMap.get(key); - job.request.putKeyValue("operation",Operation.OPERATION_END); - job.request.putKeyValue("state","COMPLETED"); - - endTimestamp = new Timestamp(System.currentTimeMillis()); - duration = endTimestamp.getTime() - job.request.getKeyValueAsLong("beginTimestamp"); - - pstmtErrorMessage.setString(1, job.request.getKeyValueAsString("sessionId")); - pstmtErrorMessage.setLong(2, job.request.getKeyValueAsLong("transactionId")); - pstmtErrorMessage.setLong(3, job.request.getKeyValueAsLong("statementId")); - rsErrorMessage = pstmtErrorMessage.executeQuery(); - - LOG.debug("session_Id :" + job.request.getKeyValueAsString("sessionId")); - LOG.debug("transaction_Id :" + job.request.getKeyValueAsLong("transactionId")); - LOG.debug("statement_Id :" + job.request.getKeyValueAsLong("transactionId")); - - if (rsErrorMessage.next()) { - LOG.debug("Message :" + rsErrorMessage.getString("Message")); -// job.request.putKeyValue("subState","FAILED(" + rsErrorMessage.getString("Message") + ")"); - job.request.putKeyValue("subState","CANCELED BY ADMIN"); - } - else { - job.request.putKeyValue("subState","SUCCESS"); - } - job.request.putKeyValue("endTimestamp",endTimestamp.getTime()); - job.request.putKeyValue("duration", duration); - job.response = wmsConn.writeread(job.request); - LOG.debug("End workload..." + job.request); - i.remove(); - rsErrorMessage.close(); - } - } - }catch (SQLException e) { - LOG.error("SQL error." 
+ e); - return; - } - - } //synchronize - - try { - Thread.sleep(1000 * collectTimeout); //in miliseconds - } catch (Exception e) { - LOG.error("Unable to sleep until next cycle" + e); - break; - } - } //while true - } // run - - private void executeAction(Action action, Long transactionId, String sessionId, long statementId){ - - String query = ""; - - switch(action){ - case ACTION_CANCEL: case ACTION_KILL: - LOG.debug("ACTION_CANCEL : sessionId: " + sessionId + " statementId: " + statementId); - query = "Select INTERRUPT_STATEMENT( '" + sessionId + "', " + statementId + ")"; - break; - case ACTION_WARNING: - LOG.debug("ACTION_WARNING"); - return; - case ACTION_PRIORITY_LOW: - LOG.debug("ACTION_PRIORITY_LOW : transactionId: " + transactionId); - query = "Select CHANGE_CURRENT_STATEMENT_RUNTIME_PRIORITY( " + transactionId + ", 'low')"; - break; - case ACTION_PRIORITY_MEDIUM: - LOG.debug("ACTION_PRIORITY_MEDIUM : transactionId: " + transactionId); - query = "Select CHANGE_CURRENT_STATEMENT_RUNTIME_PRIORITY( " + transactionId + ",'medium')"; - break; - case ACTION_PRIORITY_HIGH: - LOG.debug("ACTION_PRIORITY_HIGH : transactionId: " + transactionId); - query = "Select CHANGE_CURRENT_STATEMENT_RUNTIME_PRIORITY( " + transactionId + ",'high')"; - break; - case ACTION_CONTINUE: - LOG.debug("ACTION_CONTINUE"); - return; - default: - LOG.debug("Unknown action :" + action); - return; - } - LOG.debug("query: " + query); - - Properties myProp = new Properties(); - myProp.put("user", conf.get(USER_NAME_KEY, DEFAULT_USER_NAME)); - myProp.put("password", conf.get(USER_PASSWORD_KEY, DEFAULT_USER_PASSWORD)); - Connection conn; - Statement stmt; - ResultSet rs; - - String jdbcUrl = conf.get(JDBC_URL_KEY, DEFAULT_JDBC_URL); - - try { - conn = DriverManager.getConnection(jdbcUrl, myProp); - } catch (SQLException e) { - LOG.error("Could not connect to database." + e); - return; - } - try { - stmt = conn.createStatement(); - } catch (SQLException e) { - LOG.error("Could not create Statement." + e); - return; - } - - try { - rs = stmt.executeQuery(query); - while (rs.next()) { - LOG.debug(rs.getString("interrupt_statement").trim()); - } - rs.close(); - }catch (SQLException e) { - LOG.error("SQL error." + e); - } - try { - stmt.close(); - }catch (SQLException e) { - LOG.error("SQL error." + e); - } - try { - conn.close(); - }catch (SQLException e) { - LOG.error("SQL error." 
+ e); - } - return; - } - - public static void main(String args[]) { - VerticaClient client = new VerticaClient(args); - client.run(); - } -} diff --git a/wms/src/main/java/org/trafodion/wms/server/connectors/YarnClient.java b/wms/src/main/java/org/trafodion/wms/server/connectors/YarnClient.java deleted file mode 100644 index 62d3340e..00000000 --- a/wms/src/main/java/org/trafodion/wms/server/connectors/YarnClient.java +++ /dev/null @@ -1,481 +0,0 @@ -/* -package org.trafodion.wms.server.connectors; - -import java.util.*; -import java.util.concurrent.ExecutorService; - -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.net.UnknownHostException; -import java.net.InetSocketAddress; - -import org.apache.http.HttpResponse; -import org.apache.http.client.ClientProtocolException; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.impl.client.DefaultHttpClient; -import org.codehaus.jackson.JsonGenerationException; -import org.codehaus.jackson.type.TypeReference; -import org.codehaus.jackson.JsonFactory; -import org.codehaus.jackson.JsonParser; -import org.codehaus.jackson.JsonToken; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.*; -import org.apache.hadoop.util.*; -import org.apache.hadoop.net.NetUtils; - -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.api.ClientRMProtocol; -import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.ipc.YarnRPC; -import org.apache.hadoop.yarn.util.Records; -import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.util.ConverterUtils; - -import org.trafodion.wms.Constants; -import org.trafodion.wms.util.WmsConfiguration; -import org.trafodion.wms.rpc.thrift.RpcHandler; - -public class YarnClient { - private static final Log LOG = LogFactory.getLog(YarnClient.class.getName()); - private Configuration conf = WmsConfiguration.create(); - private RpcHandler rpch; - String[] args; - - public YarnClient(RpcHandler rpch){ - this.rpch = rpch; - } - - public YarnClient(String[] args) { - this.args = args; - } - - class Application { - - private String id; - private String user; - private String name; - private String queue; - private String state; - private String finalStatus; - private Integer progress; - private String trackingUI; - private String trackingUrl; - private String diagnostics; - private String clusterId; - private Long startedTime; - private Long finishedTime; - private Long elapsedTime; - private String amContainerLogs; - private String amHostHttpAddress; - - Application() throws java.io.IOException { - id = ""; - user = ""; - name = ""; - queue = ""; - state = ""; - finalStatus = ""; - progress = 0; - trackingUI = ""; - trackingUrl = ""; - diagnostics = ""; - clusterId = ""; - startedTime = 0L; - finishedTime = 0L; - elapsedTime = 0L; - amContainerLogs = ""; - amHostHttpAddress = ""; - - } - - void parse (JsonParser jp) throws java.io.IOException { - String fieldname; - while (jp.nextToken() != JsonToken.END_OBJECT) { - fieldname = jp.getCurrentName(); - if ("id".equals(fieldname)){ - 
jp.nextToken(); - id = jp.getText(); - } else if ("user".equals(fieldname)){ - jp.nextToken(); - user = jp.getText(); - } else if ("name".equals(fieldname)){ - jp.nextToken(); - name = jp.getText(); - } else if ("queue".equals(fieldname)){ - jp.nextToken(); - queue = jp.getText(); - } else if ("state".equals(fieldname)){ - jp.nextToken(); - state = jp.getText(); - } else if ("finalStatus".equals(fieldname)){ - jp.nextToken(); - finalStatus = jp.getText(); - } else if ("progress".equals(fieldname)){ - jp.nextToken(); - progress = jp.getIntValue(); - } else if ("trackingUI".equals(fieldname)){ - jp.nextToken(); - trackingUI = jp.getText(); - } else if ("trackingUrl".equals(fieldname)){ - jp.nextToken(); - trackingUrl = jp.getText(); - } else if ("diagnostics".equals(fieldname)){ - jp.nextToken(); - diagnostics = jp.getText(); - } else if ("clusterId".equals(fieldname)){ - jp.nextToken(); - clusterId = jp.getText(); - } else if ("startedTime".equals(fieldname)){ - jp.nextToken(); - startedTime = jp.getLongValue(); - } else if ("finishedTime".equals(fieldname)){ - jp.nextToken(); - finishedTime = jp.getLongValue(); - } else if ("elapsedTime".equals(fieldname)){ - jp.nextToken(); - elapsedTime = jp.getLongValue(); - } else if ("amContainerLogs".equals(fieldname)){ - jp.nextToken(); - amContainerLogs = jp.getText(); - } else if ("amHostHttpAddress".equals(fieldname)){ - jp.nextToken(); - amHostHttpAddress = jp.getText(); - } - } - } -// getters - String getId(){ - return id; - } - String getUser(){ - return user; - } - String getName(){ - return name; - } - String getQueue(){ - return queue; - } - String getState(){ - return state; - } - String getFinalStatus(){ - return finalStatus; - } - Integer getProgress(){ - return progress; - } - String getTrackingUI(){ - return trackingUI; - } - String getTrackingUrl(){ - return trackingUrl; - } - String getDiagnostics(){ - return diagnostics; - } - String getClusterId(){ - return clusterId; - } - Long getStartedTime(){ - return startedTime; - } - Long getFinishedTime(){ - return finishedTime; - } - Long getElapsedTime(){ - return elapsedTime; - } - String getAmContainerLogs(){ - return amContainerLogs; - } - String getAmHostHttpAddress(){ - return amHostHttpAddress; - } -//setters - void setId(String id){ - this.id=id; - } - void setUser(String user){ - this.user=user; - } - void setName(String name){ - this.name=name; - } - void setQueue(String queue){ - this.queue=queue; - } - void setState(String state){ - this.state=state; - } - void setFinalStatus(String finalStatus){ - this.finalStatus=finalStatus; - } - void setProgress(Integer progress){ - this.progress=progress; - } - void setTrackingUI(String trackingUI){ - this.trackingUI=trackingUI; - } - void setTrackingUrl(String trackingUrl){ - this.trackingUrl=trackingUrl; - } - void setDiagnostics(String diagnostics){ - this.diagnostics=diagnostics; - } - void setClusterId(String clusterId){ - this.clusterId=clusterId; - } - void setStartedTime(Long startedTime){ - this.startedTime=startedTime; - } - void setFinishedTime(Long finishedTime){ - this.finishedTime=finishedTime; - } - void setElapsedTime(Long elapsedTime){ - this.elapsedTime=elapsedTime; - } - void setAmContainerLogs(String amContainerLogs){ - this.amContainerLogs=amContainerLogs; - } - void setAmHostHttpAddress(String amHostHttpAddress){ - this.amHostHttpAddress=amHostHttpAddress; - } - - }; - - class ApplicationStore { - private String workloadId; - private long appStartTime; - private Application app; - private WorkloadRequest request = 
null; - - ApplicationStore(Application app) { - this.workloadId = ""; - this.appStartTime = 0L; - this.app = app; - } - String getWorkloadId(){ - return workloadId; - } - long getAppStartTime(){ - return appStartTime; - } - Application getApplication(){ - return app; - } - WorkloadRequest getRequest(){ - return request; - } - void setWorkloadId(String workloadId){ - this.workloadId = workloadId; - } - void setAppStartTime(long appStartTime){ - this.appStartTime = appStartTime; - } - void setApp(Application app){ - this.app = app; - } - void setRequest(WorkloadRequest request){ - this.request = request; - } - }; - - class YarnClientExecute implements Callable { - - @Override - public String call() { - - HashMap< String, ApplicationStore> progressMap = new HashMap(); - long globalCounter = 0; - String msg = ""; - WorkloadResponse wresponse; - String output; - - String yarnRestUrl = conf.get(Constants.YARN_REST_URL, Constants.DEFAULT_YARN_REST_URL); - LOG.info("Yarn Rest Conection url is : " + yarnRestUrl); - - try { - Application app = new Application(); - - while(true){ - DefaultHttpClient httpClient = new DefaultHttpClient(); - HttpGet getRequest = new HttpGet( yarnRestUrl); - getRequest.addHeader("accept", "application/json"); - HttpResponse response = httpClient.execute(getRequest); - - if (response.getStatusLine().getStatusCode() != 200) { - throw new RuntimeException("Failed : HTTP error code : " + response.getStatusLine().getStatusCode()); - } - BufferedReader br = new BufferedReader(new InputStreamReader( (response.getEntity().getContent()))); - - JsonFactory f = new JsonFactory(); - while ((output = br.readLine()) != null) { - LOG.info("raw json=" + output); - - JsonParser jp = f.createJsonParser(output); - while (jp.nextToken() == JsonToken.START_OBJECT) { - - long appEndTime = 0L; - long duration = 0L; - - app.parse(jp); - - synchronized(progressMap){ - String state = app.getState(); - if ("FINISHED".equals(state) || "FAILED".equals(state) || "KILLED".equals(state)) { - if (progressMap.containsKey(app.getId())){ - LOG.debug("YARN END : Application id : " + app.getId() + " state : " + app.getState()); - ApplicationStore as = progressMap.get(app.getId()); - appEndTime = System.currentTimeMillis(); - duration = appEndTime - app.getStartedTime(); - as.getRequest().setOperation(OperationType.END); - as.getRequest().setEndTimestamp(appEndTime); - as.getRequest().setDuration(duration); - as.getRequest().setJobState(app.getState()); - as.getRequest().setJobSubState(app.getFinalStatus()); - as.getRequest().setMapPct((int)(app.getProgress())); - LOG.debug("End workload..." 
+ as.getRequest()); - wresponse = rpch.send(as.getRequest()); - progressMap.remove(app.getId()); - } - } else if (!progressMap.containsKey(app.getId())){ - appEndTime = System.currentTimeMillis(); - duration = appEndTime - app.getStartedTime(); - ApplicationStore as = new ApplicationStore(app); - if (as != null ){ - LOG.debug("YARN BEGIN : Application id : " + app.getId() + " state : " + app.getState()); - as.setAppStartTime(app.getStartedTime()); - progressMap.put(app.getId(), as); - as.request = WorkloadRequest.newBuilder() - .setWorkloadId("") - .setOperation(OperationType.BEGIN) - .setJobId(app.getId()) - .setJobType(JobType.HADOOP) - .setJobText(app.getName()) - .setJobState(app.getState()) - .setJobSubState(app.getFinalStatus()) - .setUserName(app.getName()) - .setStartTimestamp(app.getStartedTime()) - .setEndTimestamp(appEndTime) - .setMapPct((int)(app.getProgress())) - .setReducePct((int)(0.0)) - .setDuration(duration) - .setParentId("") - .setParentKey(app.getId()) - .build(); - LOG.debug("Begin workload..." + as.getRequest()); - wresponse = rpch.send(as.getRequest()); - switch(wresponse.getAction()){ - case REJECT: - case CANCEL: - LOG.debug("killJob..." + as.getRequest()); - killApplication(app.getId()); - break; - default: - break; - } - } - } else{ - LOG.debug("YARN UPDATE : Application id : " + app.getId() + " state : " + app.getState()); - ApplicationStore as = progressMap.get(app.getId()); - appEndTime = System.currentTimeMillis(); - duration = appEndTime - app.getStartedTime(); - as.getRequest().setOperation(OperationType.UPDATE); - as.getRequest().setEndTimestamp(appEndTime); - as.getRequest().setDuration(duration); - as.getRequest().setJobState(app.getState()); - as.getRequest().setJobSubState(app.getFinalStatus()); - as.getRequest().setMapPct((int)(app.getProgress())); - LOG.debug("Update workload..." + as.getRequest()); - wresponse = rpch.send(as.getRequest()); - switch(wresponse.getAction()){ - case REJECT: - case CANCEL: - LOG.debug("killJob..." 
+ as.getRequest()); - killApplication(app.getId()); - break; - default: - break; - } - } //else - } //synchronized - } - jp.close(); - } - httpClient.getConnectionManager().shutdown(); - - try { - Thread.sleep(1000); - } catch (Exception e) { - throw new RuntimeException("Unable to sleep until next cycle" + e.getMessage()); - } - } - } catch (UnknownHostException e) { - msg = "Unknown Host Exception : " + e.getMessage(); - } catch (ClientProtocolException e) { - msg = "Client Protocal Exception : " + e.getMessage(); - } catch (IOException e) { - msg = "IO Exception : " + e.getMessage(); - } catch (RuntimeException e) { - msg = "Runtime Exception : " + e.getMessage(); - } - LOG.error(msg); - return msg; - } - } - - void killApplication(String applicationId){ - ApplicationId appId = ConverterUtils.toApplicationId(applicationId); - LOG.info("Killing application " + applicationId); - - ClientRMProtocol applicationsManager; - String yarnRmAddress = conf.get(Constants.YARN_RM_ADDRESS, Constants.DEFAULT_YARN_RM_ADDRESS); - conf.set(Constants.YARN_RM_ADDRESS, yarnRmAddress); - YarnConfiguration yarnConf = new YarnConfiguration(conf); - yarnConf.set("yarn.resourcemanager.address", "sq151.houston.hp.com:8032"); - LOG.info(yarnConf.get("yarn.resourcemanager.address", "")); - InetSocketAddress rmAddress = NetUtils.createSocketAddr(yarnConf.get( YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS)); - LOG.info("Connecting to ResourceManager at " + rmAddress); - YarnRPC rpc = YarnRPC.create(yarnConf); - - Configuration appsManagerServerConf = new Configuration(yarnConf); - applicationsManager = ((ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class, rmAddress, appsManagerServerConf)); - - KillApplicationRequest killRequest = Records.newRecord(KillApplicationRequest.class); - killRequest.setApplicationId(appId); - try { - applicationsManager.forceKillApplication(killRequest); - } catch (YarnRemoteException e){ - LOG.error("YarnRemoteException : " + e); - } - } - - public static void main(String[] args) throws InterruptedException, ExecutionException { - - final ExecutorService exService = Executors.newSingleThreadExecutor(); - - final Future callFuture = exService.submit(new YarnClient(args).new YarnClientExecute()); -// gets value of callable thread - final String callval = callFuture.get(); - System.out.println("Callable:" + callval); -// checks for thread termination - final boolean isTerminated = exService.isTerminated(); - System.out.println(isTerminated); - // waits for termination for 30 seconds only - exService.awaitTermination(30, TimeUnit.SECONDS); - exService.shutdownNow(); - } -} -*/ \ No newline at end of file diff --git a/wms/src/main/java/org/trafodion/wms/server/rpc/thrift/ThriftRpcHandler.java b/wms/src/main/java/org/trafodion/wms/server/rpc/thrift/ThriftRpcHandler.java deleted file mode 100644 index 50286940..00000000 --- a/wms/src/main/java/org/trafodion/wms/server/rpc/thrift/ThriftRpcHandler.java +++ /dev/null @@ -1,210 +0,0 @@ -package org.trafodion.wms.server.rpc.thrift; - -import java.net.*; -import java.io.Writer; -import java.io.StringWriter; -import java.io.PrintWriter; -import java.util.*; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.log4j.Logger; -import org.apache.thrift.TException; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.data.Stat; -import org.apache.zookeeper.KeeperException; - -import org.trafodion.wms.zookeeper.ZkClient; 
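The YarnClient connector removed above polled the ResourceManager REST endpoint (yarn.rest.url, default http://localhost:8088/ws/v1/cluster/apps) once a second and walked the returned JSON for each application's id, state, finalStatus, and progress. A minimal standalone sketch of that polling loop, assuming the JDK's HttpURLConnection and the com.fasterxml Jackson databind API in place of the DefaultHttpClient and org.codehaus.jackson streaming parser the deleted class used; the URL and field names come from the code above, everything else is illustrative.

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class YarnRestPollerSketch {
        private static final ObjectMapper MAPPER = new ObjectMapper();

        // Fetch the cluster application list once and report each app's id/state/progress.
        // restUrl is assumed to hold the yarn.rest.url value.
        static void pollOnce(String restUrl) throws Exception {
            HttpURLConnection conn = (HttpURLConnection) new URL(restUrl).openConnection();
            conn.setRequestProperty("Accept", "application/json");
            if (conn.getResponseCode() != 200) {
                throw new RuntimeException("Failed : HTTP error code : " + conn.getResponseCode());
            }
            InputStream in = conn.getInputStream();
            try {
                // The REST response is shaped {"apps":{"app":[ ... ]}}.
                JsonNode apps = MAPPER.readTree(in).path("apps").path("app");
                for (JsonNode app : apps) {
                    String id = app.path("id").asText();
                    String state = app.path("state").asText();          // e.g. RUNNING, FINISHED, KILLED
                    String finalStatus = app.path("finalStatus").asText();
                    int progress = app.path("progress").asInt();
                    System.out.println(id + " " + state + "/" + finalStatus + " " + progress + "%");
                }
            } finally {
                in.close();
                conn.disconnect();
            }
        }

        public static void main(String[] args) throws Exception {
            String url = args.length > 0 ? args[0] : "http://localhost:8088/ws/v1/cluster/apps";
            while (true) {
                pollOnce(url);
                Thread.sleep(1000);   // the removed connector also slept one second between cycles
            }
        }
    }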
-import org.trafodion.wms.server.WmsServer; -import org.trafodion.wms.script.ScriptManager; -import org.trafodion.wms.Constants; -import org.trafodion.wms.server.workload.WorkloadStore; -import org.trafodion.wms.thrift.generated.*; -import org.trafodion.wms.cep.ComplexEventProcessor; -import org.trafodion.wms.cep.CepObserverImpl; - -public class ThriftRpcHandler implements WmsService.Iface { - private static final Log LOG = LogFactory.getLog(ThriftRpcHandler.class.getName()); - private WmsServer wmsServer; - private long beginTs; - private long endTs; - private long totalTime; - private Response response = new Response(new Header(),new Data()); - private WorkloadStore workloadStore; - private ComplexEventProcessor cep; - private CepObserverImpl actionObserver = new CepObserverImpl("ThriftRpcHandler",this); - - public ThriftRpcHandler(WmsServer wmsServer){ - this.wmsServer = wmsServer; - this.workloadStore = wmsServer.getServerManager().getWorkloadStore(); - this.cep = wmsServer.getServerManager().getComplexEventProcessor(); - cep.getActionSubject().register(actionObserver); - actionObserver.setSubject(cep.getActionSubject()); - } - - public long ping(long timestamp) throws IOError { - LOG.debug("ping:" + timestamp + ")"); - return System.currentTimeMillis(); - } - - public Response writeread(Request request) throws IOError, IllegalArgument { - LOG.debug("writeread:" + request); - - if(! request.getData().isSetKeyValues()) - throw new IllegalArgument("No key values found"); - - //Setup the default response - response.getData().setKeyValues(new HashMap()); - Map rspkv = response.getData().getKeyValues(); - response.getHeader().setServerLastUpdated(System.currentTimeMillis()); - rspkv.put(Constants.ACTION,new KeyValue().setIntValue(Action.ACTION_CONTINUE.getValue())); - - //Check the request - Map reqkv = request.getData().getKeyValues(); - validate(reqkv); - - switch(Operation.findByValue(reqkv.get(Constants.OPERATION).getIntValue())) { - case OPERATION_BEGIN: - LOG.debug("OPERATION_BEGIN:"); - beginTs = System.currentTimeMillis(); - //ScriptManager.getInstance().runScript(request,response); - - try{ - String streamName = reqkv.get(Constants.TYPE).getStringValue(); - cep.workload(streamName,new HashMap(reqkv)); - if(rspkv.get(Constants.ACTION).getIntValue() != Action.ACTION_REJECT.getValue() ) { - String workloadId = workloadStore.createWorkloadId(); - reqkv.put(Constants.WORKLOAD_ID,new KeyValue().setStringValue(workloadId)); - workloadStore.put(workloadId,request); - rspkv.put(Constants.WORKLOAD_ID,new KeyValue().setStringValue(workloadId)); - } - } catch (Exception e){ - LOG.error(stackTraceToString(e)); - IOError ioe = new IOError(); - ioe.setMessage("IO error occurred [" + stackTraceToString(e) + "]"); - throw ioe; - } - - endTs = System.currentTimeMillis(); - totalTime = endTs - beginTs; - LOG.debug("Operation:[" + Operation.findByValue(reqkv.get(Constants.OPERATION).getIntValue()) + "] took [" + totalTime + "] millis"); - break; - case OPERATION_UPDATE: - LOG.debug("OPERATION_UPDATE:"); - beginTs = System.currentTimeMillis(); - //ScriptManager.getInstance().runScript(request,response); - - try{ - String streamName = reqkv.get(Constants.TYPE).getStringValue(); - String workloadId = reqkv.get(Constants.WORKLOAD_ID).getStringValue(); - rspkv.put(Constants.WORKLOAD_ID,new KeyValue().setStringValue(workloadId)); - cep.workload(streamName,new HashMap(reqkv)); - workloadStore.put(workloadId,request); - } catch (Exception e){ - LOG.error(stackTraceToString(e)); - IOError ioe = new IOError(); 
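For reference, the writeread() dispatch being removed here validated every request the same way: a non-empty type key is always required, and UPDATE, END, and CANCEL_CHILDREN must also carry a workloadId. A stripped-down sketch of that contract, assuming plain String maps instead of the generated Thrift Request/KeyValue types:

    import java.util.HashMap;
    import java.util.Map;

    public class WritereadValidationSketch {

        enum Operation { BEGIN, UPDATE, END, UPDATE_PARENT_ID, CANCEL_CHILDREN }

        // Mirrors the checks the handler maps onto Thrift IllegalArgument exceptions.
        static void validate(Operation op, Map<String, String> kv) {
            String type = kv.get("type");
            if (type == null || type.isEmpty()) {
                throw new IllegalArgumentException("Missing type in request");
            }
            switch (op) {
            case UPDATE:
            case END:
            case CANCEL_CHILDREN:
                String workloadId = kv.get("workloadId");
                if (workloadId == null || workloadId.isEmpty()) {
                    throw new IllegalArgumentException("Missing workloadId in request");
                }
                break;
            default:
                break;   // BEGIN and UPDATE_PARENT_ID need no workloadId yet
            }
        }

        public static void main(String[] args) {
            Map<String, String> kv = new HashMap<String, String>();
            kv.put("type", "trafodion");
            validate(Operation.BEGIN, kv);            // passes
            try {
                validate(Operation.UPDATE, kv);       // rejected: no workloadId yet
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }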
- ioe.setMessage("IO error occurred [" + stackTraceToString(e) + "]"); - throw ioe; - } - - endTs = System.currentTimeMillis(); - totalTime = endTs - beginTs; - LOG.debug("Operation:[" + Operation.findByValue(reqkv.get(Constants.OPERATION).getIntValue()) + "] took [" + totalTime + "] millis"); - break; - case OPERATION_END: - LOG.debug("OPERATION_END:"); - beginTs = System.currentTimeMillis(); - //ScriptManager.getInstance().runScript(request,response); - - try{ - String streamName = reqkv.get(Constants.TYPE).getStringValue(); - String workloadId = reqkv.get(Constants.WORKLOAD_ID).getStringValue(); - rspkv.put(Constants.WORKLOAD_ID,new KeyValue().setStringValue(workloadId)); - cep.workload(streamName,new HashMap(reqkv)); - workloadStore.put(workloadId,request); - } catch (Exception e){ - LOG.error(stackTraceToString(e)); - IOError ioe = new IOError(); - ioe.setMessage("IO error occurred [" + stackTraceToString(e) + "]"); - throw ioe; - } - - endTs = System.currentTimeMillis(); - totalTime = endTs - beginTs; - LOG.debug("Operation:[" + Operation.findByValue(reqkv.get(Constants.OPERATION).getIntValue()) + "] took [" + totalTime + "] millis"); - break; - case OPERATION_UPDATE_PARENT_ID: - LOG.debug("Workload UPDATE_PARENT_ID received"); - //Response.setWorkloadId(Request.getWorkloadId()); - //workloadQueue.processRequest(Request,Response,workloadThresholds); - break; - case OPERATION_CANCEL_CHILDREN: - LOG.debug("Workload CANCEL_CHILDREN received"); - //Response.setWorkloadId(Request.getWorkloadId()); - //workloadQueue.processRequest(Request,Response,workloadThresholds); - break; - default: - IllegalArgument ia = new IllegalArgument(); - ia.setMessage("Invalid or unknown operation specified"); - throw ia; - } - - LOG.debug("writeread:" + response); - - return response; - } - - void validate(Map reqkv) throws IllegalArgument { - - if(reqkv.get(Constants.TYPE).getStringValue().isEmpty()) { - IllegalArgument ia = new IllegalArgument(); - ia.setMessage("Missing type in request"); - throw ia; - } - - switch(Operation.findByValue(reqkv.get(Constants.OPERATION).getIntValue())) { - case OPERATION_BEGIN: - break; - case OPERATION_UPDATE: - if(reqkv.get(Constants.WORKLOAD_ID).getStringValue().isEmpty()) { - IllegalArgument ia = new IllegalArgument(); - ia.setMessage("Missing workloadId in request"); - throw ia; - } - break; - case OPERATION_END: - if(reqkv.get(Constants.WORKLOAD_ID).getStringValue().isEmpty()) { - IllegalArgument ia = new IllegalArgument(); - ia.setMessage("Missing workloadId in request"); - throw ia; - } - break; - case OPERATION_UPDATE_PARENT_ID: - break; - case OPERATION_CANCEL_CHILDREN: - if(reqkv.get(Constants.WORKLOAD_ID).getStringValue().isEmpty()) { - IllegalArgument ia = new IllegalArgument(); - ia.setMessage("Missing workloadId in request"); - throw ia; - } - break; - default: - IllegalArgument ia = new IllegalArgument(); - ia.setMessage("Invalid or unknown operation specified"); - throw ia; - } - } - - String stackTraceToString(Exception e) { - Writer writer = new StringWriter(); - PrintWriter printWriter = new PrintWriter(writer); - e.printStackTrace(printWriter); - String s = writer.toString(); - return s; - } - - public Response getResponse() { - return response; - } -} diff --git a/wms/src/main/java/org/trafodion/wms/server/rpc/thrift/ThriftRpcServer.java b/wms/src/main/java/org/trafodion/wms/server/rpc/thrift/ThriftRpcServer.java deleted file mode 100644 index b7a8211c..00000000 --- a/wms/src/main/java/org/trafodion/wms/server/rpc/thrift/ThriftRpcServer.java +++ 
/dev/null @@ -1,73 +0,0 @@ -package org.trafodion.wms.server.rpc.thrift; - -import java.net.*; -import java.io.IOException; -import java.nio.charset.Charset; - -import org.apache.thrift.server.TServer; -import org.apache.thrift.server.TServer.Args; -import org.apache.thrift.server.TSimpleServer; -import org.apache.thrift.server.TThreadPoolServer; -import org.apache.thrift.server.TNonblockingServer; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TServerSocket; -import org.apache.thrift.transport.TServerTransport; -import org.apache.thrift.transport.TNonblockingTransport; -import org.apache.thrift.transport.TNonblockingSocket; -import org.apache.thrift.transport.TNonblockingServerTransport; -import org.apache.thrift.transport.TNonblockingServerSocket; -import org.apache.thrift.transport.TTransportException; -import org.apache.thrift.transport.TSSLTransportFactory.TSSLTransportParameters; - -import org.apache.hadoop.conf.Configuration; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.trafodion.wms.zookeeper.ZkClient; -import org.trafodion.wms.util.WmsConfiguration; -import org.trafodion.wms.Constants; -import org.trafodion.wms.server.WmsServer; -import org.trafodion.wms.server.rpc.thrift.ThriftRpcHandler; -import org.trafodion.wms.thrift.generated.*; - -public class ThriftRpcServer implements Runnable { - private static final Log LOG = LogFactory.getLog(ThriftRpcServer.class.getName()); - private static final Charset CHARSET = Charset.forName("UTF-8"); - private WmsServer wmsServer; - private Thread thrd; - private ThriftRpcHandler trpch = null; -// private WmsService.Processor processor=null; - - public ThriftRpcServer(WmsServer wmsServer){ - this.wmsServer = wmsServer; - trpch = new ThriftRpcHandler(wmsServer); - //processor = new WmsService.Processor(trpch); - thrd = new Thread(this); - thrd.start(); - } - - public ThriftRpcHandler getHandler() { - return this.trpch; - } - - public void run() { - TServer server=null; - int port = wmsServer.getThriftPort(); - - try { - //TServerTransport serverTransport = new TServerSocket(new InetSocketAddress(port)); - //server = new TSimpleServer(new Args(serverTransport).processor(new WmsService.Processor(trpch))); - // Use this for a multithreaded server - //server = new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport).processor(new WmsService.Processor(trpch))); - // Use this for non blocking server - TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(new InetSocketAddress(port)); - server = new TNonblockingServer(new TNonblockingServer.Args(serverTransport).processor(new WmsService.Processor(trpch))); - LOG.info("Thrift RPC listening to [" + wmsServer.getServerName() + ":" + port + "]"); - } catch (TTransportException e) { - LOG.error("TTransportException " + e); - System.exit(-1); - } - - server.serve(); - } -} diff --git a/wms/src/main/java/org/trafodion/wms/server/stats/PStats.java b/wms/src/main/java/org/trafodion/wms/server/stats/PStats.java index 690e16eb..80ed50cb 100644 --- a/wms/src/main/java/org/trafodion/wms/server/stats/PStats.java +++ b/wms/src/main/java/org/trafodion/wms/server/stats/PStats.java @@ -38,158 +38,163 @@ import org.trafodion.wms.util.WmsConfiguration; import org.trafodion.wms.util.Bytes; import org.trafodion.wms.zookeeper.ZkClient; -import org.trafodion.wms.server.rpc.thrift.ThriftRpcServer; public class PStats implements Runnable { - private static final Log LOG = 
LogFactory.getLog(PStats.class.getName()); - private Configuration conf; - private Thread thrd; - private String[] args; - private String instance; - - private String zkhost; - private int zkport; - - public PStats(String[] args) { - this.args = args; - conf = WmsConfiguration.create(); - thrd = new Thread(this); - thrd.setDaemon(true); - thrd.start(); - } - - public PStats(Configuration conf,String instance) { - this.conf = conf; - this.instance = instance; - thrd = new Thread(this); - thrd.setDaemon(true); - thrd.start(); - } - - public void run () { -/* - Options opt = new Options(); - opt.addOption("i",true,"zookeeper ip net addres"); - opt.addOption("p",true,"zookeeper port number"); - CommandLine cmd; - try { - cmd = new GnuParser().parse(opt, args); - zkhost = cmd.getOptionValue("i", Constants.LOCALHOST); - try { - zkport = Integer.parseInt(cmd.getOptionValue("p", Integer.toString(Constants.DEFAULT_ZOOKEEPER_CLIENT_PORT))); - } catch (NumberFormatException e){ - LOG.error("Could not parse: ", e); - zkport = Constants.DEFAULT_ZOOKEEPER_CLIENT_PORT; - } - LOG.debug("Command=" + cmd.toString()); - LOG.debug("Options: " + zkhost + " " + zkport); - System.out.println("Options: " + zkhost + " " + zkport); - } catch (ParseException e) { - LOG.error("Could not parse: ", e); - zkhost = Constants.LOCALHOST; - zkport = Constants.DEFAULT_ZOOKEEPER_CLIENT_PORT; - } -*/ - try { -// ZkClient zkc = new ZkClient(zkhost, zkport); - ZkClient zkc = new ZkClient(); - zkc.connect(); - LOG.info("Connected to ZooKeeper"); - - String parentZnode = conf.get(Constants.ZOOKEEPER_ZNODE_PARENT,Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - Stat stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS,false); - if(stat == null){ - LOG.error(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS + " does not exist"); - throw new IOException(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS + " does not exist"); - } - String schema = Bytes.toString(zkc.getData(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS, false, stat)); - - InetAddress ip = InetAddress.getLocalHost(); - LOG.info("hostname " + ip.getCanonicalHostName()); - - String znode_stats = parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS + "/" + ip.getCanonicalHostName() + ":" + instance; - stat = zkc.exists(znode_stats, false); - if (stat != null) - zkc.delete(znode_stats,-1); - zkc.create(znode_stats,new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL); - LOG.info("Created " + znode_stats); - // serialize data - Schema s = new Schema.Parser().parse(schema); - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - Encoder en = EncoderFactory.get().binaryEncoder(outputStream, null); - GenericRecord record = new GenericData.Record(s); - GenericDatumWriter w = new GenericDatumWriter(s); - // - DatumReader reader = new GenericDatumReader(s); - - Runtime runtime = Runtime.getRuntime(); - Process process = runtime.exec("uname -n"); - process.waitFor(); - BufferedReader buff = new BufferedReader(new InputStreamReader(process.getInputStream())); - String nodename = buff.readLine(); - LOG.info("nodename " + nodename); - - while(true){ - PStatsCpuBusy pstatscpubusy = new PStatsCpuBusy(); - String cpuStat1 = pstatscpubusy.readSystemStat(); - - try { - Thread.sleep(Constants.CPU_WINDOW); - } catch (Exception e) { - LOG.info(e); - } - String cpuStat2 = pstatscpubusy.readSystemStat(); - - float cpu = pstatscpubusy.getSystemCpuUsage(cpuStat1, cpuStat2); - PStatsMemoryMonitor pm = PStatsMemoryMonitor.get(); - 
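The stats loop shown here (old formatting above, reformatted version below) parses the Avro schema stored under the /wms/stats parent znode, fills a GenericRecord with nodename, cpubusy, and memusage, and pushes the binary encoding into a per-instance ephemeral znode. A self-contained sketch of just that encode/decode round trip, assuming a hypothetical inline schema with those three fields (the real schema is whatever the parent znode carries):

    import java.io.ByteArrayOutputStream;
    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericData;
    import org.apache.avro.generic.GenericDatumReader;
    import org.apache.avro.generic.GenericDatumWriter;
    import org.apache.avro.generic.GenericRecord;
    import org.apache.avro.io.BinaryDecoder;
    import org.apache.avro.io.BinaryEncoder;
    import org.apache.avro.io.DecoderFactory;
    import org.apache.avro.io.EncoderFactory;

    public class StatsCodecSketch {
        // Hypothetical schema matching the fields PStats publishes.
        static final String SCHEMA_JSON = "{\"type\":\"record\",\"name\":\"stats\",\"fields\":["
                + "{\"name\":\"nodename\",\"type\":\"string\"},"
                + "{\"name\":\"cpubusy\",\"type\":\"float\"},"
                + "{\"name\":\"memusage\",\"type\":\"float\"}]}";

        public static void main(String[] args) throws Exception {
            Schema schema = new Schema.Parser().parse(SCHEMA_JSON);

            // Encode one sample, as PStats does before zkc.setData(znode_stats, ...).
            GenericRecord record = new GenericData.Record(schema);
            record.put("nodename", "node1");
            record.put("cpubusy", 12.5f);
            record.put("memusage", 37.0f);
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(schema);
            BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
            writer.write(record, encoder);
            encoder.flush();
            byte[] bytes = out.toByteArray();   // this is the payload that lands in the stats znode

            // Decode it back, as a reader of the znode would.
            GenericDatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>(schema);
            BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(bytes, null);
            GenericRecord decoded = reader.read(null, decoder);
            System.out.println(decoded.get("nodename") + " cpu=" + decoded.get("cpubusy")
                    + " mem=" + decoded.get("memusage"));
        }
    }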
PStatsMemoryUsage mu = pm.monitor(); - //--------------- encoding ------------------------------------------------ - Float cpubusy = cpu; - Float memusage = mu.getMemoryUsage(); - LOG.debug("get cpubusy " + Float.toString(cpubusy)); - LOG.debug("get memory usage " + Float.toString(memusage)); - - outputStream.reset(); - record.put("nodename", nodename); - record.put("cpubusy", cpubusy.floatValue()); - record.put("memusage", memusage.floatValue()); - LOG.debug("nodename " + record.get("nodename")); - LOG.debug("cpubusy " + record.get("cpubusy")); - LOG.debug("memusage " + record.get("memusage")); - // Encode - w.write(record, en); - en.flush(); - outputStream.close(); - stat = zkc.setData(znode_stats, outputStream.toByteArray(), -1); - - try { - Thread.sleep(Constants.PLATFORM_STATS_DELAY); - } catch (Exception e) { - LOG.info(e); - } - /*-------------- test ------------------------------- - LOG.debug(schema); - String encodedString = outputStream.toString(); - LOG.debug("encodedString: "+encodedString); - - byte[] b = zkc.getData(znode_stats, false, stat); - Decoder decoder = DecoderFactory.get().binaryDecoder(b, null); - GenericRecord result = reader.read(null, decoder); - LOG.debug("nodename " + result.get("nodename").toString()); - LOG.debug("cpubusy " + result.get("cpubusy").toString()); - LOG.debug("memusage " + result.get("memusage").toString()); - */ - } - } catch (IOException ioe) { - LOG.info(ioe); - } catch( InterruptedException ie){ - LOG.info(ie); - } catch( KeeperException ke){ - LOG.info(ke); - } - } - public static void main(String [] args) { - PStats ps = new PStats(args); - } + private static final Log LOG = LogFactory.getLog(PStats.class.getName()); + private Configuration conf; + private Thread thrd; + private String[] args; + private String instance; + + private String zkhost; + private int zkport; + + public PStats(String[] args) { + this.args = args; + conf = WmsConfiguration.create(); + thrd = new Thread(this); + thrd.setDaemon(true); + thrd.start(); + } + + public PStats(Configuration conf, String instance) { + this.conf = conf; + this.instance = instance; + thrd = new Thread(this); + thrd.setDaemon(true); + thrd.start(); + } + + public void run() { + /* + * Options opt = new Options(); + * opt.addOption("i",true,"zookeeper ip net addres"); + * opt.addOption("p",true,"zookeeper port number"); CommandLine cmd; try + * { cmd = new GnuParser().parse(opt, args); zkhost = + * cmd.getOptionValue("i", Constants.LOCALHOST); try { zkport = + * Integer.parseInt(cmd.getOptionValue("p", + * Integer.toString(Constants.DEFAULT_ZOOKEEPER_CLIENT_PORT))); } catch + * (NumberFormatException e){ LOG.error("Could not parse: ", e); zkport + * = Constants.DEFAULT_ZOOKEEPER_CLIENT_PORT; } LOG.debug("Command=" + + * cmd.toString()); LOG.debug("Options: " + zkhost + " " + zkport); + * System.out.println("Options: " + zkhost + " " + zkport); } catch + * (ParseException e) { LOG.error("Could not parse: ", e); zkhost = + * Constants.LOCALHOST; zkport = + * Constants.DEFAULT_ZOOKEEPER_CLIENT_PORT; } + */ + try { + // ZkClient zkc = new ZkClient(zkhost, zkport); + ZkClient zkc = new ZkClient(); + zkc.connect(); + LOG.info("Connected to ZooKeeper"); + + String parentZnode = conf.get(Constants.ZOOKEEPER_ZNODE_PARENT, + Constants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + Stat stat = zkc.exists(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS, false); + if (stat == null) { + LOG.error(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS + + " does not exist"); + throw new IOException(parentZnode + 
+ Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS + + " does not exist"); + } + String schema = Bytes.toString(zkc.getData(parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS, false, stat)); + + InetAddress ip = InetAddress.getLocalHost(); + LOG.info("hostname " + ip.getCanonicalHostName()); + + String znode_stats = parentZnode + + Constants.DEFAULT_ZOOKEEPER_ZNODE_STATS + "/" + + ip.getCanonicalHostName() + ":" + instance; + stat = zkc.exists(znode_stats, false); + if (stat != null) + zkc.delete(znode_stats, -1); + zkc.create(znode_stats, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL); + LOG.info("Created " + znode_stats); + // serialize data + Schema s = new Schema.Parser().parse(schema); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + Encoder en = EncoderFactory.get().binaryEncoder(outputStream, null); + GenericRecord record = new GenericData.Record(s); + GenericDatumWriter w = new GenericDatumWriter( + s); + // + DatumReader reader = new GenericDatumReader( + s); + + Runtime runtime = Runtime.getRuntime(); + Process process = runtime.exec("uname -n"); + process.waitFor(); + BufferedReader buff = new BufferedReader(new InputStreamReader( + process.getInputStream())); + String nodename = buff.readLine(); + LOG.info("nodename " + nodename); + + while (true) { + PStatsCpuBusy pstatscpubusy = new PStatsCpuBusy(); + String cpuStat1 = pstatscpubusy.readSystemStat(); + + try { + Thread.sleep(Constants.CPU_WINDOW); + } catch (Exception e) { + LOG.info(e); + } + String cpuStat2 = pstatscpubusy.readSystemStat(); + + float cpu = pstatscpubusy.getSystemCpuUsage(cpuStat1, cpuStat2); + PStatsMemoryMonitor pm = PStatsMemoryMonitor.get(); + PStatsMemoryUsage mu = pm.monitor(); + // --------------- encoding + // ------------------------------------------------ + Float cpubusy = cpu; + Float memusage = mu.getMemoryUsage(); + LOG.debug("get cpubusy " + Float.toString(cpubusy)); + LOG.debug("get memory usage " + Float.toString(memusage)); + + outputStream.reset(); + record.put("nodename", nodename); + record.put("cpubusy", cpubusy.floatValue()); + record.put("memusage", memusage.floatValue()); + LOG.debug("nodename " + record.get("nodename")); + LOG.debug("cpubusy " + record.get("cpubusy")); + LOG.debug("memusage " + record.get("memusage")); + // Encode + w.write(record, en); + en.flush(); + outputStream.close(); + stat = zkc.setData(znode_stats, outputStream.toByteArray(), -1); + + try { + Thread.sleep(Constants.PLATFORM_STATS_DELAY); + } catch (Exception e) { + LOG.info(e); + } + /*-------------- test ------------------------------- + LOG.debug(schema); + String encodedString = outputStream.toString(); + LOG.debug("encodedString: "+encodedString); + + byte[] b = zkc.getData(znode_stats, false, stat); + Decoder decoder = DecoderFactory.get().binaryDecoder(b, null); + GenericRecord result = reader.read(null, decoder); + LOG.debug("nodename " + result.get("nodename").toString()); + LOG.debug("cpubusy " + result.get("cpubusy").toString()); + LOG.debug("memusage " + result.get("memusage").toString()); + */ + } + } catch (IOException ioe) { + LOG.info(ioe); + } catch (InterruptedException ie) { + LOG.info(ie); + } catch (KeeperException ke) { + LOG.info(ke); + } + } + + public static void main(String[] args) { + PStats ps = new PStats(args); + } } - diff --git a/wms/src/main/java/org/trafodion/wms/server/workload/WorkloadStore.java b/wms/src/main/java/org/trafodion/wms/server/workload/WorkloadStore.java index 29af445b..9d2928db 100644 --- 
a/wms/src/main/java/org/trafodion/wms/server/workload/WorkloadStore.java +++ b/wms/src/main/java/org/trafodion/wms/server/workload/WorkloadStore.java @@ -27,282 +27,220 @@ import org.apache.hadoop.conf.Configuration; -import org.apache.thrift.TBase; -import org.apache.thrift.TException; -import org.apache.thrift.TSerializer; -import org.apache.thrift.TDeserializer; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TIOStreamTransport; -import org.apache.thrift.transport.TTransport; - import org.trafodion.wms.util.WmsConfiguration; import org.trafodion.wms.zookeeper.ZkClient; import org.trafodion.wms.server.ServerLeaderElection; import org.trafodion.wms.Constants; -import org.trafodion.wms.thrift.generated.*; import org.trafodion.wms.util.Bytes; public class WorkloadStore { - private static final Log LOG = LogFactory.getLog(WorkloadStore.class); - Configuration conf; - ZkClient zkc = null; + private static final Log LOG = LogFactory.getLog(WorkloadStore.class); + Configuration conf; + ZkClient zkc = null; String parentZnode; ServerLeaderElection sle; - long cleanerInitialDelay; - long cleanerPeriod; - final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1); - ScheduledFuture cleanerHandle; - TSerializer serializer = new TSerializer(new TBinaryProtocol.Factory()); - TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory()); - List children = new ArrayList(); - List clientIds = new ArrayList(); - List workloads = new ArrayList(); - - public WorkloadStore(Configuration conf,ZkClient zkc,String parentZnode,ServerLeaderElection sle){ - this.conf = conf; - this.zkc = zkc; - this.parentZnode = parentZnode; - this.sle = sle; - cleanerInitialDelay = conf.getInt(Constants.WMS_SERVER_WORKLOAD_CLEANER_INITIAL_DELAY, Constants.DEFAULT_WMS_SERVER_WORKLOAD_CLEANER_INITIAL_DELAY); - cleanerPeriod = conf.getInt(Constants.WMS_SERVER_WORKLOAD_CLEANER_PERIOD, Constants.DEFAULT_WMS_SERVER_WORKLOAD_CLEANER_PERIOD); - startCleaner(); - } - - public WorkloadStore(String[] args){ - if(zkc == null){ - try { - zkc = new ZkClient(); - zkc.connect(); - LOG.info("Connected to ZooKeeper"); - parentZnode = "/" + System.getProperty("user.name"); - } catch (Exception e) { - LOG.error(e); - System.exit(1); - } - } - - Configuration conf = WmsConfiguration.create(); - cleanerInitialDelay = conf.getInt(Constants.WMS_SERVER_WORKLOAD_CLEANER_INITIAL_DELAY, Constants.DEFAULT_WMS_SERVER_WORKLOAD_CLEANER_INITIAL_DELAY); - cleanerPeriod = conf.getInt(Constants.WMS_SERVER_WORKLOAD_CLEANER_PERIOD, Constants.DEFAULT_WMS_SERVER_WORKLOAD_CLEANER_PERIOD); - startCleaner(); - } - - final Runnable cleaner = new Runnable() { - public void run() { clean(); } - }; - - public void startCleaner(){ - cleanerHandle = scheduler.scheduleAtFixedRate(cleaner, cleanerInitialDelay, cleanerPeriod, SECONDS); - } - - public void stopCleaner(){ - cleanerHandle.cancel(true); - } - - synchronized void clean() { - LOG.debug("Workload cleaner is running"); - - if(! 
sle.isLeader()) - return; - - children.clear(); - try { - children = zkc.getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS, null); - } catch (Exception e) { - LOG.error("Exception reading Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS, " + e); - } - - if(children.isEmpty()) { - LOG.debug("No children found in " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS); - } else { - clientIds.clear(); - for(String aChild: children) { - StringTokenizer st = new StringTokenizer(aChild, ":"); - while(st.hasMoreTokens()) { - st.nextToken(); //skip ip address - String timestamp = st.nextToken(); //timestamp - clientIds.add(timestamp); - } - } - } - - children.clear(); - try { - children = zkc.getChildren(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS, null); - } catch (Exception e) { - LOG.error("Exception reading Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS, " + e); - } - - if (children.isEmpty()) { - LOG.debug("No workloads found in " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS); - return; - } else { - workloads.clear(); - try { - for(String aChild : children) { - workloads.add(get(aChild)); - } - }catch (Exception e){ - e.printStackTrace(); - } - } - - LOG.debug("Workloads found in " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS); - - Date now = new Date(); - for (Request aWorkload : workloads) { - LOG.debug("Workload=" + aWorkload); - Map reqkv = aWorkload.getData().getKeyValues(); - if(reqkv.get(Constants.STATE).getStringValue() == null - || reqkv.get(Constants.STATE).getStringValue().equalsIgnoreCase(Constants.COMPLETED)) { - if(now.getTime() - reqkv.get(Constants.BEGIN_TIMESTAMP).getLongValue() >= Constants.THIRTY_SECONDS ){ - try{ - delete(reqkv.get(Constants.WORKLOAD_ID).getStringValue()); - LOG.debug("Deleted workload[" + reqkv.get(Constants.WORKLOAD_ID).getStringValue() + "], Workload[" + aWorkload + "]"); - }catch (Exception e){ - e.printStackTrace(); - } - } - } else { - //For external workloads e.g., HIVE clients that connect using WmsClient object - //and also don't have a client registered in /wms/clients change them - //to COMPLETED/CLIENT_DISAPPEARED state. This is mainly for HIVE queries that have been ctrl-c'd - //so they have begun but will never have end from client. 
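The cleaner above (and its commented-out counterpart in the reformatted class below) is driven by a single-thread ScheduledExecutorService: startCleaner() arms scheduleAtFixedRate() with the wms.server.workload.cleaner.initial.delay and wms.server.workload.cleaner.period settings, and stopCleaner() cancels the returned future. A minimal sketch of that scheduling pattern, with placeholder values standing in for the configured delays:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    public class CleanerSchedulingSketch {
        private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        private ScheduledFuture<?> cleanerHandle;

        // Periodic task: in WorkloadStore this is clean(), which prunes completed
        // or orphaned workload znodes when this server holds leadership.
        private final Runnable cleaner = new Runnable() {
            public void run() {
                System.out.println("workload cleaner tick");
            }
        };

        public void startCleaner(long initialDelaySecs, long periodSecs) {
            cleanerHandle = scheduler.scheduleAtFixedRate(cleaner, initialDelaySecs, periodSecs,
                    TimeUnit.SECONDS);
        }

        public void stopCleaner() {
            if (cleanerHandle != null) {
                cleanerHandle.cancel(true);
            }
            scheduler.shutdown();
        }

        public static void main(String[] args) throws InterruptedException {
            CleanerSchedulingSketch sketch = new CleanerSchedulingSketch();
            sketch.startCleaner(30, 30);   // placeholder initial delay and period, in seconds
            Thread.sleep(65000);           // let the task fire a couple of times
            sketch.stopCleaner();
        }
    }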
- // - if(clientIds.contains(Long.valueOf(aWorkload.getHeader().getClientTimestamp()).toString())) { - LOG.debug("Client ID found for [" + aWorkload.getHeader().getClientTimestamp() + "], Workload[" + aWorkload + "]"); - continue; - } else { - LOG.debug("Client ID not found for [" + aWorkload.getHeader().getClientTimestamp() + "], Workload[" + aWorkload + "]"); - //aWorkload.getData().setState(Constants.COMPLETED); - reqkv.put(Constants.STATE,new KeyValue().setStringValue(Constants.COMPLETED)); - //aWorkload.getData().setSubState(Constants.CLIENT_DISAPPEARED); - reqkv.put(Constants.SUBSTATE,new KeyValue().setStringValue(Constants.CLIENT_DISAPPEARED)); - try{ - put(reqkv.get(Constants.WORKLOAD_ID).getStringValue(),aWorkload); - }catch (Exception e){ - e.printStackTrace(); - } - } - } - } - } - - public String createWorkloadId() throws Exception { - //see http://docs.oracle.com/javase/1.5.0/docs/api/java/util/Formatter.html - Date date = new Date(); - String root = String.format("%1$s%2$tY% reqkv = r.getData().getKeyValues(); - reqkv.put("workloadId",new KeyValue().setStringValue(workloadId)); - reqkv.put("operation",new KeyValue().setIntValue(Operation.OPERATION_BEGIN.getValue())); - reqkv.put("state",new KeyValue().setStringValue(Constants.RUNNING)); - reqkv.put("subState",new KeyValue().setStringValue(Constants.BEGIN)); - reqkv.put("beginTimestamp",new KeyValue().setLongValue(System.currentTimeMillis())); - reqkv.put("endTimestamp",new KeyValue().setLongValue(System.currentTimeMillis())); - reqkv.put("type",new KeyValue().setStringValue(Constants.TRAFODION)); - reqkv.put("queryId",new KeyValue().setStringValue("MXID11000001075212235857042874154000000000106U6553500_4_SQL_DATASOURCE_Q8")); - reqkv.put("queryText",new KeyValue().setStringValue("This is some query text")); - - //serialize - long beginTs = System.currentTimeMillis(); - byte[] bytes = store.serialize(r); - long endTs = System.currentTimeMillis(); - System.out.println("serialize took " + (endTs - beginTs) + " millis"); - System.out.println("bytes=" + bytes + "\n"); - - //deserialize - beginTs = System.currentTimeMillis(); - Request dr = store.deserialize(bytes); - endTs = System.currentTimeMillis(); - System.out.println("deserialize took " + (endTs - beginTs) + " millis"); - System.out.println("deserialize=" + dr + "\n"); - - //put - beginTs = System.currentTimeMillis(); - store.put(workloadId,r); - endTs = System.currentTimeMillis(); - System.out.println("put took " + (endTs - beginTs) + " millis"); - System.out.println("put=" + r + "\n"); - - //get - beginTs = System.currentTimeMillis(); - dr = store.get(workloadId); - endTs = System.currentTimeMillis(); - System.out.println("get took " + (endTs - beginTs) + " millis"); - System.out.println("get=" + dr + "\n"); - - } catch (Exception e) { - e.printStackTrace(); - } - } + long cleanerInitialDelay; + long cleanerPeriod; + final ScheduledExecutorService scheduler = Executors + .newScheduledThreadPool(1); + ScheduledFuture cleanerHandle; + List children = new ArrayList(); + List clientIds = new ArrayList(); + + // List workloads = new ArrayList(); + + public WorkloadStore(Configuration conf, ZkClient zkc, String parentZnode, + ServerLeaderElection sle) { + this.conf = conf; + this.zkc = zkc; + this.parentZnode = parentZnode; + this.sle = sle; + cleanerInitialDelay = conf.getInt( + Constants.WMS_SERVER_WORKLOAD_CLEANER_INITIAL_DELAY, + Constants.DEFAULT_WMS_SERVER_WORKLOAD_CLEANER_INITIAL_DELAY); + cleanerPeriod = conf.getInt( + Constants.WMS_SERVER_WORKLOAD_CLEANER_PERIOD, + 
Constants.DEFAULT_WMS_SERVER_WORKLOAD_CLEANER_PERIOD); + // startCleaner(); + } + + public WorkloadStore(String[] args) { + if (zkc == null) { + try { + zkc = new ZkClient(); + zkc.connect(); + LOG.info("Connected to ZooKeeper"); + parentZnode = "/" + System.getProperty("user.name"); + } catch (Exception e) { + LOG.error(e); + System.exit(1); + } + } + + Configuration conf = WmsConfiguration.create(); + cleanerInitialDelay = conf.getInt( + Constants.WMS_SERVER_WORKLOAD_CLEANER_INITIAL_DELAY, + Constants.DEFAULT_WMS_SERVER_WORKLOAD_CLEANER_INITIAL_DELAY); + cleanerPeriod = conf.getInt( + Constants.WMS_SERVER_WORKLOAD_CLEANER_PERIOD, + Constants.DEFAULT_WMS_SERVER_WORKLOAD_CLEANER_PERIOD); + // startCleaner(); + } + /* + * final Runnable cleaner = new Runnable() { public void run() { clean(); } + * }; + * + * public void startCleaner() { cleanerHandle = + * scheduler.scheduleAtFixedRate(cleaner, cleanerInitialDelay, + * cleanerPeriod, SECONDS); } + * + * public void stopCleaner() { cleanerHandle.cancel(true); } + * + * synchronized void clean() { LOG.debug("Workload cleaner is running"); + * + * if (!sle.isLeader()) return; + * + * children.clear(); try { children = zkc.getChildren(parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS, null); } catch (Exception e) { + * LOG.error("Exception reading Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS, " + * + e); } + * + * if (children.isEmpty()) { LOG.debug("No children found in " + parentZnode + * + Constants.DEFAULT_ZOOKEEPER_ZNODE_CLIENTS); } else { clientIds.clear(); + * for (String aChild : children) { StringTokenizer st = new + * StringTokenizer(aChild, ":"); while (st.hasMoreTokens()) { + * st.nextToken(); // skip ip address String timestamp = st.nextToken(); // + * timestamp clientIds.add(timestamp); } } } + * + * children.clear(); try { children = zkc.getChildren(parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS, null); } catch (Exception e) + * { + * LOG.error("Exception reading Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS, " + * + e); } + * + * if (children.isEmpty()) { LOG.debug("No workloads found in " + + * parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS); return; } + * else { // workloads.clear(); try { for (String aChild : children) { // + * workloads.add(get(aChild)); } } catch (Exception e) { + * e.printStackTrace(); } } + * + * LOG.debug("Workloads found in " + parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_WORKLOADS); + * + * Date now = new Date(); for (Request aWorkload : workloads) { + * LOG.debug("Workload=" + aWorkload); Map reqkv = + * aWorkload.getData().getKeyValues(); if + * (reqkv.get(Constants.STATE).getStringValue() == null || + * reqkv.get(Constants.STATE).getStringValue() + * .equalsIgnoreCase(Constants.COMPLETED)) { if (now.getTime() - + * reqkv.get(Constants.BEGIN_TIMESTAMP).getLongValue() >= + * Constants.THIRTY_SECONDS) { try { delete(reqkv.get(Constants.WORKLOAD_ID) + * .getStringValue()); LOG.debug("Deleted workload[" + + * reqkv.get(Constants.WORKLOAD_ID) .getStringValue() + "], Workload[" + + * aWorkload + "]"); } catch (Exception e) { e.printStackTrace(); } } } else + * { // For external workloads e.g., HIVE clients that connect using // + * WmsClient object // and also don't have a client registered in + * /wms/clients // change them // to COMPLETED/CLIENT_DISAPPEARED state. + * This is mainly for // HIVE queries that have been ctrl-c'd // so they + * have begun but will never have end from client. 
// if + * (clientIds.contains(Long.valueOf( + * aWorkload.getHeader().getClientTimestamp()).toString())) { + * LOG.debug("Client ID found for [" + + * aWorkload.getHeader().getClientTimestamp() + "], Workload[" + aWorkload + + * "]"); continue; } else { LOG.debug("Client ID not found for [" + + * aWorkload.getHeader().getClientTimestamp() + "], Workload[" + aWorkload + + * "]"); // aWorkload.getData().setState(Constants.COMPLETED); + * reqkv.put(Constants.STATE, new + * KeyValue().setStringValue(Constants.COMPLETED)); // + * aWorkload.getData().setSubState(Constants.CLIENT_DISAPPEARED); + * reqkv.put(Constants.SUBSTATE, new KeyValue() + * .setStringValue(Constants.CLIENT_DISAPPEARED)); try { + * put(reqkv.get(Constants.WORKLOAD_ID).getStringValue(), aWorkload); } + * catch (Exception e) { e.printStackTrace(); } } } } } + * + * public String createWorkloadId() throws Exception { // see // + * http://docs.oracle.com/javase/1.5.0/docs/api/java/util/Formatter.html + * Date date = new Date(); String root = String.format( + * "%1$s%2$tY% + * reqkv = r.getData().getKeyValues(); reqkv.put("workloadId", new + * KeyValue().setStringValue(workloadId)); reqkv.put("operation", new + * KeyValue() .setIntValue(Operation.OPERATION_BEGIN.getValue())); + * reqkv.put("state", new KeyValue().setStringValue(Constants.RUNNING)); + * reqkv.put("subState", new KeyValue().setStringValue(Constants.BEGIN)); + * reqkv.put("beginTimestamp", new + * KeyValue().setLongValue(System.currentTimeMillis())); + * reqkv.put("endTimestamp", new + * KeyValue().setLongValue(System.currentTimeMillis())); reqkv.put("type", + * new KeyValue().setStringValue(Constants.TRAFODION)); reqkv.put( + * "queryId", new KeyValue() .setStringValue( + * "MXID11000001075212235857042874154000000000106U6553500_4_SQL_DATASOURCE_Q8" + * )); reqkv.put("queryText", new + * KeyValue().setStringValue("This is some query text")); + * + * // serialize long beginTs = System.currentTimeMillis(); byte[] bytes = + * store.serialize(r); long endTs = System.currentTimeMillis(); + * System.out.println("serialize took " + (endTs - beginTs) + " millis"); + * System.out.println("bytes=" + bytes + "\n"); + * + * // deserialize beginTs = System.currentTimeMillis(); Request dr = + * store.deserialize(bytes); endTs = System.currentTimeMillis(); + * System.out.println("deserialize took " + (endTs - beginTs) + " millis"); + * System.out.println("deserialize=" + dr + "\n"); + * + * // put beginTs = System.currentTimeMillis(); store.put(workloadId, r); + * endTs = System.currentTimeMillis(); System.out.println("put took " + + * (endTs - beginTs) + " millis"); System.out.println("put=" + r + "\n"); + * + * // get beginTs = System.currentTimeMillis(); dr = store.get(workloadId); + * endTs = System.currentTimeMillis(); System.out.println("get took " + + * (endTs - beginTs) + " millis"); System.out.println("get=" + dr + "\n"); + * + * } catch (Exception e) { e.printStackTrace(); } } + */ } \ No newline at end of file diff --git a/wms/src/main/java/org/trafodion/wms/server/workload/WorkloadsOffender.java b/wms/src/main/java/org/trafodion/wms/server/workload/WorkloadsOffender.java index 25857bae..d4334dec 100644 --- a/wms/src/main/java/org/trafodion/wms/server/workload/WorkloadsOffender.java +++ b/wms/src/main/java/org/trafodion/wms/server/workload/WorkloadsOffender.java @@ -10,64 +10,66 @@ import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.KeeperException; -import org.trafodion.wms.thrift.generated.*; import org.trafodion.wms.Constants; import 
org.trafodion.wms.zookeeper.ZkClient; public final class WorkloadsOffender implements Runnable { - private static final Log LOG = LogFactory.getLog(WorkloadsOffender.class); - private Thread thrd; - private static ZkClient zkc = null; - private static String parentZnode = null; + private static final Log LOG = LogFactory.getLog(WorkloadsOffender.class); + private Thread thrd; + private static ZkClient zkc = null; + private static String parentZnode = null; - public WorkloadsOffender(ZkClient zkc,String parentZnode) { - this.zkc = zkc; - this.parentZnode = parentZnode; - thrd = new Thread(this); - thrd.setDaemon(true); - thrd.start(); - } + public WorkloadsOffender(ZkClient zkc, String parentZnode) { + this.zkc = zkc; + this.parentZnode = parentZnode; + thrd = new Thread(this); + thrd.setDaemon(true); + thrd.start(); + } - public void run() { - while(true){ - LOG.debug("Checking for offending workloads"); - publishOffenders(); + public void run() { + while (true) { + LOG.debug("Checking for offending workloads"); + publishOffenders(); - try { - Thread.sleep(30000); - } catch (InterruptedException e) { } - } - } - - public synchronized void publishOffenders() { - int published=0; - - try { -/* - workloads = target.getWorkloads(); - Date now = new Date(); - for (int i=0; i < workloads.size(); i++) { - WorkloadItem workload = workloads.get(i); - - if(workload.getRequest().getJobType() == JobType.TRAFODION) { - if(now.getTime() - workload.getRequest().getStartTimestamp() >= Constants.THIRTY_SECONDS ){ - Stat stat = zkc.exists(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS + "/" + workload.getRequest().getJobId().toString(),false); - if(stat == null) { - zkc.create(parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS + "/" + workload.getRequest().getJobId().toString(),new byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL); - LOG.debug("Published offending workload [" + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS + "/" + workload.getRequest().getJobId().toString() + "] to " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS); - published++; - } - } - } - - } - //LOG.debug("[" + published + "] offending workload(s) published to " + parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS); -*/ - } catch (Exception e) { - zkc = null; - LOG.error(e); - } - } -} + try { + Thread.sleep(30000); + } catch (InterruptedException e) { + } + } + } + public synchronized void publishOffenders() { + int published = 0; + try { + /* + * workloads = target.getWorkloads(); Date now = new Date(); for + * (int i=0; i < workloads.size(); i++) { WorkloadItem workload = + * workloads.get(i); + * + * if(workload.getRequest().getJobType() == JobType.TRAFODION) { + * if(now.getTime() - workload.getRequest().getStartTimestamp() >= + * Constants.THIRTY_SECONDS ){ Stat stat = zkc.exists(parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS + "/" + + * workload.getRequest().getJobId().toString(),false); if(stat == + * null) { zkc.create(parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS + "/" + + * workload.getRequest().getJobId().toString(),new + * byte[0],ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL); + * LOG.debug("Published offending workload [" + parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS + "/" + + * workload.getRequest().getJobId().toString() + "] to " + + * parentZnode + Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS); + * published++; } } } + * + * } //LOG.debug("[" + published + + * "] offending 
workload(s) published to " + parentZnode + + * Constants.DEFAULT_ZOOKEEPER_ZNODE_TRAFODION_RMS); + */ + } catch (Exception e) { + zkc = null; + LOG.error(e); + } + } +} diff --git a/wms/src/main/java/org/trafodion/wms/util/InfoServer.java b/wms/src/main/java/org/trafodion/wms/util/InfoServer.java index 8f6199ca..ba24f559 100644 --- a/wms/src/main/java/org/trafodion/wms/util/InfoServer.java +++ b/wms/src/main/java/org/trafodion/wms/util/InfoServer.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/util/VersionInfo.java b/wms/src/main/java/org/trafodion/wms/util/VersionInfo.java index 1880323b..5e4ab98d 100644 --- a/wms/src/main/java/org/trafodion/wms/util/VersionInfo.java +++ b/wms/src/main/java/org/trafodion/wms/util/VersionInfo.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/util/WmsConfiguration.java b/wms/src/main/java/org/trafodion/wms/util/WmsConfiguration.java index 34b91a40..0a853b5f 100644 --- a/wms/src/main/java/org/trafodion/wms/util/WmsConfiguration.java +++ b/wms/src/main/java/org/trafodion/wms/util/WmsConfiguration.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/version/util/VerGen.java b/wms/src/main/java/org/trafodion/wms/version/util/VerGen.java index 9236bbe0..b2d4e82c 100644 --- a/wms/src/main/java/org/trafodion/wms/version/util/VerGen.java +++ b/wms/src/main/java/org/trafodion/wms/version/util/VerGen.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/zookeeper/WmsQuorumPeer.java b/wms/src/main/java/org/trafodion/wms/zookeeper/WmsQuorumPeer.java index dde3b9d3..7cd53eac 100644 --- a/wms/src/main/java/org/trafodion/wms/zookeeper/WmsQuorumPeer.java +++ b/wms/src/main/java/org/trafodion/wms/zookeeper/WmsQuorumPeer.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/zookeeper/ZKConfig.java b/wms/src/main/java/org/trafodion/wms/zookeeper/ZKConfig.java index e4b3376f..e9a62433 100644 --- a/wms/src/main/java/org/trafodion/wms/zookeeper/ZKConfig.java +++ b/wms/src/main/java/org/trafodion/wms/zookeeper/ZKConfig.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. 
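Both PStats and WorkloadsOffender publish by creating ephemeral znodes under the parent znode, so ZooKeeper retires the entry automatically when the owning session ends. A bare-bones sketch of that publish step against the stock org.apache.zookeeper client instead of the project's ZkClient wrapper; the connect string, session timeout, and path are placeholders:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralPublishSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder connect string and session timeout.
            ZooKeeper zk = new ZooKeeper("localhost:2181", 30000, new Watcher() {
                public void process(WatchedEvent event) { /* session events ignored in this sketch */ }
            });

            String znode = "/wms/stats/myhost:1";   // placeholder path
            if (zk.exists(znode, false) != null) {
                zk.delete(znode, -1);               // clear a stale node left by a previous run
            }
            // EPHEMERAL: removed automatically when this session ends.
            zk.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            zk.setData(znode, "payload".getBytes("UTF-8"), -1);  // later updates overwrite the data
            Thread.sleep(10000);
            zk.close();
        }
    }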
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/zookeeper/ZKServerTool.java b/wms/src/main/java/org/trafodion/wms/zookeeper/ZKServerTool.java index 82909ad4..99ceffaf 100644 --- a/wms/src/main/java/org/trafodion/wms/zookeeper/ZKServerTool.java +++ b/wms/src/main/java/org/trafodion/wms/zookeeper/ZKServerTool.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/zookeeper/ZkClient.java b/wms/src/main/java/org/trafodion/wms/zookeeper/ZkClient.java index cee50dd3..bcde5ad9 100644 --- a/wms/src/main/java/org/trafodion/wms/zookeeper/ZkClient.java +++ b/wms/src/main/java/org/trafodion/wms/zookeeper/ZkClient.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/java/org/trafodion/wms/zookeeper/ZooKeeperMainServerArg.java b/wms/src/main/java/org/trafodion/wms/zookeeper/ZooKeeperMainServerArg.java index 5cc33c28..df1a52c2 100644 --- a/wms/src/main/java/org/trafodion/wms/zookeeper/ZooKeeperMainServerArg.java +++ b/wms/src/main/java/org/trafodion/wms/zookeeper/ZooKeeperMainServerArg.java @@ -1,5 +1,5 @@ /** - *(C) Copyright 2013 Hewlett-Packard Development Company, L.P. + *(C) Copyright 2015 Hewlett-Packard Development Company, L.P. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/wms/src/main/resources/wms-default.xml b/wms/src/main/resources/wms-default.xml index 78750e1d..e052279e 100644 --- a/wms/src/main/resources/wms-default.xml +++ b/wms/src/main/resources/wms-default.xml @@ -2,7 +2,7 @@ - wms.tmp.dir ${java.io.tmpdir}/wms-${user.name} @@ -340,98 +325,6 @@ before timing out. The default is 0 or no timeout. - - vertica.workloads.enabled - false - - If set to true start Vertica client and monitor it's workloads. - - - - vertica.query.timeout.min - 0 - - The time in minutes that a workload duration is active - before timing out for vertica queries. The default is 0 or no timeout. - - - - vertica.jdbc.url - jdbc:vertica://localhost:5433/VMart - - The Vertica JDBC driver url. - - - - vertica.jdbc.url.user - vertica - - The Vertica JDBC driver user. - - - - vertica.jdbc.url.password - vertica - - The Vertica JDBC driver password. - - - - vertica.jdbc.collect.delay - 10 - - The delay in seconds when Vertica connector starts to collect data measured from start of the query. - - - - vertica.jdbc.collect.timeout - 10 - - The Vertica connector timeout in seconds between collecting cicles. - - - - hadoop.workloads.enabled - false - - If set to true start Hadoop client and monitor it's workloads. - - - - hadoop.mapred.job.tracker - localhost - - The Hadoop JobTracker hostname. The default is localhost - - - - hadoop.mapred.job.tracker.port - - - The Hadoop JobTracker port. - - - - yarn.rest.enabled - false - - If set to true start Yarn Rest Client and monitor it's workloads. 
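The connector properties removed above (vertica.*, hadoop.*, yarn.rest.*) were read through the Hadoop Configuration that WmsConfiguration.create() builds from wms-default.xml and wms-site.xml. A small sketch of how such a flag pair would be consumed, using yarn.rest.enabled / yarn.rest.url as the example; the property names come from the deleted entries, the rest is illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class ConnectorConfigSketch {
        public static void main(String[] args) {
            // Stand-in for WmsConfiguration.create(): layers the WMS resources
            // (assumed to be on the classpath) onto a plain Configuration.
            Configuration conf = new Configuration();
            conf.addResource("wms-default.xml");
            conf.addResource("wms-site.xml");

            boolean yarnRestEnabled = conf.getBoolean("yarn.rest.enabled", false);
            String yarnRestUrl = conf.get("yarn.rest.url",
                    "http://localhost:8088/ws/v1/cluster/apps");

            if (yarnRestEnabled) {
                System.out.println("Would start the YARN REST connector against " + yarnRestUrl);
            } else {
                System.out.println("YARN REST connector disabled");
            }
        }
    }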
- - - - yarn.rest.url - http://localhost:8088/ws/v1/cluster/apps - - The Yarn Rest Client url. - - - - yarn.resourcemanager.address - 0.0.0.0:8032 - - The Yarn RM address. - - wms.server.workload.cleaner.initial.delay 30 @@ -446,15 +339,6 @@ run of the workload store cleaner. - - wms.server.cep.vertica.keys - operation,int,type,string,userName,string,applicationName,string,transactionId,long,workloadId,string, - poolName,string,memoryAllocated,long,rowsSent,long,requestType,string,state,string,subState,string, - duration,long,sessionId,string,request,string,statementId,long,beginTimestamp,long,endTimestamp,long - - A comma separated list of Vertica workload keys/types. - - wms.server.cep.trafodion.keys operation,int,type,string,deltaNumRowsIUD,long,deltaRowsRetrieved,long,deltaRowsAccessed,long,deltaTotalSelects,long, diff --git a/wms/src/main/resources/wms-webapps/master/index.html b/wms/src/main/resources/wms-webapps/master/index.html index 69d5c754..532a4bb7 100644 --- a/wms/src/main/resources/wms-webapps/master/index.html +++ b/wms/src/main/resources/wms-webapps/master/index.html @@ -1,6 +1,6 @@ -
-HBase Default Configuration
-The documentation below is generated using the default hbase configuration file, hbase-default.xml, as source.
- Default:
- - diff --git a/wms/src/site/resources/css/freebsd_docbook.css b/wms/src/site/resources/css/freebsd_docbook.css deleted file mode 100644 index 3d40fa70..00000000 --- a/wms/src/site/resources/css/freebsd_docbook.css +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (c) 2001, 2003, 2010 The FreeBSD Documentation Project - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD: doc/share/misc/docbook.css,v 1.15 2010/03/20 04:15:01 hrs Exp $ - */ - -BODY ADDRESS { - line-height: 1.3; - margin: .6em 0; -} - -BODY BLOCKQUOTE { - margin-top: .75em; - line-height: 1.5; - margin-bottom: .75em; -} - -HTML BODY { - margin: 1em 8% 1em 10%; - line-height: 1.2; -} - -.LEGALNOTICE { - font-size: small; - font-variant: small-caps; -} - -BODY DIV { - margin: 0; -} - -DL { - margin: .8em 0; - line-height: 1.2; -} - -BODY FORM { - margin: .6em 0; -} - -H1, H2, H3, H4, H5, H6, -DIV.EXAMPLE P B, -.QUESTION, -DIV.TABLE P B, -DIV.PROCEDURE P B { - color: #990000; -} - -BODY H1, BODY H2, BODY H3, BODY H4, BODY H5, BODY H6 { - line-height: 1.3; - margin-left: 0; -} - -BODY H1, BODY H2 { - margin: .8em 0 0 -4%; -} - -BODY H3, BODY H4 { - margin: .8em 0 0 -3%; -} - -BODY H5 { - margin: .8em 0 0 -2%; -} - -BODY H6 { - margin: .8em 0 0 -1%; -} - -BODY HR { - margin: .6em; - border-width: 0 0 1px 0; - border-style: solid; - border-color: #cecece; -} - -BODY IMG.NAVHEADER { - margin: 0 0 0 -4%; -} - -OL { - margin: 0 0 0 5%; - line-height: 1.2; -} - -BODY PRE { - margin: .75em 0; - line-height: 1.0; - font-family: monospace; -} - -BODY TD, BODY TH { - line-height: 1.2; -} - -UL, BODY DIR, BODY MENU { - margin: 0 0 0 5%; - line-height: 1.2; -} - -HTML { - margin: 0; - padding: 0; -} - -BODY P B.APPLICATION { - color: #000000; -} - -.FILENAME { - color: #007a00; -} - -.GUIMENU, .GUIMENUITEM, .GUISUBMENU, -.GUILABEL, .INTERFACE, -.SHORTCUT, .SHORTCUT .KEYCAP { - font-weight: bold; -} - -.GUIBUTTON { - background-color: #CFCFCF; - padding: 2px; -} - -.ACCEL { - background-color: #F0F0F0; - text-decoration: underline; -} - -.SCREEN { - padding: 1ex; -} - -.PROGRAMLISTING { - padding: 1ex; - background-color: #eee; - border: 1px solid #ccc; -} - -@media screen { /* hide from IE3 */ - a[href]:hover { background: #ffa } -} - -BLOCKQUOTE.NOTE { - color: #222; - 
background: #eee; - border: 1px solid #ccc; - padding: 0.4em 0.4em; - width: 85%; -} - -BLOCKQUOTE.TIP { - color: #004F00; - background: #d8ecd6; - border: 1px solid green; - padding: 0.2em 2em; - width: 85%; -} - -BLOCKQUOTE.IMPORTANT { - font-style:italic; - border: 1px solid #a00; - border-left: 12px solid #c00; - padding: 0.1em 1em; -} - -BLOCKQUOTE.WARNING { - color: #9F1313; - background: #f8e8e8; - border: 1px solid #e59595; - padding: 0.2em 2em; - width: 85%; -} - -.EXAMPLE { - background: #fefde6; - border: 1px solid #f1bb16; - margin: 1em 0; - padding: 0.2em 2em; - width: 90%; -} - -.INFORMALTABLE TABLE.CALSTABLE TR TD { - padding-left: 1em; - padding-right: 1em; -} diff --git a/wms/src/site/resources/css/site.css b/wms/src/site/resources/css/site.css deleted file mode 100644 index 7978259a..00000000 --- a/wms/src/site/resources/css/site.css +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -a.externalLink, a.externalLink:link, a.externalLink:visited, a.externalLink:active, a.externalLink:hover { - background: none; - padding-right: 0; -} - -/* -body ul { - list-style-type: square; -} -*/ - -#downloadbox { - float: right; - margin: 0 10px 20px 20px; - padding: 5px; - border: 1px solid #999; - background-color: #eee; -} - -#downloadbox h5 { - color: #000; - margin: 0; - border-bottom: 1px solid #aaaaaa; - font-size: smaller; - padding: 0; -} - -#downloadbox p { - margin-top: 1em; - margin-bottom: 0; -} - -#downloadbox ul { - margin-top: 0; - margin-bottom: 1em; - list-style-type: disc; -} - -#downloadbox li { - font-size: smaller; -} - -/* -h4 { - padding: 0; - border: none; - color: #000; - margin: 0; - font-size: larger; - font-weight: bold; -} -*/ - -#banner { - background: none; -} - -#banner img { - padding: 10px; - margin: auto; - display: block; - background: none; - float: center; - height:; -} - -#breadcrumbs { - background-image: url(); -} - -#footer { - border-top: 0px; -} - -.frontpagebox { - float: left; - text-align: center; - width: 15em; - margin-left: 0.5em; - margin-right: 0.5em; - margin-top: 2em; -} - -.headline { - font-size: 120%; - font-weight: bold; - padding-top: 1px; - padding-bottom: 5px; - background-image: url(../images/breadcrumbs.jpg); - background-repeat: repeat-x; -} - -/* -#leftColumn { - display: none !important -} - -#bodyColumn { - margin-left: 1.5em; -} -*/ - - diff --git a/wms/src/site/resources/images/architecture.gif b/wms/src/site/resources/images/architecture.gif deleted file mode 100644 index 8d84a23b..00000000 Binary files a/wms/src/site/resources/images/architecture.gif and /dev/null differ diff --git a/wms/src/site/resources/images/favicon.ico b/wms/src/site/resources/images/favicon.ico deleted file mode 100644 index 6e4d0f71..00000000 Binary 
files a/wms/src/site/resources/images/favicon.ico and /dev/null differ diff --git a/wms/src/site/resources/images/hadoop-logo.jpg b/wms/src/site/resources/images/hadoop-logo.jpg deleted file mode 100644 index 809525d9..00000000 Binary files a/wms/src/site/resources/images/hadoop-logo.jpg and /dev/null differ diff --git a/wms/src/site/resources/images/hbase_logo.png b/wms/src/site/resources/images/hbase_logo.png deleted file mode 100644 index 615b0a80..00000000 Binary files a/wms/src/site/resources/images/hbase_logo.png and /dev/null differ diff --git a/wms/src/site/resources/images/hbase_logo.svg b/wms/src/site/resources/images/hbase_logo.svg deleted file mode 100644 index c4b3343e..00000000 --- a/wms/src/site/resources/images/hbase_logo.svg +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - - - - diff --git a/wms/src/site/resources/images/hfile.png b/wms/src/site/resources/images/hfile.png deleted file mode 100644 index 57629701..00000000 Binary files a/wms/src/site/resources/images/hfile.png and /dev/null differ diff --git a/wms/src/site/resources/images/hfilev2.png b/wms/src/site/resources/images/hfilev2.png deleted file mode 100644 index 54cc0cf5..00000000 Binary files a/wms/src/site/resources/images/hfilev2.png and /dev/null differ diff --git a/wms/src/site/resources/images/replication_overview.png b/wms/src/site/resources/images/replication_overview.png deleted file mode 100644 index 47d7b4c0..00000000 Binary files a/wms/src/site/resources/images/replication_overview.png and /dev/null differ diff --git a/wms/src/site/site.vm b/wms/src/site/site.vm deleted file mode 100644 index 03c2e758..00000000 --- a/wms/src/site/site.vm +++ /dev/null @@ -1,530 +0,0 @@ - -#* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*# - -#macro ( link $href $name $target $img $position $alt $border $width $height ) - #set ( $linkTitle = ' title="' + $name + '"' ) - #if( $target ) - #set ( $linkTarget = ' target="' + $target + '"' ) - #else - #set ( $linkTarget = "" ) - #end - #if ( ( $href.toLowerCase().startsWith("http") || $href.toLowerCase().startsWith("https") ) ) - #set ( $linkClass = ' class="externalLink"' ) - #else - #set ( $linkClass = "" ) - #end - #if ( $img ) - #if ( $position == "left" ) - #image($img $alt $border $width $height)$name - #else - $name #image($img $alt $border $width $height) - #end - #else - $name - #end -#end -## -#macro ( image $img $alt $border $width $height ) - #if( $img ) - #if ( ! 
( $img.toLowerCase().startsWith("http") || $img.toLowerCase().startsWith("https") ) ) - #set ( $imgSrc = $PathTool.calculateLink( $img, $relativePath ) ) - #set ( $imgSrc = $imgSrc.replaceAll( "\\", "/" ) ) - #set ( $imgSrc = ' src="' + $imgSrc + '"' ) - #else - #set ( $imgSrc = ' src="' + $img + '"' ) - #end - #if( $alt ) - #set ( $imgAlt = ' alt="' + $alt + '"' ) - #else - #set ( $imgAlt = ' alt=""' ) - #end - #if( $border ) - #set ( $imgBorder = ' border="' + $border + '"' ) - #else - #set ( $imgBorder = "" ) - #end - #if( $width ) - #set ( $imgWidth = ' width="' + $width + '"' ) - #else - #set ( $imgWidth = "" ) - #end - #if( $height ) - #set ( $imgHeight = ' height="' + $height + '"' ) - #else - #set ( $imgHeight = "" ) - #end - - #end -#end -#macro ( banner $banner $id ) - #if ( $banner ) - #if( $banner.href ) - - #else - - #end - #end -#end -## -#macro ( links $links ) - #set ( $counter = 0 ) - #foreach( $item in $links ) - #set ( $counter = $counter + 1 ) - #set ( $currentItemHref = $PathTool.calculateLink( $item.href, $relativePath ) ) - #set ( $currentItemHref = $currentItemHref.replaceAll( "\\", "/" ) ) - #link( $currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height ) - #if ( $links.size() > $counter ) - | - #end - #end -#end -## -#macro ( breadcrumbs $breadcrumbs ) - #set ( $counter = 0 ) - #foreach( $item in $breadcrumbs ) - #set ( $counter = $counter + 1 ) - #set ( $currentItemHref = $PathTool.calculateLink( $item.href, $relativePath ) ) - #set ( $currentItemHref = $currentItemHref.replaceAll( "\\", "/" ) ) -## - #if ( $currentItemHref == $alignedFileName || $currentItemHref == "" ) - $item.name - #else - #link( $currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height ) - #end - #if ( $breadcrumbs.size() > $counter ) - > - #end - #end -#end -## -#macro ( displayTree $display $item ) - #if ( $item && $item.items && $item.items.size() > 0 ) - #foreach( $subitem in $item.items ) - #set ( $subitemHref = $PathTool.calculateLink( $subitem.href, $relativePath ) ) - #set ( $subitemHref = $subitemHref.replaceAll( "\\", "/" ) ) - #if ( $alignedFileName == $subitemHref ) - #set ( $display = true ) - #end -## - #displayTree( $display $subitem ) - #end - #end -#end -## -#macro ( menuItem $item ) - #set ( $collapse = "none" ) - #set ( $currentItemHref = $PathTool.calculateLink( $item.href, $relativePath ) ) - #set ( $currentItemHref = $currentItemHref.replaceAll( "\\", "/" ) ) -## - #if ( $item && $item.items && $item.items.size() > 0 ) - #if ( $item.collapse == false ) - #set ( $collapse = "expanded" ) - #else - ## By default collapsed - #set ( $collapse = "collapsed" ) - #end -## - #set ( $display = false ) - #displayTree( $display $item ) -## - #if ( $alignedFileName == $currentItemHref || $display ) - #set ( $collapse = "expanded" ) - #end - #end -
  • - #if ( $item.img ) - #if ( $item.position == "left" ) - #if ( $alignedFileName == $currentItemHref ) - #image($item.img $item.alt $item.border $item.width $item.height) $item.name - #else - #link($currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height) - #end - #else - #if ( $alignedFileName == $currentItemHref ) - $item.name #image($item.img $item.alt $item.border $item.width $item.height) - #else - #link($currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height) - #end - #end - #else - #if ( $alignedFileName == $currentItemHref ) - $item.name - #else - #link( $currentItemHref $item.name $item.target $item.img $item.position $item.alt $item.border $item.width $item.height ) - #end - #end - #if ( $item && $item.items && $item.items.size() > 0 ) - #if ( $collapse == "expanded" ) -
      - #foreach( $subitem in $item.items ) - #menuItem( $subitem ) - #end -
    - #end - #end -
  • -#end -## -#macro ( mainMenu $menus ) - #foreach( $menu in $menus ) - #if ( $menu.name ) - #if ( $menu.img ) - #if( $menu.position ) - #set ( $position = $menu.position ) - #else - #set ( $position = "left" ) - #end -## - #if ( ! ( $menu.img.toLowerCase().startsWith("http") || $menu.img.toLowerCase().startsWith("https") ) ) - #set ( $src = $PathTool.calculateLink( $menu.img, $relativePath ) ) - #set ( $src = $src.replaceAll( "\\", "/" ) ) - #set ( $src = ' src="' + $src + '"' ) - #else - #set ( $src = ' src="' + $menu.img + '"' ) - #end -## - #if( $menu.alt ) - #set ( $alt = ' alt="' + $menu.alt + '"' ) - #else - #set ( $alt = ' alt="' + $menu.name + '"' ) - #end -## - #if( $menu.border ) - #set ( $border = ' border="' + $menu.border + '"' ) - #else - #set ( $border = ' border="0"' ) - #end -## - #if( $menu.width ) - #set ( $width = ' width="' + $menu.width + '"' ) - #else - #set ( $width = "" ) - #end - #if( $menu.height ) - #set ( $height = ' height="' + $menu.height + '"' ) - #else - #set ( $height = "" ) - #end -## - #set ( $img = '" ) -## - #if ( $position == "left" ) -
    $img $menu.name
    - #else -
    $menu.name $img
    - #end - #else -
    $menu.name
    - #end - #end - #if ( $menu.items && $menu.items.size() > 0 ) -
      - #foreach( $item in $menu.items ) - #menuItem( $item ) - #end -
    - #end - #end -#end -## -#macro ( copyright ) - #if ( $project ) - #if ( ${project.organization} && ${project.organization.name} ) - #set ( $period = "" ) - #else - #set ( $period = "." ) - #end -## - #set ( $currentYear = ${currentDate.year} + 1900 ) -## - #if ( ${project.inceptionYear} && ( ${project.inceptionYear} != ${currentYear.toString()} ) ) - ${project.inceptionYear}-${currentYear}${period} - #else - ${currentYear}${period} - #end -## - #if ( ${project.organization} ) - #if ( ${project.organization.name} && ${project.organization.url} ) - ${project.organization.name}. - #elseif ( ${project.organization.name} ) - ${project.organization.name}. - #end - #end - #end -#end -## -#macro ( publishDate $position $publishDate $version ) - #if ( $publishDate && $publishDate.format ) - #set ( $format = $publishDate.format ) - #else - #set ( $format = "yyyy-MM-dd" ) - #end -## - $dateFormat.applyPattern( $format ) -## - #set ( $dateToday = $dateFormat.format( $currentDate ) ) -## - #if ( $publishDate && $publishDate.position ) - #set ( $datePosition = $publishDate.position ) - #else - #set ( $datePosition = "left" ) - #end -## - #if ( $version ) - #if ( $version.position ) - #set ( $versionPosition = $version.position ) - #else - #set ( $versionPosition = "left" ) - #end - #else - #set ( $version = "" ) - #set ( $versionPosition = "left" ) - #end -## - #set ( $breadcrumbs = $decoration.body.breadcrumbs ) - #set ( $links = $decoration.body.links ) - - #if ( $datePosition.equalsIgnoreCase( "right" ) && $links && $links.size() > 0 ) - #set ( $prefix = " |" ) - #else - #set ( $prefix = "" ) - #end -## - #if ( $datePosition.equalsIgnoreCase( $position ) ) - #if ( ( $datePosition.equalsIgnoreCase( "right" ) ) || ( $datePosition.equalsIgnoreCase( "bottom" ) ) ) - $prefix $i18n.getString( "site-renderer", $locale, "template.lastpublished" ): $dateToday - #if ( $versionPosition.equalsIgnoreCase( $position ) ) -  | $i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version} - #end - #elseif ( ( $datePosition.equalsIgnoreCase( "navigation-bottom" ) ) || ( $datePosition.equalsIgnoreCase( "navigation-top" ) ) ) -
    - $i18n.getString( "site-renderer", $locale, "template.lastpublished" ): $dateToday - #if ( $versionPosition.equalsIgnoreCase( $position ) ) -  | $i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version} - #end -
    - #elseif ( $datePosition.equalsIgnoreCase("left") ) -
    - $i18n.getString( "site-renderer", $locale, "template.lastpublished" ): $dateToday - #if ( $versionPosition.equalsIgnoreCase( $position ) ) -  | $i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version} - #end - #if ( $breadcrumbs && $breadcrumbs.size() > 0 ) - | #breadcrumbs( $breadcrumbs ) - #end -
    - #end - #elseif ( $versionPosition.equalsIgnoreCase( $position ) ) - #if ( ( $versionPosition.equalsIgnoreCase( "right" ) ) || ( $versionPosition.equalsIgnoreCase( "bottom" ) ) ) - $prefix $i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version} - #elseif ( ( $versionPosition.equalsIgnoreCase( "navigation-bottom" ) ) || ( $versionPosition.equalsIgnoreCase( "navigation-top" ) ) ) -
    - $i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version} -
    - #elseif ( $versionPosition.equalsIgnoreCase("left") ) -
    - $i18n.getString( "site-renderer", $locale, "template.version" ): ${project.version} - #if ( $breadcrumbs && $breadcrumbs.size() > 0 ) - | #breadcrumbs( $breadcrumbs ) - #end -
    - #end - #elseif ( $position.equalsIgnoreCase( "left" ) ) - #if ( $breadcrumbs && $breadcrumbs.size() > 0 ) -
    - #breadcrumbs( $breadcrumbs ) -
    - #end - #end -#end -## -#macro ( poweredByLogo $poweredBy ) - #if( $poweredBy ) - #foreach ($item in $poweredBy) - #if( $item.href ) - #set ( $href = $PathTool.calculateLink( $item.href, $relativePath ) ) - #set ( $href = $href.replaceAll( "\\", "/" ) ) - #else - #set ( $href="http://maven.apache.org/" ) - #end -## - #if( $item.name ) - #set ( $name = $item.name ) - #else - #set ( $name = $i18n.getString( "site-renderer", $locale, "template.builtby" ) ) - #set ( $name = "${name} Maven" ) - #end -## - #if( $item.img ) - #set ( $img = $item.img ) - #else - #set ( $img = "images/logos/maven-feather.png" ) - #end -## - #if ( ! ( $img.toLowerCase().startsWith("http") || $img.toLowerCase().startsWith("https") ) ) - #set ( $img = $PathTool.calculateLink( $img, $relativePath ) ) - #set ( $img = $src.replaceAll( "\\", "/" ) ) - #end -## - #if( $item.alt ) - #set ( $alt = ' alt="' + $item.alt + '"' ) - #else - #set ( $alt = ' alt="' + $name + '"' ) - #end -## - #if( $item.border ) - #set ( $border = ' border="' + $item.border + '"' ) - #else - #set ( $border = "" ) - #end -## - #if( $item.width ) - #set ( $width = ' width="' + $item.width + '"' ) - #else - #set ( $width = "" ) - #end - #if( $item.height ) - #set ( $height = ' height="' + $item.height + '"' ) - #else - #set ( $height = "" ) - #end -## - - - - #end - #if( $poweredBy.isEmpty() ) - - $i18n.getString( - - #end - #else - - $i18n.getString( - - #end -#end -## - - - - $title - - - -#foreach( $author in $authors ) - -#end -#if ( $dateCreation ) - -#end -#if ( $dateRevision ) - -#end -#if ( $locale ) - -#end - #if ( $decoration.body.head ) - #foreach( $item in $decoration.body.head.getChildren() ) - ## Workaround for DOXIA-150 due to a non-desired behaviour in p-u - ## @see org.codehaus.plexus.util.xml.Xpp3Dom#toString() - ## @see org.codehaus.plexus.util.xml.Xpp3Dom#toUnescapedString() - #set ( $documentHeader = "" ) - #set ( $documentHeader = $documentHeader.replaceAll( "\\", "" ) ) - #if ( $item.name == "script" ) - $StringUtils.replace( $item.toUnescapedString(), $documentHeader, "" ) - #else - $StringUtils.replace( $item.toString(), $documentHeader, "" ) - #end - #end - #end - ## $headContent - - - - -
    - -
    -
    -
    - $bodyContent -
    -
    -
    -
    -
    - - - diff --git a/wms/src/site/site.xml b/wms/src/site/site.xml deleted file mode 100644 index 3728283c..00000000 --- a/wms/src/site/site.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - HBase - images/hbase_logo.png - http://hbase.apache.org/ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - org.apache.maven.skins - maven-stylus-skin - - diff --git a/wms/src/site/xdoc/acid-semantics.xml b/wms/src/site/xdoc/acid-semantics.xml deleted file mode 100644 index 99b0392a..00000000 --- a/wms/src/site/xdoc/acid-semantics.xml +++ /dev/null @@ -1,232 +0,0 @@ - - - - - - - - - HBase ACID Properties - - - - -
    -

    HBase is not an ACID compliant database. However, it does guarantee certain specific - properties.

    -

    This specification enumerates the ACID properties of HBase.

    -
    -
    -

    For the sake of common vocabulary, we define the following terms:

    -
    -
    Atomicity
    -
    an operation is atomic if it either completes entirely or not at all
    - -
    Consistency
    -
    - all actions cause the table to transition from one valid state directly to another - (eg a row will not disappear during an update, etc) -
    - -
    Isolation
    -
    - an operation is isolated if it appears to complete independently of any other concurrent transaction -
    - -
    Durability
    -
    any update that reports "successful" to the client will not be lost
    - -
    Visibility
    -
    an update is considered visible if any subsequent read will see the update as having been committed
    -
    -

    - The terms must and may are used as specified by RFC 2119. - In short, the word "must" implies that, if some case exists where the statement - is not true, it is a bug. The word "may" implies that, even if the guarantee - is provided in a current release, users should not rely on it. -

    -
    -
    -
      -
    • Read APIs -
        -
      • get
      • -
      • scan
      • -
      -
    • -
    • Write APIs
    • -
        -
      • put
      • -
      • batch put
      • -
      • delete
      • -
      -
    • Combination (read-modify-write) APIs
    • -
        -
      • incrementColumnValue
      • -
      • checkAndPut
      • -
      -
    -
    - -
    - -
    - -
      -
1. All mutations are atomic within a row. Any put will either wholly succeed or wholly fail.[3]
    2. -
        -
      1. An operation that returns a "success" code has completely succeeded.
      2. -
      3. An operation that returns a "failure" code has completely failed.
      4. -
      5. An operation that times out may have succeeded and may have failed. However, - it will not have partially succeeded or failed.
      6. -
      -
    3. This is true even if the mutation crosses multiple column families within a row.
    4. -
    5. APIs that mutate several rows will _not_ be atomic across the multiple rows. - For example, a multiput that operates on rows 'a','b', and 'c' may return having - mutated some but not all of the rows. In such cases, these APIs will return a list - of success codes, each of which may be succeeded, failed, or timed out as described above.
    6. -
    7. The checkAndPut API happens atomically like the typical compareAndSet (CAS) operation - found in many hardware architectures.
    8. -
    9. The order of mutations is seen to happen in a well-defined order for each row, with no - interleaving. For example, if one writer issues the mutation "a=1,b=1,c=1" and - another writer issues the mutation "a=2,b=2,c=2", the row must either - be "a=1,b=1,c=1" or "a=2,b=2,c=2" and must not be something - like "a=1,b=2,c=1".
    10. -
        -
      1. Please note that this is not true _across rows_ for multirow batch mutations.
      2. -
      -
    -
    -
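To make the checkAndPut guarantee above concrete, here is a minimal, hypothetical client-side sketch (not part of the original page); the table, family and qualifier names are illustrative assumptions:

```java
// Hypothetical sketch: checkAndPut as a compare-and-set on a single row.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable");          // assumed table name
    Put put = new Put(Bytes.toBytes("row1"));
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("state"), Bytes.toBytes("NEW"));
    // Applies the Put only if cf:state still holds the expected value "OLD";
    // the check and the write happen atomically within the row.
    boolean applied = table.checkAndPut(Bytes.toBytes("row1"),
        Bytes.toBytes("cf"), Bytes.toBytes("state"), Bytes.toBytes("OLD"), put);
    System.out.println("applied = " + applied);
    table.close();
  }
}
```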
    -
      -
    1. All rows returned via any access API will consist of a complete row that existed at - some point in the table's history.
    2. -
3. This is true across column families - i.e. a get of a full row that occurs concurrently - with some mutations 1,2,3,4,5 will return a complete row that existed at some point in time - between mutation i and i+1 for some i between 1 and 5.
    4. -
    5. The state of a row will only move forward through the history of edits to it.
    6. -
    - -
    -

    - A scan is not a consistent view of a table. Scans do - not exhibit snapshot isolation. -

    -

    - Rather, scans have the following properties: -

    - -
      -
    1. - Any row returned by the scan will be a consistent view (i.e. that version - of the complete row existed at some point in time) [1] -
    2. -
    3. - A scan will always reflect a view of the data at least as new as - the beginning of the scan. This satisfies the visibility guarantees - enumerated below.
    4. -
        -
      1. For example, if client A writes data X and then communicates via a side - channel to client B, any scans started by client B will contain data at least - as new as X.
      2. -
      3. A scan _must_ reflect all mutations committed prior to the construction - of the scanner, and _may_ reflect some mutations committed subsequent to the - construction of the scanner.
      4. -
      5. Scans must include all data written prior to the scan (except in - the case where data is subsequently mutated, in which case it _may_ reflect - the mutation)
      6. -
      -
    -

    - Those familiar with relational databases will recognize this isolation level as "read committed". -

    -

    - Please note that the guarantees listed above regarding scanner consistency - are referring to "transaction commit time", not the "timestamp" - field of each cell. That is to say, a scanner started at time t may see edits - with a timestamp value greater than t, if those edits were committed with a - "forward dated" timestamp before the scanner was constructed. -
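A minimal, hypothetical sketch of a plain scan illustrating the "read committed" behaviour described above (not part of the original page); the table and column family names are illustrative assumptions:

```java
// Hypothetical sketch: each Result returned by a scan is a consistent row
// version, but the scan as a whole is not a point-in-time snapshot.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable");          // assumed table name
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));                 // assumed family
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result row : scanner) {
        // Rows seen later in the iteration may already include mutations
        // committed after the scan began.
        System.out.println(Bytes.toString(row.getRow()));
      }
    } finally {
      scanner.close();
      table.close();
    }
  }
}
```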

    -
    -
    -
    -
      -
    1. When a client receives a "success" response for any mutation, that - mutation is immediately visible to both that client and any client with whom it - later communicates through side channels. [3]
    2. -
    3. A row must never exhibit so-called "time-travel" properties. That - is to say, if a series of mutations moves a row sequentially through a series of - states, any sequence of concurrent reads will return a subsequence of those states.
    4. -
        -
      1. For example, if a row's cells are mutated using the "incrementColumnValue" - API, a client must never see the value of any cell decrease.
      2. -
      3. This is true regardless of which read API is used to read back the mutation.
      4. -
      -
    5. Any version of a cell that has been returned to a read operation is guaranteed to - be durably stored.
    6. -
    - -
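A minimal, hypothetical sketch of the monotonic-visibility guarantee using incrementColumnValue (not part of the original page); the table and column names are illustrative assumptions:

```java
// Hypothetical sketch: a counter cell never appears to move backwards.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class CounterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable");          // assumed table name
    byte[] row = Bytes.toBytes("row1");
    byte[] cf = Bytes.toBytes("cf");
    byte[] qual = Bytes.toBytes("hits");
    // Atomic read-modify-write on one cell; returns the new value.
    long after = table.incrementColumnValue(row, cf, qual, 1L);
    // Any subsequent read must see a value at least as large as 'after'.
    long seen = Bytes.toLong(table.get(new Get(row)).getValue(cf, qual));
    System.out.println(after + " <= " + seen);
    table.close();
  }
}
```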
    -
    -
      -
    1. All visible data is also durable data. That is to say, a read will never return - data that has not been made durable on disk[2]
    2. -
    3. Any operation that returns a "success" code (eg does not throw an exception) - will be made durable.[3]
    4. -
    5. Any operation that returns a "failure" code will not be made durable - (subject to the Atomicity guarantees above)
    6. -
    7. All reasonable failure scenarios will not affect any of the guarantees of this document.
    8. - -
    -
    -
    -

    All of the above guarantees must be possible within HBase. For users who would like to trade - off some guarantees for performance, HBase may offer several tuning options. For example:

    -
      -
    • Visibility may be tuned on a per-read basis to allow stale reads or time travel.
    • -
    • Durability may be tuned to only flush data to disk on a periodic basis
    • -
    -
    -
    -
    -

    - For more information, see the client architecture or data model sections in the HBase book. -

    -
    - -
    -

[1] A consistent view is not guaranteed during intra-row scanning -- i.e. fetching a portion of - a row in one RPC then going back to fetch another portion of the row in a subsequent RPC. - Intra-row scanning happens when you set a limit on how many values to return per Scan#next - (See Scan#setBatch(int)). -

    - -

    [2] In the context of HBase, "durably on disk" implies an hflush() call on the transaction - log. This does not actually imply an fsync() to magnetic media, but rather just that the data has been - written to the OS cache on all replicas of the log. In the case of a full datacenter power loss, it is - possible that the edits are not truly durable.

    -

[3] Puts will either wholly succeed or wholly fail, provided that they are actually sent - to the RegionServer. If the writebuffer is used, Puts will not be sent until the writebuffer is filled - or it is explicitly flushed.

    - -
    - - -
    diff --git a/wms/src/site/xdoc/bulk-loads.xml b/wms/src/site/xdoc/bulk-loads.xml deleted file mode 100644 index 869502a8..00000000 --- a/wms/src/site/xdoc/bulk-loads.xml +++ /dev/null @@ -1,171 +0,0 @@ - - - - - - Bulk Loads in HBase - - - -
    -

    - HBase includes several methods of loading data into tables. - The most straightforward method is to either use the TableOutputFormat - class from a MapReduce job, or use the normal client APIs; however, - these are not always the most efficient methods. -

    -

    - This document describes HBase's bulk load functionality. The bulk load - feature uses a MapReduce job to output table data in HBase's internal - data format, and then directly loads the data files into a running - cluster. Using bulk load will use less CPU and network resources than - simply using the HBase API. -

    -
    -
    -

    - The HBase bulk load process consists of two main steps. -

    -
    -

    - The first step of a bulk load is to generate HBase data files from - a MapReduce job using HFileOutputFormat. This output format writes - out data in HBase's internal storage format so that they can be - later loaded very efficiently into the cluster. -

    -

    - In order to function efficiently, HFileOutputFormat must be - configured such that each output HFile fits within a single region. - In order to do this, jobs whose output will be bulk loaded into HBase - use Hadoop's TotalOrderPartitioner class to partition the map output - into disjoint ranges of the key space, corresponding to the key - ranges of the regions in the table. -

    -

    - HFileOutputFormat includes a convenience function, - configureIncrementalLoad(), which automatically sets up - a TotalOrderPartitioner based on the current region boundaries of a - table. -
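To make the wiring concrete, here is a minimal, hypothetical sketch of a job that prepares HFiles with configureIncrementalLoad(); it is not part of the original file, and the table name ("mytable"), the paths and the toy line-to-Put mapper are illustrative assumptions:

```java
// Hypothetical sketch: preparing HFiles for bulk load.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class HFilePrepareJob {

  // Toy mapper: each input line "rowkey<TAB>value" becomes one Put in family "data".
  static class LineToPutMapper
      extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
    @Override
    protected void map(LongWritable offset, Text line, Context ctx)
        throws IOException, InterruptedException {
      String[] parts = line.toString().split("\t", 2);
      if (parts.length < 2) return;                      // skip malformed lines
      byte[] row = Bytes.toBytes(parts[0]);
      Put put = new Put(row);
      put.add(Bytes.toBytes("data"), Bytes.toBytes("v"), Bytes.toBytes(parts[1]));
      ctx.write(new ImmutableBytesWritable(row), put);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = new Job(conf, "hfile-prepare");
    job.setJarByClass(HFilePrepareJob.class);
    job.setInputFormatClass(TextInputFormat.class);
    job.setMapperClass(LineToPutMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    // Sets the output format, reducer and TotalOrderPartitioner from the
    // table's current region boundaries, as described above.
    HFileOutputFormat.configureIncrementalLoad(job, new HTable(conf, "mytable"));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
```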

    -
    -
    -

    - After the data has been prepared using - HFileOutputFormat, it is loaded into the cluster using - completebulkload. This command line tool iterates - through the prepared data files, and for each one determines the - region the file belongs to. It then contacts the appropriate Region - Server which adopts the HFile, moving it into its storage directory - and making the data available to clients. -

    -

- If the region boundaries have changed during the course of bulk load - preparation, or between the preparation and completion steps, the - completebulkload utility will automatically split the - data files into pieces corresponding to the new boundaries. This - process is not optimally efficient, so users should take care to - minimize the delay between preparing a bulk load and importing it - into the cluster, especially if other clients are simultaneously - loading data through other means. -

    -
    -
    -
    -

    - After a data import has been prepared, either by using the - importtsv tool with the - "importtsv.bulk.output" option or by some other MapReduce - job using the HFileOutputFormat, the - completebulkload tool is used to import the data into the - running cluster. -

    -

    - The completebulkload tool simply takes the output path - where importtsv or your MapReduce job put its results, and - the table name to import into. For example: -

    - $ hadoop jar hbase-VERSION.jar completebulkload [-c /path/to/hbase/config/hbase-site.xml] /user/todd/myoutput mytable -

    - The -c config-file option can be used to specify a file - containing the appropriate hbase parameters (e.g., hbase-site.xml) if - not supplied already on the CLASSPATH (In addition, the CLASSPATH must - contain the directory that has the zookeeper configuration file if - zookeeper is NOT managed by HBase). -

    -

    - Note: If the target table does not already exist in HBase, this - tool will create the table automatically.

    -

    - This tool will run quickly, after which point the new data will be visible in - the cluster. -

    -
    -
    -

    - HBase ships with a command line tool called importtsv - which when given files containing data in TSV form can prepare this - data for bulk import into HBase. This tool by default uses the HBase - put API to insert data into HBase one row at a time, but - when the "importtsv.bulk.output" option is used, - importtsv will instead generate files using - HFileOutputFormat which can subsequently be bulk-loaded - into HBase using the completebulkload tool described - above. This tool is available by running "hadoop jar - /path/to/hbase-VERSION.jar importtsv". Running this command - with no arguments prints brief usage information: -

    -
    -Usage: importtsv -Dimporttsv.columns=a,b,c <tablename> <inputdir>
    -
    -Imports the given input directory of TSV data into the specified table.
    -
    -The column names of the TSV data must be specified using the -Dimporttsv.columns
    -option. This option takes the form of comma-separated column names, where each
    -column name is either a simple column family, or a columnfamily:qualifier. The special
    -column name HBASE_ROW_KEY is used to designate that this column should be used
    -as the row key for each imported record. You must specify exactly one column
    -to be the row key, and you must specify a column name for every column that exists in the
    -input data.
    -
    -By default importtsv will load data directly into HBase. To instead generate
    -HFiles of data to prepare for a bulk data load, pass the option:
    -  -Dimporttsv.bulk.output=/path/for/output
    -  Note: if you do not use this option, then the target table must already exist in HBase
    -
    -Other options that may be specified with -D include:
    -  -Dimporttsv.skip.bad.lines=false - fail if encountering an invalid line
    -  '-Dimporttsv.separator=|' - eg separate on pipes instead of tabs
    -  -Dimporttsv.timestamp=currentTimeAsLong - use the specified timestamp for the import
    -  -Dimporttsv.mapper.class=my.Mapper - A user-defined Mapper to use instead of org.apache.hadoop.hbase.mapreduce.TsvImporterMapper
    -
    -
    -
    -

- Although the importtsv tool is useful in many cases, advanced users may - want to generate data programmatically, or import data from other formats. To get - started doing so, dig into ImportTsv.java and check the JavaDoc for - HFileOutputFormat. -

    -

- The import step of the bulk load can also be done programmatically. See the - LoadIncrementalHFiles class for more information. -
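A minimal, hypothetical sketch of that programmatic import (not part of the original file); the output directory reuses the path from the command-line example above, and the table name is an illustrative assumption:

```java
// Hypothetical sketch: completing a bulk load in code instead of via the
// completebulkload command line tool.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class ProgrammaticBulkLoad {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable");          // assumed table name
    // Moves each prepared HFile into the region that owns its key range.
    new LoadIncrementalHFiles(conf).doBulkLoad(new Path("/user/todd/myoutput"), table);
    table.close();
  }
}
```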

    -
    - -
    diff --git a/wms/src/site/xdoc/cygwin.xml b/wms/src/site/xdoc/cygwin.xml deleted file mode 100644 index 2bdce12d..00000000 --- a/wms/src/site/xdoc/cygwin.xml +++ /dev/null @@ -1,242 +0,0 @@ - - - - - Installing HBase on Windows using Cygwin - - - -
    -

    HBase is a distributed, column-oriented store, modeled after Google's BigTable. HBase is built on top of Hadoop for its MapReduce and distributed file system implementation. All these projects are open-source and part of the Apache Software Foundation.

    - -

Being distributed, large-scale platforms, the Hadoop and HBase projects mainly focus on *nix environments for production installations. However, being developed in Java, both projects are fully portable across platforms and, hence, also to the Windows operating system. For ease of development, the projects rely on Cygwin to provide a *nix-like environment on Windows in which to run the shell scripts.

    -
    -
    -

This document explains the intricacies of running HBase on Windows using Cygwin as an all-in-one single-node installation for testing and development. The HBase Overview and QuickStart guides, on the other hand, go a long way in explaining how to set up HBase in more complex deployment scenarios.

    -
    - -
    -

    For running HBase on Windows, 3 technologies are required: Java, Cygwin and SSH. The following paragraphs detail the installation of each of the aforementioned technologies.

    -
    -

HBase depends on the Java Platform, Standard Edition, 6 Release. So the target system has to be provided with at least the Java Runtime Environment (JRE); however, if the system will also be used for development, the Java Development Kit (JDK) is preferred. You can download the latest versions of both from Sun's download page. Installation is a simple GUI wizard that guides you through the process.

    -
    -
    -

    Cygwin is probably the oddest technology in this solution stack. It provides a dynamic link library that emulates most of a *nix environment on Windows. On top of that a whole bunch of the most common *nix tools are supplied. Combined, the DLL with the tools form a very *nix-alike environment on Windows.

    - -

    For installation, Cygwin provides the setup.exe utility that tracks the versions of all installed components on the target system and provides the mechanism for installing or updating everything from the mirror sites of Cygwin.

    - -

To support installation, the setup.exe utility uses 2 directories on the target system: the Root directory for Cygwin (defaults to C:\cygwin), which will become / within the eventual Cygwin installation; and the Local Package directory (e.g. C:\cygsetup), which is the cache where setup.exe stores the packages before they are installed. The cache must not be the same folder as the Cygwin root.

    - -

Perform the following steps to install Cygwin, which are elaborately detailed in the 2nd chapter of the Cygwin User's Guide:

    - -
      -
    1. Make sure you have Administrator privileges on the target system.
    2. -
3. Choose and create your Root and Local Package directories. A good suggestion is to use the C:\cygwin\root and C:\cygwin\setup folders.
    4. -
    5. Download the setup.exe utility and save it to the Local Package directory.
    6. -
    7. Run the setup.exe utility, -
        -
      1. Choose the Install from Internet option,
      2. -
      3. Choose your Root and Local Package folders
      4. -
      5. and select an appropriate mirror.
      6. -
      7. Don't select any additional packages yet, as we only want to install Cygwin for now.
      8. -
      9. Wait for download and install
      10. -
      11. Finish the installation
      12. -
      -
    8. -
    9. Optionally, you can now also add a shortcut to your Start menu pointing to the setup.exe utility in the Local Package folder.
    10. -
    11. Add CYGWIN_HOME system-wide environment variable that points to your Root directory.
    12. -
    13. Add %CYGWIN_HOME%\bin to the end of your PATH environment variable.
    14. -
15. Reboot the system after making changes to the environment variables, otherwise the OS will not be able to find the Cygwin utilities.
    16. -
    17. Test your installation by running your freshly created shortcuts or the Cygwin.bat command in the Root folder. You should end up in a terminal window that is running a Bash shell. Test the shell by issuing following commands: -
        -
1. cd / should take you to the Root directory in Cygwin;
      2. -
3. the LS command should list all files and folders in the current directory.
      4. -
      5. Use the exit command to end the terminal.
      6. -
      -
    18. -
    19. When needed, to uninstall Cygwin you can simply delete the Root and Local Package directory, and the shortcuts that were created during installation.
    20. -
    -
    -
    -

    HBase (and Hadoop) rely on SSH for interprocess/-node communication and launching remote commands. SSH will be provisioned on the target system via Cygwin, which supports running Cygwin programs as Windows services!

    - -
      -
    1. Rerun the setup.exe utility.
    2. -
    3. Leave all parameters as is, skipping through the wizard using the Next button until the Select Packages panel is shown.
    4. -
5. Maximize the window and click the View button to toggle to the list view, which is ordered alphabetically on Package, making it easier to find the packages we'll need.
    6. -
    7. Select the following packages by clicking the status word (normally Skip) so it's marked for installation. Use the Next button to download and install the packages. -
        -
      1. OpenSSH
      2. -
      3. tcp_wrappers
      4. -
      5. diffutils
      6. -
      7. zlib
      8. -
      -
    8. -
    9. Wait for the install to complete and finish the installation.
    10. -
    -
    -
    -

    Download the latest release of HBase from the website. As the HBase distributable is just a zipped archive, installation is as simple as unpacking the archive so it ends up in its final installation directory. Notice that HBase has to be installed in Cygwin and a good directory suggestion is to use /usr/local/ (or [Root directory]\usr\local in Windows slang). You should end up with a /usr/local/hbase-<version> installation in Cygwin.

    - -This finishes installation. We go on with the configuration. -
    -
    -
    -

There are 3 parts left to configure: Java, SSH and HBase itself. The following paragraphs explain each topic in detail.

    -
    -

One important thing to remember in shell scripting in general (i.e. *nix and Windows) is that managing, manipulating and assembling path names that contain spaces can be very hard, due to the need to escape and quote those characters and strings. So we try to stay away from spaces in path names. *nix environments can help us out here very easily by using symbolic links.

    - -
      -
    1. Create a link in /usr/local to the Java home directory by using the following command and substituting the name of your chosen Java environment: -
      LN -s /cygdrive/c/Program\ Files/Java/<jre name> /usr/local/<jre name>
      -
    2. -
3. Test your Java installation by changing directories to your Java folder CD /usr/local/<jre name> and issuing the command ./bin/java -version. This should output your version of the chosen JRE.
    4. -
    -
    -
    -SSH -

    Configuring SSH is quite elaborate, but primarily a question of launching it by default as a Windows service.

    - -
      -
1. On Windows Vista and above make sure you run the Cygwin shell with elevated privileges, by right-clicking on the shortcut and using Run as Administrator.
    2. -
    3. First of all, we have to make sure the rights on some crucial files are correct. Use the commands underneath. You can verify all rights by using the LS -L command on the different files. Also, notice the auto-completion feature in the shell using <TAB> is extremely handy in these situations. -
        -
      1. chmod +r /etc/passwd to make the passwords file readable for all
      2. -
      3. chmod u+w /etc/passwd to make the passwords file writable for the owner
      4. -
      5. chmod +r /etc/group to make the groups file readable for all
      6. -
      -
        -
      1. chmod u+w /etc/group to make the groups file writable for the owner
      2. -
      -
        -
      1. chmod 755 /var to make the var folder writable to owner and readable and executable to all
      2. -
      -
    4. -
    5. Edit the /etc/hosts.allow file using your favorite editor (why not VI in the shell!) and make sure the following two lines are in there before the PARANOID line: -
        -
      1. ALL : localhost 127.0.0.1/32 : allow
      2. -
      3. ALL : [::1]/128 : allow
      4. -
      -
    6. -
    7. Next we have to configure SSH by using the script ssh-host-config -
        -
      1. If this script asks to overwrite an existing /etc/ssh_config, answer yes.
      2. -
      3. If this script asks to overwrite an existing /etc/sshd_config, answer yes.
      4. -
      5. If this script asks to use privilege separation, answer yes.
      6. -
7. If this script asks to install sshd as a service, answer yes. Make sure you started your shell as Administrator!
      8. -
      9. If this script asks for the CYGWIN value, just <enter> as the default is ntsec.
      10. -
      11. If this script asks to create the sshd account, answer yes.
      12. -
      13. If this script asks to use a different user name as service account, answer no as the default will suffice.
      14. -
      15. If this script asks to create the cyg_server account, answer yes. Enter a password for the account.
      16. -
      -
    8. -
9. Start the SSH service using net start sshd or cygrunsrv --start sshd. Notice that cygrunsrv is the utility that makes the process run as a Windows service. Confirm that you see a message stating that the CYGWIN sshd service was started successfully.
    10. -
11. Harmonize the Windows and Cygwin user accounts by using the commands: -
        -
      1. mkpasswd -cl > /etc/passwd
      2. -
      3. mkgroup --local > /etc/group
      4. -
      -
    12. -
    13. Test the installation of SSH: -
        -
      1. Open a new Cygwin terminal
      2. -
      3. Use the command whoami to verify your userID
      4. -
      5. Issue an ssh localhost to connect to the system itself -
          -
        1. Answer yes when presented with the server's fingerprint
        2. -
        3. Issue your password when prompted
        4. -
        5. test a few commands in the remote session
        6. -
        7. The exit command should take you back to your first shell in Cygwin
        8. -
        -
      6. -
      7. Exit should terminate the Cygwin shell.
      8. -
      -
    14. -
    -
    -
    -If all previous configurations are working properly, we just need some tinkering at the HBase config files to properly resolve on Windows/Cygwin. All files and paths referenced here start from the HBase [installation directory] as working directory. -
      -
1. HBase uses ./conf/hbase-env.sh to configure its dependencies on the runtime environment. Copy and uncomment the following lines just underneath their originals, changing them to fit your environment. They should read something like: -
        -
      1. export JAVA_HOME=/usr/local/<jre name>
      2. -
3. export HBASE_IDENT_STRING=$HOSTNAME as this most likely does not include spaces.
      4. -
      -
    2. -
3. HBase uses the ./conf/hbase-default.xml file for configuration. Some properties do not resolve to existing directories because the JVM runs on Windows. This is the major issue to keep in mind when working with Cygwin: within the shell all paths are *nix-alike, hence relative to the root /. However, every parameter that is to be consumed by the Windows processes themselves needs to be a Windows setting, hence C:\-alike. Change the following properties in the configuration file, adjusting paths where necessary to conform with your own installation: -
        -
      1. hbase.rootdir must read e.g. file:///C:/cygwin/root/tmp/hbase/data
      2. -
      3. hbase.tmp.dir must read C:/cygwin/root/tmp/hbase/tmp
      4. -
      5. hbase.zookeeper.quorum must read 127.0.0.1 because for some reason localhost doesn't seem to resolve properly on Cygwin.
      6. -
      -
    4. -
    5. Make sure the configured hbase.rootdir and hbase.tmp.dir directories exist and have the proper rights set up e.g. by issuing a chmod 777 on them.
    6. -
    -
    -
    -
    -Testing -

    -This should conclude the installation and configuration of HBase on Windows using Cygwin. So it's time to test it. -

      -
    1. Start a Cygwin terminal, if you haven't already.
    2. -
    3. Change directory to HBase installation using CD /usr/local/hbase-<version>, preferably using auto-completion.
    4. -
    5. Start HBase using the command ./bin/start-hbase.sh -
        -
      1. When prompted to accept the SSH fingerprint, answer yes.
      2. -
      3. When prompted, provide your password. Maybe multiple times.
      4. -
      5. When the command completes, the HBase server should have started.
      6. -
      7. However, to be absolutely certain, check the logs in the ./logs directory for any exceptions.
      8. -
      -
    6. -
    7. Next we start the HBase shell using the command ./bin/hbase shell
    8. -
    9. We run some simple test commands -
        -
      1. Create a simple table using command create 'test', 'data'
      2. -
      3. Verify the table exists using the command list
      4. -
      5. Insert data into the table using e.g. -
        put 'test', 'row1', 'data:1', 'value1'
        -put 'test', 'row2', 'data:2', 'value2'
        -put 'test', 'row3', 'data:3', 'value3'
        -
      6. -
7. List all rows in the table using the command scan 'test' that should list all the rows previously inserted. Notice how 3 new columns were added without changing the schema!
      8. -
      9. Finally we get rid of the table by issuing disable 'test' followed by drop 'test' and verified by list which should give an empty listing.
      10. -
      -
    10. -
    11. Leave the shell by exit
    12. -
    13. To stop the HBase server issue the ./bin/stop-hbase.sh command. And wait for it to complete!!! Killing the process might corrupt your data on disk.
    14. -
    15. In case of problems, -
        -
      1. verify the HBase logs in the ./logs directory.
      2. -
      3. Try to fix the problem
      4. -
      5. Get help on the forums or IRC (#hbase@freenode.net). People are very active and keen to help out!
      6. -
7. Stop, restart and retest the server.
      8. -
      -
    16. -
    -

    -
    - -
    -

-Now that your HBase server is running, start coding and build that next killer app on this particular, but scalable, datastore!

    -
    - -
    diff --git a/wms/src/site/xdoc/index.xml b/wms/src/site/xdoc/index.xml deleted file mode 100644 index 2c330fe7..00000000 --- a/wms/src/site/xdoc/index.xml +++ /dev/null @@ -1,85 +0,0 @@ - - - - - HBase Home - - - - -
    -

    HBase is the Hadoop database. Think of it as a distributed scalable Big Data store. -

    -

    When Would I Use HBase?

    -

    - Use HBase when you need random, realtime read/write access to your Big Data. - This project's goal is the hosting of very large tables -- billions of rows X millions of columns -- atop clusters of commodity hardware. -HBase is an open-source, distributed, versioned, column-oriented store modeled after Google's Bigtable: A Distributed Storage System for Structured Data by Chang et al. - Just as Bigtable leverages the distributed data storage provided by the Google File System, HBase provides Bigtable-like capabilities on top of Hadoop and HDFS. -

    -

    Features

    -

    -HBase provides: -

      -
    • Linear and modular scalability. -
    • -
    • Strictly consistent reads and writes. -
    • -
    • Automatic and configurable sharding of tables -
    • -
    • Automatic failover support between RegionServers. -
    • -
    • Convenient base classes for backing Hadoop MapReduce jobs with HBase tables. -
    • -
    • Easy to use Java API for client access. -
    • -
    • Block cache and Bloom Filters for real-time queries. -
    • -
    • Query predicate push down via server side Filters -
    • -
    • Thrift gateway and a REST-ful Web service that supports XML, Protobuf, and binary data encoding options -
    • -
    • Extensible jruby-based (JIRB) shell -
    • -
    • Support for exporting metrics via the Hadoop metrics subsystem to files or Ganglia; or via JMX -
    • -
    -

    -

    Where Can I Get More Information?

    -

    See the Architecture Overview, the Apache HBase Reference Guide FAQ, - and the other documentation links on the left! -

    -
    -
    -

    May 22nd, 2012 HBaseCon2012 in San Francisco

    -

    January 23rd, 2012 HBase 0.92.0 released. Download it!

    -

    January 19th, 2012 Meetup @ EBay

    -

    December 23rd, 2011 HBase 0.90.5 released. Download it!

    -

    November 29th, 2011 Developer Pow-Wow in SF at Salesforce HQ

    -

    November 7th, 2011 HBase Meetup in NYC (6PM) at the AppNexus office

    -

    August 22nd, 2011 HBase Hackathon (11AM) and Meetup (6PM) at FB in PA

    -

    June 30th, 2011 HBase Contributor Day, the day after the Hadoop Summit hosted by Y!

    -

    June 8th, 2011 HBase Hackathon in Berlin to coincide with Berlin Buzzwords

    -

    May 19th, 2011 HBase 0.90.3 released. Download it!

    -

    April 12th, 2011 HBase 0.90.2 released. Download it!

    -

    Old News

    -
    - - -
    diff --git a/wms/src/site/xdoc/metrics.xml b/wms/src/site/xdoc/metrics.xml deleted file mode 100644 index 9ff14e60..00000000 --- a/wms/src/site/xdoc/metrics.xml +++ /dev/null @@ -1,147 +0,0 @@ - - - - - - HBase Metrics - - - - -
    -

    - HBase emits Hadoop metrics. -

    -
    -
    -

First read up on Hadoop metrics. - If you are using ganglia, the GangliaMetrics - wiki page is a useful read.

    -

To have HBase emit metrics, edit $HBASE_HOME/conf/hadoop-metrics.properties - and enable metric 'contexts' per plugin. As of this writing, hadoop supports - file and ganglia plugins. - Yes, the hbase metrics file is named hadoop-metrics rather than - hbase-metrics because currently at least the hadoop metrics system has the - properties filename hardcoded. Per metrics context, - comment out the NullContext and enable one or more plugins instead. -

    -

    - If you enable the hbase context, on regionservers you'll see total requests since last - metric emission, count of regions and storefiles as well as a count of memstore size. - On the master, you'll see a count of the cluster's requests. -

    -

    - Enabling the rpc context is good if you are interested in seeing - metrics on each hbase rpc method invocation (counts and time taken). -

    -

    - The jvm context is - useful for long-term stats on running hbase jvms -- memory used, thread counts, etc. - As of this writing, if more than one jvm is running emitting metrics, at least - in ganglia, the stats are aggregated rather than reported per instance. -

    -
    - -
    -

    - In addition to the standard output contexts supported by the Hadoop - metrics package, you can also export HBase metrics via Java Management - Extensions (JMX). This will allow viewing HBase stats in JConsole or - any other JMX client. -

    -
    -

- To enable JMX support in HBase, first edit - $HBASE_HOME/conf/hadoop-metrics.properties to support - metrics refreshing. (If you're running 0.94.1 and above, or have already configured - hadoop-metrics.properties for another output context, - you can skip this step). -

# Configuration of the "hbase" context for null
hbase.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
hbase.period=60

# Configuration of the "jvm" context for null
jvm.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
jvm.period=60

# Configuration of the "rpc" context for null
rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
rpc.period=60
    -
    -

    For remote access, you will need to configure JMX remote passwords and access profiles. Create the files:

    -
    -
    $HBASE_HOME/conf/jmxremote.passwd (set permissions to 600):

monitorRole monitorpass
controlRole controlpass

    $HBASE_HOME/conf/jmxremote.access:

monitorRole readonly
controlRole readwrite
    -
    -
    -
    -

    Finally, edit the $HBASE_HOME/conf/hbase-env.sh script to add JMX support:

    -
    -
    $HBASE_HOME/conf/hbase-env.sh

    Add the lines:

HBASE_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false"
HBASE_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.password.file=$HBASE_HOME/conf/jmxremote.passwd"
HBASE_JMX_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.access.file=$HBASE_HOME/conf/jmxremote.access"

export HBASE_MASTER_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10101"
export HBASE_REGIONSERVER_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxremote.port=10102"

    After restarting the processes you want to monitor, you should be able to run JConsole (included with the JDK since JDK 5.0) to view the statistics via JMX. HBase MBeans are exported under the hadoop domain in JMX.
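    For example, assuming the master JMX port configured above (10101) and a placeholder hostname, JConsole can attach remotely with:

% jconsole master-host.example.com:10101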

    -
    -
    -

    - For more information on understanding HBase metrics, see the metrics section in the HBase book. -

    -
    -
    - -
    diff --git a/wms/src/site/xdoc/old_news.xml b/wms/src/site/xdoc/old_news.xml deleted file mode 100644 index 80c388c4..00000000 --- a/wms/src/site/xdoc/old_news.xml +++ /dev/null @@ -1,54 +0,0 @@ - - - - - - - - - Old News - - - -
    March 21st, HBase 0.92 Hackathon at StumbleUpon, SF

    February 22nd, HUG12: February HBase User Group at StumbleUpon SF

    December 13th, HBase Hackathon: Coprocessor Edition

    November 19th, Hadoop HUG in London is all about HBase

    November 15-19th, Devoxx features HBase Training and multiple HBase presentations

    October 12th, HBase-related presentations by core contributors and users at Hadoop World 2010

    October 11th, HUG-NYC: HBase User Group NYC Edition (Night before Hadoop World)

    June 30th, HBase Contributor Workshop (Day after Hadoop Summit)

    May 10th, 2010: HBase graduates from Hadoop sub-project to Apache Top Level Project

    Signup for HBase User Group Meeting, HUG10 hosted by Trend Micro, April 19th, 2010

    HBase User Group Meeting, HUG9 hosted by Mozilla, March 10th, 2010

    Sign up for the HBase User Group Meeting, HUG8, January 27th, 2010 at StumbleUpon in SF

    September 8th, 2010: HBase 0.20.0 is faster, stronger, slimmer, and sweeter tasting than any previous HBase release. Get it off the Releases page.

    ApacheCon in Oakland: November 2-6th, 2009: The Apache Foundation will be celebrating its 10th anniversary in beautiful Oakland by the Bay. Lots of good talks and meetups including an HBase presentation by a couple of the lads.

    HBase at Hadoop World in NYC: October 2nd, 2009: A few of us will be talking on Practical HBase out east at Hadoop World: NYC.

    HUG7 and HBase Hackathon: August 7th-9th, 2009 at StumbleUpon in SF: Sign up for the HBase User Group Meeting, HUG7 or for the Hackathon or for both (all are welcome!).

    June, 2009 -- HBase at HadoopSummit2009 and at NOSQL: See the presentations

    March 3rd, 2009 -- HUG6: HBase User Group 6

    January 30th, 2009 -- LA Hbackathon: HBase January Hackathon Los Angeles at Streamy in Manhattan Beach
    diff --git a/wms/src/site/xdoc/pseudo-distributed.xml b/wms/src/site/xdoc/pseudo-distributed.xml deleted file mode 100644 index 8677ddc2..00000000 --- a/wms/src/site/xdoc/pseudo-distributed.xml +++ /dev/null @@ -1,77 +0,0 @@ - - - - - - - - -Running HBase in pseudo-distributed mode - - - - -

    This document augments what is described in the HBase 'Getting Started' guide, in the Distributed Operation: Pseudo- and Fully-distributed modes section. In particular, it describes scripts that allow you to start extra masters and regionservers when running in pseudo-distributed mode.

    1. Copy the pseudo-distributed suggested configuration file (feel free to take a peek and understand what it's doing):
       % cp conf/hbase-site.xml{.pseudo-distributed.template,}

    2. (Optional) Start up pseudo-distributed HDFS.
       1. If you do, go to conf/hbase-site.xml and uncomment the 'hbase.rootdir' property.
       2. Additionally, if you want to test HBase with high data durability enabled, also uncomment the 'dfs.support.append' property.

    3. Start up the initial HBase cluster:
       % bin/start-hbase.sh
       1. To start up an extra backup master (or masters) on the same server, run
          % bin/local-master-backup.sh start 1
          Here the '1' means use ports 60001 & 60011, and this backup master's logfile will be at logs/hbase-${USER}-1-master-${HOSTNAME}.log.
          To start up multiple backup masters, run % bin/local-master-backup.sh start 2 3. You can start up to 9 backup masters (10 total).
       2. To start up more regionservers, run
          % bin/local-regionservers.sh start 1
          where '1' means use ports 60201 & 60301 and its logfile will be at logs/hbase-${USER}-1-regionserver-${HOSTNAME}.log.
          To add 4 more regionservers in addition to the one you just started, run % bin/local-regionservers.sh start 2 3 4 5. Up to 99 extra regionservers are supported (100 total).

    4. To stop the cluster:
       1. Assuming you want to stop master backup # 1, run
          % cat /tmp/hbase-${USER}-1-master.pid | xargs kill -9
          Note that bin/local-master-backup.sh stop 1 will try to stop the cluster along with the master.
       2. To stop an individual regionserver, run
          % bin/local-regionservers.sh stop 1
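    For quick reference, the steps above can be strung together as follows; this is only a sketch of the commands already described in this document, not an additional script shipped with the project:

% cp conf/hbase-site.xml{.pseudo-distributed.template,}
% bin/start-hbase.sh
% bin/local-master-backup.sh start 1                     # backup master on ports 60001 & 60011
% bin/local-regionservers.sh start 2 3 4 5               # four extra regionservers
% bin/local-regionservers.sh stop 1                      # stop an individual regionserver
% cat /tmp/hbase-${USER}-1-master.pid | xargs kill -9    # stop backup master # 1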
    - diff --git a/wms/src/site/xdoc/replication.xml b/wms/src/site/xdoc/replication.xml deleted file mode 100644 index 8233520e..00000000 --- a/wms/src/site/xdoc/replication.xml +++ /dev/null @@ -1,401 +0,0 @@ - - - - - - - - - HBase Replication - - - -
    -

    HBase replication is a way to copy data between HBase deployments. It can serve as a disaster recovery solution and can help provide higher availability at the HBase layer. It can also serve more practical purposes; for example, as a way to easily copy edits from a web-facing cluster to a "MapReduce" cluster which will process old and new data and ship back the results automatically.

    -

    The basic architecture pattern used for HBase replication is (HBase cluster) master-push; it is much easier to keep track of what's currently being replicated since each region server has its own write-ahead log (aka WAL or HLog), just like other well-known solutions such as MySQL master/slave replication, where there is only one binlog to keep track of. One master cluster can replicate to any number of slave clusters, and each region server will participate in replicating its own stream of edits. For more information on the different properties of master/slave replication and other types of replication, please consult How Google Serves Data From Multiple Datacenters.

    -

    - The replication is done asynchronously, meaning that the clusters can - be geographically distant, the links between them can be offline for - some time, and rows inserted on the master cluster won’t be - available at the same time on the slave clusters (eventual consistency). -

    -

    - The replication format used in this design is conceptually the same as - - MySQL’s statement-based replication . Instead of SQL statements, whole - WALEdits (consisting of multiple cell inserts coming from the clients' - Put and Delete) are replicated in order to maintain atomicity. -

    -

    - The HLogs from each region server are the basis of HBase replication, - and must be kept in HDFS as long as they are needed to replicate data - to any slave cluster. Each RS reads from the oldest log it needs to - replicate and keeps the current position inside ZooKeeper to simplify - failure recovery. That position can be different for every slave - cluster, same for the queue of HLogs to process. -

    -

    - The clusters participating in replication can be of asymmetric sizes - and the master cluster will do its “best effort” to balance the stream - of replication on the slave clusters by relying on randomization. -

    -

    - As of version 0.92 HBase supports master/master and cyclic replication as - well as replication to multiple slaves. -

    - -
    -
    -

    - The guide on enabling and using cluster replication is contained - in the API documentation shipped with your HBase distribution. -

    -

    - The most up-to-date documentation is - - available at this address. -

    -
    -
    -

    - The following sections describe the life of a single edit going from a - client that communicates with a master cluster all the way to a single - slave cluster. -

    -
    -

    The client uses an HBase API that sends a Put, Delete or ICV to a region server. The key values are transformed into a WALEdit by the region server, and the edit is inspected by the replication code which, for each family that is scoped for replication, adds the scope to the edit. The edit is appended to the current WAL and is then applied to its MemStore.

    -

    In a separate thread, the edit is read from the log (as part of a batch) and only the KVs that are replicable are kept (that is, they are part of a family scoped GLOBAL in the family's schema, are not from a catalog table such as .META. or -ROOT-, and did not originate in the target slave cluster, in the case of cyclic replication).

    -

    - The edit is then tagged with the master's cluster UUID. - When the buffer is filled, or the reader hits the end of the file, - the buffer is sent to a random region server on the slave cluster. -

    -

    Synchronously, the region server that receives the edits reads them sequentially and separates each of them into buffers, one per table. Once all edits are read, each buffer is flushed using the normal HBase client (HTables managed by an HTablePool). This is done in order to leverage parallel insertion (MultiPut). The master's cluster UUID is retained in the edits applied at the slave cluster in order to allow cyclic replication.

    -

    - Back in the master cluster's region server, the offset for the current - WAL that's being replicated is registered in ZooKeeper. -

    -
    -
    -

    - The edit is inserted in the same way. -

    -

    In the separate thread, the region server reads, filters and buffers the log edits the same way as during normal processing. The slave region server that's contacted doesn't answer the RPC, so the master region server will sleep and retry up to a configured number of times. If the slave RS still isn't available, the master cluster RS will select a new subset of RSs to replicate to and will retry sending the buffer of edits.

    -

    In the meantime, the WALs will be rolled and stored in a queue in ZooKeeper. Logs that are archived by their region server (archiving is basically moving a log from the region server's logs directory to a central logs archive directory) will have their paths updated in the in-memory queue of the replicating thread.

    -

    - When the slave cluster is finally available, the buffer will be applied - the same way as during normal processing. The master cluster RS will then - replicate the backlog of logs. -

    -
    -
    -
    -

    - This section describes in depth how each of replication's internal - features operate. -

    -
    -

    When a master cluster RS initiates a replication source to a slave cluster, it first connects to the slave's ZooKeeper ensemble using the provided cluster key (that key is composed of the value of hbase.zookeeper.quorum, zookeeper.znode.parent and hbase.zookeeper.property.clientPort). It then scans the "rs" directory to discover all the available sinks (region servers that are accepting incoming streams of edits to replicate) and will randomly choose a subset of them using a configured ratio (which has a default value of 10%). For example, if a slave cluster has 150 machines, 15 will be chosen as potential recipients for edits that this master cluster RS will be sending. Since this is done by all master cluster RSs, the probability that all slave RSs are used is very high, and this method works for clusters of any size. For example, a master cluster of 10 machines replicating to a slave cluster of 5 machines with a ratio of 10% means that the master cluster RSs will each choose one machine at random; thus the chance of overlapping and of fully using the slave cluster is higher.
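    As an aside, that cluster key is the same string used when registering a peer. A hedged sketch using the HBase shell's add_peer command, where the peer id and ZooKeeper hosts are placeholders, would be:

hbase> add_peer '2', "slave-zk1,slave-zk2,slave-zk3:2181:/hbase"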

    -
    -
    -

    Every master cluster RS has its own znode in the replication znodes hierarchy. It contains one znode per peer cluster (if there are 5 slave clusters, 5 znodes are created), and each of these contains a queue of HLogs to process. Each of these queues will track the HLogs created by that RS, but they can differ in size. For example, if one slave cluster becomes unavailable for some time then the HLogs should not be deleted, so they need to stay in the queue (while the others are processed). See the section named "Region server failover" for an example.

    -

    When a source is instantiated, it contains the current HLog that the region server is writing to. During log rolling, the new file is added to the queue of each slave cluster's znode just before it's made available. This ensures that all the sources are aware that a new log exists before the HLog is able to append edits into it, but this operation is now more expensive. The queue items are discarded when the replication thread cannot read more entries from a file (because it reached the end of the last block) and there are other files in the queue. This means that if a source is up-to-date and replicates from the log that the region server writes to, reading up to the "end" of the current file won't delete the item in the queue.

    -

    When a log is archived (because it's not used anymore or because there are too many of them per hbase.regionserver.maxlogs, typically because the insertion rate is faster than region flushing), it will notify the source threads that the path for that log changed. If a particular source is already done with it, it will just ignore the message. If the log is in the queue, the path will be updated in memory. If the log is currently being replicated, the change will be done atomically so that the reader doesn't try to open the file when it's already moved. Also, moving a file is a NameNode operation so, if the reader is currently reading the log, it won't generate any exception.

    -
    -
    -

    By default, a source will try to read from a log file and ship log entries as fast as possible to a sink. This is first limited by the filtering of log entries; only KeyValues that are scoped GLOBAL and that don't belong to catalog tables will be retained. A second limit is imposed on the total size of the list of edits to replicate per slave, which by default is 64MB. This means that a master cluster RS with 3 slaves will use at most 192MB to store data to replicate. This doesn't account for filtered data that hasn't yet been garbage collected.
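    If the 64MB per-slave buffer needs tuning, my understanding is that it is governed by replication.source.size.capacity in hbase-site.xml; treat the property name and default below as an assumption to verify against your HBase version:

<property>
  <!-- assumed property controlling the per-slave edit buffer; 67108864 bytes = the 64MB default described above -->
  <name>replication.source.size.capacity</name>
  <value>67108864</value>
</property>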

    -

    Once the maximum size of edits has been buffered or the reader hits the end of the log file, the source thread will stop reading and will choose at random a sink to replicate to (from the list that was generated by keeping only a subset of slave RSs). It will directly issue an RPC to the chosen machine and will wait for the method to return. If it's successful, the source will determine whether the current file has been emptied or whether it should continue to read from it. If the former, it will delete the znode in the queue. If the latter, it will register the new offset in the log's znode. If the RPC threw an exception, the source will retry 10 times before trying to find a different sink.

    -
    -
    -

    If replication isn't enabled, the master's log-cleaning thread will delete old logs using a configured TTL. This doesn't work well with replication since archived logs past their TTL may still be in a queue. Thus, the default behavior is augmented so that if a log is past its TTL, the cleaning thread will look up every queue until it finds the log (while caching the ones it finds). If it's not found, the log will be deleted. The next time it has to look for a log, it will first use its cache.

    -
    -
    -

    As long as region servers don't fail, keeping track of the logs in ZK doesn't add any value. Unfortunately, they do fail, and since ZooKeeper is highly available we can count on it and its semantics to help us manage the transfer of the queues.

    -

    - All the master cluster RSs keep a watcher on every other one of them to be - notified when one dies (just like the master does). When it happens, - they all race to create a znode called "lock" inside the dead RS' znode - that contains its queues. The one that creates it successfully will - proceed by transferring all the queues to its own znode (one by one - since ZK doesn't support the rename operation) and will delete all the - old ones when it's done. The recovered queues' znodes will be named - with the id of the slave cluster appended with the name of the dead - server. -

    -

    - Once that is done, the master cluster RS will create one new source thread per - copied queue, and each of them will follow the read/filter/ship pattern. - The main difference is that those queues will never have new data since - they don't belong to their new region server, which means that when - the reader hits the end of the last log, the queue's znode will be - deleted and the master cluster RS will close that replication source. -

    -

    - For example, consider a master cluster with 3 region servers that's - replicating to a single slave with id '2'. The following hierarchy - represents what the znodes layout could be at some point in time. We - can see the RSs' znodes all contain a "peers" znode that contains a - single queue. The znode names in the queues represent the actual file - names on HDFS in the form "address,port.timestamp". -

    -
/hbase/replication/rs/
                      1.1.1.1,60020,123456780/
                          2/
                              1.1.1.1,60020.1234  (Contains a position)
                              1.1.1.1,60020.1265
                      1.1.1.2,60020,123456790/
                          2/
                              1.1.1.2,60020.1214  (Contains a position)
                              1.1.1.2,60020.1248
                              1.1.1.2,60020.1312
                      1.1.1.3,60020,123456630/
                          2/
                              1.1.1.3,60020.1280  (Contains a position)
    -

    Now let's say that 1.1.1.2 loses its ZK session. The survivors will race to create a lock, and for some reason 1.1.1.3 wins. It will then start transferring all the queues to its local peers znode by appending the name of the dead server. Right before 1.1.1.3 is able to clean up the old znodes, the layout will look like the following:

    -
/hbase/replication/rs/
                      1.1.1.1,60020,123456780/
                          2/
                              1.1.1.1,60020.1234  (Contains a position)
                              1.1.1.1,60020.1265
                      1.1.1.2,60020,123456790/
                          lock
                          2/
                              1.1.1.2,60020.1214  (Contains a position)
                              1.1.1.2,60020.1248
                              1.1.1.2,60020.1312
                      1.1.1.3,60020,123456630/
                          2/
                              1.1.1.3,60020.1280  (Contains a position)

                          2-1.1.1.2,60020,123456790/
                              1.1.1.2,60020.1214  (Contains a position)
                              1.1.1.2,60020.1248
                              1.1.1.2,60020.1312
    -

    - Some time later, but before 1.1.1.3 is able to finish replicating the - last HLog from 1.1.1.2, let's say that it dies too (also some new logs - were created in the normal queues). The last RS will then try to lock - 1.1.1.3's znode and will begin transferring all the queues. The new - layout will be: -

    -
/hbase/replication/rs/
                      1.1.1.1,60020,123456780/
                          2/
                              1.1.1.1,60020.1378  (Contains a position)

                          2-1.1.1.3,60020,123456630/
                              1.1.1.3,60020.1325  (Contains a position)
                              1.1.1.3,60020.1401

                          2-1.1.1.2,60020,123456790-1.1.1.3,60020,123456630/
                              1.1.1.2,60020.1312  (Contains a position)
                      1.1.1.3,60020,123456630/
                          lock
                          2/
                              1.1.1.3,60020.1325  (Contains a position)
                              1.1.1.3,60020.1401

                          2-1.1.1.2,60020,123456790/
                              1.1.1.2,60020.1312  (Contains a position)
    -
    -
    -
    -
    -

    - Yes, this is for much later. -

    -
    -
    -

    You can use the HBase-provided utility called CopyTable from the package org.apache.hadoop.hbase.mapreduce in order to have a distcp-like tool to bulk copy data.
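    A hedged invocation sketch, where the table name and ZooKeeper quorum are placeholders and the flags should be checked against your HBase version:

% hbase org.apache.hadoop.hbase.mapreduce.CopyTable \
    --peer.adr=slave-zk1,slave-zk2,slave-zk3:2181:/hbase TestTable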

    -
    -
    -

    - Yes, this behavior would help a lot but it's not currently available - in HBase (BatchUpdate had that, but it was lost in the new API). -

    -
    -
    -
    -

    - Here's a list of all the jiras that relate to major issues or missing - features in the replication implementation. -

    -
      -
    1. HBASE-2611: if a region server dies while recovering the queues of another dead RS, we will miss the data from the queues that weren't copied.
    - -
    diff --git a/wms/src/site/xdoc/sponsors.xml b/wms/src/site/xdoc/sponsors.xml deleted file mode 100644 index e39730bb..00000000 --- a/wms/src/site/xdoc/sponsors.xml +++ /dev/null @@ -1,35 +0,0 @@ - - - - - Installing HBase on Windows using Cygwin - - - -
    -

    The companies below have been gracious enough to provide their commercial tool offerings free of charge to the Apache HBase project.

    -

    -
    - -