From a7083ca201cf622cb2a1b3b30f01908cac48fc93 Mon Sep 17 00:00:00 2001 From: mujtabachohan Date: Thu, 21 Feb 2013 14:36:55 -0800 Subject: [PATCH] Remove all javadoc warnings --- build.txt | 9 ++----- pom.xml | 26 +++++++++---------- .../salesforce/phoenix/cache/GlobalCache.java | 2 +- .../phoenix/compile/ColumnProjector.java | 1 - .../phoenix/compile/ExpressionCompiler.java | 6 +++-- .../phoenix/compile/FromCompiler.java | 1 - .../phoenix/compile/OrderByCompiler.java | 2 +- .../phoenix/compile/ProjectionCompiler.java | 2 +- .../phoenix/compile/QueryCompiler.java | 1 - .../phoenix/compile/RowProjector.java | 1 - .../phoenix/compile/StatementPlan.java | 1 - .../phoenix/coprocessor/MetaDataProtocol.java | 10 +++---- .../phoenix/execute/AggregateRowCounter.java | 3 +-- .../phoenix/execute/BasicQueryPlan.java | 8 +++--- .../expression/aggregator/Aggregator.java | 2 -- .../aggregator/ServerAggregators.java | 2 +- .../function/FunctionExpression.java | 2 +- .../function/RegexpReplaceFunction.java | 4 +-- .../expression/function/RoundFunction.java | 2 +- .../expression/function/TruncFunction.java | 2 +- .../iterate/AggregatingResultIterator.java | 2 +- .../FilterAggregatingResultIterator.java | 2 +- .../phoenix/iterate/ParallelIterators.java | 2 -- .../phoenix/jdbc/PhoenixConnection.java | 2 +- .../phoenix/join/HashCacheClient.java | 5 ++-- .../phoenix/parse/FunctionParseNode.java | 1 - .../parse/RHSLiteralStatementRewriter.java | 1 - .../query/ConnectionQueryServices.java | 2 +- .../phoenix/schema/PStringColumn.java | 2 +- .../com/salesforce/phoenix/schema/PTable.java | 4 +-- .../phoenix/schema/RowKeyValueAccessor.java | 1 - .../salesforce/phoenix/util/CSVLoader.java | 2 +- .../salesforce/phoenix/util/KeyValueUtil.java | 3 +-- .../salesforce/phoenix/util/ResultUtil.java | 3 --- .../salesforce/phoenix/util/SchemaUtil.java | 3 --- 35 files changed, 49 insertions(+), 73 deletions(-) diff --git a/build.txt b/build.txt index 81bf637a..a3c5fed8 100644 --- a/build.txt +++ b/build.txt @@ -14,7 +14,7 @@ Phoenix uses Maven (3.X) to build all its necessary resources. and optionally, to just skip all the tests and build the jars: $ mvn package -DskipTests -3. +Note: javadocs are generated in target/apidocs ## Importing into eclipse ========================= @@ -27,13 +27,8 @@ Use the m2e eclipse plugin and do Import->Maven Project and just pick the root ' 1. All tests $ mvn clean test -## Javadocs -=========== -Javadoc are generated in /target/site/apidocs - $ mvn site - ## Findbugs =========== Findbugs report is generated in /target/site $ mvn site - \ No newline at end of file + diff --git a/pom.xml b/pom.xml index 8e029920..902e4713 100644 --- a/pom.xml +++ b/pom.xml @@ -268,19 +268,19 @@ - maven-javadoc-plugin - 2.6.1 - - true - - - - site - - javadoc - - - + org.apache.maven.plugins + maven-javadoc-plugin + + true + + + + attach-javadocs + + jar + + + org.apache.maven.plugins diff --git a/src/main/java/com/salesforce/phoenix/cache/GlobalCache.java b/src/main/java/com/salesforce/phoenix/cache/GlobalCache.java index 080bb4ce..14af4673 100644 --- a/src/main/java/com/salesforce/phoenix/cache/GlobalCache.java +++ b/src/main/java/com/salesforce/phoenix/cache/GlobalCache.java @@ -87,7 +87,7 @@ public ConcurrentHashMap getMetaDataCache() { * used in which case a global tenant cache is returned. * @param config the HBase configuration * @param tenantId the tenant ID or null if not applicable. 
- * @return + * @return TenantCache */ public static TenantCache getTenantCache(Configuration config, ImmutableBytesWritable tenantId) { GlobalCache globalCache = GlobalCache.getInstance(config); diff --git a/src/main/java/com/salesforce/phoenix/compile/ColumnProjector.java b/src/main/java/com/salesforce/phoenix/compile/ColumnProjector.java index 3093c5ed..4e2cba53 100644 --- a/src/main/java/com/salesforce/phoenix/compile/ColumnProjector.java +++ b/src/main/java/com/salesforce/phoenix/compile/ColumnProjector.java @@ -67,7 +67,6 @@ public interface ColumnProjector { * Get the value of the column, coercing it if necessary to the specified type * @param tuple the row containing the column * @param type the type to which to coerce the binary value - * @param ptr TODO * @param ptr used to retrieve the value * @return the object representation of the column value. * @throws SQLException diff --git a/src/main/java/com/salesforce/phoenix/compile/ExpressionCompiler.java b/src/main/java/com/salesforce/phoenix/compile/ExpressionCompiler.java index 18190cb9..430b10d1 100644 --- a/src/main/java/com/salesforce/phoenix/compile/ExpressionCompiler.java +++ b/src/main/java/com/salesforce/phoenix/compile/ExpressionCompiler.java @@ -301,8 +301,6 @@ private Expression wrapGroupByExpression(Expression expression) { /** * Add a Function expression to the expression manager. * Derived classes may use this as a hook to trap all function additions. - * @param node a function expression node - * @param children the child expression arguments to the function expression node. * @return a Function expression * @throws SQLException if the arguments are invalid for the function. */ @@ -311,6 +309,10 @@ protected Expression addFunction(FunctionExpression func) { } @Override + /** + * @param node a function expression node + * @param children the child expression arguments to the function expression node. + */ public Expression visitLeave(FunctionParseNode node, List children) throws SQLException { children = node.validate(children, context); FunctionExpression func = node.create(children, context); diff --git a/src/main/java/com/salesforce/phoenix/compile/FromCompiler.java b/src/main/java/com/salesforce/phoenix/compile/FromCompiler.java index 1c471fee..bb464940 100644 --- a/src/main/java/com/salesforce/phoenix/compile/FromCompiler.java +++ b/src/main/java/com/salesforce/phoenix/compile/FromCompiler.java @@ -82,7 +82,6 @@ public static ColumnResolver getResolver(DropColumnStatement statement, PhoenixC * Iterate through the nodes in the FROM clause to build a column resolver used to * lookup a column given the name and alias. 
* @param statement the select statement - * @param schema the schema * @return the column resolver * @throws SQLException * @throws SQLFeatureNotSupportedException if unsupported constructs appear in the FROM diff --git a/src/main/java/com/salesforce/phoenix/compile/OrderByCompiler.java b/src/main/java/com/salesforce/phoenix/compile/OrderByCompiler.java index d5149ffb..2afdaab3 100644 --- a/src/main/java/com/salesforce/phoenix/compile/OrderByCompiler.java +++ b/src/main/java/com/salesforce/phoenix/compile/OrderByCompiler.java @@ -75,7 +75,7 @@ public List getOrderingColumns() { * @param context the query context for tracking various states * associated with the given select statement * @param limit - * @param groupByExpressions the list of columns in the GROUP BY clause + * @param groupBy the list of columns in the GROUP BY clause * @return the list of columns in the ORDER BY clause * @throws SQLException */ diff --git a/src/main/java/com/salesforce/phoenix/compile/ProjectionCompiler.java b/src/main/java/com/salesforce/phoenix/compile/ProjectionCompiler.java index e13dbaa1..03e7f023 100644 --- a/src/main/java/com/salesforce/phoenix/compile/ProjectionCompiler.java +++ b/src/main/java/com/salesforce/phoenix/compile/ProjectionCompiler.java @@ -73,7 +73,7 @@ private ProjectionCompiler() { * @param statement SQL statement being compiled * @param context query context kept between compilation of different query clauses * @param limit maximum number of rows to scan during query execution or null if unbounded - * @param groupByExpressions list of GROUP BY expressions or the empty list if no GROUP BY + * @param groupBy list of GROUP BY expressions or the empty list if no GROUP BY * @return projector used to access row values during scan * @throws SQLException * @throws SQLFeatureNotSupportedException if an unsupported construct is encountered. diff --git a/src/main/java/com/salesforce/phoenix/compile/QueryCompiler.java b/src/main/java/com/salesforce/phoenix/compile/QueryCompiler.java index 51dc6171..865f3b8d 100644 --- a/src/main/java/com/salesforce/phoenix/compile/QueryCompiler.java +++ b/src/main/java/com/salesforce/phoenix/compile/QueryCompiler.java @@ -81,7 +81,6 @@ public QueryCompiler(PhoenixConnection connection, int maxRows, Scan scan, PColu * Builds an executable query plan from a parsed SQL statement * @param statement parsed SQL statement * @param binds values of bind variables - * @param scan TODO * @return executable query plan * @throws SQLException if mismatched types are found, bind value do not match binds, * or invalid function arguments are encountered. diff --git a/src/main/java/com/salesforce/phoenix/compile/RowProjector.java b/src/main/java/com/salesforce/phoenix/compile/RowProjector.java index 5432a5e8..0a70df45 100644 --- a/src/main/java/com/salesforce/phoenix/compile/RowProjector.java +++ b/src/main/java/com/salesforce/phoenix/compile/RowProjector.java @@ -54,7 +54,6 @@ public class RowProjector { /** * Construct RowProjector based on a list of ColumnProjectors. * @param columnProjectors ordered list of ColumnProjectors corresponding to projected columns in SELECT clause - * @param rowCountExpression Expression used to calculate the row count from the result returned by the server-side * aggregating coprocessor. Only required in the case of an aggregate query with a limit clause and otherwise may * be null. 
*/ diff --git a/src/main/java/com/salesforce/phoenix/compile/StatementPlan.java b/src/main/java/com/salesforce/phoenix/compile/StatementPlan.java index cd5245a1..b04eeb77 100644 --- a/src/main/java/com/salesforce/phoenix/compile/StatementPlan.java +++ b/src/main/java/com/salesforce/phoenix/compile/StatementPlan.java @@ -48,7 +48,6 @@ public ExplainPlan getExplainPlan() throws SQLException { /** * Returns the ParameterMetaData for the statement - * @return */ ParameterMetaData getParameterMetaData(); diff --git a/src/main/java/com/salesforce/phoenix/coprocessor/MetaDataProtocol.java b/src/main/java/com/salesforce/phoenix/coprocessor/MetaDataProtocol.java index c8064e50..fb7aeac2 100644 --- a/src/main/java/com/salesforce/phoenix/coprocessor/MetaDataProtocol.java +++ b/src/main/java/com/salesforce/phoenix/coprocessor/MetaDataProtocol.java @@ -129,14 +129,14 @@ public void write(DataOutput output) throws IOException { * @param tableName * @param tableTimestamp * @param clientTimestamp - * @return + * @return MetaDataMutationResult * @throws IOException */ MetaDataMutationResult getTable(byte[] schemaName, byte[] tableName, long tableTimestamp, long clientTimestamp) throws IOException; /** * Create a new Phoenix table * @param tableMetadata - * @return + * @return MetaDataMutationResult * @throws IOException */ MetaDataMutationResult createTable(List<Mutation> tableMetadata) throws IOException; @@ -144,7 +144,7 @@ public void write(DataOutput output) throws IOException { * Drop an existing Phoenix table * @param tableMetadata * @param isView TODO - * @return + * @return MetaDataMutationResult * @throws IOException */ MetaDataMutationResult dropTable(List<Mutation> tableMetadata, boolean isView) throws IOException; @@ -152,7 +152,7 @@ public void write(DataOutput output) throws IOException { /** * Add a column to an existing Phoenix table * @param tableMetadata - * @return + * @return MetaDataMutationResult * @throws IOException */ MetaDataMutationResult addColumn(List<Mutation> tableMetadata) throws IOException; @@ -160,7 +160,7 @@ public void write(DataOutput output) throws IOException { /** * Drop a column from an existing Phoenix table * @param tableMetadata - * @return + * @return MetaDataMutationResult * @throws IOException */ MetaDataMutationResult dropColumn(List<Mutation> tableMetadata) throws IOException; diff --git a/src/main/java/com/salesforce/phoenix/execute/AggregateRowCounter.java b/src/main/java/com/salesforce/phoenix/execute/AggregateRowCounter.java index e365e904..0dcb7b52 100644 --- a/src/main/java/com/salesforce/phoenix/execute/AggregateRowCounter.java +++ b/src/main/java/com/salesforce/phoenix/execute/AggregateRowCounter.java @@ -55,8 +55,7 @@ public class AggregateRowCounter implements RowCounter { /** * Construct an AggregateRowCounter that is used to short circuit scans with a rownum limit by * calculating the row count from aggregated query results.
- * @param rowCountExpression expression that calculates the row count from a given row result + * @param aggregators aggregators */ public AggregateRowCounter(Aggregators aggregators) { this.schema = aggregators.getValueSchema(); diff --git a/src/main/java/com/salesforce/phoenix/execute/BasicQueryPlan.java b/src/main/java/com/salesforce/phoenix/execute/BasicQueryPlan.java index c15178cf..73801e26 100644 --- a/src/main/java/com/salesforce/phoenix/execute/BasicQueryPlan.java +++ b/src/main/java/com/salesforce/phoenix/execute/BasicQueryPlan.java @@ -95,10 +95,10 @@ private ConnectionQueryServices getConnectionQueryServices(ConnectionQueryServic return childServices; } - /** - * Sets up an id used to do round robin queue processing on the server - * @param scan - */ +// /** +// * Sets up an id used to do round robin queue processing on the server +// * @param scan +// */ // private void setProducer(Scan scan) { // byte[] producer = Bytes.toBytes(UUID.randomUUID().toString()); // scan.setAttribute(HBaseServer.CALL_QUEUE_PRODUCER_ATTRIB_NAME, producer); diff --git a/src/main/java/com/salesforce/phoenix/expression/aggregator/Aggregator.java b/src/main/java/com/salesforce/phoenix/expression/aggregator/Aggregator.java index 31ff50f2..82a96b7e 100644 --- a/src/main/java/com/salesforce/phoenix/expression/aggregator/Aggregator.java +++ b/src/main/java/com/salesforce/phoenix/expression/aggregator/Aggregator.java @@ -46,8 +46,6 @@ public interface Aggregator extends Expression { * Incrementally aggregate the value with the current row * @param tuple the result containing all the key values of the row * @param ptr the bytes pointer to the underlying result - * @return true if the child expression of the aggregator was - * able to be evaluated and false otherwise. */ public void aggregate(Tuple tuple, ImmutableBytesWritable ptr); diff --git a/src/main/java/com/salesforce/phoenix/expression/aggregator/ServerAggregators.java b/src/main/java/com/salesforce/phoenix/expression/aggregator/ServerAggregators.java index e30d388c..c3ff60da 100644 --- a/src/main/java/com/salesforce/phoenix/expression/aggregator/ServerAggregators.java +++ b/src/main/java/com/salesforce/phoenix/expression/aggregator/ServerAggregators.java @@ -70,7 +70,7 @@ public void aggregate(Aggregator[] aggregators, Tuple result) { /** * Serialize an Aggregator into a byte array - * @param aggregator the aggregator to serialize + * @param aggFuncs list of aggregator to serialize * @return serialized byte array respresentation of aggregator */ public static byte[] serialize(List<SingleAggregateFunction> aggFuncs, int minNullableIndex) { diff --git a/src/main/java/com/salesforce/phoenix/expression/function/FunctionExpression.java b/src/main/java/com/salesforce/phoenix/expression/function/FunctionExpression.java index 113ff310..faf40c2a 100644 --- a/src/main/java/com/salesforce/phoenix/expression/function/FunctionExpression.java +++ b/src/main/java/com/salesforce/phoenix/expression/function/FunctionExpression.java @@ -101,7 +101,7 @@ public KeyFormationDirective getKeyFormationDirective() { * Zero-based index of child expression that determines the potential column used to form the row key during optimization.
* For example, SUBSTR(prefix,1,3), the first child expression (prefix) would determine the row key column used to * help form the scan start/stop key - * @return + * @return int */ public int getKeyFormationTraversalIndex() { return 0; diff --git a/src/main/java/com/salesforce/phoenix/expression/function/RegexpReplaceFunction.java b/src/main/java/com/salesforce/phoenix/expression/function/RegexpReplaceFunction.java index 8cd2c895..ec9af6d0 100644 --- a/src/main/java/com/salesforce/phoenix/expression/function/RegexpReplaceFunction.java +++ b/src/main/java/com/salesforce/phoenix/expression/function/RegexpReplaceFunction.java @@ -49,10 +49,10 @@ * REGEXP_REPLACE(<source_char>,<pattern>,<replace_string>) * source_char is the string in which we want to perform string replacement. pattern is a * Java compatible regular expression string, and we replace all the matching part with - * replace_string. The first 2 arguments are required and are {@link com.salesforce.phoenix.schema.PDataType.VARCHAR}, + * replace_string. The first 2 arguments are required and are {@link com.salesforce.phoenix.schema.PDataType#VARCHAR}, * the replace_string is default to empty string. * - * The function returns a {@link com.salesforce.phoenix.schema.PDataType.VARCHAR} + * The function returns a {@link com.salesforce.phoenix.schema.PDataType#VARCHAR} * * @author zhuang * @since 0.1 diff --git a/src/main/java/com/salesforce/phoenix/expression/function/RoundFunction.java b/src/main/java/com/salesforce/phoenix/expression/function/RoundFunction.java index 33b1fddc..98c59d95 100644 --- a/src/main/java/com/salesforce/phoenix/expression/function/RoundFunction.java +++ b/src/main/java/com/salesforce/phoenix/expression/function/RoundFunction.java @@ -50,7 +50,7 @@ * an even increment. Usage: * ROUND(<date/time expression>,<'day'|'hour'|'minute'|'second'|'millisecond'>,<optional integer multiplier>) * The integer multiplier is optional and is used to do rollups to a partial time unit (i.e. 10 minute rollup) - * The function returns a {@link com.salesforce.phoenix.schema.PDataType.DATE} + * The function returns a {@link com.salesforce.phoenix.schema.PDataType#DATE} * * @author jtaylor * @since 0.1 diff --git a/src/main/java/com/salesforce/phoenix/expression/function/TruncFunction.java b/src/main/java/com/salesforce/phoenix/expression/function/TruncFunction.java index 66920bdc..4ed5fb66 100644 --- a/src/main/java/com/salesforce/phoenix/expression/function/TruncFunction.java +++ b/src/main/java/com/salesforce/phoenix/expression/function/TruncFunction.java @@ -42,7 +42,7 @@ * an even increment. Usage: * TRUNC(<date/time expression>,<'day'|'hour'|'minute'|'second'|'millisecond'>,[<optional integer multiplier>]) * The integer multiplier is optional and is used to do rollups to a partial time unit (i.e. 10 minute rollup) - * The function returns a {@link com.salesforce.phoenix.schema.PDataType.DATE} + * The function returns a {@link com.salesforce.phoenix.schema.PDataType#DATE} * * @author jtaylor * @since 0.1 diff --git a/src/main/java/com/salesforce/phoenix/iterate/AggregatingResultIterator.java b/src/main/java/com/salesforce/phoenix/iterate/AggregatingResultIterator.java index aa5f3ae1..01649b1c 100644 --- a/src/main/java/com/salesforce/phoenix/iterate/AggregatingResultIterator.java +++ b/src/main/java/com/salesforce/phoenix/iterate/AggregatingResultIterator.java @@ -41,7 +41,7 @@ public interface AggregatingResultIterator extends ResultIterator { /** * Provides a means of re-aggregating a result row. For - * scanners that need to look ahead (i.e.
{@link phoenix.execute.iterate.OrderedAggregatingResultIterator.OrderByResultScanner} + * scanners that need to look ahead (i.e. {@link com.salesforce.phoenix.iterate.OrderedAggregatingResultIterator} * @param result the row to re-aggregate */ void aggregate(Tuple result); diff --git a/src/main/java/com/salesforce/phoenix/iterate/FilterAggregatingResultIterator.java b/src/main/java/com/salesforce/phoenix/iterate/FilterAggregatingResultIterator.java index 382a7d1b..4ad9b6a2 100644 --- a/src/main/java/com/salesforce/phoenix/iterate/FilterAggregatingResultIterator.java +++ b/src/main/java/com/salesforce/phoenix/iterate/FilterAggregatingResultIterator.java @@ -43,7 +43,7 @@ * Post aggregation filter for HAVING clause. Due to the way we cache aggregation values * we cannot have a look ahead for this Iterator, because the expressions in the SELECT * clause would return values for the peeked row instead of the current row. If we only - * use the Result argument in {@link com.salesforce.phoenix.expression.Expression#getValue(Result)} + * use the Result argument in {@link com.salesforce.phoenix.expression.Expression} * instead of our cached value in Aggregators, we could have a look ahead. * * @author jtaylor diff --git a/src/main/java/com/salesforce/phoenix/iterate/ParallelIterators.java b/src/main/java/com/salesforce/phoenix/iterate/ParallelIterators.java index 557c19e5..6b5f5ed5 100644 --- a/src/main/java/com/salesforce/phoenix/iterate/ParallelIterators.java +++ b/src/main/java/com/salesforce/phoenix/iterate/ParallelIterators.java @@ -106,8 +106,6 @@ public boolean apply(HRegionInfo region) { /** * Splits the given scan's key range so that each split can be queried in parallel * - * @param config configuration object that holds concurrency settings for - * the {@link ParallelIterators}. * @param scan the scan to parallelize * @param allTableRegions all online regions for the table to be scanned * @return the key ranges that should be scanned in parallel diff --git a/src/main/java/com/salesforce/phoenix/jdbc/PhoenixConnection.java b/src/main/java/com/salesforce/phoenix/jdbc/PhoenixConnection.java index cbb5a2ce..0d9b53e9 100644 --- a/src/main/java/com/salesforce/phoenix/jdbc/PhoenixConnection.java +++ b/src/main/java/com/salesforce/phoenix/jdbc/PhoenixConnection.java @@ -275,7 +275,7 @@ public Statement createStatement() throws SQLException { /** * Back-door way to inject processing into walking through a result set * @param statementFactory - * @return + * @return PhoenixStatement * @throws SQLException */ public PhoenixStatement createStatement(PhoenixStatementFactory statementFactory) throws SQLException { diff --git a/src/main/java/com/salesforce/phoenix/join/HashCacheClient.java b/src/main/java/com/salesforce/phoenix/join/HashCacheClient.java index e9581a3c..218fb251 100644 --- a/src/main/java/com/salesforce/phoenix/join/HashCacheClient.java +++ b/src/main/java/com/salesforce/phoenix/join/HashCacheClient.java @@ -76,9 +76,8 @@ public class HashCacheClient { * Construct client used to create a serialized cached snapshot of a table and send it to each region server * for caching during hash join processing. 
* @param services the global services - * @param memoryManager the per request memory manager - * @param loopTable the table being iterated over (as opposed to cached) during join processing - * @param joinKeyPrefix bytes prefixing key of every LHS and RHS row + * @param iterateOverTableName table name + * @param tenantId the tenantId or null if not applicable */ public HashCacheClient(ConnectionQueryServices services, byte[] iterateOverTableName, byte[] tenantId) { this.services = services; diff --git a/src/main/java/com/salesforce/phoenix/parse/FunctionParseNode.java b/src/main/java/com/salesforce/phoenix/parse/FunctionParseNode.java index 38099bf5..a9a4c50b 100644 --- a/src/main/java/com/salesforce/phoenix/parse/FunctionParseNode.java +++ b/src/main/java/com/salesforce/phoenix/parse/FunctionParseNode.java @@ -216,7 +216,6 @@ public List validate(List children, StatementContext con /** * Entry point for parser to instantiate compiled representation of built-in function - * @param node Parse model node representing a built-in function reference * @param children Compiled expressions for child nodes * @param context Query context for accessing state shared across the processing of multiple clauses * @return compiled representation of built-in function diff --git a/src/main/java/com/salesforce/phoenix/parse/RHSLiteralStatementRewriter.java b/src/main/java/com/salesforce/phoenix/parse/RHSLiteralStatementRewriter.java index f834b9d6..3967e83c 100644 --- a/src/main/java/com/salesforce/phoenix/parse/RHSLiteralStatementRewriter.java +++ b/src/main/java/com/salesforce/phoenix/parse/RHSLiteralStatementRewriter.java @@ -45,7 +45,6 @@ public class RHSLiteralStatementRewriter extends ParseNodeRewriter { /** * Rewrite the select statement by filtering out expression nodes from the WHERE clause * @param statement the select statement from which to filter. - * @param removeNodes expression nodes to filter out of WHERE clause. * @return new select statement * @throws SQLException */ diff --git a/src/main/java/com/salesforce/phoenix/query/ConnectionQueryServices.java b/src/main/java/com/salesforce/phoenix/query/ConnectionQueryServices.java index 2b17bb4c..14158daa 100644 --- a/src/main/java/com/salesforce/phoenix/query/ConnectionQueryServices.java +++ b/src/main/java/com/salesforce/phoenix/query/ConnectionQueryServices.java @@ -47,7 +47,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated /** * Get (and create if necessary) a child QueryService for a given tenantId. 
* The QueryService will be cached for the lifetime of the parent QueryService - * @param childId the organization ID + * @param tenantId the organization ID * @return the child QueryService */ public ConnectionQueryServices getChildQueryServices(ImmutableBytesWritable tenantId); diff --git a/src/main/java/com/salesforce/phoenix/schema/PStringColumn.java b/src/main/java/com/salesforce/phoenix/schema/PStringColumn.java index 74a42aa9..c41bc850 100644 --- a/src/main/java/com/salesforce/phoenix/schema/PStringColumn.java +++ b/src/main/java/com/salesforce/phoenix/schema/PStringColumn.java @@ -29,7 +29,7 @@ /** * - * Abstract class for columns of type {@link com.salesforce.phoenix.schema.PDataType.STRING} + * Abstract class for columns of type {@link com.salesforce.phoenix.schema.PDataType#VARCHAR} * * @author jtaylor * @since 0.1 diff --git a/src/main/java/com/salesforce/phoenix/schema/PTable.java b/src/main/java/com/salesforce/phoenix/schema/PTable.java index b8f2b98b..c9091e05 100644 --- a/src/main/java/com/salesforce/phoenix/schema/PTable.java +++ b/src/main/java/com/salesforce/phoenix/schema/PTable.java @@ -106,7 +106,7 @@ public interface PTable extends Writable { /** * Creates a new row at the specified timestamp using the key - * for the PK values (from {@link #newKey(ImmutableBytesWritable, byte[]...)} + * for the PK values (from {@link #newKey(ImmutableBytesWritable, byte[][])} * and the optional key values specified using values. * @param ts the timestamp that the key value will have when committed * @param key the row key of the key value @@ -119,7 +119,7 @@ public interface PTable extends Writable { PRow newRow(long ts, ImmutableBytesWritable key, byte[]... values); /** - * Creates a new row for the PK values (from {@link #newKey(ImmutableBytesWritable, byte[]...)} + * Creates a new row for the PK values (from {@link #newKey(ImmutableBytesWritable, byte[][])} * and the optional key values specified using values. The timestamp of the key value * will be set by the HBase server. * @param key the row key of the key value diff --git a/src/main/java/com/salesforce/phoenix/schema/RowKeyValueAccessor.java b/src/main/java/com/salesforce/phoenix/schema/RowKeyValueAccessor.java index f62836e3..3e38c949 100644 --- a/src/main/java/com/salesforce/phoenix/schema/RowKeyValueAccessor.java +++ b/src/main/java/com/salesforce/phoenix/schema/RowKeyValueAccessor.java @@ -174,7 +174,6 @@ public int getOffset(byte[] keyBuffer, int keyOffset) { /** * Calculate the length of the PK column value - * @param maxLength the fixed length of the PK column value or null if variable length * @param keyBuffer the byte array of the row key * @param keyOffset the offset in the byte array of where the key begins * @param keyLength the length of the entire row key diff --git a/src/main/java/com/salesforce/phoenix/util/CSVLoader.java b/src/main/java/com/salesforce/phoenix/util/CSVLoader.java index 54759b7f..9fa1a3f6 100644 --- a/src/main/java/com/salesforce/phoenix/util/CSVLoader.java +++ b/src/main/java/com/salesforce/phoenix/util/CSVLoader.java @@ -84,7 +84,7 @@ public void upsert(String fileName) throws Exception { * column value to correct type before upsert. Note: Column Names are * expected as first line of CSV file. 
* - * @param fileName + * @param reader CSVReader instance * @throws Exception */ public void upsert(CSVReader reader) throws Exception { diff --git a/src/main/java/com/salesforce/phoenix/util/KeyValueUtil.java b/src/main/java/com/salesforce/phoenix/util/KeyValueUtil.java index 7bc90e07..32985f6d 100644 --- a/src/main/java/com/salesforce/phoenix/util/KeyValueUtil.java +++ b/src/main/java/com/salesforce/phoenix/util/KeyValueUtil.java @@ -92,9 +92,8 @@ public static KeyValue newKeyValue(Result r, byte[] cf, byte[] cq, long ts, byte /** * Binary search for latest column value without allocating memory in the process - * @param r + * @param kvs * @param searchTerm - * @return */ public static KeyValue getColumnLatest(List<KeyValue> kvs, KeyValue searchTerm) { if (kvs.size() == 0) { diff --git a/src/main/java/com/salesforce/phoenix/util/ResultUtil.java b/src/main/java/com/salesforce/phoenix/util/ResultUtil.java index a54a31a3..1b8e8415 100644 --- a/src/main/java/com/salesforce/phoenix/util/ResultUtil.java +++ b/src/main/java/com/salesforce/phoenix/util/ResultUtil.java @@ -49,7 +49,6 @@ private ResultUtil() { /** * Return a pointer into a potentially much bigger byte buffer that points to the key of a Result. * @param r - * @return */ public static ImmutableBytesWritable getKey(Result r) { return getKey(r, 0); } @@ -69,7 +68,6 @@ public static void getKey(KeyValue value, ImmutableBytesWritable key) { * in all of our keys. * @param r * @param offset offset added to start of key and subtracted from key length (to select subset of key bytes) - * @return */ public static ImmutableBytesWritable getKey(Result r, int offset) { return new ImmutableBytesWritable(getRawBytes(r), getKeyOffset(r) + offset, getKeyLength(r) - offset); } @@ -149,7 +147,6 @@ public static KeyValue getColumnLatest(Result r, byte[] row, int roffset, int rl * Binary search for latest column value without allocating memory in the process * @param r * @param searchTerm - * @return */ public static KeyValue getColumnLatest(Result r, KeyValue searchTerm) { KeyValue [] kvs = r.raw(); // side effect possibly. diff --git a/src/main/java/com/salesforce/phoenix/util/SchemaUtil.java b/src/main/java/com/salesforce/phoenix/util/SchemaUtil.java index b12fd82a..560d80e3 100644 --- a/src/main/java/com/salesforce/phoenix/util/SchemaUtil.java +++ b/src/main/java/com/salesforce/phoenix/util/SchemaUtil.java @@ -191,7 +191,6 @@ public static List concat(List l1, List l2) { * Get the key used in the Phoenix metadata row for a table definition * @param schemaName * @param tableName - * @return */ public static byte[] getTableKey(byte[] schemaName, byte[] tableName) { return ByteUtil.concat(schemaName, QueryConstants.SEPARATOR_BYTE_ARRAY, tableName, QueryConstants.SEPARATOR_BYTE_ARRAY); } @@ -221,9 +220,7 @@ public static String getColumnDisplayName(String schemaName, String tableName, S /** * Get the HTable name for a given schemaName and tableName - * @param schemaName * @param tableName - * @return */ public static byte[] getTableName(String tableName) { return getTableName(null, tableName);