[apache#1316] fix: Support keyword in column name in MySQL/PG catalog (apache#1319)

### What changes were proposed in this pull request?

Quote reserved keywords such as `long` and `integer` when they appear in column names, so that the generated DDL stays valid (back quotes for MySQL, double quotes for PostgreSQL).
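
For illustration, a minimal sketch of the quoting idea (the helper names are hypothetical, not part of this patch, which inlines the quoting into the SQL builders shown in the diff below):

```java
// Sketch only: wrap an identifier in the dialect's quote character so that
// reserved words such as long or integer become legal column names.
// Doubling embedded quote characters is a defensive extra here; the patch
// itself only wraps the name.
static String quoteMySql(String identifier) {
  return "`" + identifier.replace("`", "``") + "`";
}

static String quotePostgreSql(String identifier) {
  return "\"" + identifier.replace("\"", "\"\"") + "\"";
}

// quoteMySql("long")         -> `long`
// quotePostgreSql("integer") -> "integer"
```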

### Why are the changes needed?

Users may use these reserved keywords as column names when creating a table.

Fix: apache#1316 

### Does this PR introduce _any_ user-facing change?

N/A

### How was this patch tested?

Integration tests.
yuqi1129 authored Jan 17, 2024
1 parent 075db16 commit 8ac19d0
Showing 6 changed files with 184 additions and 40 deletions.
MysqlTableOperations.java
@@ -44,6 +44,7 @@
public class MysqlTableOperations extends JdbcTableOperations {

public static final String AUTO_INCREMENT = "AUTO_INCREMENT";
+ public static final String BACK_QUOTE = "`";

@Override
public JdbcTable load(String databaseName, String tableName) throws NoSuchTableException {
@@ -228,7 +229,12 @@ protected String generateCreateTableSql(
// Add columns
for (int i = 0; i < columns.length; i++) {
JdbcColumn column = columns[i];
- sqlBuilder.append(SPACE).append(SPACE).append(column.name());
+ sqlBuilder
+     .append(SPACE)
+     .append(SPACE)
+     .append(BACK_QUOTE)
+     .append(column.name())
+     .append(BACK_QUOTE);

appendColumnDefinition(column, sqlBuilder);
// Add a comma for the next column, unless it's the last one
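
With the change above, the generated DDL back-quotes every column name. A self-contained approximation of the patched loop (SPACE and BACK_QUOTE mirror the constants in the diff; the table name, column names, and the INT type are illustrative, and type/nullability/comment handling is elided):

```java
public class QuotedDdlSketch {
  public static void main(String[] args) {
    String SPACE = " ";
    String BACK_QUOTE = "`";
    String[] columnNames = {"integer", "long", "decimal"};
    StringBuilder sqlBuilder = new StringBuilder("CREATE TABLE `t1` (\n");
    for (int i = 0; i < columnNames.length; i++) {
      sqlBuilder
          .append(SPACE)
          .append(SPACE)
          .append(BACK_QUOTE)
          .append(columnNames[i])
          .append(BACK_QUOTE)
          .append(" INT"); // the real code resolves each column's actual type
      // Add a comma for the next column, unless it's the last one
      if (i != columnNames.length - 1) {
        sqlBuilder.append(",\n");
      }
    }
    sqlBuilder.append("\n)");
    System.out.println(sqlBuilder);
    // CREATE TABLE `t1` (
    //   `integer` INT,
    //   `long` INT,
    //   `decimal` INT
    // )
  }
}
```

Without the back quotes, MySQL rejects this statement, since INTEGER, LONG, and DECIMAL are reserved words.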
PostgreSqlTableOperations.java
@@ -40,6 +40,8 @@
/** Table operations for PostgreSQL. */
public class PostgreSqlTableOperations extends JdbcTableOperations {

+ public static final String PG_QUOTE = "\"";

private static final String SHOW_COLUMN_COMMENT_SQL =
"SELECT \n"
+ " a.attname as col_name,\n"
@@ -234,7 +236,7 @@ protected String generateCreateTableSql(
// Add columns
for (int i = 0; i < columns.length; i++) {
JdbcColumn column = columns[i];
sqlBuilder.append(" ").append(column.name());
sqlBuilder.append(" \"").append(column.name()).append(PG_QUOTE);

appendColumnDefinition(column, sqlBuilder);
// Add a comma for the next column, unless it's the last one
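
On the PostgreSQL side the quote character is the SQL-standard double quote, which carries two side effects worth remembering (general PostgreSQL behavior, not specific to this patch): quoting also protects fully reserved words such as order and user, and a quoted identifier is case-sensitive, whereas unquoted identifiers fold to lower case. A small sketch:

```java
public class PgQuotingSketch {
  public static void main(String[] args) {
    // Rejected if executed: ORDER and USER are reserved words in PostgreSQL.
    String unquoted = "CREATE TABLE t1 (order INT, user TEXT)";
    // Accepted: double quotes turn the reserved words into plain identifiers.
    String quoted = "CREATE TABLE \"t1\" (\"order\" INT, \"user\" TEXT)";
    // Note: once quoted, "Order" and "order" name two different columns.
    System.out.println(unquoted + "\n" + quoted);
  }
}
```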
(Hive catalog integration test)
@@ -62,6 +62,7 @@
import com.datastrato.gravitino.rel.expressions.sorts.SortOrder;
import com.datastrato.gravitino.rel.expressions.sorts.SortOrders;
import com.datastrato.gravitino.rel.expressions.transforms.Transform;
+ import com.datastrato.gravitino.rel.expressions.transforms.Transforms;
import com.datastrato.gravitino.rel.types.Types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
@@ -360,7 +361,7 @@ public void testCreateHiveTableWithDistributionAndSortOrder()
columns,
TABLE_COMMENT,
properties,
- Partitioning.EMPTY_PARTITIONING,
+ Transforms.EMPTY_TRANSFORM,
distribution,
sortOrders);

@@ -379,7 +380,7 @@
Table createdTable1 =
catalog
.asTableCatalog()
- .createTable(nameIdentifier, columns, TABLE_COMMENT, properties, (Partitioning[]) null);
+ .createTable(nameIdentifier, columns, TABLE_COMMENT, properties, (Transform[]) null);

// Directly get table from hive metastore to check if the table is created successfully.
org.apache.hadoop.hive.metastore.api.Table hiveTable1 =
@@ -406,7 +407,7 @@
columns,
TABLE_COMMENT,
properties,
- Partitioning.EMPTY_PARTITIONING,
+ Transforms.EMPTY_TRANSFORM,
badDistribution,
sortOrders);
});
@@ -429,7 +430,7 @@
columns,
TABLE_COMMENT,
properties,
- Partitioning.EMPTY_PARTITIONING,
+ Transforms.EMPTY_TRANSFORM,
distribution,
badSortOrders);
});
@@ -447,11 +448,7 @@ public void testCreateHiveTable() throws TException, InterruptedException {
catalog
.asTableCatalog()
.createTable(
- nameIdentifier,
- columns,
- TABLE_COMMENT,
- properties,
- Partitioning.EMPTY_PARTITIONING);
+ nameIdentifier, columns, TABLE_COMMENT, properties, Transforms.EMPTY_TRANSFORM);

// Directly get table from hive metastore to check if the table is created successfully.
org.apache.hadoop.hive.metastore.api.Table hiveTab =
@@ -468,7 +465,7 @@ public void testCreateHiveTable() throws TException, InterruptedException {
Table createdTable1 =
catalog
.asTableCatalog()
- .createTable(nameIdentifier, columns, TABLE_COMMENT, properties, (Partitioning[]) null);
+ .createTable(nameIdentifier, columns, TABLE_COMMENT, properties, (Transform[]) null);

// Directly get table from hive metastore to check if the table is created successfully.
org.apache.hadoop.hive.metastore.api.Table hiveTable1 =
@@ -496,7 +493,7 @@ public void testHiveTableProperties() throws TException, InterruptedException {
columns,
TABLE_COMMENT,
ImmutableMap.of(),
- Partitioning.EMPTY_PARTITIONING);
+ Transforms.EMPTY_TRANSFORM);
HiveTablePropertiesMetadata tablePropertiesMetadata = new HiveTablePropertiesMetadata();
org.apache.hadoop.hive.metastore.api.Table actualTable =
hiveClientPool.run(client -> client.getTable(schemaName, tableName));
@@ -524,7 +521,7 @@ public void testHiveTableProperties() throws TException, InterruptedException {
"textfile",
SERDE_LIB,
OPENCSV_SERDE_CLASS),
- Partitioning.EMPTY_PARTITIONING);
+ Transforms.EMPTY_TRANSFORM);
org.apache.hadoop.hive.metastore.api.Table actualTable2 =
hiveClientPool.run(client -> client.getTable(schemaName, table2));

@@ -590,7 +587,7 @@ public void testHiveSchemaProperties() throws TException, InterruptedException {
createColumns(),
TABLE_COMMENT,
ImmutableMap.of(),
- Partitioning.EMPTY_PARTITIONING);
+ Transforms.EMPTY_TRANSFORM);
org.apache.hadoop.hive.metastore.api.Table actualTable =
hiveClientPool.run(client -> client.getTable(schemaIdent.name(), tableIdent.name()));
String actualTableLocation = actualTable.getSd().getLocation();
@@ -616,7 +613,7 @@ public void testCreatePartitionedHiveTable() throws TException, InterruptedException {
columns,
TABLE_COMMENT,
properties,
- new Partitioning[] {
+ new Transform[] {
IdentityPartitioningDTO.of(columns[1].name()),
IdentityPartitioningDTO.of(columns[2].name())
});
@@ -643,7 +640,7 @@ public void testCreatePartitionedHiveTable() throws TException, InterruptedException {
columns,
TABLE_COMMENT,
properties,
- new Partitioning[] {
+ new Transform[] {
IdentityPartitioningDTO.of(columns[0].name()),
IdentityPartitioningDTO.of(columns[1].name())
});
@@ -730,7 +727,7 @@ public void testAlterHiveTable() throws TException, InterruptedException {
columns,
TABLE_COMMENT,
createProperties(),
- new Partitioning[] {IdentityPartitioningDTO.of(columns[2].name())});
+ new Transform[] {IdentityPartitioningDTO.of(columns[2].name())});
Assertions.assertNull(createdTable.auditInfo().lastModifier());
Assertions.assertEquals(AuthConstants.ANONYMOUS_USER, createdTable.auditInfo().creator());
Table alteredTable =
@@ -824,7 +821,7 @@ public void testAlterHiveTable() throws TException, InterruptedException {
newColumns,
TABLE_COMMENT,
ImmutableMap.of(),
- new Transform[0],
+ Transforms.EMPTY_TRANSFORM,
Distributions.NONE,
new SortOrder[0]);

@@ -878,7 +875,7 @@ public void testDropHiveTable() {
createColumns(),
TABLE_COMMENT,
createProperties(),
- new Transform[0]);
+ Transforms.EMPTY_TRANSFORM);
catalog
.asTableCatalog()
.dropTable(NameIdentifier.of(metalakeName, catalogName, schemaName, ALTER_TABLE_NAME));
@@ -1067,7 +1064,7 @@ void testAlterEntityName() {
columns,
TABLE_COMMENT,
createProperties(),
- new Transform[0]);
+ Transforms.EMPTY_TRANSFORM);

for (int i = 0; i < 2; i++) {
// The table to be renamed does not exist
@@ -1139,7 +1136,7 @@ public void testPurgeHiveManagedTable() throws TException, InterruptedException,
columns,
TABLE_COMMENT,
createProperties(),
- new Partitioning[] {IdentityPartitioningDTO.of(columns[2].name())});
+ new Transform[] {IdentityPartitioningDTO.of(columns[2].name())});
// Directly get table from hive metastore to check if the table is created successfully.
org.apache.hadoop.hive.metastore.api.Table hiveTab =
hiveClientPool.run(client -> client.getTable(schemaName, tableName));
@@ -1166,7 +1163,7 @@ public void testPurgeHiveExternalTable() throws TException, InterruptedException
columns,
TABLE_COMMENT,
ImmutableMap.of(TABLE_TYPE, EXTERNAL_TABLE.name().toLowerCase(Locale.ROOT)),
- new Partitioning[] {IdentityPartitioningDTO.of(columns[2].name())});
+ new Transform[] {IdentityPartitioningDTO.of(columns[2].name())});
// Directly get table from hive metastore to check if the table is created successfully.
org.apache.hadoop.hive.metastore.api.Table hiveTab =
hiveClientPool.run(client -> client.getTable(schemaName, tableName));
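
The Hive integration-test edits bundled into this commit are a mechanical API migration rather than part of the quoting fix: the Partitioning DTO type gives way to the Transform expression API, with Partitioning.EMPTY_PARTITIONING becoming Transforms.EMPTY_TRANSFORM and Partitioning[] becoming Transform[]. A sketch of the migrated call shape, with illustrative argument values (as the diff suggests, Transforms.EMPTY_TRANSFORM is an empty Transform[], i.e. an unpartitioned table):

```java
Table createdTable =
    catalog
        .asTableCatalog()
        .createTable(
            nameIdentifier,
            columns,
            TABLE_COMMENT,
            properties,
            Transforms.EMPTY_TRANSFORM, // was: Partitioning.EMPTY_PARTITIONING
            Distributions.NONE,
            new SortOrder[0]);
```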
(MySQL catalog integration test)
@@ -12,7 +12,6 @@
import com.datastrato.gravitino.catalog.jdbc.config.JdbcConfig;
import com.datastrato.gravitino.client.GravitinoMetaLake;
import com.datastrato.gravitino.dto.rel.ColumnDTO;
- import com.datastrato.gravitino.dto.rel.partitions.Partitioning;
import com.datastrato.gravitino.exceptions.NoSuchSchemaException;
import com.datastrato.gravitino.exceptions.NotFoundException;
import com.datastrato.gravitino.exceptions.SchemaAlreadyExistsException;
@@ -30,6 +29,8 @@
import com.datastrato.gravitino.rel.expressions.distributions.Distribution;
import com.datastrato.gravitino.rel.expressions.distributions.Distributions;
import com.datastrato.gravitino.rel.expressions.sorts.SortOrder;
+ import com.datastrato.gravitino.rel.expressions.transforms.Transform;
+ import com.datastrato.gravitino.rel.expressions.transforms.Transforms;
import com.datastrato.gravitino.rel.types.Types;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
@@ -279,7 +280,7 @@ void testCreateAndLoadMysqlTable() {

final SortOrder[] sortOrders = new SortOrder[0];

- Partitioning[] partitioning = Partitioning.EMPTY_PARTITIONING;
+ Transform[] partitioning = Transforms.EMPTY_TRANSFORM;

Map<String, String> properties = createProperties();
TableCatalog tableCatalog = catalog.asTableCatalog();
@@ -318,6 +319,70 @@ void testCreateAndLoadMysqlTable() {
}
}

+ @Test
+ void testColumnNameWithKeyWords() {
+   // Create table from Gravitino API
+   ColumnDTO[] columns =
+       new ColumnDTO[] {
+         new ColumnDTO.Builder()
+             .withName("integer")
+             .withDataType(Types.IntegerType.get())
+             .withComment("integer")
+             .build(),
+         new ColumnDTO.Builder()
+             .withName("long")
+             .withDataType(Types.LongType.get())
+             .withComment("long")
+             .build(),
+         new ColumnDTO.Builder()
+             .withName("float")
+             .withDataType(Types.FloatType.get())
+             .withComment("float")
+             .build(),
+         new ColumnDTO.Builder()
+             .withName("double")
+             .withDataType(Types.DoubleType.get())
+             .withComment("double")
+             .build(),
+         new ColumnDTO.Builder()
+             .withName("decimal")
+             .withDataType(Types.DecimalType.of(10, 3))
+             .withComment("decimal")
+             .build(),
+         new ColumnDTO.Builder()
+             .withName("date")
+             .withDataType(Types.DateType.get())
+             .withComment("date")
+             .build(),
+         new ColumnDTO.Builder()
+             .withName("time")
+             .withDataType(Types.TimeType.get())
+             .withComment("time")
+             .build()
+       };
+
+   String name = GravitinoITUtils.genRandomName("table") + "_keyword";
+   NameIdentifier tableIdentifier = NameIdentifier.of(metalakeName, catalogName, schemaName, name);
+   Distribution distribution = Distributions.NONE;
+
+   final SortOrder[] sortOrders = new SortOrder[0];
+
+   Transform[] partitioning = Transforms.EMPTY_TRANSFORM;
+
+   Map<String, String> properties = createProperties();
+   TableCatalog tableCatalog = catalog.asTableCatalog();
+   Table createdTable =
+       tableCatalog.createTable(
+           tableIdentifier,
+           columns,
+           table_comment,
+           properties,
+           partitioning,
+           distribution,
+           sortOrders);
+   Assertions.assertEquals(createdTable.name(), name);
+ }

@Test
void testAlterAndDropMysqlTable() {
ColumnDTO[] columns = createColumns();
@@ -412,7 +477,7 @@ void testAlterAndDropMysqlTable() {
newColumns,
table_comment,
ImmutableMap.of(),
- Partitioning.EMPTY_PARTITIONING,
+ Transforms.EMPTY_TRANSFORM,
Distributions.NONE,
new SortOrder[0]);

(Diffs for the remaining two changed files are not shown here.)
