From 2d9a22578ba57913d8f07960d2815cf93aa277a3 Mon Sep 17 00:00:00 2001 From: zw_yan <106359229+zwyan0@users.noreply.github.com> Date: Mon, 16 Jan 2023 12:24:04 +0900 Subject: [PATCH] To support PostgreSQL v15 (#131) * To support for PostgreSQL 15 * Added new test to check UNIQUE [ NULLS [NOT] DISTINCT ] * Update SEPC file, COPYRIGHT, program version etc. --- .github/workflows/build.yml | 4 +- .github/workflows/test.yml | 16 +- COPYRIGHT | 2 +- Makefile | 2 +- SPECS/pg_bulkload-pg10.spec | 7 +- SPECS/pg_bulkload-pg11.spec | 9 +- SPECS/pg_bulkload-pg12.spec | 9 +- SPECS/pg_bulkload-pg13.spec | 9 +- SPECS/pg_bulkload-pg14.spec | 9 +- SPECS/pg_bulkload-pg15.spec | 104 ++ SPECS/pg_bulkload-pg96.spec | 7 +- bin/Makefile | 27 +- bin/data/csv8.ctl | 4 + bin/data/csv9.ctl | 3 + bin/data/data10.csv | 4 + bin/data/data9.csv | 4 + bin/expected/init_3.out | 137 ++ bin/expected/load_csv_2.out | 825 ++++++++++++ bin/pg_bulkload.c | 2 +- bin/pgut/pgut-fe.c | 7 +- bin/pgut/pgut-fe.h | 2 +- bin/pgut/pgut-list.c | 4 +- bin/pgut/pgut-list.h | 2 +- bin/pgut/pgut.c | 2 +- bin/pgut/pgut.h | 2 +- bin/postgresql | 2 +- bin/recovery.c | 2 +- bin/sql/init-extension-v3.sql | 164 +++ bin/sql/load_csv-v3.sql | 164 +++ docs/index.html | 2 +- docs/index_ja.html | 2 +- docs/pg_bulkload-ja.html | 1921 ++++++++++++++-------------- docs/pg_bulkload.html | 2075 ++++++++++++++++--------------- docs/pg_timestamp-ja.html | 2 +- docs/pg_timestamp.html | 2 +- docs/sample_bin.ctl | 2 +- docs/sample_csv.ctl | 2 +- include/binary.h | 2 +- include/common.h | 4 +- include/logger.h | 2 +- include/pg_btree.h | 2 +- include/pg_bulkload.h | 2 +- include/pg_loadstatus.h | 2 +- include/pg_profile.h | 2 +- include/pg_strutil.h | 2 +- include/reader.h | 2 +- include/writer.h | 2 +- lib/Makefile | 2 +- lib/binary.c | 2 +- lib/logger.c | 2 +- lib/nbtree/COPYRIGHT.nbtsort | 2 +- lib/nbtree/nbtsort-13.c | 2 +- lib/nbtree/nbtsort-15.c | 2021 ++++++++++++++++++++++++++++++ lib/nbtree/nbtsort-common.c | 16 +- 
lib/parser_binary.c | 2 +- lib/parser_csv.c | 2 +- lib/parser_function.c | 2 +- lib/parser_tuple.c | 2 +- lib/pg_btree.c | 29 +- lib/pg_bulkload.c | 2 +- lib/pg_bulkload.sql.in | 2 +- lib/pg_strutil.c | 35 +- lib/pgut/pgut-be.c | 2 +- lib/pgut/pgut-be.h | 2 +- lib/pgut/pgut-ipc.c | 2 +- lib/pgut/pgut-ipc.h | 2 +- lib/pgut/pgut-pthread.c | 2 +- lib/pgut/pgut-pthread.h | 2 +- lib/reader.c | 2 +- lib/source.c | 2 +- lib/uninstall_pg_bulkload.sql | 2 +- lib/writer.c | 2 +- lib/writer_binary.c | 2 +- lib/writer_buffered.c | 2 +- lib/writer_direct.c | 2 +- lib/writer_parallel.c | 2 +- util/Makefile | 2 +- util/pg_timestamp.c | 2 +- util/pg_timestamp.sql.in | 2 +- util/uninstall_pg_timestamp.sql | 2 +- 80 files changed, 5628 insertions(+), 2096 deletions(-) create mode 100755 SPECS/pg_bulkload-pg15.spec create mode 100644 bin/data/csv8.ctl create mode 100644 bin/data/csv9.ctl create mode 100644 bin/data/data10.csv create mode 100644 bin/data/data9.csv create mode 100644 bin/expected/init_3.out create mode 100644 bin/expected/load_csv_2.out create mode 100644 bin/sql/init-extension-v3.sql create mode 100644 bin/sql/load_csv-v3.sql create mode 100644 lib/nbtree/nbtsort-15.c diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b5171e0..d0de3b9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,15 +17,15 @@ jobs: strategy: matrix: PGVERSION: # TODO: build with master branch + - "15" - "14" - "13" - "12" - "11" - "10" - "9.6" - - "9.5" env: - CACHE_VERSION: 20210426 # to identify cache version + CACHE_VERSION: 20221222 # to identify cache version steps: - name: cat version diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 14abb09..cb47e68 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,15 +17,15 @@ jobs: strategy: matrix: PGVERSION: # TODO: build with master branch - - "14.0" - - "13.4" - - "12.8" - - "11.13" - - "10.18" - - "9.6.23" - - "9.5.25" + - "15.1" + - 
"14.6" + - "13.9" + - "12.13" + - "11.18" + - "10.23" + - "9.6.24" env: - CACHE_VERSION: 20210426 # to identify cache version + CACHE_VERSION: 20221222 # to identify cache version steps: - name: cat version diff --git a/COPYRIGHT b/COPYRIGHT index 35a16a9..c0cc1b6 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -1,4 +1,4 @@ -Copyright (c) 2008-2021, NIPPON TELEGRAPH AND TELEPHONE CORPORATION +Copyright (c) 2008-2023, NIPPON TELEGRAPH AND TELEPHONE CORPORATION All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/Makefile b/Makefile index bdbfac7..3b0a2c0 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # # pg_bulkload: Makefile # -# Copyright (c) 2007-2021, NIPPON TELEGRAPH AND TELEPHONE CORPORATION +# Copyright (c) 2007-2023, NIPPON TELEGRAPH AND TELEPHONE CORPORATION # ifndef USE_PGXS top_builddir = ../.. diff --git a/SPECS/pg_bulkload-pg10.spec b/SPECS/pg_bulkload-pg10.spec index 2ee76e8..e6ffb0a 100644 --- a/SPECS/pg_bulkload-pg10.spec +++ b/SPECS/pg_bulkload-pg10.spec @@ -1,5 +1,5 @@ # SPEC file for pg_bulkload on PostgreSQL 10 -# Copyright (C) 2009-2021 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +# Copyright (C) 2009-2023 NIPPON TELEGRAPH AND TELEPHONE CORPORATION %define sname pg_bulkload %define pgmajorversion 10 @@ -9,7 +9,7 @@ Summary: High speed data load utility for PostgreSQL Name: %{sname} -Version: 3.1.19 +Version: 3.1.20 Release: 1%{?dist} License: BSD Group: Applications/Databases @@ -82,6 +82,9 @@ rm -rf %{buildroot} rm -rf %{_libdir}/pgxs/src/backend/ %changelog +* Thu Jan 13 2023 - NTT OSS Center 3.1.20-1 +- Support PostgreSQL 15 +- Update to pg_bulkload 3.1.20 * Mon Oct 11 2021 - Masahiro ikeda 3.1.19-1 - Support PostgreSQL 14 - Update to pg_bulkload 3.1.19 diff --git a/SPECS/pg_bulkload-pg11.spec b/SPECS/pg_bulkload-pg11.spec index 18e79eb..bca35c7 100644 --- a/SPECS/pg_bulkload-pg11.spec +++ b/SPECS/pg_bulkload-pg11.spec @@ -1,5 +1,5 @@ # SPEC file for pg_bulkload on PostgreSQL 11 -# Copyright (C) 
2009-2021 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +# Copyright (C) 2009-2023 NIPPON TELEGRAPH AND TELEPHONE CORPORATION %define sname pg_bulkload %define pgmajorversion 11 @@ -10,7 +10,7 @@ Summary: High speed data load utility for PostgreSQL Name: %{sname} -Version: 3.1.19 +Version: 3.1.20 Release: 1%{?dist} License: BSD Group: Applications/Databases @@ -31,7 +31,7 @@ When we load huge amount of data to a database, it is common situation that data %package llvmjit Requires: postgresql11-server, postgresql11-llvmjit -Requires: pg_bulkload = 3.1.19 +Requires: pg_bulkload = 3.1.20 Summary: Just-in-time compilation support for pg_bulkload %description llvmjit @@ -99,6 +99,9 @@ rm -rf %{buildroot} rm -rf %{_libdir}/pgxs/src/backend/ %changelog +* Thu Jan 13 2023 - NTT OSS Center 3.1.20-1 +- Support PostgreSQL 15 +- Update to pg_bulkload 3.1.20 * Mon Oct 11 2021 - Masahiro ikeda 3.1.19-1 - Support PostgreSQL 14 - Update to pg_bulkload 3.1.19 diff --git a/SPECS/pg_bulkload-pg12.spec b/SPECS/pg_bulkload-pg12.spec index fa9c0b6..9b8b5bc 100644 --- a/SPECS/pg_bulkload-pg12.spec +++ b/SPECS/pg_bulkload-pg12.spec @@ -1,5 +1,5 @@ # SPEC file for pg_bulkload on PostgreSQL 12 -# Copyright (C) 2009-2021 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +# Copyright (C) 2009-2023 NIPPON TELEGRAPH AND TELEPHONE CORPORATION %define sname pg_bulkload %define pgmajorversion 12 @@ -10,7 +10,7 @@ Summary: High speed data load utility for PostgreSQL Name: %{sname} -Version: 3.1.19 +Version: 3.1.20 Release: 1%{?dist} License: BSD Group: Applications/Databases @@ -31,7 +31,7 @@ When we load huge amount of data to a database, it is common situation that data %package llvmjit Requires: postgresql12-server, postgresql12-llvmjit -Requires: pg_bulkload = 3.1.19 +Requires: pg_bulkload = 3.1.20 Summary: Just-in-time compilation support for pg_bulkload %description llvmjit @@ -99,6 +99,9 @@ rm -rf %{buildroot} rm -rf %{_libdir}/pgxs/src/backend/ %changelog +* Thu Jan 13 2023 - NTT OSS Center 3.1.20-1 
+- Support PostgreSQL 15 +- Update to pg_bulkload 3.1.20 * Mon Oct 11 2021 - Masahiro ikeda 3.1.19-1 - Support PostgreSQL 14 - Update to pg_bulkload 3.1.19 diff --git a/SPECS/pg_bulkload-pg13.spec b/SPECS/pg_bulkload-pg13.spec index b1f0e3d..0df9270 100755 --- a/SPECS/pg_bulkload-pg13.spec +++ b/SPECS/pg_bulkload-pg13.spec @@ -1,5 +1,5 @@ # SPEC file for pg_bulkload on PostgreSQL 13 -# Copyright (C) 2009-2021 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +# Copyright (C) 2009-2023 NIPPON TELEGRAPH AND TELEPHONE CORPORATION %define sname pg_bulkload %define pgmajorversion 13 @@ -10,7 +10,7 @@ Summary: High speed data load utility for PostgreSQL Name: %{sname} -Version: 3.1.19 +Version: 3.1.20 Release: 1%{?dist} License: BSD Group: Applications/Databases @@ -31,7 +31,7 @@ When we load huge amount of data to a database, it is common situation that data %package llvmjit Requires: postgresql13-server, postgresql13-llvmjit -Requires: pg_bulkload = 3.1.19 +Requires: pg_bulkload = 3.1.20 Summary: Just-in-time compilation support for pg_bulkload %description llvmjit @@ -99,6 +99,9 @@ rm -rf %{buildroot} rm -rf %{_libdir}/pgxs/src/backend/ %changelog +* Thu Jan 13 2023 - NTT OSS Center 3.1.20-1 +- Support PostgreSQL 15 +- Update to pg_bulkload 3.1.20 * Mon Oct 11 2021 - Masahiro ikeda 3.1.19-1 - Support PostgreSQL 14 - Update to pg_bulkload 3.1.19 diff --git a/SPECS/pg_bulkload-pg14.spec b/SPECS/pg_bulkload-pg14.spec index e4e3667..c36b98d 100755 --- a/SPECS/pg_bulkload-pg14.spec +++ b/SPECS/pg_bulkload-pg14.spec @@ -1,5 +1,5 @@ # SPEC file for pg_bulkload on PostgreSQL 14 -# Copyright (C) 2009-2021 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +# Copyright (C) 2009-2023 NIPPON TELEGRAPH AND TELEPHONE CORPORATION %define sname pg_bulkload %define pgmajorversion 14 @@ -10,7 +10,7 @@ Summary: High speed data load utility for PostgreSQL Name: %{sname} -Version: 3.1.19 +Version: 3.1.20 Release: 1%{?dist} License: BSD Group: Applications/Databases @@ -31,7 +31,7 @@ When we load huge 
amount of data to a database, it is common situation that data %package llvmjit Requires: postgresql14-server, postgresql14-llvmjit -Requires: pg_bulkload = 3.1.19 +Requires: pg_bulkload = 3.1.20 Summary: Just-in-time compilation support for pg_bulkload %description llvmjit @@ -99,6 +99,9 @@ rm -rf %{buildroot} rm -rf %{_libdir}/pgxs/src/backend/ %changelog +* Thu Jan 13 2023 - NTT OSS Center 3.1.20-1 +- Support PostgreSQL 15 +- Update to pg_bulkload 3.1.20 * Mon Oct 11 2021 - Masahiro ikeda 3.1.19-1 - Support PostgreSQL 14 - Update to pg_bulkload 3.1.19 diff --git a/SPECS/pg_bulkload-pg15.spec b/SPECS/pg_bulkload-pg15.spec new file mode 100755 index 0000000..b24de08 --- /dev/null +++ b/SPECS/pg_bulkload-pg15.spec @@ -0,0 +1,104 @@ +# SPEC file for pg_bulkload on PostgreSQL 15 +# Copyright (C) 2022-2023 NIPPON TELEGRAPH AND TELEPHONE CORPORATION + +%define sname pg_bulkload +%define pgmajorversion 15 + +%define _prefix /usr/pgsql-%{pgmajorversion} +%define _libdir %{_prefix}/lib +%define _bcdir %{_libdir}/bitcode/pg_bulkload + +Summary: High speed data load utility for PostgreSQL +Name: %{sname} +Version: 3.1.20 +Release: 1%{?dist} +License: BSD +Group: Applications/Databases +# You can get the tarball by following: https://github.com/ossc-db/pg_bulkload/archive/%{version}.tar.gz +Source0: %{sname}-%{version}.tar.gz +URL: http://ossc-db.github.io/pg_bulkload/index.html +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-%(%{__id_u} -n) + +BuildRequires: postgresql15-devel, postgresql15 +Requires: postgresql15 + + +%description +pg_bulkload provides high-speed data loading capability to PostgreSQL users. + +When we load huge amount of data to a database, it is common situation that data set to be loaded is valid and consistent. For example, dedicated tools are used to prepare such data, providing data validation in advance. In such cases, we'd like to bypass any overheads within database system to load data as quickly as possible. 
pg_bulkload is developed to help such situations. Therefore, it is not pg_bulkload's goal to provide detailed data validation. Rather, pg_bulkload assumes that loaded data set is validated by separate means. If you're not in such situation, you should use COPY command in PostgreSQL. + + +%package llvmjit +Requires: postgresql15-server, postgresql15-llvmjit +Requires: pg_bulkload = 3.1.20 +Summary: Just-in-time compilation support for pg_bulkload + +%description llvmjit +Just-in-time compilation support for pg_bulkload + +%prep +rm -rf %{_libdir}/pgxs/src/backend/ + +%setup -n %{sname}-%{version} + +%build +USE_PGXS=1 make %{?_smp_mflags} MAJORVERSION=%{pgmajorversion} + +%install +%define pg_contribdir %{_datadir}/contrib +%define pg_extensiondir %{_datadir}/extension + +rm -rf %{buildroot} + +install -d %{buildroot}%{_bindir} +install -d %{buildroot}%{_libdir} +install -d %{buildroot}%{pg_contribdir} +install -d %{buildroot}%{pg_extensiondir} +install -d %{buildroot}%{_bcdir} + +install -m 755 bin/pg_bulkload %{buildroot}%{_bindir}/pg_bulkload +install -m 755 bin/postgresql %{buildroot}%{_bindir}/postgresql +install -m 755 lib/pg_bulkload.so %{buildroot}%{_libdir}/pg_bulkload.so +install -m 644 lib/pg_bulkload.bc %{buildroot}%{_bcdir}/pg_bulkload.bc + +install -m 644 lib/pg_bulkload.sql %{buildroot}%{pg_contribdir}/pg_bulkload.sql +install -m 644 lib/uninstall_pg_bulkload.sql %{buildroot}%{pg_contribdir}/uninstall_pg_bulkload.sql +install -m 644 lib/pg_bulkload.control %{buildroot}%{pg_extensiondir}/pg_bulkload.control +install -m 644 lib/pg_bulkload--1.0.sql %{buildroot}%{pg_extensiondir}/pg_bulkload--1.0.sql +install -m 644 lib/pg_bulkload--unpackaged--1.0.sql %{buildroot}%{pg_extensiondir}/pg_bulkload--unpackaged--1.0.sql + +# sample_*.ctl files are needed for rpm users. +# %{sname}-%{version} is the same path with "%setup -n"'s argument. 
+install -m 644 docs/sample_bin.ctl %{buildroot}%{pg_contribdir}/sample_bin.ctl +install -m 644 docs/sample_csv.ctl %{buildroot}%{pg_contribdir}/sample_csv.ctl + +%files +%defattr(755,root,root,755) +%{_bindir}/pg_bulkload +%{_bindir}/postgresql +%{_libdir}/pg_bulkload.so +%defattr(644,root,root,755) +#%doc README.pg_bulkload +%{pg_contribdir}/pg_bulkload.sql +%{pg_contribdir}/uninstall_pg_bulkload.sql +%{pg_contribdir}/sample_bin.ctl +%{pg_contribdir}/sample_csv.ctl +%{pg_extensiondir}/pg_bulkload.control +%{pg_extensiondir}/pg_bulkload--1.0.sql +%{pg_extensiondir}/pg_bulkload--unpackaged--1.0.sql + +%files llvmjit +%defattr(0755,root,root) +%{_bcdir} +%defattr(0644,root,root) +%{_bcdir}/pg_bulkload.bc + +%clean +rm -rf %{buildroot} +rm -rf %{_libdir}/pgxs/src/backend/ + +%changelog +* Thu Jan 13 2023 - NTT OSS Center 3.1.20-1 +- Support PostgreSQL 15 +- Update to pg_bulkload 3.1.20 diff --git a/SPECS/pg_bulkload-pg96.spec b/SPECS/pg_bulkload-pg96.spec index 038c199..399ffab 100644 --- a/SPECS/pg_bulkload-pg96.spec +++ b/SPECS/pg_bulkload-pg96.spec @@ -1,5 +1,5 @@ # SPEC file for pg_bulkload on PostgreSQL 9.6 -# Copyright (C) 2009-2021 NIPPON TELEGRAPH AND TELEPHONE CORPORATION +# Copyright (C) 2009-2023 NIPPON TELEGRAPH AND TELEPHONE CORPORATION %define sname pg_bulkload %define pgmajorversion 9.6 @@ -9,7 +9,7 @@ Summary: High speed data load utility for PostgreSQL Name: %{sname} -Version: 3.1.19 +Version: 3.1.20 Release: 1%{?dist} License: BSD Group: Applications/Databases @@ -82,6 +82,9 @@ rm -rf %{buildroot} rm -rf %{_libdir}/pgxs/src/backend/ %changelog +* Thu Jan 13 2023 - NTT OSS Center 3.1.20-1 +- Support PostgreSQL 15 +- Update to pg_bulkload 3.1.20 * Mon Oct 11 2021 - Masahiro ikeda 3.1.19-1 - Support PostgreSQL 14 - Update to pg_bulkload 3.1.19 diff --git a/bin/Makefile b/bin/Makefile index e7d03aa..ae6b587 100644 --- a/bin/Makefile +++ b/bin/Makefile @@ -1,7 +1,7 @@ # # pg_bulkload: bin/Makefile # -# Copyright (c) 2007-2021, NIPPON TELEGRAPH AND 
TELEPHONE CORPORATION +# Copyright (c) 2007-2023, NIPPON TELEGRAPH AND TELEPHONE CORPORATION # SRCS = pg_bulkload.c recovery.c pgut/pgut.c pgut/pgut-fe.c pgut/pgut-list.c OBJS = $(SRCS:.c=.o) @@ -64,6 +64,8 @@ sql/init-13.sql: cp sql/init-extension-v2.sql sql/init-13.sql sql/init-14.sql: cp sql/init-extension-v2.sql sql/init-14.sql +sql/init-15.sql: + cp sql/init-extension-v3.sql sql/init-15.sql sql/load_bin.sql: sql/load_bin-$(MAJORVERSION).sql cp sql/load_bin-$(MAJORVERSION).sql sql/load_bin.sql @@ -85,6 +87,8 @@ sql/load_bin-13.sql: cp sql/load_bin-v2.sql sql/load_bin-13.sql sql/load_bin-14.sql: cp sql/load_bin-v2.sql sql/load_bin-14.sql +sql/load_bin-15.sql: + cp sql/load_bin-v2.sql sql/load_bin-15.sql sql/load_csv.sql: sql/load_csv-$(MAJORVERSION).sql cp sql/load_csv-$(MAJORVERSION).sql sql/load_csv.sql @@ -106,6 +110,8 @@ sql/load_csv-13.sql: cp sql/load_csv-v2.sql sql/load_csv-13.sql sql/load_csv-14.sql: cp sql/load_csv-v2.sql sql/load_csv-14.sql +sql/load_csv-15.sql: + cp sql/load_csv-v3.sql sql/load_csv-15.sql sql/load_filter.sql: sql/load_filter-$(MAJORVERSION).sql cp sql/load_filter-$(MAJORVERSION).sql sql/load_filter.sql @@ -127,6 +133,8 @@ sql/load_filter-13.sql: cp sql/load_filter-v3.sql sql/load_filter-13.sql sql/load_filter-14.sql: cp sql/load_filter-v3.sql sql/load_filter-14.sql +sql/load_filter-15.sql: + cp sql/load_filter-v3.sql sql/load_filter-15.sql sql/load_function.sql: sql/load_function-$(MAJORVERSION).sql cp sql/load_function-$(MAJORVERSION).sql sql/load_function.sql @@ -148,6 +156,8 @@ sql/load_function-13.sql: cp sql/load_function-v3.sql sql/load_function-13.sql sql/load_function-14.sql: cp sql/load_function-v3.sql sql/load_function-14.sql +sql/load_function-15.sql: + cp sql/load_function-v3.sql sql/load_function-15.sql sql/write_bin.sql: sql/write_bin-$(MAJORVERSION).sql cp sql/write_bin-$(MAJORVERSION).sql sql/write_bin.sql @@ -169,18 +179,19 @@ sql/write_bin-13.sql: cp sql/write_bin-v2.sql sql/write_bin-13.sql sql/write_bin-14.sql: cp 
sql/write_bin-v2.sql sql/write_bin-14.sql - +sql/write_bin-15.sql: + cp sql/write_bin-v2.sql sql/write_bin-15.sql .PHONY: subclean clean: subclean subclean: - rm -f sql/init.sql sql/init-{9.3,9.4,9.5,9.6,10,11,12,13,14}.sql - rm -f sql/load_bin.sql sql/load_bin-{9.3,9.4,9.5,9.6,10,11,12,13,14}.sql - rm -f sql/load_csv.sql sql/load_csv-{9.3,9.4,9.5,9.6,10,11,12,13,14}.sql - rm -f sql/load_filter.sql sql/load_filter-{9.3,9.4,9.5,9.6,10,11,12,13,14}.sql - rm -f sql/load_function.sql sql/load_function-{9.3,9.4,9.5,9.6,10,11,12,13,14}.sql - rm -f sql/write_bin.sql sql/write_bin-{9.3,9.4,9.5,9.6,10,11,12,13,14}.sql + rm -f sql/init.sql sql/init-{9.3,9.4,9.5,9.6,10,11,12,13,14,15}.sql + rm -f sql/load_bin.sql sql/load_bin-{9.3,9.4,9.5,9.6,10,11,12,13,14,15}.sql + rm -f sql/load_csv.sql sql/load_csv-{9.3,9.4,9.5,9.6,10,11,12,13,14,15}.sql + rm -f sql/load_filter.sql sql/load_filter-{9.3,9.4,9.5,9.6,10,11,12,13,14,15}.sql + rm -f sql/load_function.sql sql/load_function-{9.3,9.4,9.5,9.6,10,11,12,13,14,15}.sql + rm -f sql/write_bin.sql sql/write_bin-{9.3,9.4,9.5,9.6,10,11,12,13,14,15}.sql installcheck: sql/init.sql sql/load_bin.sql sql/load_csv.sql sql/load_function.sql sql/load_filter.sql sql/write_bin.sql diff --git a/bin/data/csv8.ctl b/bin/data/csv8.ctl new file mode 100644 index 0000000..20e7fa3 --- /dev/null +++ b/bin/data/csv8.ctl @@ -0,0 +1,4 @@ +TABLE=unique_tbl1 +WRITER=DIRECT +TYPE=CSV + diff --git a/bin/data/csv9.ctl b/bin/data/csv9.ctl new file mode 100644 index 0000000..4494210 --- /dev/null +++ b/bin/data/csv9.ctl @@ -0,0 +1,3 @@ +TABLE=unique_tbl2 +WRITER=DIRECT +TYPE=CSV diff --git a/bin/data/data10.csv b/bin/data/data10.csv new file mode 100644 index 0000000..6e95de6 --- /dev/null +++ b/bin/data/data10.csv @@ -0,0 +1,4 @@ +5,"five_" +6,"six_" +,"seven_" +,"eight_" diff --git a/bin/data/data9.csv b/bin/data/data9.csv new file mode 100644 index 0000000..e4b1a42 --- /dev/null +++ b/bin/data/data9.csv @@ -0,0 +1,4 @@ +1,"one" +2,"two" +,"three" +,"four" diff 
--git a/bin/expected/init_3.out b/bin/expected/init_3.out new file mode 100644 index 0000000..3cec148 --- /dev/null +++ b/bin/expected/init_3.out @@ -0,0 +1,137 @@ +SET client_min_messages = warning; +\set ECHO none +RESET client_min_messages; +CREATE TABLE customer ( + c_id int4 NOT NULL, + c_d_id int2 NOT NULL, + c_w_id int4 NOT NULL, + c_first varchar(16) NOT NULL, + c_middle char(2) NOT NULL, + c_last varchar(16) NOT NULL, + c_street_1 varchar(20) NOT NULL, + c_street_2 varchar(20) NOT NULL, + c_city varchar(20) NOT NULL, + c_state char(2) NOT NULL, + c_zip char(9) NOT NULL, + c_phone char(16) NOT NULL, + c_since timestamp NOT NULL, + c_credit char(2) NOT NULL, + c_credit_lim numeric(16,4) NOT NULL, + c_discount numeric(16,4) NOT NULL, + c_balance numeric(16,4) NOT NULL, + c_ytd_payment numeric(16,4) NOT NULL, + c_payment_cnt float4 NOT NULL, + c_delivery_cnt float8 NOT NULL, + c_data varchar(500) NOT NULL +) WITH (fillfactor=20); +ALTER TABLE customer ADD PRIMARY KEY (c_id, c_w_id, c_d_id); +CREATE INDEX idx_btree ON customer USING btree (c_d_id, c_last); +CREATE INDEX idx_btree_fn ON customer USING btree ((abs(c_w_id) + c_d_id)); +CREATE INDEX idx_hash ON customer USING hash (c_d_id); +CREATE INDEX idx_hash_fn ON customer USING hash ((abs(c_w_id) + c_d_id)); +--------------------------------------------------------------------------- +-- load_csv test csv9 +CREATE TABLE unique_tbl1 (i int, t text); +-- default is NULLS DISTINCT +CREATE UNIQUE INDEX unique_idx1 ON unique_tbl1 (i) NULLS DISTINCT; +CREATE TABLE unique_tbl2 (i int, t text); +CREATE UNIQUE INDEX unique_idx2 ON unique_tbl2 (i) NULLS NOT DISTINCT; +--------------------------------------------------------------------------- +-- load_check test +CREATE TABLE master ( + id int PRIMARY KEY, + str text +); +CREATE TABLE target ( + id int PRIMARY KEY, + str text CHECK(length(str) < 10) NOT NULL UNIQUE, +master int REFERENCES master (id) +); +CREATE TABLE target_like ( + id int, + str text, + master int 
+); +CREATE FUNCTION f_t_target() RETURNS trigger AS +$$ +BEGIN + INSERT INTO target_like VALUES(new.*); + RETURN NULL; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER t_target + AFTER INSERT + ON target + FOR EACH ROW +EXECUTE PROCEDURE f_t_target(); +INSERT INTO master VALUES(1, 'aaa'); +--------------------------------------------------------------------------- +-- load_filter test +-------------------------------- +-- error case +-------------------------------- +-- VARIACIC function +CREATE FUNCTION variadic_f(int, VARIADIC text[]) RETURNS target AS +$$ + SELECT * FROM target; +$$ LANGUAGE SQL; +-- function overloading +CREATE FUNCTION overload_f() RETURNS target AS +$$ + SELECT * FROM target; +$$ LANGUAGE SQL; +CREATE FUNCTION overload_f(int4) RETURNS target AS +$$ + SELECT * FROM target; +$$ LANGUAGE SQL; +-- returns record using OUT paramator +CREATE FUNCTION outarg_f(OUT int4, OUT int4, OUT int4) RETURNS record AS +$$ + SELECT 1, 2, 3; +$$ LANGUAGE SQL; +-- returns setof function +CREATE FUNCTION setof_f() RETURNS SETOF target AS +$$ + SELECT * FROM target; +$$ LANGUAGE SQL; +-- returns data type mismatch +CREATE FUNCTION type_mismatch_f() RETURNS master AS +$$ + SELECT * FROM master LIMIT 1; +$$ LANGUAGE SQL; +-- returns record type mismatch +CREATE FUNCTION rec_mismatch_f() RETURNS record AS +$$ + SELECT 1, 'rec_mismatch_f', 1; +$$ LANGUAGE SQL; +-------------------------------- +-- normal case +-------------------------------- +-- no argument function +CREATE FUNCTION no_arg_f() RETURNS target AS +$$ + SELECT 1, 'call no_arg_f'::text, 3; +$$ LANGUAGE SQL; +--------------------------------------------------------------------------- +-- load_encoding test +CREATE DATABASE contrib_regression_sqlascii TEMPLATE template0 ENCODING 'sql_ascii'; +ALTER DATABASE contrib_regression_sqlascii SET lc_messages TO 'C'; +ALTER DATABASE contrib_regression_sqlascii SET lc_monetary TO 'C'; +ALTER DATABASE contrib_regression_sqlascii SET lc_numeric TO 'C'; +ALTER DATABASE 
contrib_regression_sqlascii SET lc_time TO 'C'; +ALTER DATABASE contrib_regression_sqlascii SET timezone_abbreviations TO 'Default'; +CREATE DATABASE contrib_regression_utf8 TEMPLATE template0 ENCODING 'utf8'; +ALTER DATABASE contrib_regression_utf8 SET lc_messages TO 'C'; +ALTER DATABASE contrib_regression_utf8 SET lc_monetary TO 'C'; +ALTER DATABASE contrib_regression_utf8 SET lc_numeric TO 'C'; +ALTER DATABASE contrib_regression_utf8 SET lc_time TO 'C'; +ALTER DATABASE contrib_regression_utf8 SET timezone_abbreviations TO 'Default'; +\connect contrib_regression_sqlascii +CREATE TABLE target (id int, str text, master int); +CREATE INDEX i_target ON target (id); +\set ECHO none +\connect contrib_regression_utf8 +CREATE TABLE target (id int, str text, master int); +CREATE INDEX i_target ON target (id); +\set ECHO none +\! rm -f results/*.log results/*.prs results/*.dup results/*.bin results/*.ctl diff --git a/bin/expected/load_csv_2.out b/bin/expected/load_csv_2.out new file mode 100644 index 0000000..44297f9 --- /dev/null +++ b/bin/expected/load_csv_2.out @@ -0,0 +1,825 @@ +SET extra_float_digits = 0; +\! pg_bulkload -d contrib_regression data/csv1.ctl -o"delimiter=|" -i data/data1.csv -l results/csv1.log -P results/csv1.prs -u results/csv1.dup -o "PARSE_ERRORS=3" -o "VERBOSE=YES" -o "TRUNCATE=TRUE" +NOTICE: BULK LOAD START +WARNING: Parse error Record 1: Input Record 3: Rejected - column 8. null value in column "c_street_2" violates not-null constraint +WARNING: Parse error Record 2: Input Record 4: Rejected - column 15. invalid input syntax for type numeric: "BAD-DATA" +WARNING: Parse error Record 3: Input Record 6: Rejected - column 8. null value in column "c_street_2" violates not-null constraint +WARNING: Parse error Record 4: Input Record 7: Rejected - column 7. null value in column "c_street_1" violates not-null constraint +WARNING: Maximum parse error count exceeded - 4 error(s) found in input file +NOTICE: BULK LOAD END + 2 Rows skipped. 
+ 3 Rows successfully loaded. + 4 Rows not loaded due to parse errors. + 0 Rows not loaded due to duplicate errors. + 0 Rows replaced with new rows. +WARNING: some rows were not loaded due to errors. +\! awk -f data/adjust.awk results/csv1.log + +pg_bulkload on + +INPUT = .../data1.csv +PARSE_BADFILE = .../csv1.prs +LOGFILE = .../csv1.log +LIMIT = 5 +PARSE_ERRORS = 3 +CHECK_CONSTRAINTS = NO +TYPE = CSV +SKIP = 2 +DELIMITER = | +QUOTE = "\"" +ESCAPE = "\"" +NULL = "nullvalue " +OUTPUT = public.customer +MULTI_PROCESS = YES +VERBOSE = YES +WRITER = DIRECT +DUPLICATE_BADFILE = .../csv1.dup +DUPLICATE_ERRORS = 0 +ON_DUPLICATE_KEEP = NEW +TRUNCATE = YES + +Parse error Record 1: Input Record 3: Rejected - column 8. null value in column "c_street_2" violates not-null constraint +Parse error Record 2: Input Record 4: Rejected - column 15. invalid input syntax for type numeric: "BAD-DATA" +Parse error Record 3: Input Record 6: Rejected - column 8. null value in column "c_street_2" violates not-null constraint +Parse error Record 4: Input Record 7: Rejected - column 7. null value in column "c_street_1" violates not-null constraint +Maximum parse error count exceeded - 4 error(s) found in input file + + 2 Rows skipped. + 3 Rows successfully loaded. + 4 Rows not loaded due to parse errors. + 0 Rows not loaded due to duplicate errors. + 0 Rows replaced with new rows. + +Run began on +Run ended on + +CPU