package/timescaledb: bump to version 2.0.0

Add patches needed for compatibility with PostgreSQL 13, which are
still under review upstream.

Debug builds (BR2_ENABLE_DEBUG=y) fail because of warnings, so
disable WARNINGS_AS_ERRORS.

Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
Maxim Kochetkov 2021-01-12 09:10:25 +03:00 committed by Thomas Petazzoni
parent 4b6202f721
commit 5cff0c8a2d
20 changed files with 1809 additions and 2 deletions


@ -0,0 +1,85 @@
From 64c17f08c4a78cf6063651632d95906ab3bcb41b Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Sat, 19 Sep 2020 17:43:52 +0200
Subject: [PATCH] Allow building against PG13 source
This patch adjusts the version checks to allow building against
postgres 13. It also adjusts the cmake version check to allow
building against RC and devel versions.
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/commit/21dc9b9c1a307e46eda5fa621488ebeb6ee9636c.patch
---
CMakeLists.txt | 18 ++++++++----------
src/compat.h | 7 ++++++-
2 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8288b444..ce6e9d48 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -221,27 +221,25 @@ execute_process(
OUTPUT_VARIABLE PG_VERSION_STRING
OUTPUT_STRIP_TRAILING_WHITESPACE)
-if (NOT ${PG_VERSION_STRING} MATCHES "^PostgreSQL[ ]+([0-9]+)\\.([0-9]+)(\\.([0-9]+))*")
+if (NOT ${PG_VERSION_STRING} MATCHES "^PostgreSQL[ ]+([0-9]+)(\\.([0-9]+)|devel|rc[0-9]+)")
message(FATAL_ERROR "Could not parse PostgreSQL version ${PG_VERSION_STRING}")
endif ()
set(PG_VERSION_MAJOR ${CMAKE_MATCH_1})
-set(PG_VERSION_MINOR ${CMAKE_MATCH_2})
-set(PG_VERSION_PATCH ${CMAKE_MATCH_4})
-
-if (NOT ${PG_VERSION_PATCH} OR ${PG_VERSION_PATCH} EQUAL "")
- set(PG_VERSION "${PG_VERSION_MAJOR}.${PG_VERSION_MINOR}")
-else ()
- set(PG_VERSION "${PG_VERSION_MAJOR}.${PG_VERSION_MINOR}.${PG_VERSION_PATCH}")
+if (${CMAKE_MATCH_COUNT} GREATER "2" )
+ set(PG_VERSION_MINOR ${CMAKE_MATCH_3})
+else()
+ set(PG_VERSION_MINOR 0)
endif ()
+set(PG_VERSION "${PG_VERSION_MAJOR}.${PG_VERSION_MINOR}")
message(STATUS "Compiling against PostgreSQL version ${PG_VERSION}")
# Ensure that PostgreSQL version is supported and consistent
# with src/compat.h version check
if ((${PG_VERSION_MAJOR} LESS "11") OR
- (${PG_VERSION_MAJOR} GREATER "12"))
- message(FATAL_ERROR "TimescaleDB only supports PostgreSQL 11 and 12")
+ (${PG_VERSION_MAJOR} GREATER "13"))
+ message(FATAL_ERROR "TimescaleDB only supports PostgreSQL 11, 12 and 13")
endif()
# Get PostgreSQL configuration from pg_config
diff --git a/src/compat.h b/src/compat.h
index 475217c6..267bb09a 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -24,15 +24,20 @@
#define is_supported_pg_version_11(version) ((version >= 110000) && (version < 120000))
#define is_supported_pg_version_12(version) ((version >= 120000) && (version < 130000))
+#define is_supported_pg_version_13(version) ((version >= 130000) && (version < 140000))
#define is_supported_pg_version(version) \
- (is_supported_pg_version_11(version) || is_supported_pg_version_12(version))
+ (is_supported_pg_version_11(version) || is_supported_pg_version_12(version) || \
+ is_supported_pg_version_13(version))
#define PG11 is_supported_pg_version_11(PG_VERSION_NUM)
#define PG12 is_supported_pg_version_12(PG_VERSION_NUM)
+#define PG13 is_supported_pg_version_13(PG_VERSION_NUM)
#define PG12_LT PG11
#define PG12_GE !(PG12_LT)
+#define PG13_LT !(PG13)
+#define PG13_GE PG13
#if !(is_supported_pg_version(PG_VERSION_NUM))
#error "Unsupported PostgreSQL version"
--
2.29.2
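
As a minimal illustration (not taken from the patch), extension code can branch on the version macros that src/compat.h defines above; the helper below is hypothetical:

    #include "compat.h"

    static void
    version_specific_setup(void)   /* hypothetical helper, for illustration only */
    {
    #if PG13_GE
    	/* code paths that rely on PostgreSQL 13 APIs */
    #elif PG12
    	/* PostgreSQL 12 specific handling */
    #else
    	/* PostgreSQL 11 fallback */
    #endif
    }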


@ -0,0 +1,73 @@
From b020863a3ea18488448bc09234a4e3b26b68058d Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Sat, 19 Sep 2020 19:17:38 +0200
Subject: [PATCH] Add compatibilty wrapper functions for base64
encoding/decoding
PG13 adds a destination length 4th argument to pg_b64_decode and
pg_b64_encode functions so this patch adds a macro that translates
to the 3 argument and 4 argument calls depending on postgres version.
This patch also adds checking of return values for those functions.
https://github.com/postgres/postgres/commit/cfc40d384a
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/commit/002510cb01e1d09767a526560f89c1857c1738a2.patch
---
src/compat.h | 11 +++++++++++
tsl/src/compression/compression.c | 12 ++++++++++--
2 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/src/compat.h b/src/compat.h
index 267bb09a..d84f8754 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -347,4 +347,15 @@ get_vacuum_options(const VacuumStmt *stmt)
#endif
}
+/* PG13 added a dstlen parameter to pg_b64_decode and pg_b64_encode */
+#if PG13_LT
+#define pg_b64_encode_compat(src, srclen, dst, dstlen) pg_b64_encode((src), (srclen), (dst))
+#define pg_b64_decode_compat(src, srclen, dst, dstlen) pg_b64_decode((src), (srclen), (dst))
+#else
+#define pg_b64_encode_compat(src, srclen, dst, dstlen) \
+ pg_b64_encode((src), (srclen), (dst), (dstlen))
+#define pg_b64_decode_compat(src, srclen, dst, dstlen) \
+ pg_b64_decode((src), (srclen), (dst), (dstlen))
+#endif
+
#endif /* TIMESCALEDB_COMPAT_H */
diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c
index 470ec4b9..169f74e9 100644
--- a/tsl/src/compression/compression.c
+++ b/tsl/src/compression/compression.c
@@ -1424,7 +1424,11 @@ tsl_compressed_data_in(PG_FUNCTION_ARGS)
decoded_len = pg_b64_dec_len(input_len);
decoded = palloc(decoded_len + 1);
- decoded_len = pg_b64_decode(input, input_len, decoded);
+ decoded_len = pg_b64_decode_compat(input, input_len, decoded, decoded_len);
+
+ if (decoded_len < 0)
+ elog(ERROR, "could not decode base64-encoded compressed data");
+
decoded[decoded_len] = '\0';
data = (StringInfoData){
.data = decoded,
@@ -1446,7 +1450,11 @@ tsl_compressed_data_out(PG_FUNCTION_ARGS)
const char *raw_data = VARDATA(bytes);
int encoded_len = pg_b64_enc_len(raw_len);
char *encoded = palloc(encoded_len + 1);
- encoded_len = pg_b64_encode(raw_data, raw_len, encoded);
+ encoded_len = pg_b64_encode_compat(raw_data, raw_len, encoded, encoded_len);
+
+ if (encoded_len < 0)
+ elog(ERROR, "could not base64-encode compressed data");
+
encoded[encoded_len] = '\0';
PG_RETURN_CSTRING(encoded);
--
2.29.2
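
A minimal usage sketch of the pg_b64_decode_compat wrapper, mirroring the tsl_compressed_data_in() hunk above; `input` is assumed to be a NUL-terminated base64 string and the fragment runs inside a backend function:

    int input_len = strlen(input);
    int decoded_len = pg_b64_dec_len(input_len);
    char *decoded = palloc(decoded_len + 1);

    /* expands to the 3- or 4-argument pg_b64_decode depending on PG version */
    decoded_len = pg_b64_decode_compat(input, input_len, decoded, decoded_len);
    if (decoded_len < 0)
    	elog(ERROR, "could not decode base64-encoded compressed data");
    decoded[decoded_len] = '\0';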


@ -0,0 +1,149 @@
From e68915d0aad6760c1feebb04219af9bfbccd71d7 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Sat, 26 Sep 2020 13:58:17 +0200
Subject: [PATCH] Add missing utils/acl.h includes
PG13 removed acl.h from objectaddress.h so the places that need it
need to now include it explicitly if they got it indirectly this
way previously.
https://github.com/postgres/postgres/commit/3c173a53a8
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/commit/be8dd086a089f7419824bbc3b77182b95d0bbba7.patch
---
src/bgw/job.c | 1 +
src/bgw/scheduler.c | 1 +
src/chunk.c | 1 +
src/chunk_adaptive.c | 1 +
src/extension_utils.c | 1 +
src/license_guc.c | 1 +
src/process_utility.c | 1 +
tsl/src/bgw_policy/continuous_aggregate_api.c | 1 +
tsl/src/bgw_policy/job_api.c | 1 +
tsl/src/continuous_aggs/refresh.c | 1 +
10 files changed, 10 insertions(+)
diff --git a/src/bgw/job.c b/src/bgw/job.c
index 313caa94..3ae8c9f7 100644
--- a/src/bgw/job.c
+++ b/src/bgw/job.c
@@ -19,6 +19,7 @@
#include <storage/proc.h>
#include <storage/procarray.h>
#include <storage/sinvaladt.h>
+#include <utils/acl.h>
#include <utils/elog.h>
#include <utils/jsonb.h>
diff --git a/src/bgw/scheduler.c b/src/bgw/scheduler.c
index 2d024317..7a7e360c 100644
--- a/src/bgw/scheduler.c
+++ b/src/bgw/scheduler.c
@@ -19,6 +19,7 @@
#include <storage/lwlock.h>
#include <storage/proc.h>
#include <storage/shmem.h>
+#include <utils/acl.h>
#include <utils/inval.h>
#include <utils/jsonb.h>
#include <utils/timestamp.h>
diff --git a/src/chunk.c b/src/chunk.c
index c2671885..e5ffc3fa 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -29,6 +29,7 @@
#include <fmgr.h>
#include <utils/datum.h>
#include <catalog/pg_type.h>
+#include <utils/acl.h>
#include <utils/timestamp.h>
#include <nodes/execnodes.h>
#include <executor/executor.h>
diff --git a/src/chunk_adaptive.c b/src/chunk_adaptive.c
index 470248ed..88e2910b 100644
--- a/src/chunk_adaptive.c
+++ b/src/chunk_adaptive.c
@@ -7,6 +7,7 @@
#include <postgres.h>
#include <catalog/pg_proc.h>
#include <catalog/pg_type.h>
+#include <utils/acl.h>
#include <utils/syscache.h>
#include <utils/lsyscache.h>
#include <utils/guc.h>
diff --git a/src/extension_utils.c b/src/extension_utils.c
index 3fac177b..3d72c5f0 100644
--- a/src/extension_utils.c
+++ b/src/extension_utils.c
@@ -18,6 +18,7 @@
#include <access/relscan.h>
#include <catalog/pg_extension.h>
#include <catalog/pg_authid.h>
+#include <utils/acl.h>
#include <utils/fmgroids.h>
#include <utils/builtins.h>
#include <utils/rel.h>
diff --git a/src/license_guc.c b/src/license_guc.c
index 32629ca9..baa49be5 100644
--- a/src/license_guc.c
+++ b/src/license_guc.c
@@ -5,6 +5,7 @@
*/
#include <postgres.h>
#include <fmgr.h>
+#include <utils/acl.h>
#include <utils/builtins.h>
#include <utils/guc.h>
#include <miscadmin.h>
diff --git a/src/process_utility.c b/src/process_utility.c
index 3576cb97..8f915113 100644
--- a/src/process_utility.c
+++ b/src/process_utility.c
@@ -22,6 +22,7 @@
#include <access/htup_details.h>
#include <access/xact.h>
#include <storage/lmgr.h>
+#include <utils/acl.h>
#include <utils/rel.h>
#include <utils/inval.h>
#include <utils/lsyscache.h>
diff --git a/tsl/src/bgw_policy/continuous_aggregate_api.c b/tsl/src/bgw_policy/continuous_aggregate_api.c
index 1c18d2e0..596f3bf7 100644
--- a/tsl/src/bgw_policy/continuous_aggregate_api.c
+++ b/tsl/src/bgw_policy/continuous_aggregate_api.c
@@ -7,6 +7,7 @@
#include <postgres.h>
#include <miscadmin.h>
#include <parser/parse_coerce.h>
+#include <utils/acl.h>
#include <jsonb_utils.h>
#include <utils/builtins.h>
diff --git a/tsl/src/bgw_policy/job_api.c b/tsl/src/bgw_policy/job_api.c
index e1c24841..536ce692 100644
--- a/tsl/src/bgw_policy/job_api.c
+++ b/tsl/src/bgw_policy/job_api.c
@@ -7,6 +7,7 @@
#include <postgres.h>
#include <funcapi.h>
#include <miscadmin.h>
+#include <utils/acl.h>
#include <utils/builtins.h>
#include <bgw/job.h>
diff --git a/tsl/src/continuous_aggs/refresh.c b/tsl/src/continuous_aggs/refresh.c
index 71d5075f..46f83127 100644
--- a/tsl/src/continuous_aggs/refresh.c
+++ b/tsl/src/continuous_aggs/refresh.c
@@ -4,6 +4,7 @@
* LICENSE-TIMESCALE for a copy of the license.
*/
#include <postgres.h>
+#include <utils/acl.h>
#include <utils/lsyscache.h>
#include <utils/fmgrprotos.h>
#include <utils/snapmgr.h>
--
2.29.2


@ -0,0 +1,473 @@
From 259ff80eb29c2c70e6afb77d266ce49cccbef223 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Sat, 19 Sep 2020 22:20:34 +0200
Subject: [PATCH] Add support for PG13 List implementation
PG13 changes the List implementation from a linked list to an array
while most of the API functions did not change a few them have slightly
different signature in PG13, additionally the list_make5 functions
got removed.
https://github.com/postgres/postgres/commit/1cff1b95ab
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/commit/b1a9c3b7b7d44ee78456931292655d52c252930d.patch
---
.clang-format | 1 +
src/bgw/scheduler.c | 12 ++++++------
src/cache.c | 10 ++++++++--
src/chunk_append/chunk_append.c | 2 +-
src/chunk_append/exec.c | 4 ++--
src/compat.h | 16 ++++++++++++++++
src/import/planner.c | 2 +-
src/plan_agg_bookend.c | 7 ++++---
src/plan_expand_hypertable.c | 13 +++++++++----
test/src/bgw/test_job_refresh.c | 3 ++-
tsl/src/continuous_aggs/create.c | 2 +-
tsl/src/debug.c | 15 ++++++++++-----
tsl/src/fdw/deparse.c | 6 +++---
.../nodes/decompress_chunk/decompress_chunk.c | 6 +++---
tsl/src/nodes/decompress_chunk/exec.c | 3 ++-
tsl/src/nodes/gapfill/planner.c | 11 +++++++----
16 files changed, 76 insertions(+), 37 deletions(-)
diff --git a/.clang-format b/.clang-format
index 5bb275cd..9aac7ef4 100644
--- a/.clang-format
+++ b/.clang-format
@@ -60,6 +60,7 @@ ForEachMacros:
- foreach
- forboth
- for_each_cell
+ - for_each_cell_compat
- for_both_cell
- forthree
IncludeBlocks: Preserve # separate include blocks will not be merged
diff --git a/src/bgw/scheduler.c b/src/bgw/scheduler.c
index 7a7e360c..2630ff9f 100644
--- a/src/bgw/scheduler.c
+++ b/src/bgw/scheduler.c
@@ -456,7 +456,7 @@ ts_update_scheduled_jobs_list(List *cur_jobs_list, MemoryContext mctx)
*/
terminate_and_cleanup_job(cur_sjob);
- cur_ptr = lnext(cur_ptr);
+ cur_ptr = lnext_compat(cur_jobs_list, cur_ptr);
continue;
}
if (cur_sjob->job.fd.id == new_sjob->job.fd.id)
@@ -472,15 +472,15 @@ ts_update_scheduled_jobs_list(List *cur_jobs_list, MemoryContext mctx)
if (cur_sjob->state == JOB_STATE_SCHEDULED)
scheduled_bgw_job_transition_state_to(new_sjob, JOB_STATE_SCHEDULED);
- cur_ptr = lnext(cur_ptr);
- new_ptr = lnext(new_ptr);
+ cur_ptr = lnext_compat(cur_jobs_list, cur_ptr);
+ new_ptr = lnext_compat(new_jobs, new_ptr);
}
else if (cur_sjob->job.fd.id > new_sjob->job.fd.id)
{
scheduled_bgw_job_transition_state_to(new_sjob, JOB_STATE_SCHEDULED);
/* Advance the new_job list until we catch up to cur_list */
- new_ptr = lnext(new_ptr);
+ new_ptr = lnext_compat(new_jobs, new_ptr);
}
}
@@ -489,7 +489,7 @@ ts_update_scheduled_jobs_list(List *cur_jobs_list, MemoryContext mctx)
{
ListCell *ptr;
- for_each_cell (ptr, cur_ptr)
+ for_each_cell_compat (ptr, cur_jobs_list, cur_ptr)
terminate_and_cleanup_job(lfirst(ptr));
}
@@ -498,7 +498,7 @@ ts_update_scheduled_jobs_list(List *cur_jobs_list, MemoryContext mctx)
/* Then there are more new jobs. Initialize all of them. */
ListCell *ptr;
- for_each_cell (ptr, new_ptr)
+ for_each_cell_compat (ptr, new_jobs, new_ptr)
scheduled_bgw_job_transition_state_to(lfirst(ptr), JOB_STATE_SCHEDULED);
}
diff --git a/src/cache.c b/src/cache.c
index cc6b2d07..3b53485a 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -7,6 +7,7 @@
#include <access/xact.h>
#include "cache.h"
+#include "compat.h"
/* List of pinned caches. A cache occurs once in this list for every pin
* taken */
@@ -105,7 +106,10 @@ ts_cache_pin(Cache *cache)
static void
remove_pin(Cache *cache, SubTransactionId subtxnid)
{
- ListCell *lc, *prev = NULL;
+ ListCell *lc;
+#if PG13_LT
+ ListCell *prev = NULL;
+#endif
foreach (lc, pinned_caches)
{
@@ -113,12 +117,14 @@ remove_pin(Cache *cache, SubTransactionId subtxnid)
if (cp->cache == cache && cp->subtxnid == subtxnid)
{
- pinned_caches = list_delete_cell(pinned_caches, lc, prev);
+ pinned_caches = list_delete_cell_compat(pinned_caches, lc, prev);
pfree(cp);
return;
}
+#if PG13_LT
prev = lc;
+#endif
}
/* should never reach here: there should always be a pin to remove */
diff --git a/src/chunk_append/chunk_append.c b/src/chunk_append/chunk_append.c
index fb1c87ff..ed91ff39 100644
--- a/src/chunk_append/chunk_append.c
+++ b/src/chunk_append/chunk_append.c
@@ -209,7 +209,7 @@ ts_chunk_append_path_create(PlannerInfo *root, RelOptInfo *rel, Hypertable *ht,
if (is_not_pruned)
{
merge_childs = lappend(merge_childs, child);
- flat = lnext(flat);
+ flat = lnext_compat(children, flat);
if (flat == NULL)
break;
}
diff --git a/src/chunk_append/exec.c b/src/chunk_append/exec.c
index 8f4dd5d6..84f79e23 100644
--- a/src/chunk_append/exec.c
+++ b/src/chunk_append/exec.c
@@ -344,8 +344,8 @@ initialize_runtime_exclusion(ChunkAppendState *state)
state->runtime_number_exclusions++;
}
- lc_clauses = lnext(lc_clauses);
- lc_constraints = lnext(lc_constraints);
+ lc_clauses = lnext_compat(state->filtered_ri_clauses, lc_clauses);
+ lc_constraints = lnext_compat(state->filtered_constraints, lc_constraints);
}
state->runtime_initialized = true;
diff --git a/src/compat.h b/src/compat.h
index d84f8754..51c1c181 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -358,4 +358,20 @@ get_vacuum_options(const VacuumStmt *stmt)
pg_b64_decode((src), (srclen), (dst), (dstlen))
#endif
+/* PG13 changes the List implementation from a linked list to an array
+ * while most of the API functions did not change a few them have slightly
+ * different signature in PG13, additionally the list_make5 functions
+ * got removed. */
+#if PG13_LT
+#define lnext_compat(l, lc) lnext((lc))
+#define list_delete_cell_compat(l, lc, prev) list_delete_cell((l), (lc), (prev))
+#define for_each_cell_compat(cell, list, initcell) for_each_cell ((cell), (initcell))
+#else
+#define lnext_compat(l, lc) lnext((l), (lc))
+#define list_delete_cell_compat(l, lc, prev) list_delete_cell((l), (lc))
+#define list_make5(x1, x2, x3, x4, x5) lappend(list_make4(x1, x2, x3, x4), x5)
+#define list_make5_oid(x1, x2, x3, x4, x5) lappend_oid(list_make4_oid(x1, x2, x3, x4), x5)
+#define for_each_cell_compat(cell, list, initcell) for_each_cell ((cell), (list), (initcell))
+#endif
+
#endif /* TIMESCALEDB_COMPAT_H */
diff --git a/src/import/planner.c b/src/import/planner.c
index 31a4889d..b907390d 100644
--- a/src/import/planner.c
+++ b/src/import/planner.c
@@ -196,7 +196,7 @@ ts_make_partial_grouping_target(struct PlannerInfo *root, PathTarget *grouping_t
struct List *non_group_cols;
struct List *non_group_exprs;
int i;
- struct ListCell *lc;
+ ListCell *lc;
partial_target = create_empty_pathtarget();
non_group_cols = NIL;
diff --git a/src/plan_agg_bookend.c b/src/plan_agg_bookend.c
index d4d06f5b..5394cf5d 100644
--- a/src/plan_agg_bookend.c
+++ b/src/plan_agg_bookend.c
@@ -696,13 +696,14 @@ build_first_last_path(PlannerInfo *root, FirstLastAggInfo *fl_info, Oid eqop, Oi
if (app->parent_reloid == rte->relid)
{
subroot->append_rel_list =
- list_delete_cell(subroot->append_rel_list, next, prev);
- next = prev != NULL ? prev->next : list_head(subroot->append_rel_list);
+ list_delete_cell_compat(subroot->append_rel_list, next, prev);
+ next = prev != NULL ? lnext_compat(subroot->append_rel_list, next) :
+ list_head(subroot->append_rel_list);
}
else
{
prev = next;
- next = next->next;
+ next = lnext_compat(subroot->append_rel_list, next);
}
}
}
diff --git a/src/plan_expand_hypertable.c b/src/plan_expand_hypertable.c
index 37282ce4..2b99c93b 100644
--- a/src/plan_expand_hypertable.c
+++ b/src/plan_expand_hypertable.c
@@ -581,7 +581,8 @@ process_quals(Node *quals, CollectQualCtx *ctx, bool is_outer_join)
ListCell *prev pg_attribute_unused() = NULL;
List *additional_quals = NIL;
- for (lc = list_head((List *) quals); lc != NULL; prev = lc, lc = lnext(lc))
+ for (lc = list_head((List *) quals); lc != NULL;
+ prev = lc, lc = lnext_compat((List *) quals, lc))
{
Expr *qual = lfirst(lc);
Relids relids = pull_varnos((Node *) qual);
@@ -611,7 +612,7 @@ process_quals(Node *quals, CollectQualCtx *ctx, bool is_outer_join)
* is called, so we can remove the functions from that directly
*/
#if PG12_LT
- quals = (Node *) list_delete_cell((List *) quals, lc, prev);
+ quals = (Node *) list_delete_cell_compat((List *) quals, lc, prev);
#endif
return quals;
}
@@ -663,7 +664,9 @@ process_quals(Node *quals, CollectQualCtx *ctx, bool is_outer_join)
static List *
remove_exclusion_fns(List *restrictinfo)
{
+#if PG13_LT
ListCell *prev = NULL;
+#endif
ListCell *lc = list_head(restrictinfo);
while (lc != NULL)
@@ -682,11 +685,13 @@ remove_exclusion_fns(List *restrictinfo)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("first parameter for chunks_in function needs to be record")));
- restrictinfo = list_delete_cell((List *) restrictinfo, lc, prev);
+ restrictinfo = list_delete_cell_compat((List *) restrictinfo, lc, prev);
return restrictinfo;
}
+#if PG13_LT
prev = lc;
- lc = lnext(lc);
+#endif
+ lc = lnext_compat(restrictinfo, lc);
}
return restrictinfo;
}
diff --git a/test/src/bgw/test_job_refresh.c b/test/src/bgw/test_job_refresh.c
index 51a3b0d7..d51415d4 100644
--- a/test/src/bgw/test_job_refresh.c
+++ b/test/src/bgw/test_job_refresh.c
@@ -13,6 +13,7 @@
#include <access/htup_details.h>
#include <utils/memutils.h>
+#include "compat.h"
#include "export.h"
#include "bgw/scheduler.h"
@@ -70,7 +71,7 @@ ts_test_job_refresh(PG_FUNCTION_ARGS)
memset(nulls, 0, sizeof(*nulls) * funcctx->tuple_desc->natts);
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
- funcctx->user_fctx = lnext(lc);
+ funcctx->user_fctx = lnext_compat(cur_scheduled_jobs, lc);
SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
}
diff --git a/tsl/src/continuous_aggs/create.c b/tsl/src/continuous_aggs/create.c
index bdf4470a..f767dabb 100644
--- a/tsl/src/continuous_aggs/create.c
+++ b/tsl/src/continuous_aggs/create.c
@@ -1566,7 +1566,7 @@ fixup_userview_query_tlist(Query *userquery, List *tlist_aliases)
if (tle->resjunk)
continue;
tle->resname = pstrdup(strVal(lfirst(alist_item)));
- alist_item = lnext(alist_item);
+ alist_item = lnext_compat(tlist_aliases, alist_item);
if (alist_item == NULL)
break; /* done assigning aliases */
}
diff --git a/tsl/src/debug.c b/tsl/src/debug.c
index 697bfc0b..023097ee 100644
--- a/tsl/src/debug.c
+++ b/tsl/src/debug.c
@@ -168,7 +168,7 @@ append_func_expr(StringInfo buf, const Node *expr, const List *rtable)
foreach (l, e->args)
{
append_expr(buf, lfirst(l), rtable);
- if (lnext(l))
+ if (lnext_compat(e->args, l))
appendStringInfoString(buf, ", ");
}
appendStringInfoChar(buf, ')');
@@ -217,7 +217,7 @@ append_restrict_clauses(StringInfo buf, PlannerInfo *root, List *clauses)
RestrictInfo *c = lfirst(cell);
append_expr(buf, (Node *) c->clause, root->parse->rtable);
- if (lnext(cell))
+ if (lnext_compat(clauses, cell))
appendStringInfoString(buf, ", ");
}
}
@@ -270,7 +270,7 @@ append_pathkeys(StringInfo buf, const List *pathkeys, const List *rtable)
append_expr(buf, (Node *) mem->em_expr, rtable);
}
appendStringInfoChar(buf, ')');
- if (lnext(i))
+ if (lnext_compat(pathkeys, i))
appendStringInfoString(buf, ", ");
}
appendStringInfoChar(buf, ')');
@@ -601,7 +601,10 @@ tsl_debug_append_pruned_pathlist(StringInfo buf, PlannerInfo *root, RelOptInfo *
foreach (lc1, rel->pathlist)
{
Path *p1 = (Path *) lfirst(lc1);
- ListCell *lc2, *prev = NULL;
+ ListCell *lc2;
+#if PG13_LT
+ ListCell *prev = NULL;
+#endif
foreach (lc2, fdw_info->considered_paths)
{
@@ -610,11 +613,13 @@ tsl_debug_append_pruned_pathlist(StringInfo buf, PlannerInfo *root, RelOptInfo *
if (path_is_origin(p1, p2))
{
fdw_info->considered_paths =
- list_delete_cell(fdw_info->considered_paths, lc2, prev);
+ list_delete_cell_compat(fdw_info->considered_paths, lc2, prev);
fdw_utils_free_path(p2);
break;
}
+#if PG13_LT
prev = lc2;
+#endif
}
}
diff --git a/tsl/src/fdw/deparse.c b/tsl/src/fdw/deparse.c
index d90636b5..efd7debb 100644
--- a/tsl/src/fdw/deparse.c
+++ b/tsl/src/fdw/deparse.c
@@ -2211,7 +2211,7 @@ deparseSubscriptingRef(SubscriptingRef *node, deparse_expr_cxt *context)
{
deparseExpr(lfirst(lowlist_item), context);
appendStringInfoChar(buf, ':');
- lowlist_item = lnext(lowlist_item);
+ lowlist_item = lnext_compat(node->reflowerindexpr, lowlist_item);
}
deparseExpr(lfirst(uplist_item), context);
appendStringInfoChar(buf, ']');
@@ -2273,7 +2273,7 @@ deparseFuncExpr(FuncExpr *node, deparse_expr_cxt *context)
{
if (!first)
appendStringInfoString(buf, ", ");
- if (use_variadic && lnext(arg) == NULL)
+ if (use_variadic && lnext_compat(node->args, arg) == NULL)
appendStringInfoString(buf, "VARIADIC ");
deparseExpr((Expr *) lfirst(arg), context);
first = false;
@@ -2601,7 +2601,7 @@ deparseAggref(Aggref *node, deparse_expr_cxt *context)
first = false;
/* Add VARIADIC */
- if (use_variadic && lnext(arg) == NULL)
+ if (use_variadic && lnext_compat(node->args, arg) == NULL)
appendStringInfoString(buf, "VARIADIC ");
deparseExpr((Expr *) n, context);
diff --git a/tsl/src/nodes/decompress_chunk/decompress_chunk.c b/tsl/src/nodes/decompress_chunk/decompress_chunk.c
index 90b6c7c3..1e36f5dc 100644
--- a/tsl/src/nodes/decompress_chunk/decompress_chunk.c
+++ b/tsl/src/nodes/decompress_chunk/decompress_chunk.c
@@ -182,7 +182,7 @@ build_compressed_scan_pathkeys(SortInfo *sort_info, PlannerInfo *root, List *chu
for (lc = list_head(chunk_pathkeys);
lc != NULL && bms_num_members(segmentby_columns) < info->num_segmentby_columns;
- lc = lnext(lc))
+ lc = lnext_compat(chunk_pathkeys, lc))
{
PathKey *pk = lfirst(lc);
var = (Var *) ts_find_em_expr_for_rel(pk->pk_eclass, info->chunk_rel);
@@ -1210,7 +1210,7 @@ build_sortinfo(RelOptInfo *chunk_rel, CompressionInfo *info, List *pathkeys)
* we keep looping even if we found all segmentby columns in case a
* columns appears both in baserestrictinfo and in ORDER BY clause
*/
- for (; lc != NULL; lc = lnext(lc))
+ for (; lc != NULL; lc = lnext_compat(pathkeys, lc))
{
Assert(bms_num_members(segmentby_columns) <= info->num_segmentby_columns);
pk = lfirst(lc);
@@ -1250,7 +1250,7 @@ build_sortinfo(RelOptInfo *chunk_rel, CompressionInfo *info, List *pathkeys)
* loop over the rest of pathkeys
* this needs to exactly match the configured compress_orderby
*/
- for (pk_index = 1; lc != NULL; lc = lnext(lc), pk_index++)
+ for (pk_index = 1; lc != NULL; lc = lnext_compat(pathkeys, lc), pk_index++)
{
bool reverse = false;
pk = lfirst(lc);
diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c
index 035f2de4..f58e6f6c 100644
--- a/tsl/src/nodes/decompress_chunk/exec.c
+++ b/tsl/src/nodes/decompress_chunk/exec.c
@@ -121,7 +121,8 @@ initialize_column_state(DecompressChunkState *state)
state->columns = palloc0(state->num_columns * sizeof(DecompressChunkColumnState));
- for (i = 0, lc = list_head(state->varattno_map); i < state->num_columns; lc = lnext(lc), i++)
+ for (i = 0, lc = list_head(state->varattno_map); i < state->num_columns;
+ lc = lnext_compat(state->varattno_map, lc), i++)
{
DecompressChunkColumnState *column = &state->columns[i];
column->attno = lfirst_int(lc);
diff --git a/tsl/src/nodes/gapfill/planner.c b/tsl/src/nodes/gapfill/planner.c
index 56bdffd5..765a14ce 100644
--- a/tsl/src/nodes/gapfill/planner.c
+++ b/tsl/src/nodes/gapfill/planner.c
@@ -295,8 +295,10 @@ gapfill_build_pathtarget(PathTarget *pt_upper, PathTarget *pt_path, PathTarget *
/*
* check arguments past first argument dont have Vars
*/
- for (lc_arg = lnext(list_head(context.call.window->args)); lc_arg != NULL;
- lc_arg = lnext(lc_arg))
+ for (lc_arg = lnext_compat(context.call.window->args,
+ list_head(context.call.window->args));
+ lc_arg != NULL;
+ lc_arg = lnext_compat(context.call.window->args, lc_arg))
{
if (contain_var_clause(lfirst(lc_arg)))
ereport(ERROR,
@@ -553,9 +555,10 @@ gapfill_adjust_window_targetlist(PlannerInfo *root, RelOptInfo *input_rel, RelOp
/*
* check arguments past first argument dont have Vars
*/
- for (lc_arg = lnext(list_head(context.call.window->args));
+ for (lc_arg = lnext_compat(context.call.window->args,
+ list_head(context.call.window->args));
lc_arg != NULL;
- lc_arg = lnext(lc_arg))
+ lc_arg = lnext_compat(context.call.window->args, lc_arg))
{
if (contain_var_clause(lfirst(lc_arg)))
ereport(ERROR,
--
2.29.2
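
A minimal sketch of the list compat macro defined above; `jobs` is a hypothetical List * used only for illustration:

    ListCell *lc;

    foreach (lc, jobs)
    {
    	/* lnext_compat(list, cell) needs the containing list only on PG13,
    	 * where List is an array rather than a linked list */
    	if (lnext_compat(jobs, lc) == NULL)
    		elog(DEBUG1, "reached the last element");
    }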


@ -0,0 +1,59 @@
From 4a149cb833dbb45507cd52e63707311e9642587c Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Sat, 19 Sep 2020 23:20:37 +0200
Subject: [PATCH] Adjust code to PG13 list sort changes
PG13 changes the name of the list sorting function from list_qsort
to list_sort. Additionally PG13 does in-place sort.
https://github.com/postgres/postgres/commit/569ed7f483
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/commit/13d8aac33b6fc5104c8ad1da816dc0d009fc13a7.patch
---
src/bgw/scheduler.c | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/src/bgw/scheduler.c b/src/bgw/scheduler.c
index 2630ff9f..b9d1aa38 100644
--- a/src/bgw/scheduler.c
+++ b/src/bgw/scheduler.c
@@ -530,10 +530,15 @@ ts_populate_scheduled_job_tuple(ScheduledBgwJob *sjob, Datum *values)
#endif
static int
+#if PG13_LT
cmp_next_start(const void *left, const void *right)
{
const ListCell *left_cell = *((ListCell **) left);
const ListCell *right_cell = *((ListCell **) right);
+#else
+cmp_next_start(const ListCell *left_cell, const ListCell *right_cell)
+{
+#endif
ScheduledBgwJob *left_sjob = lfirst(left_cell);
ScheduledBgwJob *right_sjob = lfirst(right_cell);
@@ -549,10 +554,18 @@ cmp_next_start(const void *left, const void *right)
static void
start_scheduled_jobs(register_background_worker_callback_type bgw_register)
{
+ List *ordered_scheduled_jobs;
ListCell *lc;
Assert(CurrentMemoryContext == scratch_mctx);
+
/* Order jobs by increasing next_start */
- List *ordered_scheduled_jobs = list_qsort(scheduled_jobs, cmp_next_start);
+#if PG13_LT
+ ordered_scheduled_jobs = list_qsort(scheduled_jobs, cmp_next_start);
+#else
+ /* PG13 does in-place sort */
+ ordered_scheduled_jobs = scheduled_jobs;
+ list_sort(ordered_scheduled_jobs, cmp_next_start);
+#endif
foreach (lc, ordered_scheduled_jobs)
{
--
2.29.2
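
The same sorting pattern in isolation, as a sketch: `jobs` and `cmp_jobs` are hypothetical, and the comparator must use the pre-PG13 (void pointer) or PG13 (ListCell) signature shown in the patch above:

    List *ordered;
    #if PG13_LT
    	ordered = list_qsort(jobs, cmp_jobs);   /* returns a sorted copy */
    #else
    	ordered = jobs;
    	list_sort(ordered, cmp_jobs);           /* PG13 sorts in place */
    #endif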


@ -0,0 +1,37 @@
From 196943ff91a6a21c575fbca1f047544970ec4c98 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Thu, 24 Sep 2020 16:18:43 +0200
Subject: [PATCH] Adjust copy code to PG13 addRTEtoQuery changes
PG13 removes addRTEtoQuery and provides a similar function
addNSItemToQuery which has a different signature.
https://github.com/postgres/postgres/commit/5815696bc6
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/commit/10d1c2d698c1b7cbf5af86082287adda4f0c6e97.patch
---
src/copy.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/src/copy.c b/src/copy.c
index 2e63dc76..c8ce93e6 100644
--- a/src/copy.c
+++ b/src/copy.c
@@ -535,7 +535,12 @@ copy_constraints_and_check(ParseState *pstate, Relation rel, List *attnums)
{
ListCell *cur;
char *xactReadOnly;
-#if PG12_GE
+#if PG13_GE
+ ParseNamespaceItem *nsitem =
+ addRangeTableEntryForRelation(pstate, rel, RowExclusiveLock, NULL, false, false);
+ RangeTblEntry *rte = nsitem->p_rte;
+ addNSItemToQuery(pstate, nsitem, true, true, true);
+#elif PG12
RangeTblEntry *rte =
addRangeTableEntryForRelation(pstate, rel, RowExclusiveLock, NULL, false, false);
addRTEtoQuery(pstate, rte, false, true, true);
--
2.29.2


@ -0,0 +1,65 @@
From 1c50cdc9a1c06c58f9fa72ab963d595c9a99d3d8 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Thu, 24 Sep 2020 05:33:38 +0200
Subject: [PATCH] Adjust code to PG13 convert_tuples_by_name signature
change
PG13 removed the msg parameter from convert_tuples_by_name.
https://github.com/postgres/postgres/commit/fe66125974
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/commit/968de59f8cd981ba0ca9cca1929d4ba17fbc256f.patch
---
src/chunk_insert_state.c | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/src/chunk_insert_state.c b/src/chunk_insert_state.c
index 398a8811..5dc294df 100644
--- a/src/chunk_insert_state.c
+++ b/src/chunk_insert_state.c
@@ -379,8 +379,11 @@ setup_on_conflict_state(ChunkInsertState *state, ChunkDispatch *dispatch, AttrNu
if (NULL == chunk_attnos)
chunk_attnos = convert_tuples_by_name_map(RelationGetDescr(chunk_rel),
- RelationGetDescr(first_rel),
- gettext_noop("could not convert row type"));
+ RelationGetDescr(first_rel)
+#if PG13_LT
+ , gettext_noop("could not convert row type")
+#endif
+ );
onconflset = translate_clause(ts_chunk_dispatch_get_on_conflict_set(dispatch),
chunk_attnos,
@@ -484,8 +487,11 @@ adjust_projections(ChunkInsertState *cis, ChunkDispatch *dispatch, Oid rowtype)
* to work correctly in mapping hypertable attnos->chunk attnos.
*/
chunk_attnos = convert_tuples_by_name_map(RelationGetDescr(chunk_rel),
- RelationGetDescr(hyper_rel),
- gettext_noop("could not convert row type"));
+ RelationGetDescr(hyper_rel)
+#if PG13_LT
+ ,gettext_noop("could not convert row type")
+#endif
+ );
chunk_rri->ri_projectReturning =
get_adjusted_projection_info_returning(chunk_rri->ri_projectReturning,
@@ -590,8 +596,11 @@ ts_chunk_insert_state_create(Chunk *chunk, ChunkDispatch *dispatch)
if (chunk->relkind != RELKIND_FOREIGN_TABLE)
state->hyper_to_chunk_map =
convert_tuples_by_name(RelationGetDescr(parent_rel),
- RelationGetDescr(rel),
- gettext_noop("could not convert row type"));
+ RelationGetDescr(rel)
+#if PG13_LT
+ ,gettext_noop("could not convert row type")
+#endif
+ );
adjust_projections(state, dispatch, RelationGetForm(rel)->reltype);
--
2.29.2


@ -0,0 +1,259 @@
From 5c17cb0aa7f06bb3ac15d4ca72a37f59a10fa59e Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Thu, 24 Sep 2020 15:21:04 +0200
Subject: [PATCH] Adjust code to PG13 tuple conversion changes
PG13 changes the way attribute mappings are done and uses AttrMap
instead of AttrNumber[] in the new function signatures. This patch
changes ChunkInsertState to use TupleConversionMap which abstracts
this change.
https://github.com/postgres/postgres/commit/e1551f96e6
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/commit/2cb7c41276c8f8112bd225a020fef709a8e776ff.patch
---
src/chunk_insert_state.c | 92 +++++++++++++++++++++------------------
src/compat.h | 15 +++++++
tsl/src/fdw/modify_exec.c | 4 ++
3 files changed, 69 insertions(+), 42 deletions(-)
diff --git a/src/chunk_insert_state.c b/src/chunk_insert_state.c
index 5dc294df..f8200dd8 100644
--- a/src/chunk_insert_state.c
+++ b/src/chunk_insert_state.c
@@ -120,7 +120,7 @@ create_chunk_result_relation_info(ChunkDispatch *dispatch, Relation rel)
static ProjectionInfo *
get_adjusted_projection_info_returning(ProjectionInfo *orig, List *returning_clauses,
- AttrNumber *map, int map_size, Index varno, Oid rowtype,
+ TupleConversionMap *map, Index varno, Oid rowtype,
TupleDesc chunk_desc)
{
bool found_whole_row;
@@ -128,14 +128,15 @@ get_adjusted_projection_info_returning(ProjectionInfo *orig, List *returning_cla
Assert(returning_clauses != NIL);
/* map hypertable attnos -> chunk attnos */
- returning_clauses = castNode(List,
- map_variable_attnos((Node *) returning_clauses,
- varno,
- 0,
- map,
- map_size,
- rowtype,
- &found_whole_row));
+ if (map != NULL)
+ returning_clauses = castNode(List,
+ map_variable_attnos_compat((Node *) returning_clauses,
+ varno,
+ 0,
+ map->attrMap,
+ map->outdesc->natts,
+ rowtype,
+ &found_whole_row));
return ExecBuildProjectionInfo(returning_clauses,
orig->pi_exprContext,
@@ -145,7 +146,7 @@ get_adjusted_projection_info_returning(ProjectionInfo *orig, List *returning_cla
}
static List *
-translate_clause(List *inclause, AttrNumber *chunk_attnos, Index varno, Relation hyper_rel,
+translate_clause(List *inclause, TupleConversionMap *chunk_map, Index varno, Relation hyper_rel,
Relation chunk_rel)
{
List *clause = copyObject(inclause);
@@ -153,23 +154,23 @@ translate_clause(List *inclause, AttrNumber *chunk_attnos, Index varno, Relation
/* map hypertable attnos -> chunk attnos for the "excluded" table */
clause = castNode(List,
- map_variable_attnos((Node *) clause,
- INNER_VAR,
- 0,
- chunk_attnos,
- RelationGetDescr(hyper_rel)->natts,
- RelationGetForm(chunk_rel)->reltype,
- &found_whole_row));
+ map_variable_attnos_compat((Node *) clause,
+ INNER_VAR,
+ 0,
+ chunk_map->attrMap,
+ RelationGetDescr(hyper_rel)->natts,
+ RelationGetForm(chunk_rel)->reltype,
+ &found_whole_row));
/* map hypertable attnos -> chunk attnos for the hypertable */
clause = castNode(List,
- map_variable_attnos((Node *) clause,
- varno,
- 0,
- chunk_attnos,
- RelationGetDescr(hyper_rel)->natts,
- RelationGetForm(chunk_rel)->reltype,
- &found_whole_row));
+ map_variable_attnos_compat((Node *) clause,
+ varno,
+ 0,
+ chunk_map->attrMap,
+ RelationGetDescr(hyper_rel)->natts,
+ RelationGetForm(chunk_rel)->reltype,
+ &found_whole_row));
return clause;
}
@@ -193,7 +194,11 @@ adjust_hypertable_tlist(List *tlist, TupleConversionMap *map)
{
List *new_tlist = NIL;
TupleDesc chunk_tupdesc = map->outdesc;
+#if PG13_GE
+ AttrNumber *attrMap = map->attrMap->attnums;
+#else
AttrNumber *attrMap = map->attrMap;
+#endif
AttrNumber chunk_attrno;
for (chunk_attrno = 1; chunk_attrno <= chunk_tupdesc->natts; chunk_attrno++)
@@ -351,7 +356,8 @@ get_default_existing_slot(ChunkInsertState *state, ChunkDispatch *dispatch)
* columns, etc.
*/
static void
-setup_on_conflict_state(ChunkInsertState *state, ChunkDispatch *dispatch, AttrNumber *chunk_attnos)
+setup_on_conflict_state(ChunkInsertState *state, ChunkDispatch *dispatch,
+ TupleConversionMap *chunk_map)
{
TupleConversionMap *map = state->hyper_to_chunk_map;
ResultRelInfo *chunk_rri = get_chunk_rri(state);
@@ -377,16 +383,17 @@ setup_on_conflict_state(ChunkInsertState *state, ChunkDispatch *dispatch, AttrNu
Assert(map->outdesc == RelationGetDescr(chunk_rel));
- if (NULL == chunk_attnos)
- chunk_attnos = convert_tuples_by_name_map(RelationGetDescr(chunk_rel),
- RelationGetDescr(first_rel)
+ if (NULL == chunk_map)
+ chunk_map = convert_tuples_by_name(RelationGetDescr(chunk_rel),
+ RelationGetDescr(first_rel)
#if PG13_LT
- , gettext_noop("could not convert row type")
+ ,
+ gettext_noop("could not convert row type")
#endif
- );
+ );
onconflset = translate_clause(ts_chunk_dispatch_get_on_conflict_set(dispatch),
- chunk_attnos,
+ chunk_map,
hyper_rri->ri_RangeTableIndex,
hyper_rel,
chunk_rel);
@@ -412,7 +419,7 @@ setup_on_conflict_state(ChunkInsertState *state, ChunkDispatch *dispatch, AttrNu
if (NULL != onconflict_where)
{
List *clause = translate_clause(castNode(List, onconflict_where),
- chunk_attnos,
+ chunk_map,
hyper_rri->ri_RangeTableIndex,
hyper_rel,
chunk_rel);
@@ -476,7 +483,7 @@ adjust_projections(ChunkInsertState *cis, ChunkDispatch *dispatch, Oid rowtype)
ResultRelInfo *chunk_rri = cis->result_relation_info;
Relation hyper_rel = dispatch->hypertable_result_rel_info->ri_RelationDesc;
Relation chunk_rel = cis->rel;
- AttrNumber *chunk_attnos = NULL;
+ TupleConversionMap *chunk_map = NULL;
OnConflictAction onconflict_action = ts_chunk_dispatch_get_on_conflict_action(dispatch);
if (ts_chunk_dispatch_has_returning(dispatch))
@@ -486,19 +493,19 @@ adjust_projections(ChunkInsertState *cis, ChunkDispatch *dispatch, Oid rowtype)
* to have the hypertable_desc in the out spot for map_variable_attnos
* to work correctly in mapping hypertable attnos->chunk attnos.
*/
- chunk_attnos = convert_tuples_by_name_map(RelationGetDescr(chunk_rel),
- RelationGetDescr(hyper_rel)
+ chunk_map = convert_tuples_by_name(RelationGetDescr(chunk_rel),
+ RelationGetDescr(hyper_rel)
#if PG13_LT
- ,gettext_noop("could not convert row type")
+ ,
+ gettext_noop("could not convert row type")
#endif
- );
+ );
chunk_rri->ri_projectReturning =
get_adjusted_projection_info_returning(chunk_rri->ri_projectReturning,
ts_chunk_dispatch_get_returning_clauses(
dispatch),
- chunk_attnos,
- RelationGetDescr(hyper_rel)->natts,
+ chunk_map,
dispatch->hypertable_result_rel_info
->ri_RangeTableIndex,
rowtype,
@@ -511,7 +518,7 @@ adjust_projections(ChunkInsertState *cis, ChunkDispatch *dispatch, Oid rowtype)
set_arbiter_indexes(cis, dispatch);
if (onconflict_action == ONCONFLICT_UPDATE)
- setup_on_conflict_state(cis, dispatch, chunk_attnos);
+ setup_on_conflict_state(cis, dispatch, chunk_map);
}
}
@@ -598,9 +605,10 @@ ts_chunk_insert_state_create(Chunk *chunk, ChunkDispatch *dispatch)
convert_tuples_by_name(RelationGetDescr(parent_rel),
RelationGetDescr(rel)
#if PG13_LT
- ,gettext_noop("could not convert row type")
+ ,
+ gettext_noop("could not convert row type")
#endif
- );
+ );
adjust_projections(state, dispatch, RelationGetForm(rel)->reltype);
diff --git a/src/compat.h b/src/compat.h
index 51c1c181..1b2ed8e5 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -374,4 +374,19 @@ get_vacuum_options(const VacuumStmt *stmt)
#define for_each_cell_compat(cell, list, initcell) for_each_cell ((cell), (list), (initcell))
#endif
+/* PG13 removes the natts parameter from map_variable_attnos */
+#if PG13_LT
+#define map_variable_attnos_compat(node, varno, sublevels_up, map, natts, rowtype, found_wholerow) \
+ map_variable_attnos((node), \
+ (varno), \
+ (sublevels_up), \
+ (map), \
+ (natts), \
+ (rowtype), \
+ (found_wholerow))
+#else
+#define map_variable_attnos_compat(node, varno, sublevels_up, map, natts, rowtype, found_wholerow) \
+ map_variable_attnos((node), (varno), (sublevels_up), (map), (rowtype), (found_wholerow))
+#endif
+
#endif /* TIMESCALEDB_COMPAT_H */
diff --git a/tsl/src/fdw/modify_exec.c b/tsl/src/fdw/modify_exec.c
index 38d0e0c6..5307079d 100644
--- a/tsl/src/fdw/modify_exec.c
+++ b/tsl/src/fdw/modify_exec.c
@@ -202,7 +202,11 @@ convert_attrs(TupleConversionMap *map, List *attrs)
for (i = 0; i < map->outdesc->natts; i++)
{
+#if PG13_GE
+ if (map->attrMap->attnums[i] == attnum)
+#else
if (map->attrMap[i] == attnum)
+#endif
{
new_attrs = lappend_int(new_attrs, AttrOffsetGetAttrNumber(i));
break;
--
2.29.2
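
A reduced sketch of the attribute-map access pattern used in adjust_hypertable_tlist() and convert_attrs() above; `map` is assumed to be a valid TupleConversionMap *:

    /* PG13 wraps the attribute numbers in an AttrMap struct */
    #if PG13_GE
    	AttrNumber *attr_numbers = map->attrMap->attnums;
    	int attr_count = map->attrMap->maplen;
    #else
    	AttrNumber *attr_numbers = map->attrMap;
    	int attr_count = map->outdesc->natts;
    #endif
    	(void) attr_numbers;
    	(void) attr_count;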


@ -0,0 +1,88 @@
From 8cf647f2995afbd594d67abb1e6556b97096a1c6 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Thu, 24 Sep 2020 04:42:45 +0200
Subject: [PATCH] Adjust hypertable expansion to PG13 changes
PG13 merges setup_append_rel_array into setup_simple_rel_arrays
which we use to build the append_rel_array.
https://github.com/postgres/postgres/commit/1661a40505
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2735/commits/e6e2711d8e00958cb0a35c23f4e81a75f273113a.patch
---
src/plan_expand_hypertable.c | 30 ++++++++++++++++++++++++++++--
1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/src/plan_expand_hypertable.c b/src/plan_expand_hypertable.c
index 2b99c93b..4baa1b4c 100644
--- a/src/plan_expand_hypertable.c
+++ b/src/plan_expand_hypertable.c
@@ -95,6 +95,24 @@ is_time_bucket_function(Expr *node)
return false;
}
+#if PG13_GE
+/* PG13 merged setup_append_rel_array with setup_simple_rel_arrays */
+static void
+setup_append_rel_array(PlannerInfo *root)
+{
+ root->append_rel_array =
+ repalloc(root->append_rel_array, root->simple_rel_array_size * sizeof(AppendRelInfo *));
+ ListCell *lc;
+ foreach (lc, root->append_rel_list)
+ {
+ AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
+ int child_relid = appinfo->child_relid;
+
+ root->append_rel_array[child_relid] = appinfo;
+ }
+}
+#endif
+
/*
* Pre-check to determine if an expression is eligible for constification.
* A more thorough check is in constify_timestamptz_op_interval.
@@ -1146,7 +1164,6 @@ ts_plan_expand_hypertable_chunks(Hypertable *ht, PlannerInfo *root, RelOptInfo *
.join_conditions = NIL,
.propagate_conditions = NIL,
};
- Size old_rel_array_len;
Index first_chunk_index = 0;
#if PG12_GE
Index i;
@@ -1178,6 +1195,11 @@ ts_plan_expand_hypertable_chunks(Hypertable *ht, PlannerInfo *root, RelOptInfo *
propagate_join_quals(root, rel, &ctx);
inh_oids = get_chunk_oids(&ctx, root, rel, ht);
+
+ /* nothing to do here if we have no chunks and no data nodes */
+ if (list_length(inh_oids) + list_length(ht->data_nodes) == 0)
+ return;
+
oldrelation = table_open(parent_oid, NoLock);
/*
@@ -1185,7 +1207,10 @@ ts_plan_expand_hypertable_chunks(Hypertable *ht, PlannerInfo *root, RelOptInfo *
* children to them. We include potential data node rels we might need to
* create in case of a distributed hypertable.
*/
- old_rel_array_len = root->simple_rel_array_size;
+#if PG12_GE
+ expand_planner_arrays(root, list_length(inh_oids) + list_length(ht->data_nodes));
+#else
+ Size old_rel_array_len = root->simple_rel_array_size;
root->simple_rel_array_size += (list_length(inh_oids) + list_length(ht->data_nodes));
root->simple_rel_array =
repalloc(root->simple_rel_array, root->simple_rel_array_size * sizeof(RelOptInfo *));
@@ -1200,6 +1225,7 @@ ts_plan_expand_hypertable_chunks(Hypertable *ht, PlannerInfo *root, RelOptInfo *
memset(root->simple_rte_array + old_rel_array_len,
0,
list_length(inh_oids) * sizeof(*root->simple_rte_array));
+#endif
/* Adding partition info will make PostgreSQL consider the inheritance
* children as part of a partitioned relation. This will enable
--
2.29.2


@ -0,0 +1,41 @@
From d0f82f514eb1f59d63fcbfe71ec99d5d8f82649d Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Fri, 25 Sep 2020 14:23:01 +0200
Subject: [PATCH] Adjust decompress code to Var field renames
PG13 renames the varnoold and varoattno field of Var to varnosyn and
varattnosyn.
https://github.com/postgres/postgres/commit/9ce77d75c5
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2786/commits/d46b5de84b975d6f394a7f1fa94a34c5062f50ca.patch
---
tsl/src/nodes/decompress_chunk/decompress_chunk.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/tsl/src/nodes/decompress_chunk/decompress_chunk.c b/tsl/src/nodes/decompress_chunk/decompress_chunk.c
index 1e36f5dc..4a6fa782 100644
--- a/tsl/src/nodes/decompress_chunk/decompress_chunk.c
+++ b/tsl/src/nodes/decompress_chunk/decompress_chunk.c
@@ -739,11 +739,15 @@ create_var_for_compressed_equivalence_member(Var *var, const EMCreationContext *
if (var->varlevelsup == 0)
{
var->varno = context->compressed_relid_idx;
- var->varnoold = context->compressed_relid_idx;
var->varattno =
get_attnum(context->compressed_relid, NameStr(context->current_col_info->attname));
-
+#if PG13_GE
+ var->varnosyn = var->varno;
+ var->varattnosyn = var->varattno;
+#else
+ var->varnoold = var->varno;
var->varoattno = var->varattno;
+#endif
return (Node *) var;
}
--
2.29.2


@ -0,0 +1,39 @@
From aef77c6cf3bac36f93ba44244ecd8a23de701896 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Sat, 26 Sep 2020 02:14:28 +0200
Subject: [PATCH] Adjust jsonb_utils to PG13 changes
PG13 moved jsonapi.h from utils to common.
https://github.com/postgres/postgres/commit/beb4699091
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2787/commits/aa1e0c8ac78ca109994269c659d52983fe49db7f.patch
---
src/jsonb_utils.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/jsonb_utils.c b/src/jsonb_utils.c
index 46173871..dd901a38 100644
--- a/src/jsonb_utils.c
+++ b/src/jsonb_utils.c
@@ -9,9 +9,15 @@
#include <utils/builtins.h>
#include <utils/json.h>
#include <utils/jsonb.h>
-#include <utils/jsonapi.h>
#include "compat.h"
+
+#if PG13_LT
+#include <utils/jsonapi.h>
+#else
+#include <common/jsonapi.h>
+#endif
+
#include "export.h"
#include "jsonb_utils.h"
--
2.29.2


@ -0,0 +1,48 @@
From 8a2ed03f78601596e1f74fa2e75f9cdf52c4ff83 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Fri, 25 Sep 2020 14:31:55 +0200
Subject: [PATCH] Handle AT_DropExpression in process_utility
PG13 adds a new ALTER TABLE subcommand for dropping the generated
property from a column.
https://github.com/postgres/postgres/commit/f595117e24
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2498/commits/cdb29e2e61ec3f3b52fb8962d12a15727757e35b.patch
---
src/process_utility.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/src/process_utility.c b/src/process_utility.c
index 8f915113..0f76f141 100644
--- a/src/process_utility.c
+++ b/src/process_utility.c
@@ -3266,6 +3266,9 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
case AT_AddColumnRecurse:
case AT_DropColumn:
case AT_DropColumnRecurse:
+#if PG13_GE
+ case AT_DropExpression:
+#endif
/*
* adding and dropping columns handled in
@@ -3276,9 +3279,11 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
case AT_DropConstraintRecurse:
/* drop constraints handled by process_ddl_sql_drop */
break;
- case AT_ProcessedConstraint: /* internal command never hit in our
- * test code, so don't know how to
- * handle */
+#if PG13_LT
+ case AT_ProcessedConstraint: /* internal command never hit in our
+ * test code, so don't know how to
+ * handle */
+#endif
case AT_ReAddComment: /* internal command never hit in our test
* code, so don't know how to handle */
case AT_AddColumnToView: /* only used with views */
--
2.29.2


@ -0,0 +1,34 @@
From 5cea3a687f7227997deb0eacbb061c586c6713f7 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Sat, 26 Sep 2020 02:57:38 +0200
Subject: [PATCH] Adjust copy code to PG13 changes
PG13 adds a CmdType argument to ExecComputeStoredGenerated.
https://github.com/postgres/postgres/commit/c6679e4fca
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2498/commits/a2d15828cbbbe7570afb03bb930df083ddeafd7a.patch
---
src/copy.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/copy.c b/src/copy.c
index c8ce93e6..e38ae15e 100644
--- a/src/copy.c
+++ b/src/copy.c
@@ -379,7 +379,11 @@ copyfrom(CopyChunkState *ccstate, List *range_table, Hypertable *ht, void (*call
/* Compute stored generated columns */
if (resultRelInfo->ri_RelationDesc->rd_att->constr &&
resultRelInfo->ri_RelationDesc->rd_att->constr->has_generated_stored)
+#if PG13_GE
+ ExecComputeStoredGenerated(estate, myslot, CMD_INSERT);
+#else
ExecComputeStoredGenerated(estate, myslot);
+#endif
#endif
/*
* If the target is a plain table, check the constraints of
--
2.29.2


@ -0,0 +1,186 @@
From 6ef08914041b6166db6f77dd516ae8d66d0ecce6 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Wed, 30 Sep 2020 01:47:01 +0200
Subject: [PATCH] Adjust code to PG13 command completion tag changes
PG13 changes the representation of command completion tags to structs.
https://github.com/postgres/postgres/commit/2f9661311b
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2498/commits/d37ec4e23bc678bc84f126c5b952fb1707ad7fe4.patch
---
src/loader/loader.c | 13 +++++++-
src/process_utility.c | 59 +++++++++++++++++++++++++++---------
src/process_utility.h | 4 +++
tsl/test/src/test_ddl_hook.c | 4 +++
4 files changed, 65 insertions(+), 15 deletions(-)
diff --git a/src/loader/loader.c b/src/loader/loader.c
index f60f9e77..ed35f288 100644
--- a/src/loader/loader.c
+++ b/src/loader/loader.c
@@ -464,7 +464,14 @@ post_analyze_hook(ParseState *pstate, Query *query)
static void
loader_process_utility_hook(PlannedStmt *pstmt, const char *query_string,
ProcessUtilityContext context, ParamListInfo params,
- QueryEnvironment *queryEnv, DestReceiver *dest, char *completion_tag)
+ QueryEnvironment *queryEnv, DestReceiver *dest,
+#if PG13_GE
+ QueryCompletion *qc
+#else
+ char *completion_tag
+#endif
+
+)
{
bool is_distributed_database = false;
char *dist_uuid = NULL;
@@ -500,7 +507,11 @@ loader_process_utility_hook(PlannedStmt *pstmt, const char *query_string,
else
process_utility = standard_ProcessUtility;
+#if PG13_GE
+ process_utility(pstmt, query_string, context, params, queryEnv, dest, qc);
+#else
process_utility(pstmt, query_string, context, params, queryEnv, dest, completion_tag);
+#endif
/*
* Show a NOTICE warning message in case of dropping a
diff --git a/src/process_utility.c b/src/process_utility.c
index 0f76f141..d9d7514d 100644
--- a/src/process_utility.c
+++ b/src/process_utility.c
@@ -91,7 +91,12 @@ prev_ProcessUtility(ProcessUtilityArgs *args)
args->params,
args->queryEnv,
args->dest,
- args->completion_tag);
+#if PG13_GE
+ args->qc
+#else
+ args->completion_tag
+#endif
+ );
}
else
{
@@ -102,7 +107,12 @@ prev_ProcessUtility(ProcessUtilityArgs *args)
args->params,
args->queryEnv,
args->dest,
- args->completion_tag);
+#if PG13_GE
+ args->qc
+#else
+ args->completion_tag
+#endif
+ );
}
}
@@ -493,8 +503,13 @@ process_copy(ProcessUtilityArgs *args)
/* Performs acl check in here inside `copy_security_check` */
timescaledb_DoCopy(stmt, args->query_string, &processed, ht);
+#if PG13_GE
+ args->qc->commandTag = CMDTAG_COPY;
+ args->qc->nprocessed = processed;
+#else
if (args->completion_tag)
snprintf(args->completion_tag, COMPLETION_TAG_BUFSIZE, "COPY " UINT64_FORMAT, processed);
+#endif
process_add_hypertable(args, ht);
@@ -3646,7 +3661,11 @@ process_ddl_command_start(ProcessUtilityArgs *args)
return false;
if (check_read_only)
+#if PG13_GE
+ PreventCommandIfReadOnly(CreateCommandName(args->parsetree));
+#else
PreventCommandIfReadOnly(CreateCommandTag(args->parsetree));
+#endif
return handler(args);
}
@@ -3845,18 +3864,30 @@ process_ddl_sql_drop(EventTriggerDropObject *obj)
static void
timescaledb_ddl_command_start(PlannedStmt *pstmt, const char *query_string,
ProcessUtilityContext context, ParamListInfo params,
- QueryEnvironment *queryEnv, DestReceiver *dest, char *completion_tag)
-{
- ProcessUtilityArgs args = { .query_string = query_string,
- .context = context,
- .params = params,
- .dest = dest,
- .completion_tag = completion_tag,
- .pstmt = pstmt,
- .parsetree = pstmt->utilityStmt,
- .queryEnv = queryEnv,
- .parse_state = make_parsestate(NULL),
- .hypertable_list = NIL };
+ QueryEnvironment *queryEnv, DestReceiver *dest,
+#if PG13_GE
+ QueryCompletion *qc
+#else
+ char *completion_tag
+#endif
+)
+{
+ ProcessUtilityArgs args = {
+ .query_string = query_string,
+ .context = context,
+ .params = params,
+ .dest = dest,
+#if PG13_GE
+ .qc = qc,
+#else
+ .completion_tag = completion_tag,
+#endif
+ .pstmt = pstmt,
+ .parsetree = pstmt->utilityStmt,
+ .queryEnv = queryEnv,
+ .parse_state = make_parsestate(NULL),
+ .hypertable_list = NIL
+ };
bool altering_timescaledb = false;
DDLResult result;
diff --git a/src/process_utility.h b/src/process_utility.h
index ac5519f4..f66448fb 100644
--- a/src/process_utility.h
+++ b/src/process_utility.h
@@ -24,7 +24,11 @@ typedef struct ProcessUtilityArgs
ParamListInfo params;
DestReceiver *dest;
List *hypertable_list;
+#if PG13_GE
+ QueryCompletion *qc;
+#else
char *completion_tag;
+#endif
} ProcessUtilityArgs;
typedef enum
diff --git a/tsl/test/src/test_ddl_hook.c b/tsl/test/src/test_ddl_hook.c
index 4fb58f02..d01e6114 100644
--- a/tsl/test/src/test_ddl_hook.c
+++ b/tsl/test/src/test_ddl_hook.c
@@ -80,7 +80,11 @@ test_ddl_command_end(EventTriggerData *command)
ListCell *cell;
Hypertable *ht;
+#if PG13_GE
+ elog(NOTICE, "test_ddl_command_end: %s", GetCommandTagName(command->tag));
+#else
elog(NOTICE, "test_ddl_command_end: %s", command->tag);
+#endif
if (tsl_delayed_execution_list == NIL)
return;
--
2.29.2
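
In isolation, the completion-reporting pattern from process_copy() above looks like this; `qc`/`completion_tag` are the utility-hook arguments and `processed` is a hypothetical row count:

    #if PG13_GE
    	qc->commandTag = CMDTAG_COPY;
    	qc->nprocessed = processed;
    #else
    	if (completion_tag)
    		snprintf(completion_tag, COMPLETION_TAG_BUFSIZE,
    				 "COPY " UINT64_FORMAT, processed);
    #endif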


@ -0,0 +1,51 @@
From 399acf6de6d02b3362faa13379b6bd948b91e20c Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Fri, 2 Oct 2020 00:58:57 +0200
Subject: [PATCH] Adjust copy to PG13 HEAP_INSERT_SKIP_WAL change
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2498/commits/446f977be8ead518579b3c5b2458429f27518b48.patch
---
src/copy.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/src/copy.c b/src/copy.c
index e38ae15e..1187a696 100644
--- a/src/copy.c
+++ b/src/copy.c
@@ -27,6 +27,7 @@
#include <parser/parse_collate.h>
#include <parser/parse_relation.h>
#include <storage/bufmgr.h>
+#include <storage/smgr.h>
#include <utils/builtins.h>
#include <utils/guc.h>
#include <utils/lsyscache.h>
@@ -215,8 +216,10 @@ copyfrom(CopyChunkState *ccstate, List *range_table, Hypertable *ht, void (*call
ccstate->rel->rd_newRelfilenodeSubid != InvalidSubTransactionId)
{
ti_options |= HEAP_INSERT_SKIP_FSM;
+#if PG13_LT
if (!XLogIsNeeded())
ti_options |= HEAP_INSERT_SKIP_WAL;
+#endif
}
/*
@@ -449,8 +452,13 @@ copyfrom(CopyChunkState *ccstate, List *range_table, Hypertable *ht, void (*call
* If we skipped writing WAL, then we need to sync the heap (but not
* indexes since those use WAL anyway)
*/
+#if PG13_LT
if (ti_options & HEAP_INSERT_SKIP_WAL)
heap_sync(ccstate->rel);
+#else
+ if (!RelationNeedsWAL(ccstate->rel))
+ smgrimmedsync(ccstate->rel->rd_smgr, MAIN_FORKNUM);
+#endif
return processed;
}
--
2.29.2


@ -0,0 +1,58 @@
From 280db3fdb6c365dd37d82afaeeebd16efa70e965 Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Wed, 30 Sep 2020 01:45:29 +0200
Subject: [PATCH] Adjust planner code to PG13 planner_hook signature
change
PG13 adds the query string as argument to the planner_hook.
https://github.com/postgres/postgres/commit/6aba63ef3e
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2498/commits/90e3eb3df98f3165f08a17bf5548e3a30713de26.patch
---
src/planner.c | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/src/planner.c b/src/planner.c
index b4d4907a..cd40fa84 100644
--- a/src/planner.c
+++ b/src/planner.c
@@ -278,7 +278,12 @@ preprocess_query(Node *node, Query *rootquery)
}
static PlannedStmt *
+#if PG13_GE
+timescaledb_planner(Query *parse, const char *query_string, int cursor_opts,
+ ParamListInfo bound_params)
+#else
timescaledb_planner(Query *parse, int cursor_opts, ParamListInfo bound_params)
+#endif
{
PlannedStmt *stmt;
ListCell *lc;
@@ -302,11 +307,19 @@ timescaledb_planner(Query *parse, int cursor_opts, ParamListInfo bound_params)
preprocess_query((Node *) parse, parse);
if (prev_planner_hook != NULL)
- /* Call any earlier hooks */
+ /* Call any earlier hooks */
+#if PG13_GE
+ stmt = (prev_planner_hook)(parse, query_string, cursor_opts, bound_params);
+#else
stmt = (prev_planner_hook)(parse, cursor_opts, bound_params);
+#endif
else
- /* Call the standard planner */
+ /* Call the standard planner */
+#if PG13_GE
+ stmt = standard_planner(parse, query_string, cursor_opts, bound_params);
+#else
stmt = standard_planner(parse, cursor_opts, bound_params);
+#endif
if (ts_extension_is_loaded())
{
--
2.29.2
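
A self-contained sketch of a planner hook that compiles against both signatures, following timescaledb_planner() above; `my_planner` is a hypothetical hook function:

    static PlannedStmt *
    #if PG13_GE
    my_planner(Query *parse, const char *query_string, int cursor_opts,
    		   ParamListInfo bound_params)
    #else
    my_planner(Query *parse, int cursor_opts, ParamListInfo bound_params)
    #endif
    {
    #if PG13_GE
    	return standard_planner(parse, query_string, cursor_opts, bound_params);
    #else
    	return standard_planner(parse, cursor_opts, bound_params);
    #endif
    }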


@ -0,0 +1,34 @@
From d7960c761542d7e90e91a32e91b82459e5fcc84f Mon Sep 17 00:00:00 2001
From: Sven Klemm <sven@timescale.com>
Date: Sat, 3 Oct 2020 15:53:19 +0200
Subject: [PATCH] Adjust code to deparse_context changes
PG13 changes EXPLAIN to use Plan instead PlanState as context.
https://github.com/postgres/postgres/commit/6ef77cf46e
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2498/commits/20c5ef33fc9c6d47cd6a95ca79b0b260ee3cfd25.patch
---
src/chunk_append/explain.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/chunk_append/explain.c b/src/chunk_append/explain.c
index 0111cf72..7ee089c1 100644
--- a/src/chunk_append/explain.c
+++ b/src/chunk_append/explain.c
@@ -84,7 +84,11 @@ show_sort_group_keys(ChunkAppendState *state, List *ancestors, ExplainState *es)
initStringInfo(&sortkeybuf);
/* Set up deparsing context */
+#if PG13_GE
+ context = set_deparse_context_plan(es->deparse_cxt, plan, ancestors);
+#else
context = set_deparse_context_planstate(es->deparse_cxt, (Node *) state, ancestors);
+#endif
useprefix = (list_length(es->rtable) > 1 || es->verbose);
for (keyno = 0; keyno < nkeys; keyno++)
--
2.29.2


@ -0,0 +1,27 @@
From 67744f30f481a05e3cb6a2e73c0cb17117a2eb38 Mon Sep 17 00:00:00 2001
From: fidomax <adobegitler@gmail.com>
Date: Mon, 11 Jan 2021 14:16:06 +0300
Subject: [PATCH] Update compat.h
Signed-off-by: Maxim Kochetkov <fido_max@inbox.ru>
Fetch from: https://github.com/timescale/timescaledb/pull/2801/commits/6a927ee84c91797025fc48ac4aab74c3344ebdad.patch
---
src/compat.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/compat.h b/src/compat.h
index 1b2ed8e5..6b461e38 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -371,7 +371,7 @@ get_vacuum_options(const VacuumStmt *stmt)
#define list_delete_cell_compat(l, lc, prev) list_delete_cell((l), (lc))
#define list_make5(x1, x2, x3, x4, x5) lappend(list_make4(x1, x2, x3, x4), x5)
#define list_make5_oid(x1, x2, x3, x4, x5) lappend_oid(list_make4_oid(x1, x2, x3, x4), x5)
-#define for_each_cell_compat(cell, list, initcell) for_each_cell ((cell), (list), (initcell))
+#define for_each_cell_compat(cell, list, initcell) for_each_cell (cell, list, initcell)
#endif
/* PG13 removes the natts parameter from map_variable_attnos */
--
2.29.2


@@ -1,3 +1,3 @@
 # Locally calculated
-sha256 d0b7a153ff3e02ecf033a869ecdf4286f8610ea76140baa84928fc3a80223e99 timescaledb-1.7.4.tar.gz
+sha256 f74ebebb4461e4e77273615551ccf9250fac0bdef7bdacf5c92ee3a6884d7782 timescaledb-2.0.0.tar.gz
 sha256 0378e0948feefd85f579319c74d6e2b671194037f550c7176ef26649d94c895b LICENSE


@@ -4,7 +4,7 @@
 #
 ################################################################################
 
-TIMESCALEDB_VERSION = 1.7.4
+TIMESCALEDB_VERSION = 2.0.0
 TIMESCALEDB_SITE = $(call github,timescale,timescaledb,$(TIMESCALEDB_VERSION))
 TIMESCALEDB_LICENSE = Apache-2.0
 TIMESCALEDB_LICENSE_FILES = LICENSE
@@ -17,6 +17,7 @@ TIMESCALEDB_DEPENDENCIES = postgresql
 # --ldflags and --libs.
 TIMESCALEDB_CONF_OPTS = \
 	-DREGRESS_CHECKS=OFF \
+	-DWARNINGS_AS_ERRORS=OFF \
 	-DPG_PKGLIBDIR=lib/postgresql \
 	-DPG_SHAREDIR=share/postgresql \
 	-DPG_BINDIR=bin \