createEmptyProjection(TableId tableId, int actualPar
return splits;
}
catch (BigQueryException e) {
- throw new PrestoException(BIGQUERY_FAILED_TO_EXECUTE_QUERY, format("Failed to compute empty projection"), e);
+ throw new PrestoException(BIGQUERY_FAILED_TO_EXECUTE_QUERY, "Failed to compute empty projection", e);
}
}
}
diff --git a/presto-common/src/main/java/com/facebook/presto/common/block/Block.java b/presto-common/src/main/java/com/facebook/presto/common/block/Block.java
index 42c33554d510e..6b5379ba06ebc 100644
--- a/presto-common/src/main/java/com/facebook/presto/common/block/Block.java
+++ b/presto-common/src/main/java/com/facebook/presto/common/block/Block.java
@@ -25,7 +25,7 @@
/**
* A block packs positionCount values into a chunk of memory. How the values are packed,
* whether compression is used, endianness, and other implementation details are up to the subclasses.
- * However, for purposes of API, you can think of a Block as a sequence of values that
+ * However, for purposes of API, you can think of a Block as a sequence of zero-indexed values that
* can be read by calling the getter methods in this interface. For instance,
* you can read positionCount bytes by calling
* block.getByte(0), block.getByte(1), ... block.getByte(positionCount - 1).
@@ -51,6 +51,8 @@ default int getSliceLength(int position)
/**
* Gets a byte in the value at {@code position}.
+ *
+ * @throws IllegalArgumentException if position is negative or greater than or equal to the positionCount
*/
default byte getByte(int position)
{
@@ -59,6 +61,8 @@ default byte getByte(int position)
/**
* Gets a short in the value at {@code position}.
+ *
+ * @throws IllegalArgumentException if position is negative or greater than or equal to the positionCount
*/
default short getShort(int position)
{
@@ -67,6 +71,8 @@ default short getShort(int position)
/**
* Gets an int in the value at {@code position}.
+ *
+ * @throws IllegalArgumentException if position is negative or greater than or equal to the positionCount
*/
default int getInt(int position)
{
@@ -75,6 +81,8 @@ default int getInt(int position)
/**
* Gets a long in the value at {@code position}.
+ *
+ * @throws IllegalArgumentException if position is negative or greater than or equal to the positionCount
*/
default long getLong(int position)
{
@@ -99,7 +107,8 @@ default Slice getSlice(int position, int offset, int length)
/**
* Gets a block in the value at {@code position}.
- * @return
+ *
+ * @throws IllegalArgumentException if position is negative or greater than or equal to the positionCount
*/
default Block getBlock(int position)
{
@@ -107,7 +116,7 @@ default Block getBlock(int position)
}
/**
- * Is the byte sequences at {@code offset} in the value at {@code position} equal
+ * Is the byte sequence at {@code offset} in the value at {@code position} equal
* to the byte sequence at {@code otherOffset} in {@code otherSlice}.
* This method must be implemented if @{code getSlice} is implemented.
*/
@@ -147,7 +156,7 @@ default void writeBytesTo(int position, int offset, int length, SliceOutput slic
}
/**
- * Appends the value at {@code position} to {@code blockBuilder} and close the entry.
+ * Appends the value at {@code position} to {@code blockBuilder} and closes the entry.
*/
void writePositionTo(int position, BlockBuilder blockBuilder);
@@ -378,12 +387,14 @@ default Block getLoadedBlock()
Block appendNull();
/**
- * Returns the converted long value at {@code position} if the value ar {@code position} can be converted to long.
- * @throws UnsupportedOperationException if value at {@code position} is not compatible to be converted to long.
+ * Returns the converted long value at {@code position} if the value at {@code position} can be converted to long.
*
* Difference between toLong() and getLong() is:
* getLong() would only return value when the block is LongArrayBlock, otherwise it would throw exception.
* toLong() would return value for compatible types: LongArrayBlock, IntArrayBlock, ByteArrayBlock and ShortArrayBlock.
+ *
+ * @throws UnsupportedOperationException if value at {@code position} cannot be converted to long.
+ * @throws IllegalArgumentException if position is negative or greater than or equal to the positionCount
*/
default long toLong(int position)
{
diff --git a/presto-common/src/main/java/com/facebook/presto/common/block/DictionaryBlock.java b/presto-common/src/main/java/com/facebook/presto/common/block/DictionaryBlock.java
index 252f6db09eb11..9587f6c8bfc6d 100644
--- a/presto-common/src/main/java/com/facebook/presto/common/block/DictionaryBlock.java
+++ b/presto-common/src/main/java/com/facebook/presto/common/block/DictionaryBlock.java
@@ -36,6 +36,20 @@
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
+/**
+ * A dictionary holds positionCount values of arbitrary types. Usually some of these values are repeated,
+ * and the block wraps an underlying delegate block with fewer or no repeated values.
+ * This delegate block is called the "dictionary".
+ * The ids array contains positionCount indexes into the underlying delegate block.
+ * When value N is requested from this block, instead of returning the value directly,
+ * it looks up the index of value N at ids[N]; then it returns the value in dictionary[ids[N]].
+ * This compresses data when the same value repeats at multiple locations.
+ *
+ * Not every id in the ids array is a valid position in the block.
+ * Specify an offset in the ids array to indicate that IDs are only stored from that position forward.
+ * If the ids array is longer than offset+positionCount, then extra values to the right are not valid.
+ * That is, IDs are stored in a range of the array from offset to offset+positionCount-1 (inclusive).
+ */
public class DictionaryBlock
implements Block
{
@@ -509,6 +523,10 @@ int[] getRawIds()
return ids;
}
+ /**
+ * @param position the position of the desired value in this block
+ * @return the position of the desired value in the underlying block this block wraps
+ */
public int getId(int position)
{
checkValidPosition(position, positionCount);
diff --git a/presto-common/src/main/java/com/facebook/presto/common/predicate/TupleDomainFilterUtils.java b/presto-common/src/main/java/com/facebook/presto/common/predicate/TupleDomainFilterUtils.java
index 016872a91c034..9530c743898f9 100644
--- a/presto-common/src/main/java/com/facebook/presto/common/predicate/TupleDomainFilterUtils.java
+++ b/presto-common/src/main/java/com/facebook/presto/common/predicate/TupleDomainFilterUtils.java
@@ -151,7 +151,7 @@ public static TupleDomainFilter toFilter(Domain domain)
}
/**
- * Returns true is ranges represent != or NOT IN filter for double, float or string column.
+ * Returns true if ranges represent != or NOT IN filter for double, float or string column.
*
* The logic is to return true if ranges are next to each other, but don't include the touch value.
*/
diff --git a/presto-docs/src/main/sphinx/admin/properties.rst b/presto-docs/src/main/sphinx/admin/properties.rst
index 0435d11c29664..6a9af7fc66825 100644
--- a/presto-docs/src/main/sphinx/admin/properties.rst
+++ b/presto-docs/src/main/sphinx/admin/properties.rst
@@ -53,6 +53,25 @@ output data set is not skewed in order to avoid the overhead of hashing and
redistributing all the data across the network. This can also be specified
on a per-query basis using the ``redistribute_writes`` session property.
+``task_writer_count``
+^^^^^^^^^^^^^^^^^^^^^
+
+* **Type:** ``integer``
+* **Default value:** ``1``
+
+Default number of local parallel table writer threads per worker. It is required
+to be a power of two for a Java query engine.
+
+``task_partitioned_writer_count``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* **Type:** ``integer``
+* **Default value:** ``task_writer_count``
+
+Number of local parallel table writer threads per worker for partitioned writes. If not
+set, the number set by ``task_writer_count`` will be used. It is required to be a power
+of two for a Java query engine.
+
.. _tuning-memory:
Memory Management Properties
diff --git a/presto-docs/src/main/sphinx/connector/iceberg.rst b/presto-docs/src/main/sphinx/connector/iceberg.rst
index 727ba555f0fb7..731e1f602b36a 100644
--- a/presto-docs/src/main/sphinx/connector/iceberg.rst
+++ b/presto-docs/src/main/sphinx/connector/iceberg.rst
@@ -210,6 +210,9 @@ Property Name Description
Available values are ``NONE`` or ``OAUTH2`` (default: ``NONE``).
``OAUTH2`` requires either a credential or token.
+``iceberg.rest.auth.oauth2.uri`` OAUTH2 server endpoint URI.
+ Example: ``https://localhost:9191``
+
``iceberg.rest.auth.oauth2.credential`` The credential to use for OAUTH2 authentication.
Example: ``key:secret``
@@ -1631,9 +1634,11 @@ In this example, SYSTEM_TIME can be used as an alias for TIMESTAMP.
// In following query, timestamp string is matching with second inserted record.
SELECT * FROM ctas_nation FOR TIMESTAMP AS OF TIMESTAMP '2023-10-17 13:29:46.822 America/Los_Angeles';
+ SELECT * FROM ctas_nation FOR TIMESTAMP AS OF TIMESTAMP '2023-10-17 13:29:46.822';
// Same example using SYSTEM_TIME as an alias for TIMESTAMP
SELECT * FROM ctas_nation FOR SYSTEM_TIME AS OF TIMESTAMP '2023-10-17 13:29:46.822 America/Los_Angeles';
+ SELECT * FROM ctas_nation FOR SYSTEM_TIME AS OF TIMESTAMP '2023-10-17 13:29:46.822';
.. code-block:: text
@@ -1643,8 +1648,12 @@ In this example, SYSTEM_TIME can be used as an alias for TIMESTAMP.
20 | canada | 2 | comment
(2 rows)
-The option following FOR TIMESTAMP AS OF can accept any expression that returns a timestamp with time zone value.
-For example, `TIMESTAMP '2023-10-17 13:29:46.822 America/Los_Angeles'` is a constant string for the expression.
+.. note::
+
+   Timestamp without timezone will be parsed and rendered in the session time zone. See `TIMESTAMP <https://prestodb.io/docs/current/language/types.html#timestamp>`_.
+
+The option following FOR TIMESTAMP AS OF can accept any expression that returns a timestamp or timestamp with time zone value.
+For example, `TIMESTAMP '2023-10-17 13:29:46.822 America/Los_Angeles'` and `TIMESTAMP '2023-10-17 13:29:46.822'` are both valid timestamps. The first specifies the timestamp within the timezone `America/Los_Angeles`. The second will use the timestamp based on the user's session timezone.
In the following query, the expression CURRENT_TIMESTAMP returns the current timestamp with time zone value.
.. code-block:: sql
@@ -1665,6 +1674,7 @@ In the following query, the expression CURRENT_TIMESTAMP returns the current tim
// In following query, timestamp string is matching with second inserted record.
// BEFORE clause returns first record which is less than timestamp of the second record.
SELECT * FROM ctas_nation FOR TIMESTAMP BEFORE TIMESTAMP '2023-10-17 13:29:46.822 America/Los_Angeles';
+ SELECT * FROM ctas_nation FOR TIMESTAMP BEFORE TIMESTAMP '2023-10-17 13:29:46.822';
.. code-block:: text
diff --git a/presto-docs/src/main/sphinx/connector/prometheus.rst b/presto-docs/src/main/sphinx/connector/prometheus.rst
index 317076e13fc0d..035be908cb7af 100644
--- a/presto-docs/src/main/sphinx/connector/prometheus.rst
+++ b/presto-docs/src/main/sphinx/connector/prometheus.rst
@@ -25,6 +25,10 @@ replacing the properties as appropriate:
prometheus.max-query-duration=1h
prometheus.cache-ttl=30s
prometheus.bearer-token-file=/path/to/bearer/token/file
+ prometheus.tls.enabled=true
+ prometheus.tls.truststore-path=/path/to/truststore
+ prometheus.tls.truststore-password=truststorePassword
+ verify-host-name=true
Configuration Properties
------------------------
@@ -39,6 +43,10 @@ Property Name Description
``prometheus.max-query-duration`` Width of overall query to Prometheus, will be divided into query-chunk-duration queries
``prometheus.cache-ttl`` How long the config values are cached
``prometheus.bearer-token-file`` File holding bearer token for access to Prometheus
+``prometheus.tls.enabled`` Enable or disable TLS for securing communication with Prometheus
+``prometheus.tls.truststore-path`` Path to the trust store containing the SSL certificates
+``prometheus.tls.truststore-password`` Password to access the trust store for TLS verification
+``verify-host-name`` Enable or disable hostname verification in the SSL certificate
======================================== ============================================================================================
Not Exhausting Your Presto Available Heap
diff --git a/presto-docs/src/main/sphinx/functions/ip.rst b/presto-docs/src/main/sphinx/functions/ip.rst
index f5e637afb50bb..cc685be916dde 100644
--- a/presto-docs/src/main/sphinx/functions/ip.rst
+++ b/presto-docs/src/main/sphinx/functions/ip.rst
@@ -70,3 +70,11 @@ IP Functions
SELECT is_private_ip(IPADDRESS '157.240.200.99'); -- false
SELECT is_private_ip(IPADDRESS '2a03:2880:f031:12:face:b00c:0:2'); -- false
+.. function:: ip_prefix_subnets(ip_prefix, prefix_length) -> array(ip_prefix)
+
+ Returns the subnets of ``ip_prefix`` of size ``prefix_length``. ``prefix_length`` must be valid ([0, 32] for IPv4
+ and [0, 128] for IPv6) or the query will fail and raise an error. An empty array is returned if ``prefix_length``
+ is shorter (that is, less specific) than ``ip_prefix``. ::
+
+ SELECT IP_PREFIX_SUBNETS(IPPREFIX '192.168.1.0/24', 25); -- [{192.168.1.0/25}, {192.168.1.128/25}]
+ SELECT IP_PREFIX_SUBNETS(IPPREFIX '2a03:2880:c000::/34', 36); -- [{2a03:2880:c000::/36}, {2a03:2880:d000::/36}, {2a03:2880:e000::/36}, {2a03:2880:f000::/36}]
diff --git a/presto-docs/src/main/sphinx/functions/json.rst b/presto-docs/src/main/sphinx/functions/json.rst
index ac640be44d38a..026e899e1f792 100644
--- a/presto-docs/src/main/sphinx/functions/json.rst
+++ b/presto-docs/src/main/sphinx/functions/json.rst
@@ -5,25 +5,25 @@ JSON Functions and Operators
Cast to JSON
------------
- Casting from ``BOOLEAN``, ``TINYINT``, ``SMALLINT``, ``INTEGER``,
- ``BIGINT``, ``REAL``, ``DOUBLE`` or ``VARCHAR`` is supported.
- Casting from ``ARRAY``, ``MAP`` or ``ROW`` is supported when the element type of
- the array is one of the supported types, or when the key type of the map
- is ``VARCHAR`` and value type of the map is one of the supported types,
- or when every field type of the row is one of the supported types.
- Behaviors of the casts are shown with the examples below::
-
- SELECT CAST(NULL AS JSON); -- NULL
- SELECT CAST(1 AS JSON); -- JSON '1'
- SELECT CAST(9223372036854775807 AS JSON); -- JSON '9223372036854775807'
- SELECT CAST('abc' AS JSON); -- JSON '"abc"'
- SELECT CAST(true AS JSON); -- JSON 'true'
- SELECT CAST(1.234 AS JSON); -- JSON '1.234'
- SELECT CAST(ARRAY[1, 23, 456] AS JSON); -- JSON '[1,23,456]'
- SELECT CAST(ARRAY[1, NULL, 456] AS JSON); -- JSON '[1,null,456]'
- SELECT CAST(ARRAY[ARRAY[1, 23], ARRAY[456]] AS JSON); -- JSON '[[1,23],[456]]'
- SELECT CAST(MAP_FROM_ENTRIES(ARRAY[('k1', 1), ('k2', 23), ('k3', 456)]) AS JSON); -- JSON '{"k1":1,"k2":23,"k3":456}'
- SELECT CAST(CAST(ROW(123, 'abc', true) AS ROW(v1 BIGINT, v2 VARCHAR, v3 BOOLEAN)) AS JSON); -- JSON '[123,"abc",true]'
+Casting from ``BOOLEAN``, ``TINYINT``, ``SMALLINT``, ``INTEGER``,
+``BIGINT``, ``REAL``, ``DOUBLE`` or ``VARCHAR`` is supported.
+Casting from ``ARRAY``, ``MAP`` or ``ROW`` is supported when the element type of
+the array is one of the supported types, or when the key type of the map
+is ``VARCHAR`` and value type of the map is one of the supported types,
+or when every field type of the row is one of the supported types.
+Behaviors of the casts are shown with the examples below::
+
+ SELECT CAST(NULL AS JSON); -- NULL
+ SELECT CAST(1 AS JSON); -- JSON '1'
+ SELECT CAST(9223372036854775807 AS JSON); -- JSON '9223372036854775807'
+ SELECT CAST('abc' AS JSON); -- JSON '"abc"'
+ SELECT CAST(true AS JSON); -- JSON 'true'
+ SELECT CAST(1.234 AS JSON); -- JSON '1.234'
+ SELECT CAST(ARRAY[1, 23, 456] AS JSON); -- JSON '[1,23,456]'
+ SELECT CAST(ARRAY[1, NULL, 456] AS JSON); -- JSON '[1,null,456]'
+ SELECT CAST(ARRAY[ARRAY[1, 23], ARRAY[456]] AS JSON); -- JSON '[[1,23],[456]]'
+ SELECT CAST(MAP_FROM_ENTRIES(ARRAY[('k1', 1), ('k2', 23), ('k3', 456)]) AS JSON); -- JSON '{"k1":1,"k2":23,"k3":456}'
+ SELECT CAST(CAST(ROW(123, 'abc', true) AS ROW(v1 BIGINT, v2 VARCHAR, v3 BOOLEAN)) AS JSON); -- JSON '[123,"abc",true]'
.. note::
@@ -55,25 +55,25 @@ Cast to JSON
Cast from JSON
--------------
- Casting to ``BOOLEAN``, ``TINYINT``, ``SMALLINT``, ``INTEGER``,
- ``BIGINT``, ``REAL``, ``DOUBLE`` or ``VARCHAR`` is supported.
- Casting to ``ARRAY`` and ``MAP`` is supported when the element type of
- the array is one of the supported types, or when the key type of the map
- is ``VARCHAR`` and value type of the map is one of the supported types.
- Behaviors of the casts are shown with the examples below::
-
- SELECT CAST(JSON 'null' AS VARCHAR); -- NULL
- SELECT CAST(JSON '1' AS INTEGER); -- 1
- SELECT CAST(JSON '9223372036854775807' AS BIGINT); -- 9223372036854775807
- SELECT CAST(JSON '"abc"' AS VARCHAR); -- abc
- SELECT CAST(JSON 'true' AS BOOLEAN); -- true
- SELECT CAST(JSON '1.234' AS DOUBLE); -- 1.234
- SELECT CAST(JSON '[1,23,456]' AS ARRAY(INTEGER)); -- [1, 23, 456]
- SELECT CAST(JSON '[1,null,456]' AS ARRAY(INTEGER)); -- [1, NULL, 456]
- SELECT CAST(JSON '[[1,23],[456]]' AS ARRAY(ARRAY(INTEGER))); -- [[1, 23], [456]]
- SELECT CAST(JSON '{"k1":1,"k2":23,"k3":456}' AS MAP(VARCHAR, INTEGER)); -- {k1=1, k2=23, k3=456}
- SELECT CAST(JSON '{"v1":123,"v2":"abc","v3":true}' AS ROW(v1 BIGINT, v2 VARCHAR, v3 BOOLEAN)); -- {v1=123, v2=abc, v3=true}
- SELECT CAST(JSON '[123,"abc",true]' AS ROW(v1 BIGINT, v2 VARCHAR, v3 BOOLEAN)); -- {value1=123, value2=abc, value3=true}
+Casting to ``BOOLEAN``, ``TINYINT``, ``SMALLINT``, ``INTEGER``,
+``BIGINT``, ``REAL``, ``DOUBLE`` or ``VARCHAR`` is supported.
+Casting to ``ARRAY`` and ``MAP`` is supported when the element type of
+the array is one of the supported types, or when the key type of the map
+is ``VARCHAR`` and value type of the map is one of the supported types.
+Behaviors of the casts are shown with the examples below::
+
+ SELECT CAST(JSON 'null' AS VARCHAR); -- NULL
+ SELECT CAST(JSON '1' AS INTEGER); -- 1
+ SELECT CAST(JSON '9223372036854775807' AS BIGINT); -- 9223372036854775807
+ SELECT CAST(JSON '"abc"' AS VARCHAR); -- abc
+ SELECT CAST(JSON 'true' AS BOOLEAN); -- true
+ SELECT CAST(JSON '1.234' AS DOUBLE); -- 1.234
+ SELECT CAST(JSON '[1,23,456]' AS ARRAY(INTEGER)); -- [1, 23, 456]
+ SELECT CAST(JSON '[1,null,456]' AS ARRAY(INTEGER)); -- [1, NULL, 456]
+ SELECT CAST(JSON '[[1,23],[456]]' AS ARRAY(ARRAY(INTEGER))); -- [[1, 23], [456]]
+ SELECT CAST(JSON '{"k1":1,"k2":23,"k3":456}' AS MAP(VARCHAR, INTEGER)); -- {k1=1, k2=23, k3=456}
+ SELECT CAST(JSON '{"v1":123,"v2":"abc","v3":true}' AS ROW(v1 BIGINT, v2 VARCHAR, v3 BOOLEAN)); -- {v1=123, v2=abc, v3=true}
+ SELECT CAST(JSON '[123,"abc",true]' AS ROW(v1 BIGINT, v2 VARCHAR, v3 BOOLEAN)); -- {v1=123, v2=abc, v3=true}
.. note::
diff --git a/presto-docs/src/main/sphinx/language/types.rst b/presto-docs/src/main/sphinx/language/types.rst
index d257efed42050..9a58f281c80e3 100644
--- a/presto-docs/src/main/sphinx/language/types.rst
+++ b/presto-docs/src/main/sphinx/language/types.rst
@@ -249,7 +249,7 @@ Example: ``MAP(ARRAY['foo', 'bar'], ARRAY[1, 2])``
^^^^^^^
A structure made up of named fields. The fields may be of any SQL type, and are
-accessed with field reference operator ``.``
+accessed with the field reference operator ``.``
Example: ``CAST(ROW(1, 2.0) AS ROW(x BIGINT, y DOUBLE))``
@@ -447,4 +447,4 @@ The types support operations such as spatial measurements and relationship check
crucial for geographic information systems (GIS) and other applications requiring spatial data manipulation.
The geospatial types ensure data integrity and provide robust tools for complex spatial querying and analysis.
-See :doc:`/functions/geospatial`.
\ No newline at end of file
+See :doc:`/functions/geospatial`.
diff --git a/presto-docs/src/main/sphinx/presto_cpp/features.rst b/presto-docs/src/main/sphinx/presto_cpp/features.rst
index ebd0040b190cf..7b814b1adbd2c 100644
--- a/presto-docs/src/main/sphinx/presto_cpp/features.rst
+++ b/presto-docs/src/main/sphinx/presto_cpp/features.rst
@@ -314,6 +314,15 @@ If set to ``true``, disables the optimization in expression evaluation to delay
This should only be used for debugging purposes.
+``native_selective_nimble_reader_enabled``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* **Type:** ``boolean``
+* **Default value:** ``false``
+
+Temporary flag to control whether selective Nimble reader should be used in this
+query or not.
+
``native_join_spill_enabled``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/presto-docs/src/main/sphinx/presto_cpp/properties.rst b/presto-docs/src/main/sphinx/presto_cpp/properties.rst
index 8c28419548dba..cd2abe473270a 100644
--- a/presto-docs/src/main/sphinx/presto_cpp/properties.rst
+++ b/presto-docs/src/main/sphinx/presto_cpp/properties.rst
@@ -32,6 +32,16 @@ Presto C++ workers.
These Presto coordinator configuration properties are described here, in
alphabetical order.
+``driver.cancel-tasks-with-stuck-operators-threshold-ms``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+* **Type:** ``string``
+* **Default value:** ``2400000`` (40 minutes)
+
+ Cancels any task when at least one operator has been stuck for at
+ least the time specified by this threshold.
+
+ Set this property to ``0`` to disable canceling.
+
``experimental.table-writer-merge-operator-enabled``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/presto-docs/src/main/sphinx/release/release-0.282.rst b/presto-docs/src/main/sphinx/release/release-0.282.rst
index a59fb4af226fa..fef4e3c48d5f4 100644
--- a/presto-docs/src/main/sphinx/release/release-0.282.rst
+++ b/presto-docs/src/main/sphinx/release/release-0.282.rst
@@ -7,7 +7,7 @@ Release 0.282
General Changes
_______________
-* Fix ``TEMPORARY`` definition :doc:`/sql/create-function` and :doc:`/sql/drop-function`.
+* Fix ``TEMPORARY`` definition :doc:`/sql/create-function` and :doc:`/sql/drop-function`. :pr:`19429`
* Fix a bug where ``cardinality(map_keys(x))`` and ``cardinality(map_values(x))`` would return wrong results.
* Improve performance of ``Explain (TYPE VALIDATE)`` by returning immediately after analysis and ACL checks complete without executing a dummy query. The output column is now called ``result`` rather than ``valid``.
* Improve error handling when using custom ``FunctionNamespaceManagers``.
diff --git a/presto-docs/src/main/sphinx/sql/explain.rst b/presto-docs/src/main/sphinx/sql/explain.rst
index b5dc695cec1d6..e4c9144726e4a 100644
--- a/presto-docs/src/main/sphinx/sql/explain.rst
+++ b/presto-docs/src/main/sphinx/sql/explain.rst
@@ -38,7 +38,7 @@ distributed between fragments:
``BROADCAST``
Fragment is executed on a fixed number of nodes with the input data
- broadcasted to all nodes.
+ broadcast to all nodes.
``SOURCE``
Fragment is executed on nodes where input splits are accessed.
diff --git a/presto-druid/src/main/java/com/facebook/presto/druid/segment/SmooshedColumnSource.java b/presto-druid/src/main/java/com/facebook/presto/druid/segment/SmooshedColumnSource.java
index 21364a3611246..e731bc2617645 100644
--- a/presto-druid/src/main/java/com/facebook/presto/druid/segment/SmooshedColumnSource.java
+++ b/presto-druid/src/main/java/com/facebook/presto/druid/segment/SmooshedColumnSource.java
@@ -82,7 +82,7 @@ private void loadSmooshFileMetadata()
BufferedReader in = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(metadata)));
String line = in.readLine();
if (line == null) {
- throw new PrestoException(DRUID_SEGMENT_LOAD_ERROR, format("Malformed metadata file: first line should be version,maxChunkSize,numChunks, got null."));
+ throw new PrestoException(DRUID_SEGMENT_LOAD_ERROR, "Malformed metadata file: first line should be version,maxChunkSize,numChunks, got null.");
}
String[] splits = line.split(",");
diff --git a/presto-function-namespace-managers/src/main/java/com/facebook/presto/functionNamespace/AbstractSqlInvokedFunctionNamespaceManager.java b/presto-function-namespace-managers/src/main/java/com/facebook/presto/functionNamespace/AbstractSqlInvokedFunctionNamespaceManager.java
index f8701da6284b9..b0bfdbc34d037 100644
--- a/presto-function-namespace-managers/src/main/java/com/facebook/presto/functionNamespace/AbstractSqlInvokedFunctionNamespaceManager.java
+++ b/presto-function-namespace-managers/src/main/java/com/facebook/presto/functionNamespace/AbstractSqlInvokedFunctionNamespaceManager.java
@@ -331,7 +331,7 @@ protected ScalarFunctionImplementation sqlInvokedFunctionToImplementation(SqlInv
throw new IllegalStateException(
format("SqlInvokedFunction %s has BUILTIN implementation type but %s cannot manage BUILTIN functions", function.getSignature().getName(), this.getClass()));
case CPP:
- throw new IllegalStateException(format("Presto coordinator can not resolve implementation of CPP UDF functions"));
+ throw new IllegalStateException("Presto coordinator can not resolve implementation of CPP UDF functions");
default:
throw new IllegalStateException(format("Unknown function implementation type: %s", implementationType));
}
diff --git a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java
index 95a082c4ea0fc..cb55230ed41de 100644
--- a/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java
+++ b/presto-hive/src/main/java/com/facebook/presto/hive/HiveMetadata.java
@@ -3509,7 +3509,7 @@ protected Optional getTableEncryptionPropertiesFromTa
}
if (seenColumns.contains(columnWithSubfield.toString())) {
- throw new PrestoException(INVALID_TABLE_PROPERTY, format("The same column/subfield cannot have 2 encryption keys"));
+ throw new PrestoException(INVALID_TABLE_PROPERTY, "The same column/subfield cannot have 2 encryption keys");
}
if (columnWithSubfield.getSubfieldPath().isPresent()) {
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java
index 6d36b4863cf35..c4f1f468b544f 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/AbstractTestHiveClient.java
@@ -4308,7 +4308,7 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName
// verify all temp files start with the unique prefix
stagingPathRoot = getStagingPathRoot(insertTableHandle);
Set tempFiles = listAllDataFiles(context, stagingPathRoot);
- assertTrue(!tempFiles.isEmpty());
+ assertFalse(tempFiles.isEmpty());
for (String filePath : tempFiles) {
assertTrue(new Path(filePath).getName().startsWith(session.getQueryId()));
}
@@ -4535,7 +4535,7 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab
insertTableHandle.getLocationHandle().getTargetPath().toString(),
false);
Set tempFiles = listAllDataFiles(context, getStagingPathRoot(insertTableHandle));
- assertTrue(!tempFiles.isEmpty());
+ assertFalse(tempFiles.isEmpty());
for (String filePath : tempFiles) {
assertTrue(new Path(filePath).getName().startsWith(session.getQueryId()));
}
@@ -4663,7 +4663,7 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche
insertTableHandle.getLocationHandle().getTargetPath().toString(),
false);
Set tempFiles = listAllDataFiles(context, getStagingPathRoot(insertTableHandle));
- assertTrue(!tempFiles.isEmpty());
+ assertFalse(tempFiles.isEmpty());
for (String filePath : tempFiles) {
assertTrue(new Path(filePath).getName().startsWith(session.getQueryId()));
}
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveIntegrationSmokeTest.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveIntegrationSmokeTest.java
index 815b600881094..27a1df19f6c16 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveIntegrationSmokeTest.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveIntegrationSmokeTest.java
@@ -6395,7 +6395,7 @@ public void testAddTableConstraints()
// Negative tests
assertQueryFails(addPrimaryKeyStmt, format("Primary key already exists for: %s.%s", getSession().getSchema().get(), tableName));
- assertQueryFails(addUniqueConstraintStmt, format("Constraint already exists: 'uq3'"));
+ assertQueryFails(addUniqueConstraintStmt, "Constraint already exists: 'uq3'");
String dropNonExistentConstraint = format("ALTER TABLE %s.%s.%s DROP CONSTRAINT missingconstraint", getSession().getCatalog().get(), getSession().getSchema().get(), tableName);
assertQueryFails(dropNonExistentConstraint, "Constraint 'missingconstraint' not found");
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveLogicalPlanner.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveLogicalPlanner.java
index c6499696293ba..3388022ded000 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveLogicalPlanner.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveLogicalPlanner.java
@@ -148,6 +148,7 @@
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertNotSame;
import static org.testng.Assert.assertTrue;
@Test(singleThreaded = true)
@@ -2126,7 +2127,7 @@ private void assertNoAggregatedColumns(Plan plan, String tableName)
for (ColumnHandle columnHandle : tableScan.getAssignments().values()) {
assertTrue(columnHandle instanceof HiveColumnHandle);
HiveColumnHandle hiveColumnHandle = (HiveColumnHandle) columnHandle;
- assertFalse(hiveColumnHandle.getColumnType() == HiveColumnHandle.ColumnType.AGGREGATED);
+ assertNotSame(hiveColumnHandle.getColumnType(), HiveColumnHandle.ColumnType.AGGREGATED);
assertFalse(hiveColumnHandle.getPartialAggregation().isPresent());
}
}
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveMaterializedViewLogicalPlanner.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveMaterializedViewLogicalPlanner.java
index 5d423606df0ef..957afcd0297b1 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveMaterializedViewLogicalPlanner.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveMaterializedViewLogicalPlanner.java
@@ -87,7 +87,7 @@
import static java.lang.String.format;
import static java.util.Collections.emptyList;
import static java.util.stream.Collectors.joining;
-import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertNotEquals;
import static org.testng.Assert.assertTrue;
@Test(singleThreaded = true)
@@ -812,7 +812,7 @@ public void testMaterializedViewSampledRelations()
// from sampled table and full table
String viewHalfQuery = format("SELECT * from %s ORDER BY nationkey", viewHalf);
MaterializedResult viewHalfTable = computeActual(viewHalfQuery);
- assertFalse(viewFullTable.equals(viewHalfTable));
+ assertNotEquals(viewFullTable, viewHalfTable);
}
finally {
queryRunner.execute("DROP MATERIALIZED VIEW IF EXISTS " + viewFull);
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveNativeLogicalPlanner.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveNativeLogicalPlanner.java
index 1c15a14cc420d..3c74dca08ce57 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveNativeLogicalPlanner.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveNativeLogicalPlanner.java
@@ -33,6 +33,7 @@
import static com.facebook.presto.sql.planner.optimizations.PlanNodeSearcher.searchFrom;
import static io.airlift.tpch.TpchTable.ORDERS;
import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertNotSame;
import static org.testng.Assert.assertTrue;
@Test(singleThreaded = true)
@@ -74,7 +75,7 @@ private void assertNoAggregatedColumns(Plan plan, String tableName)
for (ColumnHandle columnHandle : tableScan.getAssignments().values()) {
assertTrue(columnHandle instanceof HiveColumnHandle);
HiveColumnHandle hiveColumnHandle = (HiveColumnHandle) columnHandle;
- assertFalse(hiveColumnHandle.getColumnType() == HiveColumnHandle.ColumnType.AGGREGATED);
+ assertNotSame(hiveColumnHandle.getColumnType(), HiveColumnHandle.ColumnType.AGGREGATED);
assertFalse(hiveColumnHandle.getPartialAggregation().isPresent());
}
}
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitScheduling.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitScheduling.java
index 8a95cc41e9519..ee95390f19932 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitScheduling.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitScheduling.java
@@ -65,9 +65,6 @@ public void testDynamicSplits()
assertTrue(numberOfSplitsWithDynamicSplitScheduling < numberOfSplitsWithoutDynamicSplitScheduling, "Expected less splits with dynamic split scheduling");
});
}
- catch (Exception e) {
- assertTrue(false, e.getMessage());
- }
finally {
getQueryRunner().execute("DROP TABLE IF EXISTS test_orders");
}
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitSource.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitSource.java
index 65ae49a577e7f..7411ba49428f6 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitSource.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestHiveSplitSource.java
@@ -327,7 +327,7 @@ public void testReaderWaitsForSplits()
// sleep for a bit, and assure the thread is blocked
MILLISECONDS.sleep(200);
- assertTrue(!splits.isDone());
+ assertFalse(splits.isDone());
// add a split
hiveSplitSource.addToQueue(new TestSplit(33));
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/TestJsonHiveHandles.java b/presto-hive/src/test/java/com/facebook/presto/hive/TestJsonHiveHandles.java
index 7538783a47e5f..97097df4914e8 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/TestJsonHiveHandles.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/TestJsonHiveHandles.java
@@ -98,7 +98,7 @@ public void testColumnHandleDeserialize()
assertEquals(columnHandle.getTypeSignature(), DOUBLE.getTypeSignature());
assertEquals(columnHandle.getHiveType(), HiveType.HIVE_FLOAT);
assertEquals(columnHandle.getHiveColumnIndex(), -1);
- assertEquals(columnHandle.isPartitionKey(), true);
+ assertTrue(columnHandle.isPartitionKey());
}
private void testJsonEquals(String json, Map expectedMap)
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/metastore/glue/TestHiveClientGlueMetastore.java b/presto-hive/src/test/java/com/facebook/presto/hive/metastore/glue/TestHiveClientGlueMetastore.java
index a2454f17859a0..94d2b15b4a118 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/metastore/glue/TestHiveClientGlueMetastore.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/metastore/glue/TestHiveClientGlueMetastore.java
@@ -95,6 +95,7 @@
import static org.apache.hadoop.hive.metastore.TableType.EXTERNAL_TABLE;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
public class TestHiveClientGlueMetastore
@@ -356,7 +357,7 @@ public void testGetPartitionsWithFilterUsingReservedKeywordsAsColumnName()
tableName.getTableName(),
predicates);
- assertTrue(!partitionNames.isEmpty());
+ assertFalse(partitionNames.isEmpty());
assertEquals(partitionNames, ImmutableList.of("key=value2/int_partition=2"));
// KEY is a reserved keyword in the grammar of the SQL parser used internally by Glue API
@@ -370,7 +371,7 @@ public void testGetPartitionsWithFilterUsingReservedKeywordsAsColumnName()
tableName.getSchemaName(),
tableName.getTableName(),
predicates);
- assertTrue(!partitionNames.isEmpty());
+ assertFalse(partitionNames.isEmpty());
assertEquals(partitionNames, ImmutableList.of("key=value1/int_partition=1", "key=value2/int_partition=2"));
}
finally {
diff --git a/presto-hive/src/test/java/com/facebook/presto/hive/security/ranger/TestRangerBasedAccessControl.java b/presto-hive/src/test/java/com/facebook/presto/hive/security/ranger/TestRangerBasedAccessControl.java
index ec0d27388df6a..b93d5ac824877 100644
--- a/presto-hive/src/test/java/com/facebook/presto/hive/security/ranger/TestRangerBasedAccessControl.java
+++ b/presto-hive/src/test/java/com/facebook/presto/hive/security/ranger/TestRangerBasedAccessControl.java
@@ -48,7 +48,7 @@
public class TestRangerBasedAccessControl
{
public static final ConnectorTransactionHandle TRANSACTION_HANDLE = new ConnectorTransactionHandle() {};
- public static final AccessControlContext CONTEXT = new AccessControlContext(new QueryId("query_id"), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats());
+ public static final AccessControlContext CONTEXT = new AccessControlContext(new QueryId("query_id"), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty());
@Test
public void testTablePriviledgesRolesNotAllowed()
diff --git a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergAbstractMetadata.java b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergAbstractMetadata.java
index c920f22e6f6b9..f60e13abd2b04 100644
--- a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergAbstractMetadata.java
+++ b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergAbstractMetadata.java
@@ -19,6 +19,7 @@
import com.facebook.presto.common.predicate.TupleDomain;
import com.facebook.presto.common.type.BigintType;
import com.facebook.presto.common.type.SqlTimestampWithTimeZone;
+import com.facebook.presto.common.type.TimestampType;
import com.facebook.presto.common.type.TimestampWithTimeZoneType;
import com.facebook.presto.common.type.TypeManager;
import com.facebook.presto.common.type.VarcharType;
@@ -981,6 +982,11 @@ private static long getSnapshotIdForTableVersion(Table table, ConnectorTableVers
long millisUtc = new SqlTimestampWithTimeZone((long) tableVersion.getTableVersion()).getMillisUtc();
return getSnapshotIdTimeOperator(table, millisUtc, tableVersion.getVersionOperator());
}
+ else if (tableVersion.getVersionExpressionType() instanceof TimestampType) {
+ long timestampValue = (long) tableVersion.getTableVersion();
+ long millisUtc = ((TimestampType) tableVersion.getVersionExpressionType()).getPrecision().toMillis(timestampValue);
+ return getSnapshotIdTimeOperator(table, millisUtc, tableVersion.getVersionOperator());
+ }
throw new PrestoException(NOT_SUPPORTED, "Unsupported table version expression type: " + tableVersion.getVersionExpressionType());
}
if (tableVersion.getVersionType() == VersionType.VERSION) {
diff --git a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergNativeCatalogFactory.java b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergNativeCatalogFactory.java
index 3890649d36592..d0a361184af13 100644
--- a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergNativeCatalogFactory.java
+++ b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/IcebergNativeCatalogFactory.java
@@ -48,7 +48,7 @@
*/
public class IcebergNativeCatalogFactory
{
- private final Cache<String, Catalog> catalogCache;
+ protected final Cache<String, Catalog> catalogCache;
private final String catalogName;
protected final CatalogType catalogType;
private final String catalogWarehouse;
@@ -99,7 +99,7 @@ public SupportsNamespaces getNamespaces(ConnectorSession session)
throw new PrestoException(NOT_SUPPORTED, "Iceberg catalog of type " + catalogType + " does not support namespace operations");
}
- private String getCacheKey(ConnectorSession session)
+ protected String getCacheKey(ConnectorSession session)
{
StringBuilder sb = new StringBuilder();
sb.append(catalogName);
@@ -112,7 +112,7 @@ protected Optional getCatalogCacheKey(ConnectorSession session)
return Optional.empty();
}
- private Map<String, String> getProperties(ConnectorSession session)
+ protected Map<String, String> getProperties(ConnectorSession session)
{
Map<String, String> properties = new HashMap<>();
if (icebergConfig.getManifestCachingEnabled()) {
@@ -134,7 +134,7 @@ protected Map getCatalogProperties(ConnectorSession session)
return ImmutableMap.of();
}
- private Configuration getHadoopConfiguration()
+ protected Configuration getHadoopConfiguration()
{
Configuration configuration = new Configuration(false);
diff --git a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/rest/IcebergRestCatalogFactory.java b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/rest/IcebergRestCatalogFactory.java
index ababf3b2e2cbd..4a8be67c1c9db 100644
--- a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/rest/IcebergRestCatalogFactory.java
+++ b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/rest/IcebergRestCatalogFactory.java
@@ -20,21 +20,36 @@
import com.facebook.presto.iceberg.IcebergConfig;
import com.facebook.presto.iceberg.IcebergNativeCatalogFactory;
import com.facebook.presto.spi.ConnectorSession;
+import com.facebook.presto.spi.PrestoException;
+import com.facebook.presto.spi.security.ConnectorIdentity;
import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.UncheckedExecutionException;
import io.jsonwebtoken.Jwts;
-import org.apache.iceberg.rest.auth.OAuth2Properties;
+import org.apache.iceberg.CatalogProperties;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.SessionCatalog.SessionContext;
+import org.apache.iceberg.rest.HTTPClient;
+import org.apache.iceberg.rest.RESTCatalog;
import javax.inject.Inject;
import java.util.Date;
import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
import static com.facebook.presto.iceberg.rest.AuthenticationType.OAUTH2;
import static com.facebook.presto.iceberg.rest.SessionType.USER;
+import static com.google.common.base.Throwables.throwIfInstanceOf;
+import static com.google.common.base.Throwables.throwIfUnchecked;
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
+import static java.util.UUID.randomUUID;
import static org.apache.iceberg.CatalogProperties.URI;
+import static org.apache.iceberg.CatalogUtil.configureHadoopConf;
import static org.apache.iceberg.rest.auth.OAuth2Properties.CREDENTIAL;
+import static org.apache.iceberg.rest.auth.OAuth2Properties.JWT_TOKEN_TYPE;
+import static org.apache.iceberg.rest.auth.OAuth2Properties.OAUTH2_SERVER_URI;
import static org.apache.iceberg.rest.auth.OAuth2Properties.TOKEN;
public class IcebergRestCatalogFactory
@@ -42,6 +57,7 @@ public class IcebergRestCatalogFactory
{
private final IcebergRestConfig catalogConfig;
private final NodeVersion nodeVersion;
+ private final String catalogName;
@Inject
public IcebergRestCatalogFactory(
@@ -55,6 +71,37 @@ public IcebergRestCatalogFactory(
super(config, catalogName, s3ConfigurationUpdater, gcsConfigurationInitialize);
this.catalogConfig = requireNonNull(catalogConfig, "catalogConfig is null");
this.nodeVersion = requireNonNull(nodeVersion, "nodeVersion is null");
+ this.catalogName = requireNonNull(catalogName, "catalogName is null").getCatalogName();
+ }
+
+ @Override
+ public Catalog getCatalog(ConnectorSession session)
+ {
+ try {
+ return catalogCache.get(getCacheKey(session), () -> {
+ RESTCatalog catalog = new RESTCatalog(
+ convertSession(session),
+ config -> HTTPClient.builder(config).uri(config.get(URI)).build());
+
+ configureHadoopConf(catalog, getHadoopConfiguration());
+ catalog.initialize(catalogName, getProperties(session));
+ return catalog;
+ });
+ }
+ catch (ExecutionException | UncheckedExecutionException e) {
+ throwIfInstanceOf(e.getCause(), PrestoException.class);
+ throwIfUnchecked(e);
+ throw new UncheckedExecutionException(e);
+ }
+ }
+
+ @Override
+ protected Optional<String> getCatalogCacheKey(ConnectorSession session)
+ {
+ StringBuilder sb = new StringBuilder();
+ catalogConfig.getSessionType().filter(type -> type.equals(USER))
+ .ifPresent(type -> sb.append(session.getUser()));
+ return Optional.of(sb.toString());
}
@Override
@@ -67,6 +114,11 @@ protected Map getCatalogProperties(ConnectorSession session)
catalogConfig.getAuthenticationType().ifPresent(type -> {
if (type == OAUTH2) {
+ // The oauth2/tokens endpoint of the REST catalog spec has been deprecated and will
+ // be removed in Iceberg 2.0 (https://github.com/apache/iceberg/pull/10603)
+ // TODO auth server URI will eventually need to be made a required property
+ catalogConfig.getAuthenticationServerUri().ifPresent(authServerUri -> properties.put(OAUTH2_SERVER_URI, authServerUri));
+
if (!catalogConfig.credentialOrTokenExists()) {
throw new IllegalStateException("iceberg.rest.auth.oauth2 requires either a credential or a token");
}
@@ -75,24 +127,96 @@ protected Map getCatalogProperties(ConnectorSession session)
}
});
- catalogConfig.getSessionType().ifPresent(type -> {
- if (type == USER) {
- properties.putAll(session.getIdentity().getExtraCredentials());
-
- String sessionId = format("%s-%s", session.getUser(), session.getSource().orElse("default"));
- String jwt = Jwts.builder()
- .setId(sessionId)
- .setSubject(session.getUser())
- .setIssuedAt(new Date())
- .setIssuer(nodeVersion.toString())
- .claim("user", session.getUser())
- .claim("source", session.getSource().orElse(""))
- .compact();
-
- properties.put(OAuth2Properties.JWT_TOKEN_TYPE, jwt);
- }
- });
+ catalogConfig.getSessionType().filter(type -> type.equals(USER))
+ .ifPresent(type -> properties.put(CatalogProperties.USER, session.getUser()));
return properties.build();
}
+
+ protected SessionContext convertSession(ConnectorSession session)
+ {
+ RestSessionBuilder sessionContextBuilder = catalogConfig.getSessionType()
+ .filter(type -> type.equals(USER))
+ .map(type -> {
+ String sessionId = format("%s-%s", session.getUser(), session.getSource().orElse("default"));
+ Map<String, String> properties = ImmutableMap.of(
+ "user", session.getUser(),
+ "source", session.getSource().orElse(""),
+ "version", nodeVersion.toString());
+
+ String jwt = Jwts.builder()
+ .setSubject(session.getUser())
+ .setIssuer(nodeVersion.toString())
+ .setIssuedAt(new Date())
+ .claim("user", session.getUser())
+ .claim("source", session.getSource().orElse(""))
+ .compact();
+
+ ImmutableMap.Builder<String, String> credentials = ImmutableMap.builder();
+ credentials.put(JWT_TOKEN_TYPE, jwt).putAll(session.getIdentity().getExtraCredentials());
+
+ return builder(session).setSessionId(sessionId)
+ .setIdentity(session.getUser())
+ .setCredentials(credentials.build())
+ .setProperties(properties);
+ }).orElse(builder(session).setSessionId(randomUUID().toString()));
+ return sessionContextBuilder.build();
+ }
+
+ protected static class RestSessionBuilder
+ {
+ private String sessionId;
+ private String identity;
+ private Map<String, String> properties;
+ private Map<String, String> credentials;
+ private final ConnectorIdentity wrappedIdentity;
+
+ private RestSessionBuilder(ConnectorSession session)
+ {
+ sessionId = null;
+ identity = null;
+ credentials = null;
+ properties = ImmutableMap.of();
+ wrappedIdentity = session.getIdentity();
+ }
+
+ protected RestSessionBuilder setSessionId(String sessionId)
+ {
+ this.sessionId = sessionId;
+ return this;
+ }
+
+ protected RestSessionBuilder setIdentity(String identity)
+ {
+ this.identity = identity;
+ return this;
+ }
+
+ protected RestSessionBuilder setCredentials(Map<String, String> credentials)
+ {
+ this.credentials = credentials;
+ return this;
+ }
+
+ protected RestSessionBuilder setProperties(Map<String, String> properties)
+ {
+ this.properties = properties;
+ return this;
+ }
+
+ protected SessionContext build()
+ {
+ return new SessionContext(
+ sessionId,
+ identity,
+ credentials,
+ properties,
+ wrappedIdentity);
+ }
+ }
+
+ protected static RestSessionBuilder builder(ConnectorSession session)
+ {
+ return new RestSessionBuilder(session);
+ }
}
diff --git a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/rest/IcebergRestConfig.java b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/rest/IcebergRestConfig.java
index b00c68f09aca5..fe0d1a5522cb3 100644
--- a/presto-iceberg/src/main/java/com/facebook/presto/iceberg/rest/IcebergRestConfig.java
+++ b/presto-iceberg/src/main/java/com/facebook/presto/iceberg/rest/IcebergRestConfig.java
@@ -25,6 +25,7 @@ public class IcebergRestConfig
private String serverUri;
private SessionType sessionType;
private AuthenticationType authenticationType;
+ private String authenticationServerUri;
private String credential;
private String token;
@@ -68,6 +69,19 @@ public IcebergRestConfig setAuthenticationType(AuthenticationType authentication
return this;
}
+ public Optional<String> getAuthenticationServerUri()
+ {
+ return Optional.ofNullable(authenticationServerUri);
+ }
+
+ @Config("iceberg.rest.auth.oauth2.uri")
+ @ConfigDescription("The URI to connect to the OAUTH2 server")
+ public IcebergRestConfig setAuthenticationServerUri(String authServerUri)
+ {
+ this.authenticationServerUri = authServerUri;
+ return this;
+ }
+
public Optional<String> getCredential()
{
return Optional.ofNullable(credential);
diff --git a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/TestIcebergTableVersion.java b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/TestIcebergTableVersion.java
index 8da55ce029aae..7f75dfcea61c4 100644
--- a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/TestIcebergTableVersion.java
+++ b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/TestIcebergTableVersion.java
@@ -14,21 +14,31 @@
package com.facebook.presto.iceberg;
import com.facebook.presto.Session;
+import com.facebook.presto.Session.SessionBuilder;
+import com.facebook.presto.common.type.TimeZoneKey;
import com.facebook.presto.testing.QueryRunner;
import com.facebook.presto.tests.AbstractTestQueryFramework;
import com.facebook.presto.tests.DistributedQueryRunner;
import com.google.common.collect.ImmutableMap;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
+import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.nio.file.Path;
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
import java.util.Map;
+import static com.facebook.presto.SystemSessionProperties.LEGACY_TIMESTAMP;
import static com.facebook.presto.iceberg.CatalogType.HIVE;
import static com.facebook.presto.iceberg.IcebergQueryRunner.ICEBERG_CATALOG;
import static com.facebook.presto.iceberg.IcebergQueryRunner.getIcebergDataDirectoryPath;
import static com.facebook.presto.testing.TestingSession.testSessionBuilder;
+import static java.lang.String.format;
+import static org.testng.Assert.assertTrue;
public class TestIcebergTableVersion
extends AbstractTestQueryFramework
@@ -271,6 +281,55 @@ public void testTableVersionMisc()
assertQuery("SELECT count(*) FROM " + viewName3 + " INNER JOIN " + viewName4 + " ON " + viewName3 + ".id = " + viewName4 + ".id", "VALUES 2");
}
+ @DataProvider(name = "timezones")
+ public Object[][] timezones()
+ {
+ return new Object[][] {
+ {"UTC", true},
+ {"America/Los_Angeles", true},
+ {"Asia/Shanghai", true},
+ {"UTC", false}};
+ }
+
+ @Test(dataProvider = "timezones")
+ public void testTableVersionWithTimestamp(String zoneId, boolean legacyTimestamp)
+ {
+ Session session = sessionForTimezone(zoneId, legacyTimestamp);
+ String tableName = schemaName + "." + "table_version_with_timestamp";
+ try {
+ assertUpdate(session, "CREATE TABLE " + tableName + " (id integer, desc varchar) WITH(partitioning = ARRAY['id'])");
+ assertUpdate(session, "INSERT INTO " + tableName + " VALUES(1, 'aaa')", 1);
+ waitUntilAfter(System.currentTimeMillis());
+
+ long timestampMillis1 = System.currentTimeMillis();
+ String timestampWithoutTZ1 = getTimestampString(timestampMillis1, zoneId);
+ waitUntilAfter(timestampMillis1);
+
+ assertUpdate(session, "INSERT INTO " + tableName + " VALUES(2, 'bbb')", 1);
+ waitUntilAfter(System.currentTimeMillis());
+
+ long timestampMillis2 = System.currentTimeMillis();
+ String timestampWithoutTZ2 = getTimestampString(timestampMillis2, zoneId);
+ waitUntilAfter(timestampMillis2);
+
+ assertUpdate(session, "INSERT INTO " + tableName + " VALUES(3, 'ccc')", 1);
+ waitUntilAfter(System.currentTimeMillis());
+
+ long timestampMillis3 = System.currentTimeMillis();
+ String timestampWithoutTZ3 = getTimestampString(timestampMillis3, zoneId);
+
+ assertQuery(session, "SELECT desc FROM " + tableName + " FOR TIMESTAMP AS OF TIMESTAMP " + "'" + timestampWithoutTZ1 + "'", "VALUES 'aaa'");
+ assertQuery(session, "SELECT desc FROM " + tableName + " FOR TIMESTAMP BEFORE TIMESTAMP " + "'" + timestampWithoutTZ1 + "'", "VALUES 'aaa'");
+ assertQuery(session, "SELECT desc FROM " + tableName + " FOR TIMESTAMP AS OF TIMESTAMP " + "'" + timestampWithoutTZ2 + "'", "VALUES 'aaa', 'bbb'");
+ assertQuery(session, "SELECT desc FROM " + tableName + " FOR TIMESTAMP BEFORE TIMESTAMP " + "'" + timestampWithoutTZ2 + "'", "VALUES 'aaa', 'bbb'");
+ assertQuery(session, "SELECT desc FROM " + tableName + " FOR TIMESTAMP AS OF TIMESTAMP " + "'" + timestampWithoutTZ3 + "'", "VALUES 'aaa', 'bbb', 'ccc'");
+ assertQuery(session, "SELECT desc FROM " + tableName + " FOR TIMESTAMP BEFORE TIMESTAMP " + "'" + timestampWithoutTZ3 + "'", "VALUES 'aaa', 'bbb', 'ccc'");
+ }
+ finally {
+ assertQuerySucceeds("DROP TABLE IF EXISTS " + tableName);
+ }
+ }
+
@Test
public void testTableVersionErrors()
{
@@ -284,23 +343,56 @@ public void testTableVersionErrors()
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR VERSION AS OF " + tab2VersionId1 + " - " + tab2VersionId1, "Iceberg snapshot ID does not exists: 0");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR VERSION AS OF CAST (100 AS BIGINT)", "Iceberg snapshot ID does not exists: 100");
- assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF 100", ".* Type integer is invalid. Supported table version AS OF/BEFORE expression type is Timestamp with Time Zone.");
- assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF 'bad'", ".* Type varchar\\(3\\) is invalid. Supported table version AS OF/BEFORE expression type is Timestamp with Time Zone.");
+ assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF 100", ".* Type integer is invalid. Supported table version AS OF/BEFORE expression type is Timestamp or Timestamp with Time Zone.");
+ assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF 'bad'", ".* Type varchar\\(3\\) is invalid. Supported table version AS OF/BEFORE expression type is Timestamp or Timestamp with Time Zone.");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF id", ".* cannot be resolved");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF (SELECT CURRENT_TIMESTAMP)", ".* Constant expression cannot contain a subquery");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF NULL", "Table version AS OF/BEFORE expression cannot be NULL for .*");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF TIMESTAMP " + "'" + tab2Timestamp1 + "' - INTERVAL '1' MONTH", "No history found based on timestamp for table \"test_tt_schema\".\"test_table_version_tab2\"");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF CAST ('2023-01-01' AS TIMESTAMP WITH TIME ZONE)", "No history found based on timestamp for table \"test_tt_schema\".\"test_table_version_tab2\"");
- assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF CAST ('2023-01-01' AS TIMESTAMP)", ".* Type timestamp is invalid. Supported table version AS OF/BEFORE expression type is Timestamp with Time Zone.");
- assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF CAST ('2023-01-01' AS DATE)", ".* Type date is invalid. Supported table version AS OF/BEFORE expression type is Timestamp with Time Zone.");
- assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF CURRENT_DATE", ".* Type date is invalid. Supported table version AS OF/BEFORE expression type is Timestamp with Time Zone.");
- assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF TIMESTAMP '2023-01-01 00:00:00.000'", ".* Type timestamp is invalid. Supported table version AS OF/BEFORE expression type is Timestamp with Time Zone.");
+ assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF CAST ('2023-01-01' AS TIMESTAMP)", "No history found based on timestamp for table \"test_tt_schema\".\"test_table_version_tab2\"");
+ assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF CAST ('2023-01-01' AS DATE)", ".* Type date is invalid. Supported table version AS OF/BEFORE expression type is Timestamp or Timestamp with Time Zone.");
+ assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF CURRENT_DATE", ".* Type date is invalid. Supported table version AS OF/BEFORE expression type is Timestamp or Timestamp with Time Zone.");
+ assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP AS OF TIMESTAMP '2023-01-01 00:00:00.000'", "No history found based on timestamp for table \"test_tt_schema\".\"test_table_version_tab2\"");
assertQueryFails("SELECT desc FROM " + tableName1 + " FOR VERSION BEFORE " + tab1VersionId1 + " ORDER BY 1", "No history found based on timestamp for table \"test_tt_schema\".\"test_table_version_tab1\"");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP BEFORE TIMESTAMP " + "'" + tab2Timestamp1 + "' - INTERVAL '1' MONTH", "No history found based on timestamp for table \"test_tt_schema\".\"test_table_version_tab2\"");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR VERSION BEFORE 100", ".* Type integer is invalid. Supported table version AS OF/BEFORE expression type is BIGINT or VARCHAR");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR VERSION BEFORE " + tab2VersionId1 + " - " + tab2VersionId1, "Iceberg snapshot ID does not exists: 0");
- assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP BEFORE 'bad'", ".* Type varchar\\(3\\) is invalid. Supported table version AS OF/BEFORE expression type is Timestamp with Time Zone.");
+ assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP BEFORE 'bad'", ".* Type varchar\\(3\\) is invalid. Supported table version AS OF/BEFORE expression type is Timestamp or Timestamp with Time Zone.");
assertQueryFails("SELECT desc FROM " + tableName2 + " FOR TIMESTAMP BEFORE NULL", "Table version AS OF/BEFORE expression cannot be NULL for .*");
}
+
+ private Session sessionForTimezone(String zoneId, boolean legacyTimestamp)
+ {
+ SessionBuilder sessionBuilder = Session.builder(getSession())
+ .setSystemProperty(LEGACY_TIMESTAMP, String.valueOf(legacyTimestamp));
+ if (legacyTimestamp) {
+ sessionBuilder.setTimeZoneKey(TimeZoneKey.getTimeZoneKey(zoneId));
+ }
+ return sessionBuilder.build();
+ }
+
+ private long waitUntilAfter(long snapshotTimeMillis)
+ {
+ long currentTimeMillis = System.currentTimeMillis();
+ assertTrue(snapshotTimeMillis - currentTimeMillis <= 10,
+ format("Snapshot time %s is greater than the current time %s by more than 10ms", snapshotTimeMillis, currentTimeMillis));
+
+ while (currentTimeMillis <= snapshotTimeMillis) {
+ currentTimeMillis = System.currentTimeMillis();
+ }
+ return currentTimeMillis;
+ }
+
+ private String getTimestampString(long timeMillsUtc, String zoneId)
+ {
+ Instant instant = Instant.ofEpochMilli(timeMillsUtc);
+ LocalDateTime localDateTime = instant
+ .atZone(ZoneId.of(zoneId))
+ .toLocalDateTime();
+ DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");
+ formatter = formatter.withZone(ZoneId.of(zoneId));
+ return localDateTime.format(formatter);
+ }
}
diff --git a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergDistributedRest.java b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergDistributedRest.java
index 3d306dc56c5cb..06965eb0e32ed 100644
--- a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergDistributedRest.java
+++ b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergDistributedRest.java
@@ -14,7 +14,9 @@
package com.facebook.presto.iceberg.rest;
import com.facebook.airlift.http.server.testing.TestingHttpServer;
+import com.facebook.presto.Session;
import com.facebook.presto.iceberg.IcebergDistributedTestBase;
+import com.facebook.presto.spi.security.Identity;
import com.facebook.presto.testing.QueryRunner;
import com.google.common.collect.ImmutableMap;
import org.assertj.core.util.Files;
@@ -29,9 +31,11 @@
import static com.facebook.presto.iceberg.CatalogType.REST;
import static com.facebook.presto.iceberg.FileFormat.PARQUET;
+import static com.facebook.presto.iceberg.IcebergQueryRunner.ICEBERG_CATALOG;
import static com.facebook.presto.iceberg.IcebergQueryRunner.createIcebergQueryRunner;
import static com.facebook.presto.iceberg.rest.IcebergRestTestUtil.getRestServer;
import static com.facebook.presto.iceberg.rest.IcebergRestTestUtil.restConnectorProperties;
+import static com.facebook.presto.testing.TestingSession.testSessionBuilder;
import static com.google.common.io.MoreFiles.deleteRecursively;
import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
@@ -83,9 +87,14 @@ public void tearDown()
protected QueryRunner createQueryRunner()
throws Exception
{
+ Map<String, String> connectorProperties = ImmutableMap.<String, String>builder()
+ .putAll(restConnectorProperties(serverUri))
+ .put("iceberg.rest.session.type", SessionType.USER.name())
+ .build();
+
return createIcebergQueryRunner(
ImmutableMap.of(),
- restConnectorProperties(serverUri),
+ connectorProperties,
PARQUET,
true,
false,
@@ -101,4 +110,28 @@ public void testDeleteOnV1Table()
.isInstanceOf(RuntimeException.class)
.hasMessageMatching("Cannot downgrade v2 table to v1");
}
+
+ @Test
+ public void testRestUserSessionAuthorization()
+ {
+ // Query with default user should succeed
+ assertQuerySucceeds(getSession(), "SHOW SCHEMAS");
+
+ String unauthorizedUser = "unauthorized_user";
+ Session unauthorizedUserSession = testSessionBuilder()
+ .setCatalog(ICEBERG_CATALOG)
+ .setUserAgent(unauthorizedUser)
+ .setIdentity(new Identity(
+ unauthorizedUser,
+ Optional.empty(),
+ ImmutableMap.of(),
+ ImmutableMap.of(),
+ ImmutableMap.of(),
+ Optional.of(unauthorizedUser),
+ Optional.empty()))
+ .build();
+
+ // Query with different user should fail
+ assertQueryFails(unauthorizedUserSession, "SHOW SCHEMAS", "Forbidden: User not authorized");
+ }
}
diff --git a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergRestConfig.java b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergRestConfig.java
index 730a58d862c5b..a332a8f6e6fc0 100644
--- a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergRestConfig.java
+++ b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergRestConfig.java
@@ -32,6 +32,7 @@ public void testDefaults()
assertRecordedDefaults(ConfigAssertions.recordDefaults(IcebergRestConfig.class)
.setServerUri(null)
.setAuthenticationType(null)
+ .setAuthenticationServerUri(null)
.setCredential(null)
.setToken(null)
.setSessionType(null));
@@ -43,6 +44,7 @@ public void testExplicitPropertyMappings()
Map<String, String> properties = ImmutableMap.<String, String>builder()
.put("iceberg.rest.uri", "http://localhost:xxx")
.put("iceberg.rest.auth.type", "OAUTH2")
+ .put("iceberg.rest.auth.oauth2.uri", "http://localhost:yyy")
.put("iceberg.rest.auth.oauth2.credential", "key:secret")
.put("iceberg.rest.auth.oauth2.token", "SXVLUXUhIExFQ0tFUiEK")
.put("iceberg.rest.session.type", "USER")
@@ -51,6 +53,7 @@ public void testExplicitPropertyMappings()
IcebergRestConfig expected = new IcebergRestConfig()
.setServerUri("http://localhost:xxx")
.setAuthenticationType(OAUTH2)
+ .setAuthenticationServerUri("http://localhost:yyy")
.setCredential("key:secret")
.setToken("SXVLUXUhIExFQ0tFUiEK")
.setSessionType(USER);
diff --git a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergSmokeRest.java b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergSmokeRest.java
index 65c34bc31bbee..d74c11c4ad063 100644
--- a/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergSmokeRest.java
+++ b/presto-iceberg/src/test/java/com/facebook/presto/iceberg/rest/TestIcebergSmokeRest.java
@@ -29,6 +29,7 @@
import com.facebook.presto.testing.QueryRunner;
import com.google.common.collect.ImmutableMap;
import org.apache.iceberg.Table;
+import org.apache.iceberg.rest.RESTCatalog;
import org.assertj.core.util.Files;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
@@ -42,12 +43,15 @@
import static com.facebook.presto.iceberg.FileFormat.PARQUET;
import static com.facebook.presto.iceberg.IcebergQueryRunner.ICEBERG_CATALOG;
import static com.facebook.presto.iceberg.IcebergUtil.getNativeIcebergTable;
+import static com.facebook.presto.iceberg.rest.AuthenticationType.OAUTH2;
import static com.facebook.presto.iceberg.rest.IcebergRestTestUtil.getRestServer;
import static com.facebook.presto.iceberg.rest.IcebergRestTestUtil.restConnectorProperties;
import static com.google.common.io.MoreFiles.deleteRecursively;
import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE;
import static java.lang.String.format;
+import static org.apache.iceberg.rest.auth.OAuth2Properties.OAUTH2_SERVER_URI;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.testng.Assert.assertEquals;
@Test
public class TestIcebergSmokeRest
@@ -106,12 +110,11 @@ protected QueryRunner createQueryRunner()
Optional.of(warehouseLocation.toPath()));
}
- protected IcebergNativeCatalogFactory getCatalogFactory()
+ protected IcebergNativeCatalogFactory getCatalogFactory(IcebergRestConfig restConfig)
{
IcebergConfig icebergConfig = new IcebergConfig()
.setCatalogType(REST)
- .setCatalogWarehouse(warehouseLocation.getAbsolutePath().toString());
- IcebergRestConfig restConfig = new IcebergRestConfig().setServerUri(serverUri);
+ .setCatalogWarehouse(warehouseLocation.getAbsolutePath());
return new IcebergRestCatalogFactory(
icebergConfig,
@@ -125,7 +128,8 @@ protected IcebergNativeCatalogFactory getCatalogFactory()
@Override
protected Table getIcebergTable(ConnectorSession session, String schema, String tableName)
{
- return getNativeIcebergTable(getCatalogFactory(),
+ IcebergRestConfig restConfig = new IcebergRestConfig().setServerUri(serverUri);
+ return getNativeIcebergTable(getCatalogFactory(restConfig),
session,
SchemaTableName.valueOf(schema + "." + tableName));
}
@@ -192,4 +196,20 @@ public void testMetadataDeleteOnTableWithUnsupportedSpecsWhoseDataAllDeleted(Str
super.testMetadataDeleteOnTableWithUnsupportedSpecsWhoseDataAllDeleted(version, mode);
}
}
+
+ @Test
+ public void testSetOauth2ServerUriProperty()
+ {
+ String authEndpoint = "http://localhost:8888";
+ IcebergRestConfig restConfig = new IcebergRestConfig()
+ .setServerUri(serverUri)
+ .setAuthenticationType(OAUTH2)
+ .setToken("SXVLUXUhIExFQ0tFUiEK")
+ .setAuthenticationServerUri(authEndpoint);
+
+ IcebergRestCatalogFactory catalogFactory = (IcebergRestCatalogFactory) getCatalogFactory(restConfig);
+ RESTCatalog catalog = (RESTCatalog) catalogFactory.getCatalog(getSession().toConnectorSession());
+
+ assertEquals(catalog.properties().get(OAUTH2_SERVER_URI), authEndpoint);
+ }
}
diff --git a/presto-iceberg/src/test/java/org/apache/iceberg/rest/IcebergRestCatalogServlet.java b/presto-iceberg/src/test/java/org/apache/iceberg/rest/IcebergRestCatalogServlet.java
index e9aa46a589683..d05eb23fcda28 100644
--- a/presto-iceberg/src/test/java/org/apache/iceberg/rest/IcebergRestCatalogServlet.java
+++ b/presto-iceberg/src/test/java/org/apache/iceberg/rest/IcebergRestCatalogServlet.java
@@ -14,6 +14,9 @@
package org.apache.iceberg.rest;
import com.facebook.airlift.log.Logger;
+import io.jsonwebtoken.Claims;
+import io.jsonwebtoken.Jwts;
+import io.jsonwebtoken.MalformedJwtException;
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.HttpHeaders;
import org.apache.iceberg.exceptions.RESTException;
@@ -94,6 +97,14 @@ protected void execute(ServletRequestContext context, HttpServletResponse respon
response.setStatus(HttpServletResponse.SC_OK);
responseHeaders.forEach(response::setHeader);
+ String token = context.headers.get("Authorization");
+ if (token != null && isRestUserSessionToken(token) && !isAuthorizedRestUserSessionToken(token)) {
+ context.errorResponse = ErrorResponse.builder()
+ .responseCode(HttpServletResponse.SC_FORBIDDEN)
+ .withMessage("User not authorized")
+ .build();
+ }
+
if (context.error().isPresent()) {
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
RESTObjectMapper.mapper().writeValue(response.getWriter(), context.error().get());
@@ -145,6 +156,33 @@ protected Consumer handle(HttpServletResponse response)
};
}
+ protected Claims getTokenClaims(String token)
+ {
+ token = token.replace("Bearer token-exchange-token:sub=", "");
+ return Jwts.parserBuilder().build().parseClaimsJwt(token).getBody();
+ }
+
+ protected boolean isRestUserSessionToken(String token)
+ {
+ try {
+ getTokenClaims(token);
+ }
+ catch (MalformedJwtException mje) {
+ // Not a json web token
+ return false;
+ }
+ return true;
+ }
+
+ protected boolean isAuthorizedRestUserSessionToken(String jwt)
+ {
+ Claims jwtClaims = getTokenClaims(jwt);
+ return "user".equals(jwtClaims.getSubject()) &&
+ "testversion".equals(jwtClaims.getIssuer()) &&
+ "user".equals(jwtClaims.get("user")) &&
+ "test".equals(jwtClaims.get("source"));
+ }
+
public static class ServletRequestContext
{
private HTTPMethod method;
diff --git a/presto-main/src/main/java/com/facebook/presto/Session.java b/presto-main/src/main/java/com/facebook/presto/Session.java
index 24441303ae1db..2e8c35f510d8e 100644
--- a/presto-main/src/main/java/com/facebook/presto/Session.java
+++ b/presto-main/src/main/java/com/facebook/presto/Session.java
@@ -15,6 +15,7 @@
import com.facebook.presto.common.RuntimeStats;
import com.facebook.presto.common.function.SqlFunctionProperties;
+import com.facebook.presto.common.resourceGroups.QueryType;
import com.facebook.presto.common.transaction.TransactionId;
import com.facebook.presto.common.type.TimeZoneKey;
import com.facebook.presto.cost.PlanCostEstimate;
@@ -99,6 +100,7 @@ public final class Session
private final Optional tracer;
private final WarningCollector warningCollector;
private final RuntimeStats runtimeStats;
+ private final Optional queryType;
private final OptimizerInformationCollector optimizerInformationCollector = new OptimizerInformationCollector();
private final OptimizerResultCollector optimizerResultCollector = new OptimizerResultCollector();
@@ -131,7 +133,8 @@ public Session(
Map sessionFunctions,
Optional tracer,
WarningCollector warningCollector,
- RuntimeStats runtimeStats)
+ RuntimeStats runtimeStats,
+ Optional queryType)
{
this.queryId = requireNonNull(queryId, "queryId is null");
this.transactionId = requireNonNull(transactionId, "transactionId is null");
@@ -172,7 +175,8 @@ public Session(
this.tracer = requireNonNull(tracer, "tracer is null");
this.warningCollector = requireNonNull(warningCollector, "warningCollector is null");
this.runtimeStats = requireNonNull(runtimeStats, "runtimeStats is null");
- this.context = new AccessControlContext(queryId, clientInfo, clientTags, source, warningCollector, runtimeStats);
+ this.queryType = requireNonNull(queryType, "queryType is null");
+ this.context = new AccessControlContext(queryId, clientInfo, clientTags, source, warningCollector, runtimeStats, queryType);
}
public QueryId getQueryId()
@@ -353,6 +357,11 @@ public Map getPlanNodeCostMap()
return planNodeCostMap;
}
+ public Optional getQueryType()
+ {
+ return queryType;
+ }
+
public Session beginTransactionId(TransactionId transactionId, TransactionManager transactionManager, AccessControl accessControl)
{
requireNonNull(transactionId, "transactionId is null");
@@ -447,63 +456,8 @@ public Session beginTransactionId(TransactionId transactionId, TransactionManage
sessionFunctions,
tracer,
warningCollector,
- runtimeStats);
- }
-
- public Session withDefaultProperties(
- SystemSessionPropertyConfiguration systemPropertyConfiguration,
- Map> catalogPropertyDefaults)
- {
- requireNonNull(systemPropertyConfiguration, "systemPropertyConfiguration is null");
- requireNonNull(catalogPropertyDefaults, "catalogPropertyDefaults is null");
-
- // to remove this check properties must be authenticated and validated as in beginTransactionId
- checkState(
- !this.transactionId.isPresent() && this.connectorProperties.isEmpty(),
- "Session properties cannot be overridden once a transaction is active");
-
- Map systemProperties = new HashMap<>();
- systemProperties.putAll(systemPropertyConfiguration.systemPropertyDefaults);
- systemProperties.putAll(this.systemProperties);
- systemProperties.putAll(systemPropertyConfiguration.systemPropertyOverrides);
-
- Map> connectorProperties = catalogPropertyDefaults.entrySet().stream()
- .map(entry -> Maps.immutableEntry(entry.getKey(), new HashMap<>(entry.getValue())))
- .collect(Collectors.toMap(Entry::getKey, Entry::getValue));
- for (Entry> catalogProperties : this.unprocessedCatalogProperties.entrySet()) {
- String catalog = catalogProperties.getKey();
- for (Entry entry : catalogProperties.getValue().entrySet()) {
- connectorProperties.computeIfAbsent(catalog, id -> new HashMap<>())
- .put(entry.getKey(), entry.getValue());
- }
- }
-
- return new Session(
- queryId,
- transactionId,
- clientTransactionSupport,
- identity,
- source,
- catalog,
- schema,
- traceToken,
- timeZoneKey,
- locale,
- remoteUserAddress,
- userAgent,
- clientInfo,
- clientTags,
- resourceEstimates,
- startTime,
- systemProperties,
- ImmutableMap.of(),
- connectorProperties,
- sessionPropertyManager,
- preparedStatements,
- sessionFunctions,
- tracer,
- warningCollector,
- runtimeStats);
+ runtimeStats,
+ queryType);
}
public ConnectorSession toConnectorSession()
@@ -630,6 +584,7 @@ public static class SessionBuilder
private final SessionPropertyManager sessionPropertyManager;
private final Map preparedStatements = new HashMap<>();
private final Map sessionFunctions = new HashMap<>();
+ private Optional queryType = Optional.empty();
private WarningCollector warningCollector = WarningCollector.NOOP;
private RuntimeStats runtimeStats = new RuntimeStats();
@@ -665,6 +620,7 @@ private SessionBuilder(Session session)
this.tracer = requireNonNull(session.tracer, "tracer is null");
this.warningCollector = requireNonNull(session.warningCollector, "warningCollector is null");
this.runtimeStats = requireNonNull(session.runtimeStats, "runtimeStats is null");
+ this.queryType = requireNonNull(session.queryType, "queryType is null");
}
public SessionBuilder setQueryId(QueryId queryId)
@@ -821,11 +777,57 @@ public SessionBuilder setRuntimeStats(RuntimeStats runtimeStats)
return this;
}
+ public SessionBuilder setQueryType(Optional queryType)
+ {
+ this.queryType = requireNonNull(queryType, "queryType is null");
+ return this;
+ }
+
public T getSystemProperty(String name, Class type)
{
return sessionPropertyManager.decodeSystemPropertyValue(name, systemProperties.get(name), type);
}
+ public WarningCollector getWarningCollector()
+ {
+ return this.warningCollector;
+ }
+
+ public Map getPreparedStatements()
+ {
+ return this.preparedStatements;
+ }
+
+ public Identity getIdentity()
+ {
+ return this.identity;
+ }
+
+ public Optional getSource()
+ {
+ return Optional.ofNullable(this.source);
+ }
+
+ public Set getClientTags()
+ {
+ return this.clientTags;
+ }
+
+ public Optional getClientInfo()
+ {
+ return Optional.ofNullable(this.clientInfo);
+ }
+
+ public Map getSystemProperties()
+ {
+ return this.systemProperties;
+ }
+
+ public Map> getUnprocessedCatalogProperties()
+ {
+ return this.catalogSessionProperties;
+ }
+
public Session build()
{
return new Session(
@@ -853,7 +855,42 @@ public Session build()
sessionFunctions,
tracer,
warningCollector,
- runtimeStats);
+ runtimeStats,
+ queryType);
+ }
+
+ public void applyDefaultProperties(SystemSessionPropertyConfiguration systemPropertyConfiguration, Map> catalogPropertyDefaults)
+ {
+ requireNonNull(systemPropertyConfiguration, "systemPropertyConfiguration is null");
+ requireNonNull(catalogPropertyDefaults, "catalogPropertyDefaults is null");
+
+ // to remove this check properties must be authenticated and validated as in beginTransactionId
+ checkState(
+ this.transactionId == null && this.connectorProperties.isEmpty(),
+ "Session properties cannot be overridden once a transaction is active");
+
+ Map systemProperties = new HashMap<>();
+ systemProperties.putAll(systemPropertyConfiguration.systemPropertyDefaults);
+ systemProperties.putAll(this.systemProperties);
+ systemProperties.putAll(systemPropertyConfiguration.systemPropertyOverrides);
+ this.systemProperties.putAll(systemProperties);
+
+ Map> connectorProperties = catalogPropertyDefaults.entrySet().stream()
+ .map(entry -> Maps.immutableEntry(entry.getKey(), new HashMap<>(entry.getValue())))
+ .collect(Collectors.toMap(Entry::getKey, Entry::getValue));
+ for (Entry> catalogProperties : this.catalogSessionProperties.entrySet()) {
+ String catalog = catalogProperties.getKey();
+ for (Entry entry : catalogProperties.getValue().entrySet()) {
+ connectorProperties.computeIfAbsent(catalog, id -> new HashMap<>()).put(entry.getKey(), entry.getValue());
+ }
+ }
+
+ for (Entry> catalogProperties : connectorProperties.entrySet()) {
+ String catalog = catalogProperties.getKey();
+ for (Entry entry : catalogProperties.getValue().entrySet()) {
+ setCatalogSessionProperty(catalog, entry.getKey(), entry.getValue());
+ }
+ }
}
}
diff --git a/presto-main/src/main/java/com/facebook/presto/SessionRepresentation.java b/presto-main/src/main/java/com/facebook/presto/SessionRepresentation.java
index 9089840b82631..7651ab2b96489 100644
--- a/presto-main/src/main/java/com/facebook/presto/SessionRepresentation.java
+++ b/presto-main/src/main/java/com/facebook/presto/SessionRepresentation.java
@@ -338,6 +338,7 @@ public Session toSession(SessionPropertyManager sessionPropertyManager, Map validateValueIsPowerOfTwo(requireNonNull(value, "value is null"), TASK_WRITER_COUNT),
+ featuresConfig.isNativeExecutionEnabled() ? value -> validateNullablePositiveIntegerValue(value, TASK_WRITER_COUNT) : value -> validateValueIsPowerOfTwo(value, TASK_WRITER_COUNT),
value -> value),
new PropertyMetadata<>(
TASK_PARTITIONED_WRITER_COUNT,
@@ -528,7 +529,7 @@ public SystemSessionProperties(
Integer.class,
taskManagerConfig.getPartitionedWriterCount(),
false,
- value -> validateValueIsPowerOfTwo(value, TASK_PARTITIONED_WRITER_COUNT),
+ featuresConfig.isNativeExecutionEnabled() ? value -> validateNullablePositiveIntegerValue(value, TASK_PARTITIONED_WRITER_COUNT) : value -> validateValueIsPowerOfTwo(value, TASK_PARTITIONED_WRITER_COUNT),
value -> value),
booleanProperty(
REDISTRIBUTE_WRITES,
@@ -1750,6 +1751,13 @@ public SystemSessionProperties(
"of lazy inputs unless required. Should only be used for debugging.",
false,
true),
+ booleanProperty(
+ NATIVE_SELECTIVE_NIMBLE_READER_ENABLED,
+ "Temporary flag to control whether selective Nimble reader should be " +
+ "used in this query or not. Will be removed after the selective Nimble " +
+ "reader is fully rolled out.",
+ false,
+ true),
longProperty(
NATIVE_MAX_PARTIAL_AGGREGATION_MEMORY,
"The max partial aggregation memory when data reduction is not optimal.",
diff --git a/presto-main/src/main/java/com/facebook/presto/dispatcher/DispatchManager.java b/presto-main/src/main/java/com/facebook/presto/dispatcher/DispatchManager.java
index 6fef7dc46d2f9..2639529ba1abd 100644
--- a/presto-main/src/main/java/com/facebook/presto/dispatcher/DispatchManager.java
+++ b/presto-main/src/main/java/com/facebook/presto/dispatcher/DispatchManager.java
@@ -54,6 +54,7 @@
import java.util.Optional;
import java.util.concurrent.Executor;
+import static com.facebook.presto.Session.SessionBuilder;
import static com.facebook.presto.SystemSessionProperties.getAnalyzerType;
import static com.facebook.presto.spi.StandardErrorCode.QUERY_TEXT_TOO_LARGE;
import static com.facebook.presto.util.AnalyzerUtil.createAnalyzerOptions;
@@ -259,6 +260,7 @@ public ListenableFuture> createQuery(QueryId queryId, String slug, int retryCo
private void createQueryInternal(QueryId queryId, String slug, int retryCount, SessionContext sessionContext, String query, ResourceGroupManager resourceGroupManager)
{
Session session = null;
+ SessionBuilder sessionBuilder = null;
PreparedQuery preparedQuery;
try {
if (query.length() > maxQueryLength) {
@@ -268,16 +270,18 @@ private void createQueryInternal(QueryId queryId, String slug, int retryCoun
}
// decode session
- session = sessionSupplier.createSession(queryId, sessionContext, warningCollectorFactory);
+ sessionBuilder = sessionSupplier.createSessionBuilder(queryId, sessionContext, warningCollectorFactory);
+ session = sessionBuilder.build();
// prepare query
- AnalyzerOptions analyzerOptions = createAnalyzerOptions(session, session.getWarningCollector());
+ AnalyzerOptions analyzerOptions = createAnalyzerOptions(session, sessionBuilder.getWarningCollector());
QueryPreparerProvider queryPreparerProvider = queryPreparerProviderManager.getQueryPreparerProvider(getAnalyzerType(session));
- preparedQuery = queryPreparerProvider.getQueryPreparer().prepareQuery(analyzerOptions, query, session.getPreparedStatements(), session.getWarningCollector());
+ preparedQuery = queryPreparerProvider.getQueryPreparer().prepareQuery(analyzerOptions, query, sessionBuilder.getPreparedStatements(), sessionBuilder.getWarningCollector());
query = preparedQuery.getFormattedQuery().orElse(query);
// select resource group
Optional queryType = preparedQuery.getQueryType();
+ sessionBuilder.setQueryType(queryType);
SelectionContext selectionContext = resourceGroupManager.selectGroup(new SelectionCriteria(
sessionContext.getIdentity().getPrincipal().isPresent(),
sessionContext.getIdentity().getUser(),
@@ -290,7 +294,12 @@ private void createQueryInternal(QueryId queryId, String slug, int retryCoun
sessionContext.getIdentity().getPrincipal().map(Principal::getName)));
// apply system default session properties (does not override user set properties)
- session = sessionPropertyDefaults.newSessionWithDefaultProperties(session, queryType.map(Enum::name), Optional.of(selectionContext.getResourceGroupId()));
+ sessionPropertyDefaults.applyDefaultProperties(sessionBuilder, queryType.map(Enum::name), Optional.of(selectionContext.getResourceGroupId()));
+
+ session = sessionBuilder.build();
+ if (sessionContext.getTransactionId().isPresent()) {
+ session = session.beginTransactionId(sessionContext.getTransactionId().get(), transactionManager, accessControl);
+ }
// mark existing transaction as active
transactionManager.activateTransaction(session, preparedQuery.isTransactionControlStatement(), accessControl);
diff --git a/presto-main/src/main/java/com/facebook/presto/execution/TaskManagerConfig.java b/presto-main/src/main/java/com/facebook/presto/execution/TaskManagerConfig.java
index ea34e1d4cc363..24c87fc2095f3 100644
--- a/presto-main/src/main/java/com/facebook/presto/execution/TaskManagerConfig.java
+++ b/presto-main/src/main/java/com/facebook/presto/execution/TaskManagerConfig.java
@@ -428,14 +428,14 @@ public TaskManagerConfig setInfoMaxAge(Duration infoMaxAge)
}
@Min(1)
- @PowerOfTwo
public int getWriterCount()
{
return writerCount;
}
+ // NOTE: writer count needs to be a power of two for java query engine.
@Config("task.writer-count")
- @ConfigDescription("Number of writers per task")
+ @ConfigDescription("Number of writer threads per task")
public TaskManagerConfig setWriterCount(int writerCount)
{
this.writerCount = writerCount;
@@ -443,14 +443,14 @@ public TaskManagerConfig setWriterCount(int writerCount)
}
@Min(1)
- @PowerOfTwo
public Integer getPartitionedWriterCount()
{
return partitionedWriterCount;
}
+ // NOTE: partitioned writer count needs to be a power of two for java query engine.
@Config("task.partitioned-writer-count")
- @ConfigDescription("Number of writers per task for partitioned writes. If not set, the number set by task.writer-count will be used")
+ @ConfigDescription("Number of writer threads per task for partitioned writes. If not set, the number set by task.writer-count will be used")
public TaskManagerConfig setPartitionedWriterCount(Integer partitionedWriterCount)
{
this.partitionedWriterCount = partitionedWriterCount;
diff --git a/presto-main/src/main/java/com/facebook/presto/operator/scalar/IpPrefixFunctions.java b/presto-main/src/main/java/com/facebook/presto/operator/scalar/IpPrefixFunctions.java
index b47e8f48ce04e..5f7f9a27d19f7 100644
--- a/presto-main/src/main/java/com/facebook/presto/operator/scalar/IpPrefixFunctions.java
+++ b/presto-main/src/main/java/com/facebook/presto/operator/scalar/IpPrefixFunctions.java
@@ -52,6 +52,8 @@ public final class IpPrefixFunctions
{
private static final BigInteger TWO = BigInteger.valueOf(2);
+ private static final Block EMPTY_BLOCK = IPPREFIX.createBlockBuilder(null, 0).build();
+
/**
* Our definitions for what IANA considers not "globally reachable" are taken from the docs at
* https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml and
@@ -290,6 +292,71 @@ public static boolean isPrivateIpAddress(@SqlType(StandardTypes.IPADDRESS) Slice
return false;
}
+ @Description("Split the input prefix into subnets the size of the new prefix length.")
+ @ScalarFunction("ip_prefix_subnets")
+ @SqlType("array(IPPREFIX)")
+ public static Block ipPrefixSubnets(@SqlType(StandardTypes.IPPREFIX) Slice prefix, @SqlType(StandardTypes.BIGINT) long newPrefixLength)
+ {
+ boolean inputIsIpV4 = isIpv4(prefix);
+
+ if (newPrefixLength < 0 || (inputIsIpV4 && newPrefixLength > 32) || (!inputIsIpV4 && newPrefixLength > 128)) {
+ throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Invalid prefix length for IPv" + (inputIsIpV4 ? "4" : "6") + ": " + newPrefixLength);
+ }
+
+ int inputPrefixLength = getPrefixLength(prefix);
+ // An IP prefix is a 'network', or group of contiguous IP addresses. The common format for describing IP prefixes is
+ // uses 2 parts separated by a '/': (1) the IP address part and the (2) prefix length part (also called subnet size or CIDR).
+ // For example, in 9.255.255.0/24, 9.255.255.0 is the IP address part and 24 is the prefix length.
+ // The prefix length describes how many IP addresses the prefix contains in terms of the leading number of bits required. A higher number of bits
+ // means smaller number of IP addresses. Subnets inherently mean smaller groups of IP addresses.
+ // We can only disaggregate a prefix if the prefix length is the same length or longer (more-specific) than the length of the input prefix.
+ // E.g., if the input prefix is 9.255.255.0/24, the prefix length can be /24, /25, /26, etc... but not 23 or larger value than 24.
+
+ int newPrefixCount = 0; // if inputPrefixLength > newPrefixLength, there are no new prefixes and we will return an empty array.
+ if (inputPrefixLength <= newPrefixLength) {
+ // Next, count how many new prefixes we will generate. In general, every difference in prefix length doubles the number new prefixes.
+ // For example if we start with 9.255.255.0/24, and want to split into /25s, we would have 2 new prefixes. If we wanted to split into /26s,
+ // we would have 4 new prefixes, and /27 would have 8 prefixes etc....
+ newPrefixCount = 1 << (newPrefixLength - inputPrefixLength); // 2^N
+ }
+
+ if (newPrefixCount == 0) {
+ return EMPTY_BLOCK;
+ }
+
+ BlockBuilder blockBuilder = IPPREFIX.createBlockBuilder(null, newPrefixCount);
+
+ if (newPrefixCount == 1) {
+ IPPREFIX.writeSlice(blockBuilder, prefix); // just return the original prefix in an array
+ return blockBuilder.build(); // returns empty or single entry
+ }
+
+ int ipVersionMaxBits = inputIsIpV4 ? 32 : 128;
+ BigInteger newPrefixIpCount = TWO.pow(ipVersionMaxBits - (int) newPrefixLength);
+
+ Slice startingIpAddressAsSlice = ipSubnetMin(prefix);
+ BigInteger currentIpAddress = toBigInteger(startingIpAddressAsSlice);
+
+ try {
+ for (int i = 0; i < newPrefixCount; i++) {
+ InetAddress asInetAddress = bigIntegerToIpAddress(currentIpAddress);
+ Slice ipPrefixAsSlice = castFromVarcharToIpPrefix(utf8Slice(InetAddresses.toAddrString(asInetAddress) + "/" + newPrefixLength));
+ IPPREFIX.writeSlice(blockBuilder, ipPrefixAsSlice);
+ currentIpAddress = currentIpAddress.add(newPrefixIpCount); // increment to start of next new prefix
+ }
+ }
+ catch (UnknownHostException ex) {
+ throw new PrestoException(GENERIC_INTERNAL_ERROR, "Unable to convert " + currentIpAddress + " to IP prefix", ex);
+ }
+
+ return blockBuilder.build();
+ }
+
+ private static int getPrefixLength(Slice ipPrefix)
+ {
+ return ipPrefix.getByte(IPPREFIX.getFixedSize() - 1) & 0xFF;
+ }
+
private static List generateMinIpPrefixes(BigInteger firstIpAddress, BigInteger lastIpAddress, int ipVersionMaxBits)
{
List ipPrefixSlices = new ArrayList<>();
diff --git a/presto-main/src/main/java/com/facebook/presto/operator/scalar/sql/ArraySqlFunctions.java b/presto-main/src/main/java/com/facebook/presto/operator/scalar/sql/ArraySqlFunctions.java
index b51855cc9a2ee..2fc1218ea2408 100644
--- a/presto-main/src/main/java/com/facebook/presto/operator/scalar/sql/ArraySqlFunctions.java
+++ b/presto-main/src/main/java/com/facebook/presto/operator/scalar/sql/ArraySqlFunctions.java
@@ -69,7 +69,7 @@ public static String arrayFrequency()
"m -> m)";
}
- @SqlInvokedScalarFunction(value = "array_duplicates", alias = {"array_dupes"}, deterministic = true, calledOnNullInput = false)
+ @SqlInvokedScalarFunction(value = "array_duplicates", deterministic = true, calledOnNullInput = false)
@Description("Returns set of elements that have duplicates")
@SqlParameter(name = "input", type = "array(T)")
@TypeParameter("T")
@@ -81,7 +81,7 @@ public static String arrayDuplicates()
"map_keys(map_filter(array_frequency(input), (k, v) -> v > 1)))";
}
- @SqlInvokedScalarFunction(value = "array_has_duplicates", alias = {"array_has_dupes"}, deterministic = true, calledOnNullInput = false)
+ @SqlInvokedScalarFunction(value = "array_has_duplicates", deterministic = true, calledOnNullInput = false)
@Description("Returns whether array has any duplicate element")
@TypeParameter("T")
@SqlParameter(name = "input", type = "array(T)")
diff --git a/presto-main/src/main/java/com/facebook/presto/security/AccessControlUtils.java b/presto-main/src/main/java/com/facebook/presto/security/AccessControlUtils.java
index 9a0a3dd25cb09..c29c65d46023d 100644
--- a/presto-main/src/main/java/com/facebook/presto/security/AccessControlUtils.java
+++ b/presto-main/src/main/java/com/facebook/presto/security/AccessControlUtils.java
@@ -46,7 +46,8 @@ public static void checkPermissions(AccessControl accessControl, SecurityConfig
sessionContext.getClientTags(),
Optional.ofNullable(sessionContext.getSource()),
WarningCollector.NOOP,
- sessionContext.getRuntimeStats()),
+ sessionContext.getRuntimeStats(),
+ Optional.empty()),
identity.getPrincipal(),
identity.getUser());
}
@@ -71,7 +72,8 @@ public static Optional getAuthorizedIdentity(AccessControl a
sessionContext.getClientTags(),
Optional.ofNullable(sessionContext.getSource()),
WarningCollector.NOOP,
- sessionContext.getRuntimeStats()),
+ sessionContext.getRuntimeStats(),
+ Optional.empty()),
identity.getUser(),
sessionContext.getCertificates());
return Optional.of(authorizedIdentity);
diff --git a/presto-main/src/main/java/com/facebook/presto/server/NoOpSessionSupplier.java b/presto-main/src/main/java/com/facebook/presto/server/NoOpSessionSupplier.java
index 4b3ce0c3e7c04..a7717bda17c43 100644
--- a/presto-main/src/main/java/com/facebook/presto/server/NoOpSessionSupplier.java
+++ b/presto-main/src/main/java/com/facebook/presto/server/NoOpSessionSupplier.java
@@ -17,6 +17,8 @@
import com.facebook.presto.execution.warnings.WarningCollectorFactory;
import com.facebook.presto.spi.QueryId;
+import static com.facebook.presto.Session.SessionBuilder;
+
/**
* Used on workers.
*/
@@ -28,4 +30,10 @@ public Session createSession(QueryId queryId, SessionContext context, WarningCol
{
throw new UnsupportedOperationException();
}
+
+ @Override
+ public SessionBuilder createSessionBuilder(QueryId queryId, SessionContext context, WarningCollectorFactory warningCollectorFactory)
+ {
+ throw new UnsupportedOperationException();
+ }
}
diff --git a/presto-main/src/main/java/com/facebook/presto/server/QuerySessionSupplier.java b/presto-main/src/main/java/com/facebook/presto/server/QuerySessionSupplier.java
index 2d4062f56abe6..260611d190033 100644
--- a/presto-main/src/main/java/com/facebook/presto/server/QuerySessionSupplier.java
+++ b/presto-main/src/main/java/com/facebook/presto/server/QuerySessionSupplier.java
@@ -75,6 +75,16 @@ public QuerySessionSupplier(
@Override
public Session createSession(QueryId queryId, SessionContext context, WarningCollectorFactory warningCollectorFactory)
+ {
+ Session session = createSessionBuilder(queryId, context, warningCollectorFactory).build();
+ if (context.getTransactionId().isPresent()) {
+ session = session.beginTransactionId(context.getTransactionId().get(), transactionManager, accessControl);
+ }
+ return session;
+ }
+
+ @Override
+ public SessionBuilder createSessionBuilder(QueryId queryId, SessionContext context, WarningCollectorFactory warningCollectorFactory)
{
SessionBuilder sessionBuilder = Session.builder(sessionPropertyManager)
.setQueryId(queryId)
@@ -128,11 +138,7 @@ else if (context.getTimeZoneId() != null) {
WarningCollector warningCollector = warningCollectorFactory.create(sessionBuilder.getSystemProperty(WARNING_HANDLING, WarningHandlingLevel.class));
sessionBuilder.setWarningCollector(warningCollector);
- Session session = sessionBuilder.build();
- if (context.getTransactionId().isPresent()) {
- session = session.beginTransactionId(context.getTransactionId().get(), transactionManager, accessControl);
- }
- return session;
+ return sessionBuilder;
}
private Identity authenticateIdentity(QueryId queryId, SessionContext context)
diff --git a/presto-main/src/main/java/com/facebook/presto/server/SessionPropertyDefaults.java b/presto-main/src/main/java/com/facebook/presto/server/SessionPropertyDefaults.java
index 4ec9069973d09..71a778646f2a8 100644
--- a/presto-main/src/main/java/com/facebook/presto/server/SessionPropertyDefaults.java
+++ b/presto-main/src/main/java/com/facebook/presto/server/SessionPropertyDefaults.java
@@ -15,7 +15,6 @@
import com.facebook.airlift.log.Logger;
import com.facebook.airlift.node.NodeInfo;
-import com.facebook.presto.Session;
import com.facebook.presto.client.NodeVersion;
import com.facebook.presto.spi.resourceGroups.ResourceGroupId;
import com.facebook.presto.spi.resourceGroups.SessionPropertyConfigurationManagerContext;
@@ -35,6 +34,7 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
+import static com.facebook.presto.Session.SessionBuilder;
import static com.facebook.presto.util.PropertiesUtil.loadProperties;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
@@ -106,27 +106,27 @@ public void setConfigurationManager(String configManagerName, Map queryType,
Optional resourceGroupId)
{
SessionPropertyConfigurationManager configurationManager = delegate.get();
if (configurationManager == null) {
- return session;
+ return;
}
SessionConfigurationContext context = new SessionConfigurationContext(
- session.getIdentity().getUser(),
- session.getSource(),
- session.getClientTags(),
+ sessionBuilder.getIdentity().getUser(),
+ sessionBuilder.getSource(),
+ sessionBuilder.getClientTags(),
queryType,
resourceGroupId,
- session.getClientInfo(),
+ sessionBuilder.getClientInfo(),
prestoServerVersion);
SystemSessionPropertyConfiguration systemPropertyConfiguration = configurationManager.getSystemSessionProperties(context);
Map> catalogPropertyOverrides = configurationManager.getCatalogSessionProperties(context);
- return session.withDefaultProperties(systemPropertyConfiguration, catalogPropertyOverrides);
+ sessionBuilder.applyDefaultProperties(systemPropertyConfiguration, catalogPropertyOverrides);
}
}
diff --git a/presto-main/src/main/java/com/facebook/presto/server/SessionSupplier.java b/presto-main/src/main/java/com/facebook/presto/server/SessionSupplier.java
index d021f25724407..240439d67ce09 100644
--- a/presto-main/src/main/java/com/facebook/presto/server/SessionSupplier.java
+++ b/presto-main/src/main/java/com/facebook/presto/server/SessionSupplier.java
@@ -17,7 +17,11 @@
import com.facebook.presto.execution.warnings.WarningCollectorFactory;
import com.facebook.presto.spi.QueryId;
+import static com.facebook.presto.Session.SessionBuilder;
+
public interface SessionSupplier
{
Session createSession(QueryId queryId, SessionContext context, WarningCollectorFactory warningCollectorFactory);
+
+ SessionBuilder createSessionBuilder(QueryId queryId, SessionContext context, WarningCollectorFactory warningCollectorFactory);
}
diff --git a/presto-main/src/main/java/com/facebook/presto/server/TaskResource.java b/presto-main/src/main/java/com/facebook/presto/server/TaskResource.java
index 37450b850aa68..372326ad4ce48 100644
--- a/presto-main/src/main/java/com/facebook/presto/server/TaskResource.java
+++ b/presto-main/src/main/java/com/facebook/presto/server/TaskResource.java
@@ -322,8 +322,9 @@ public Response taskResultsHeaders(
public Response taskResultsHeaders(
@PathParam("taskId") TaskId taskId,
@PathParam("bufferId") OutputBufferId bufferId,
- @PathParam("token") final long unused)
+ @PathParam("token") final long token)
{
+ taskManager.acknowledgeTaskResults(taskId, bufferId, token);
return taskResultsHeaders(taskId, bufferId);
}
diff --git a/presto-main/src/main/java/com/facebook/presto/sql/analyzer/StatementAnalyzer.java b/presto-main/src/main/java/com/facebook/presto/sql/analyzer/StatementAnalyzer.java
index 49297ff101fc6..0bd1e9030404f 100644
--- a/presto-main/src/main/java/com/facebook/presto/sql/analyzer/StatementAnalyzer.java
+++ b/presto-main/src/main/java/com/facebook/presto/sql/analyzer/StatementAnalyzer.java
@@ -27,6 +27,7 @@
import com.facebook.presto.common.type.MapType;
import com.facebook.presto.common.type.RealType;
import com.facebook.presto.common.type.RowType;
+import com.facebook.presto.common.type.TimestampType;
import com.facebook.presto.common.type.TimestampWithTimeZoneType;
import com.facebook.presto.common.type.Type;
import com.facebook.presto.common.type.VarcharType;
@@ -1415,9 +1416,9 @@ private Optional processTableVersion(Table table, QualifiedObjectNa
}
Object evalStateExpr = evaluateConstantExpression(stateExpr, stateExprType, metadata, session, analysis.getParameters());
if (tableVersionType == TIMESTAMP) {
- if (!(stateExprType instanceof TimestampWithTimeZoneType)) {
+ if (!(stateExprType instanceof TimestampWithTimeZoneType || stateExprType instanceof TimestampType)) {
throw new SemanticException(TYPE_MISMATCH, stateExpr,
- "Type %s is invalid. Supported table version AS OF/BEFORE expression type is Timestamp with Time Zone.",
+ "Type %s is invalid. Supported table version AS OF/BEFORE expression type is Timestamp or Timestamp with Time Zone.",
stateExprType.getDisplayName());
}
}
diff --git a/presto-main/src/main/java/com/facebook/presto/sql/planner/EffectivePredicateExtractor.java b/presto-main/src/main/java/com/facebook/presto/sql/planner/EffectivePredicateExtractor.java
index 9e439fb863c40..a59b64b09f856 100644
--- a/presto-main/src/main/java/com/facebook/presto/sql/planner/EffectivePredicateExtractor.java
+++ b/presto-main/src/main/java/com/facebook/presto/sql/planner/EffectivePredicateExtractor.java
@@ -60,6 +60,7 @@
import static com.facebook.presto.common.function.OperatorType.EQUAL;
import static com.facebook.presto.common.type.BooleanType.BOOLEAN;
+import static com.facebook.presto.expressions.LogicalRowExpressions.FALSE_CONSTANT;
import static com.facebook.presto.expressions.LogicalRowExpressions.TRUE_CONSTANT;
import static com.facebook.presto.expressions.LogicalRowExpressions.extractConjuncts;
import static com.facebook.presto.spi.relation.SpecialFormExpression.Form.IS_NULL;
@@ -418,9 +419,10 @@ private RowExpression pullExpressionThroughVariables(RowExpression expression, C
for (RowExpression conjunct : new EqualityInference.Builder(functionManger).nonInferableConjuncts(expression)) {
if (determinismEvaluator.isDeterministic(conjunct)) {
RowExpression rewritten = equalityInference.rewriteExpression(conjunct, in(variables));
- if (rewritten != null) {
+ if (rewritten != null && (hasVariableReferences(rewritten) || rewritten.equals(FALSE_CONSTANT))) {
effectiveConjuncts.add(rewritten);
}
+ // If equality inference has reduced the predicate to an expression referring to only constants, it does not make sense to pull this predicate up
}
}
@@ -428,5 +430,10 @@ private RowExpression pullExpressionThroughVariables(RowExpression expression, C
return logicalRowExpressions.combineConjuncts(effectiveConjuncts.build());
}
+
+ private static boolean hasVariableReferences(RowExpression rowExpression)
+ {
+ return !VariablesExtractor.extractUnique(rowExpression).isEmpty();
+ }
}
}
diff --git a/presto-main/src/main/java/com/facebook/presto/sql/planner/iterative/rule/RewriteCaseToMap.java b/presto-main/src/main/java/com/facebook/presto/sql/planner/iterative/rule/RewriteCaseToMap.java
index 23c4e149cacef..30e4cedcba064 100644
--- a/presto-main/src/main/java/com/facebook/presto/sql/planner/iterative/rule/RewriteCaseToMap.java
+++ b/presto-main/src/main/java/com/facebook/presto/sql/planner/iterative/rule/RewriteCaseToMap.java
@@ -231,6 +231,10 @@ else if (!curCheck.equals(checkExpr)) {
}
}
+ if (checkExpr == null) {
+ return node;
+ }
+
// Here we have all values!
RowExpression mapLookup = makeMapAndAccess(whens, thens, checkExpr);
diff --git a/presto-main/src/main/java/com/facebook/presto/testing/LocalQueryRunner.java b/presto-main/src/main/java/com/facebook/presto/testing/LocalQueryRunner.java
index 93a30a8664d1b..4bc53439ed6da 100644
--- a/presto-main/src/main/java/com/facebook/presto/testing/LocalQueryRunner.java
+++ b/presto-main/src/main/java/com/facebook/presto/testing/LocalQueryRunner.java
@@ -549,7 +549,8 @@ private LocalQueryRunner(Session defaultSession, FeaturesConfig featuresConfig,
defaultSession.getSessionFunctions(),
defaultSession.getTracer(),
defaultSession.getWarningCollector(),
- defaultSession.getRuntimeStats());
+ defaultSession.getRuntimeStats(),
+ defaultSession.getQueryType());
dataDefinitionTask = ImmutableMap., DataDefinitionTask>>builder()
.put(CreateTable.class, new CreateTableTask())
diff --git a/presto-main/src/test/java/com/facebook/presto/execution/executor/Histogram.java b/presto-main/src/test/java/com/facebook/presto/execution/executor/Histogram.java
deleted file mode 100644
index 7f6ffbe5948c9..0000000000000
--- a/presto-main/src/test/java/com/facebook/presto/execution/executor/Histogram.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.facebook.presto.execution.executor;
-
-import com.google.common.collect.ImmutableList;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.function.Function;
-
-import static com.google.common.base.Preconditions.checkArgument;
-
-class Histogram>
-{
- private final List buckets;
- private final boolean discrete;
-
- private Histogram(Collection buckets, boolean discrete)
- {
- this.buckets = new ArrayList<>(buckets);
- this.discrete = discrete;
- Collections.sort(this.buckets);
- }
-
- public static > Histogram fromDiscrete(Collection buckets)
- {
- return new Histogram<>(buckets, true);
- }
-
- public static > Histogram fromContinuous(Collection buckets)
- {
- return new Histogram<>(buckets, false);
- }
-
- public static Histogram fromContinuous(Collection initialData, Function keyFunction)
- {
- if (initialData.isEmpty()) {
- return new Histogram<>(ImmutableList.of(), false);
- }
-
- int numBuckets = Math.min(10, (int) Math.sqrt(initialData.size()));
- long min = initialData.stream()
- .mapToLong(keyFunction::apply)
- .min()
- .getAsLong();
- long max = initialData.stream()
- .mapToLong(keyFunction::apply)
- .max()
- .getAsLong();
-
- checkArgument(max > min);
-
- long bucketSize = (max - min) / numBuckets;
- long bucketRemainder = (max - min) % numBuckets;
-
- List minimums = new ArrayList<>();
-
- long currentMin = min;
- for (int i = 0; i < numBuckets; i++) {
- minimums.add(currentMin);
- long currentMax = currentMin + bucketSize;
- if (bucketRemainder > 0) {
- currentMax++;
- bucketRemainder--;
- }
- currentMin = currentMax + 1;
- }
-
- minimums.add(numBuckets, currentMin);
-
- return new Histogram<>(minimums, false);
- }
-
- public void printDistribution(
- Collection data,
- Function keyFunction,
- Function keyFormatter)
- {
- if (buckets.isEmpty()) {
- System.out.println("No buckets");
- return;
- }
-
- if (data.isEmpty()) {
- System.out.println("No data");
- return;
- }
-
- long[] bucketData = new long[buckets.size()];
-
- for (D datum : data) {
- K key = keyFunction.apply(datum);
-
- for (int i = 0; i < buckets.size(); i++) {
- if (key.compareTo(buckets.get(i)) >= 0 && (i == (buckets.size() - 1) || key.compareTo(buckets.get(i + 1)) < 0)) {
- bucketData[i]++;
- break;
- }
- }
- }
-
- if (!discrete) {
- for (int i = 0; i < bucketData.length - 1; i++) {
- System.out.printf("%8s - %8s : (%5s values)\n",
- keyFormatter.apply(buckets.get(i)),
- keyFormatter.apply(buckets.get(i + 1)),
- bucketData[i]);
- }
- }
- else {
- for (int i = 0; i < bucketData.length; i++) {
- System.out.printf("%8s : (%5s values)\n",
- keyFormatter.apply(buckets.get(i)),
- bucketData[i]);
- }
- }
- }
-
- public void printDistribution(
- Collection data,
- Function keyFunction,
- Function valueFunction,
- Function keyFormatter,
- Function, G> valueFormatter)
- {
- if (buckets.isEmpty()) {
- System.out.println("No buckets");
- return;
- }
-
- if (data.isEmpty()) {
- System.out.println("No data");
- return;
- }
-
- SortedMap> bucketData = new TreeMap<>();
- for (int i = 0; i < buckets.size(); i++) {
- bucketData.put(i, new ArrayList<>());
- }
-
- for (D datum : data) {
- K key = keyFunction.apply(datum);
- V value = valueFunction.apply(datum);
-
- for (int i = 0; i < buckets.size(); i++) {
- if (key.compareTo(buckets.get(i)) >= 0 && (i == (buckets.size() - 1) || key.compareTo(buckets.get(i + 1)) < 0)) {
- bucketData.get(i).add(value);
- break;
- }
- }
- }
-
- if (!discrete) {
- for (int i = 0; i < bucketData.size() - 1; i++) {
- System.out.printf("%8s - %8s : (%5s values) %s\n",
- keyFormatter.apply(buckets.get(i)),
- keyFormatter.apply(buckets.get(i + 1)),
- bucketData.get(i).size(),
- valueFormatter.apply(bucketData.get(i)));
- }
- }
- else {
- for (int i = 0; i < bucketData.size(); i++) {
- System.out.printf("%19s : (%5s values) %s\n",
- keyFormatter.apply(buckets.get(i)),
- bucketData.get(i).size(),
- valueFormatter.apply(bucketData.get(i)));
- }
- }
- }
-}
diff --git a/presto-main/src/test/java/com/facebook/presto/execution/executor/SimulationController.java b/presto-main/src/test/java/com/facebook/presto/execution/executor/SimulationController.java
deleted file mode 100644
index e931f9495b5e0..0000000000000
--- a/presto-main/src/test/java/com/facebook/presto/execution/executor/SimulationController.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.facebook.presto.execution.executor;
-
-import com.facebook.presto.execution.TaskId;
-import com.facebook.presto.execution.executor.SimulationTask.IntermediateTask;
-import com.facebook.presto.execution.executor.SimulationTask.LeafTask;
-import com.facebook.presto.execution.executor.SplitGenerators.SplitGenerator;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ListMultimap;
-import com.google.common.collect.Multimaps;
-
-import java.util.Map;
-import java.util.OptionalInt;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.BiConsumer;
-
-import static com.facebook.presto.execution.executor.SimulationController.TaskSpecification.Type.LEAF;
-import static java.util.concurrent.Executors.newSingleThreadExecutor;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-
-class SimulationController
-{
- private static final int DEFAULT_MIN_SPLITS_PER_TASK = 3;
-
- private final TaskExecutor taskExecutor;
- private final BiConsumer callback;
-
- private final ExecutorService controllerExecutor = newSingleThreadExecutor();
-
- private final Map specificationEnabled = new ConcurrentHashMap<>();
- private final ListMultimap runningTasks = Multimaps.synchronizedListMultimap(ArrayListMultimap.create());
-
- private final ListMultimap completedTasks = Multimaps.synchronizedListMultimap(ArrayListMultimap.create());
- private final AtomicBoolean clearPendingQueue = new AtomicBoolean();
-
- private final AtomicBoolean stopped = new AtomicBoolean();
-
- public SimulationController(TaskExecutor taskExecutor, BiConsumer callback)
- {
- this.taskExecutor = taskExecutor;
- this.callback = callback;
- }
-
- public synchronized void addTaskSpecification(TaskSpecification spec)
- {
- specificationEnabled.put(spec, false);
- }
-
- public synchronized void clearPendingQueue()
- {
- System.out.println("Clearing pending queue..");
- clearPendingQueue.set(true);
- }
-
- public synchronized void stop()
- {
- stopped.set(true);
- controllerExecutor.shutdownNow();
- taskExecutor.stop();
- }
-
- public synchronized void enableSpecification(TaskSpecification specification)
- {
- specificationEnabled.replace(specification, false, true);
- startSpec(specification);
- }
-
- public synchronized void disableSpecification(TaskSpecification specification)
- {
- if (specificationEnabled.replace(specification, true, false) && callback != null) {
- runCallback();
- }
- }
-
- public synchronized void runCallback()
- {
- callback.accept(this, taskExecutor);
- }
-
- public void run()
- {
- controllerExecutor.submit(() -> {
- while (!stopped.get()) {
- replaceCompletedTasks();
- scheduleSplitsForRunningTasks();
-
- try {
- MILLISECONDS.sleep(500);
- }
- catch (InterruptedException e) {
- return;
- }
- }
- });
- }
-
- private synchronized void scheduleSplitsForRunningTasks()
- {
- if (clearPendingQueue.get()) {
- if (taskExecutor.getWaitingSplits() > (taskExecutor.getIntermediateSplits() - taskExecutor.getBlockedSplits())) {
- return;
- }
-
- System.out.println("Cleared pending queue.");
- clearPendingQueue.set(false);
- }
-
- for (TaskSpecification specification : specificationEnabled.keySet()) {
- if (!specificationEnabled.get(specification)) {
- continue;
- }
-
- for (SimulationTask task : runningTasks.get(specification)) {
- if (specification.getType() == LEAF) {
- int remainingSplits = specification.getNumSplitsPerTask() - (task.getRunningSplits().size() + task.getCompletedSplits().size());
- int candidateSplits = DEFAULT_MIN_SPLITS_PER_TASK - task.getRunningSplits().size();
- for (int i = 0; i < Math.min(remainingSplits, candidateSplits); i++) {
- task.schedule(taskExecutor, 1);
- }
- }
- else {
- int remainingSplits = specification.getNumSplitsPerTask() - (task.getRunningSplits().size() + task.getCompletedSplits().size());
- task.schedule(taskExecutor, remainingSplits);
- }
- }
- }
- }
-
- private synchronized void replaceCompletedTasks()
- {
- boolean moved;
- do {
- moved = false;
-
- for (TaskSpecification specification : specificationEnabled.keySet()) {
- if (specification.getTotalTasks().isPresent() &&
- specificationEnabled.get(specification) &&
- specification.getTotalTasks().getAsInt() <= completedTasks.get(specification).size() + runningTasks.get(specification).size()) {
- System.out.println();
- System.out.println(specification.getName() + " disabled for reaching target count " + specification.getTotalTasks());
- System.out.println();
- disableSpecification(specification);
- continue;
- }
- for (SimulationTask task : runningTasks.get(specification)) {
- if (task.getCompletedSplits().size() >= specification.getNumSplitsPerTask()) {
- completedTasks.put(specification, task);
- runningTasks.remove(specification, task);
- taskExecutor.removeTask(task.getTaskHandle());
-
- if (!specificationEnabled.get(specification)) {
- continue;
- }
-
- createTask(specification);
- moved = true;
- break;
- }
- }
- }
- }
- while (moved);
- }
-
- private void createTask(TaskSpecification specification)
- {
- if (specification.getType() == LEAF) {
- runningTasks.put(specification, new LeafTask(
- taskExecutor,
- specification,
- new TaskId(specification.getName(), 0, 0, runningTasks.get(specification).size() + completedTasks.get(specification).size(), 0)));
- }
- else {
- runningTasks.put(specification, new IntermediateTask(
- taskExecutor,
- specification,
- new TaskId(specification.getName(), 0, 0, runningTasks.get(specification).size() + completedTasks.get(specification).size(), 0)));
- }
- }
-
- public Map getSpecificationEnabled()
- {
- return specificationEnabled;
- }
-
- public ListMultimap getRunningTasks()
- {
- return runningTasks;
- }
-
- public ListMultimap getCompletedTasks()
- {
- return completedTasks;
- }
-
- private void startSpec(TaskSpecification specification)
- {
- if (!specificationEnabled.get(specification)) {
- return;
- }
- for (int i = 0; i < specification.getNumConcurrentTasks(); i++) {
- createTask(specification);
- }
- }
-
- public static class TaskSpecification
- {
- enum Type
- {
- LEAF,
- INTERMEDIATE
- }
-
- private final Type type;
- private final String name;
- private final OptionalInt totalTasks;
- private final int numConcurrentTasks;
- private final int numSplitsPerTask;
- private final SplitGenerator splitGenerator;
-
- TaskSpecification(Type type, String name, OptionalInt totalTasks, int numConcurrentTasks, int numSplitsPerTask, SplitGenerator splitGenerator)
- {
- this.type = type;
- this.name = name;
- this.totalTasks = totalTasks;
- this.numConcurrentTasks = numConcurrentTasks;
- this.numSplitsPerTask = numSplitsPerTask;
- this.splitGenerator = splitGenerator;
- }
-
- Type getType()
- {
- return type;
- }
-
- String getName()
- {
- return name;
- }
-
- int getNumConcurrentTasks()
- {
- return numConcurrentTasks;
- }
-
- int getNumSplitsPerTask()
- {
- return numSplitsPerTask;
- }
-
- OptionalInt getTotalTasks()
- {
- return totalTasks;
- }
-
- SplitSpecification nextSpecification()
- {
- return splitGenerator.next();
- }
- }
-}
diff --git a/presto-main/src/test/java/com/facebook/presto/execution/executor/SimulationSplit.java b/presto-main/src/test/java/com/facebook/presto/execution/executor/SimulationSplit.java
deleted file mode 100644
index 874d37b2bee64..0000000000000
--- a/presto-main/src/test/java/com/facebook/presto/execution/executor/SimulationSplit.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.facebook.presto.execution.executor;
-
-import com.facebook.presto.execution.SplitRunner;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import io.airlift.units.Duration;
-
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static com.facebook.presto.operator.Operator.NOT_BLOCKED;
-import static io.airlift.units.Duration.succinctNanos;
-import static java.util.Objects.requireNonNull;
-import static java.util.concurrent.TimeUnit.NANOSECONDS;
-
-abstract class SimulationSplit
- implements SplitRunner
-{
- private final SimulationTask task;
-
- private final AtomicInteger calls = new AtomicInteger(0);
-
- private final AtomicLong completedProcessNanos = new AtomicLong();
- private final AtomicLong startNanos = new AtomicLong(-1);
- private final AtomicLong doneNanos = new AtomicLong(-1);
- private final AtomicLong waitNanos = new AtomicLong();
- private final AtomicLong lastReadyTime = new AtomicLong(-1);
- private final AtomicBoolean killed = new AtomicBoolean(false);
-
- private final long scheduledTimeNanos;
-
- SimulationSplit(SimulationTask task, long scheduledTimeNanos)
- {
- this.task = requireNonNull(task, "task is null");
- this.scheduledTimeNanos = scheduledTimeNanos;
- }
-
- long getCompletedProcessNanos()
- {
- return completedProcessNanos.get();
- }
-
- long getWaitNanos()
- {
- return waitNanos.get();
- }
-
- int getCalls()
- {
- return calls.get();
- }
-
- long getScheduledTimeNanos()
- {
- return scheduledTimeNanos;
- }
-
- void setKilled()
- {
- waitNanos.addAndGet(System.nanoTime() - lastReadyTime.get());
- killed.set(true);
- task.setKilled();
- }
-
- @Override
- public boolean isFinished()
- {
- return doneNanos.get() >= 0;
- }
-
- @Override
- public void close()
- {
- }
-
- abstract boolean process();
-
- abstract ListenableFuture> getProcessResult();
-
- void setSplitReady()
- {
- lastReadyTime.set(System.nanoTime());
- }
-
- @Override
- public ListenableFuture> processFor(Duration duration)
- {
- calls.incrementAndGet();
-
- long callStart = System.nanoTime();
- startNanos.compareAndSet(-1, callStart);
- lastReadyTime.compareAndSet(-1, callStart);
- waitNanos.addAndGet(callStart - lastReadyTime.get());
-
- boolean done = process();
-
- long callEnd = System.nanoTime();
-
- completedProcessNanos.addAndGet(callEnd - callStart);
-
- if (done) {
- doneNanos.compareAndSet(-1, callEnd);
-
- if (!killed.get()) {
- task.splitComplete(this);
- }
-
- return Futures.immediateFuture(null);
- }
-
- ListenableFuture> processResult = getProcessResult();
- if (processResult.isDone()) {
- setSplitReady();
- }
-
- return processResult;
- }
-
- static class LeafSplit
- extends SimulationSplit
- {
- private final long perQuantaNanos;
-
- LeafSplit(SimulationTask task, long scheduledTimeNanos, long perQuantaNanos)
- {
- super(task, scheduledTimeNanos);
- this.perQuantaNanos = perQuantaNanos;
- }
-
- boolean process()
- {
- if (getCompletedProcessNanos() >= super.scheduledTimeNanos) {
- return true;
- }
-
- long processNanos = Math.min(super.scheduledTimeNanos - getCompletedProcessNanos(), perQuantaNanos);
- if (processNanos > 0) {
- try {
- NANOSECONDS.sleep(processNanos);
- }
- catch (InterruptedException e) {
- setKilled();
- return true;
- }
- }
-
- return false;
- }
-
- ListenableFuture> getProcessResult()
- {
- return NOT_BLOCKED;
- }
-
- @Override
- public String getInfo()
- {
- double pct = (100.0 * getCompletedProcessNanos() / super.scheduledTimeNanos);
- return String.format("leaf %3s%% done (total: %8s, per quanta: %8s)",
- (int) (pct > 100.00 ? 100.0 : pct),
- succinctNanos(super.scheduledTimeNanos),
- succinctNanos(perQuantaNanos));
- }
- }
-
- static class IntermediateSplit
- extends SimulationSplit
- {
- private final long wallTimeNanos;
- private final long numQuantas;
- private final long perQuantaNanos;
- private final long betweenQuantaNanos;
-
- private final ScheduledExecutorService executorService;
-
- private SettableFuture> future = SettableFuture.create();
- private SettableFuture> doneFuture = SettableFuture.create();
-
- IntermediateSplit(SimulationTask task, long scheduledTimeNanos, long wallTimeNanos, long numQuantas, long perQuantaNanos, long betweenQuantaNanos, ScheduledExecutorService executorService)
- {
- super(task, scheduledTimeNanos);
- this.wallTimeNanos = wallTimeNanos;
- this.numQuantas = numQuantas;
- this.perQuantaNanos = perQuantaNanos;
- this.betweenQuantaNanos = betweenQuantaNanos;
- this.executorService = executorService;
-
- doneFuture.set(null);
- }
-
- boolean process()
- {
- try {
- if (getCalls() < numQuantas) {
- NANOSECONDS.sleep(perQuantaNanos);
- return false;
- }
- }
- catch (InterruptedException ignored) {
- setKilled();
- return true;
- }
-
- return true;
- }
-
- ListenableFuture> getProcessResult()
- {
- future = SettableFuture.create();
- try {
- executorService.schedule(() -> {
- try {
- if (!executorService.isShutdown()) {
- future.set(null);
- }
- else {
- setKilled();
- }
- setSplitReady();
- }
- catch (RuntimeException ignored) {
- setKilled();
- }
- }, betweenQuantaNanos, NANOSECONDS);
- }
- catch (RejectedExecutionException ignored) {
- setKilled();
- return doneFuture;
- }
- return future;
- }
-
- @Override
- public String getInfo()
- {
- double pct = (100.0 * getCalls() / numQuantas);
- return String.format("intr %3s%% done (wall: %9s, per quanta: %8s, between quanta: %8s)",
- (int) (pct > 100.00 ? 100.0 : pct),
- succinctNanos(wallTimeNanos),
- succinctNanos(perQuantaNanos),
- succinctNanos(betweenQuantaNanos));
- }
- }
-}
diff --git a/presto-main/src/test/java/com/facebook/presto/execution/executor/SimulationTask.java b/presto-main/src/test/java/com/facebook/presto/execution/executor/SimulationTask.java
deleted file mode 100644
index 6898ef9b71e9d..0000000000000
--- a/presto-main/src/test/java/com/facebook/presto/execution/executor/SimulationTask.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.facebook.presto.execution.executor;
-
-import com.facebook.presto.execution.TaskId;
-import com.facebook.presto.execution.executor.SimulationController.TaskSpecification;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Sets;
-import io.airlift.units.Duration;
-
-import java.util.OptionalInt;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static java.util.concurrent.TimeUnit.SECONDS;
-
-abstract class SimulationTask
-{
- private final TaskSpecification specification;
- private final TaskId taskId;
-
- private final Set runningSplits = Sets.newConcurrentHashSet();
- private final Set completedSplits = Sets.newConcurrentHashSet();
-
- private final TaskHandle taskHandle;
- private final AtomicBoolean killed = new AtomicBoolean();
-
- public SimulationTask(TaskExecutor taskExecutor, TaskSpecification specification, TaskId taskId)
- {
- this.specification = specification;
- this.taskId = taskId;
- taskHandle = taskExecutor.addTask(taskId, () -> 0, 10, new Duration(1, SECONDS), OptionalInt.empty());
- }
-
- public void setKilled()
- {
- killed.set(true);
- }
-
- public boolean isKilled()
- {
- return killed.get();
- }
-
- public Set getCompletedSplits()
- {
- return completedSplits;
- }
-
- TaskId getTaskId()
- {
- return taskId;
- }
-
- public TaskHandle getTaskHandle()
- {
- return taskHandle;
- }
-
- public Set getRunningSplits()
- {
- return runningSplits;
- }
-
- public synchronized void splitComplete(SimulationSplit split)
- {
- runningSplits.remove(split);
- completedSplits.add(split);
- }
-
- public TaskSpecification getSpecification()
- {
- return specification;
- }
-
- public long getTotalWaitTimeNanos()
- {
- long runningWaitTime = runningSplits.stream()
- .mapToLong(SimulationSplit::getWaitNanos)
- .sum();
-
- long completedWaitTime = completedSplits.stream()
- .mapToLong(SimulationSplit::getWaitNanos)
- .sum();
-
- return runningWaitTime + completedWaitTime;
- }
-
- public long getProcessedTimeNanos()
- {
- long runningProcessedTime = runningSplits.stream()
- .mapToLong(SimulationSplit::getCompletedProcessNanos)
- .sum();
-
- long completedProcessedTime = completedSplits.stream()
- .mapToLong(SimulationSplit::getCompletedProcessNanos)
- .sum();
-
- return runningProcessedTime + completedProcessedTime;
- }
-
- public long getScheduledTimeNanos()
- {
- long runningWallTime = runningSplits.stream()
- .mapToLong(SimulationSplit::getScheduledTimeNanos)
- .sum();
-
- long completedWallTime = completedSplits.stream()
- .mapToLong(SimulationSplit::getScheduledTimeNanos)
- .sum();
-
- return runningWallTime + completedWallTime;
- }
-
- public abstract void schedule(TaskExecutor taskExecutor, int numSplits);
-
- public static class LeafTask
- extends SimulationTask
- {
- private final TaskSpecification taskSpecification;
-
- public LeafTask(TaskExecutor taskExecutor, TaskSpecification specification, TaskId taskId)
- {
- super(taskExecutor, specification, taskId);
- this.taskSpecification = specification;
- }
-
- public void schedule(TaskExecutor taskExecutor, int numSplits)
- {
- ImmutableList.Builder splits = ImmutableList.builder();
- for (int i = 0; i < numSplits; i++) {
- splits.add(taskSpecification.nextSpecification().instantiate(this));
- }
- super.runningSplits.addAll(splits.build());
- taskExecutor.enqueueSplits(getTaskHandle(), false, splits.build());
- }
- }
-
- public static class IntermediateTask
- extends SimulationTask
- {
- private final SplitSpecification splitSpecification;
-
- public IntermediateTask(TaskExecutor taskExecutor, TaskSpecification specification, TaskId taskId)
- {
- super(taskExecutor, specification, taskId);
- this.splitSpecification = specification.nextSpecification();
- }
-
- public void schedule(TaskExecutor taskExecutor, int numSplits)
- {
- ImmutableList.Builder splits = ImmutableList.builder();
- for (int i = 0; i < numSplits; i++) {
- splits.add(splitSpecification.instantiate(this));
- }
- super.runningSplits.addAll(splits.build());
- taskExecutor.enqueueSplits(getTaskHandle(), true, splits.build());
- }
- }
-}
diff --git a/presto-main/src/test/java/com/facebook/presto/execution/executor/SplitGenerators.java b/presto-main/src/test/java/com/facebook/presto/execution/executor/SplitGenerators.java
deleted file mode 100644
index 366db8fd0582f..0000000000000
--- a/presto-main/src/test/java/com/facebook/presto/execution/executor/SplitGenerators.java
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.facebook.presto.execution.executor;
-
-import com.facebook.presto.execution.executor.SplitSpecification.IntermediateSplitSpecification;
-import com.facebook.presto.execution.executor.SplitSpecification.LeafSplitSpecification;
-import com.google.common.collect.ImmutableList;
-import io.airlift.units.Duration;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ThreadLocalRandom;
-
-import static com.facebook.presto.execution.executor.Histogram.fromContinuous;
-import static java.util.concurrent.TimeUnit.DAYS;
-import static java.util.concurrent.TimeUnit.MICROSECONDS;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static java.util.concurrent.TimeUnit.MINUTES;
-
-class SplitGenerators
-{
- private SplitGenerators() {}
-
- public static void main(String[] args)
- {
- Histogram bins = fromContinuous(ImmutableList.of(
- MILLISECONDS.toNanos(0),
- MILLISECONDS.toNanos(1),
- MILLISECONDS.toNanos(10),
- MILLISECONDS.toNanos(100),
- MILLISECONDS.toNanos(1_000),
- MILLISECONDS.toNanos(10_000),
- MILLISECONDS.toNanos(60_000),
- MILLISECONDS.toNanos(300_000),
- MINUTES.toNanos(20),
- DAYS.toNanos(1)));
-
- IntermediateSplitGenerator intermediateSplitGenerator = new IntermediateSplitGenerator(null);
- List intermediateSpecs = new ArrayList<>();
- for (int i = 0; i < 10_000; i++) {
- IntermediateSplitSpecification next = intermediateSplitGenerator.next();
- intermediateSpecs.add(next);
- }
-
- System.out.println("Scheduled time distributions");
- System.out.println("============================");
- System.out.println();
- System.out.println("Tasks with 8x " + IntermediateSplitGenerator.class.getSimpleName());
- bins.printDistribution(intermediateSpecs, t -> t.getScheduledTimeNanos() * 8, a -> 1, Duration::succinctNanos, a -> "");
-
- List leafSplitGenerators = ImmutableList.of(
- new FastLeafSplitGenerator(),
- new SlowLeafSplitGenerator(),
- new L4LeafSplitGenerator(),
- new QuantaExceedingSplitGenerator(),
- new AggregatedLeafSplitGenerator());
-
- for (SplitGenerator generator : leafSplitGenerators) {
- List leafSpecs = new ArrayList<>();
- for (int i = 0; i < 17000; i++) {
- leafSpecs.add(generator.next());
- }
-
- System.out.println();
- System.out.println("Tasks with 4x " + generator.getClass().getSimpleName());
- bins.printDistribution(leafSpecs, t -> t.getScheduledTimeNanos() * 4, Duration::succinctNanos);
-
- System.out.println("Per quanta:");
- bins.printDistribution(leafSpecs, SplitSpecification::getPerQuantaNanos, Duration::succinctNanos);
- }
- }
-
- interface SplitGenerator
- {
- SplitSpecification next();
- }
-
- public static class IntermediateSplitGenerator
- implements SplitGenerator
- {
- private final ScheduledExecutorService wakeupExecutor;
-
- IntermediateSplitGenerator(ScheduledExecutorService wakeupExecutor)
- {
- this.wakeupExecutor = wakeupExecutor;
- }
-
- public IntermediateSplitSpecification next()
- {
- long numQuanta = generateIntermediateSplitNumQuanta(0, 1);
-
- long wallNanos = MILLISECONDS.toNanos(generateIntermediateSplitWallTimeMs(0, 1));
- long scheduledNanos = MILLISECONDS.toNanos(generateIntermediateSplitScheduledTimeMs(0, 1));
-
- long blockedNanos = (long) (ThreadLocalRandom.current().nextDouble(0.97, 0.99) * wallNanos);
-
- long perQuantaNanos = scheduledNanos / numQuanta;
- long betweenQuantaNanos = blockedNanos / numQuanta;
-
- return new IntermediateSplitSpecification(scheduledNanos, wallNanos, numQuanta, perQuantaNanos, betweenQuantaNanos, wakeupExecutor);
- }
- }
-
- public static class AggregatedLeafSplitGenerator
- implements SplitGenerator
- {
- public LeafSplitSpecification next()
- {
- long totalNanos = MILLISECONDS.toNanos(generateLeafSplitScheduledTimeMs(0, 1));
- long quantaNanos = Math.min(totalNanos, MICROSECONDS.toNanos(generateLeafSplitPerCallMicros(0, 1)));
-
- return new LeafSplitSpecification(totalNanos, quantaNanos);
- }
- }
-
- public static class FastLeafSplitGenerator
- implements SplitGenerator
- {
- public LeafSplitSpecification next()
- {
- long totalNanos = MILLISECONDS.toNanos(generateLeafSplitScheduledTimeMs(0, 0.75));
- long quantaNanos = Math.min(totalNanos, MICROSECONDS.toNanos(generateLeafSplitPerCallMicros(0, 1)));
-
- return new LeafSplitSpecification(totalNanos, quantaNanos);
- }
- }
-
- public static class SlowLeafSplitGenerator
- implements SplitGenerator
- {
- public LeafSplitSpecification next()
- {
- long totalNanos = MILLISECONDS.toNanos(generateLeafSplitScheduledTimeMs(0.75, 1));
- long quantaNanos = Math.min(totalNanos, MICROSECONDS.toNanos(generateLeafSplitPerCallMicros(0, 1)));
-
- return new LeafSplitSpecification(totalNanos, quantaNanos);
- }
- }
-
- public static class L4LeafSplitGenerator
- implements SplitGenerator
- {
- public LeafSplitSpecification next()
- {
- long totalNanos = MILLISECONDS.toNanos(generateLeafSplitScheduledTimeMs(0.99, 1));
- long quantaNanos = Math.min(totalNanos, MICROSECONDS.toNanos(generateLeafSplitPerCallMicros(0, 0.9)));
-
- return new LeafSplitSpecification(totalNanos, quantaNanos);
- }
- }
-
- public static class QuantaExceedingSplitGenerator
- implements SplitGenerator
- {
- public LeafSplitSpecification next()
- {
- long totalNanos = MILLISECONDS.toNanos(generateLeafSplitScheduledTimeMs(0.99, 1));
- long quantaNanos = Math.min(totalNanos, MICROSECONDS.toNanos(generateLeafSplitPerCallMicros(0.75, 1)));
-
- return new LeafSplitSpecification(totalNanos, quantaNanos);
- }
- }
-
- public static class SimpleLeafSplitGenerator
- implements SplitGenerator
- {
- private final long totalNanos;
- private final long quantaNanos;
-
- public SimpleLeafSplitGenerator(long totalNanos, long quantaNanos)
- {
- this.totalNanos = totalNanos;
- this.quantaNanos = quantaNanos;
- }
-
- public LeafSplitSpecification next()
- {
- return new LeafSplitSpecification(totalNanos, quantaNanos);
- }
- }
-
- // these numbers come from real world stats
- private static long generateLeafSplitScheduledTimeMs(double origin, double bound)
- {
- ThreadLocalRandom generator = ThreadLocalRandom.current();
- double value = generator.nextDouble(origin, bound);
- // in reality, max is several hours, but this would make the simulation too slow
- if (value > 0.998) {
- return generator.nextLong(5 * 60 * 1000, 10 * 60 * 1000);
- }
-
- if (value > 0.99) {
- return generator.nextLong(60 * 1000, 5 * 60 * 1000);
- }
-
- if (value > 0.95) {
- return generator.nextLong(10_000, 60 * 1000);
- }
-
- if (value > 0.50) {
- return generator.nextLong(1000, 10_000);
- }
-
- if (value > 0.25) {
- return generator.nextLong(100, 1000);
- }
-
- if (value > 0.10) {
- return generator.nextLong(10, 100);
- }
-
- return generator.nextLong(1, 10);
- }
-
- private static long generateLeafSplitPerCallMicros(double origin, double bound)
- {
- ThreadLocalRandom generator = ThreadLocalRandom.current();
- double value = generator.nextDouble(origin, bound);
- if (value > 0.9999) {
- return 200_000_000;
- }
-
- if (value > 0.99) {
- return generator.nextLong(3_000_000, 15_000_000);
- }
-
- if (value > 0.95) {
- return generator.nextLong(2_000_000, 5_000_000);
- }
-
- if (value > 0.90) {
- return generator.nextLong(1_500_000, 5_000_000);
- }
-
- if (value > 0.75) {
- return generator.nextLong(1_000_000, 2_000_000);
- }
-
- if (value > 0.50) {
- return generator.nextLong(500_000, 1_000_000);
- }
-
- if (value > 0.1) {
- return generator.nextLong(100_000, 500_000);
- }
-
- return generator.nextLong(250, 500);
- }
-
- private static long generateIntermediateSplitScheduledTimeMs(double origin, double bound)
- {
- ThreadLocalRandom generator = ThreadLocalRandom.current();
- double value = generator.nextDouble(origin, bound);
- // in reality, max is several hours, but this would make the simulation too slow
-
- if (value > 0.999) {
- return generator.nextLong(5 * 60 * 1000, 10 * 60 * 1000);
- }
-
- if (value > 0.99) {
- return generator.nextLong(60 * 1000, 5 * 60 * 1000);
- }
-
- if (value > 0.95) {
- return generator.nextLong(10_000, 60 * 1000);
- }
-
- if (value > 0.75) {
- return generator.nextLong(1000, 10_000);
- }
-
- if (value > 0.45) {
- return generator.nextLong(100, 1000);
- }
-
- if (value > 0.20) {
- return generator.nextLong(10, 100);
- }
-
- return generator.nextLong(1, 10);
- }
-
- private static long generateIntermediateSplitWallTimeMs(double origin, double bound)
- {
- ThreadLocalRandom generator = ThreadLocalRandom.current();
- double value = generator.nextDouble(origin, bound);
- // in reality, max is several hours, but this would make the simulation too slow
-
- if (value > 0.90) {
- return generator.nextLong(400_000, 800_000);
- }
-
- if (value > 0.75) {
- return generator.nextLong(100_000, 200_000);
- }
-
- if (value > 0.50) {
- return generator.nextLong(50_000, 100_000);
- }
-
- if (value > 0.40) {
- return generator.nextLong(30_000, 50_000);
- }
-
- if (value > 0.30) {
- return generator.nextLong(20_000, 30_000);
- }
-
- if (value > 0.20) {
- return generator.nextLong(10_000, 15_000);
- }
-
- if (value > 0.10) {
- return generator.nextLong(5_000, 10_000);
- }
-
- return generator.nextLong(1_000, 5_000);
- }
-
- private static long generateIntermediateSplitNumQuanta(double origin, double bound)
- {
- ThreadLocalRandom generator = ThreadLocalRandom.current();
- double value = generator.nextDouble(origin, bound);
-
- if (value > 0.95) {
- return generator.nextLong(2000, 20_000);
- }
-
- if (value > 0.90) {
- return generator.nextLong(1_000, 2_000);
- }
-
- return generator.nextLong(10, 1000);
- }
-}
diff --git a/presto-main/src/test/java/com/facebook/presto/execution/executor/SplitSpecification.java b/presto-main/src/test/java/com/facebook/presto/execution/executor/SplitSpecification.java
deleted file mode 100644
index 1dccbb25aa737..0000000000000
--- a/presto-main/src/test/java/com/facebook/presto/execution/executor/SplitSpecification.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.facebook.presto.execution.executor;
-
-import com.facebook.presto.execution.executor.SimulationSplit.IntermediateSplit;
-import com.facebook.presto.execution.executor.SimulationSplit.LeafSplit;
-
-import java.util.concurrent.ScheduledExecutorService;
-
-abstract class SplitSpecification
-{
- private final long scheduledTimeNanos;
- private final long perQuantaNanos;
-
- private SplitSpecification(long scheduledTimeNanos, long perQuantaNanos)
- {
- this.scheduledTimeNanos = scheduledTimeNanos;
- this.perQuantaNanos = perQuantaNanos;
- }
-
- public long getScheduledTimeNanos()
- {
- return scheduledTimeNanos;
- }
-
- public long getPerQuantaNanos()
- {
- return perQuantaNanos;
- }
-
- public abstract SimulationSplit instantiate(SimulationTask task);
-
- public static class LeafSplitSpecification
- extends SplitSpecification
- {
- public LeafSplitSpecification(long scheduledTimeNanos, long perQuantaNanos)
- {
- super(scheduledTimeNanos, perQuantaNanos);
- }
-
- public LeafSplit instantiate(SimulationTask task)
- {
- return new LeafSplit(task, super.getScheduledTimeNanos(), super.getPerQuantaNanos());
- }
- }
-
- public static class IntermediateSplitSpecification
- extends SplitSpecification
- {
- private final long wallTimeNanos;
- private final long numQuantas;
- private final long betweenQuantaNanos;
- private final ScheduledExecutorService wakeupExecutor;
-
- public IntermediateSplitSpecification(
- long scheduledTimeNanos,
- long perQuantaNanos,
- long wallTimeNanos,
- long numQuantas,
- long betweenQuantaNanos,
- ScheduledExecutorService wakeupExecutor)
- {
- super(scheduledTimeNanos, perQuantaNanos);
- this.wallTimeNanos = wallTimeNanos;
- this.numQuantas = numQuantas;
- this.betweenQuantaNanos = betweenQuantaNanos;
- this.wakeupExecutor = wakeupExecutor;
- }
-
- public IntermediateSplit instantiate(SimulationTask task)
- {
- return new IntermediateSplit(task, wallTimeNanos, numQuantas, super.getPerQuantaNanos(), betweenQuantaNanos, super.getScheduledTimeNanos(), wakeupExecutor);
- }
- }
-}
diff --git a/presto-main/src/test/java/com/facebook/presto/execution/executor/TaskExecutorSimulator.java b/presto-main/src/test/java/com/facebook/presto/execution/executor/TaskExecutorSimulator.java
deleted file mode 100644
index 1c25bb0826346..0000000000000
--- a/presto-main/src/test/java/com/facebook/presto/execution/executor/TaskExecutorSimulator.java
+++ /dev/null
@@ -1,449 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.facebook.presto.execution.executor;
-
-import com.facebook.presto.execution.executor.SimulationController.TaskSpecification;
-import com.facebook.presto.execution.executor.SplitGenerators.AggregatedLeafSplitGenerator;
-import com.facebook.presto.execution.executor.SplitGenerators.FastLeafSplitGenerator;
-import com.facebook.presto.execution.executor.SplitGenerators.IntermediateSplitGenerator;
-import com.facebook.presto.execution.executor.SplitGenerators.L4LeafSplitGenerator;
-import com.facebook.presto.execution.executor.SplitGenerators.QuantaExceedingSplitGenerator;
-import com.facebook.presto.execution.executor.SplitGenerators.SimpleLeafSplitGenerator;
-import com.facebook.presto.execution.executor.SplitGenerators.SlowLeafSplitGenerator;
-import com.google.common.base.Ticker;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ListMultimap;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import io.airlift.units.Duration;
-import org.joda.time.DateTime;
-
-import java.io.Closeable;
-import java.util.List;
-import java.util.LongSummaryStatistics;
-import java.util.Map;
-import java.util.OptionalInt;
-import java.util.Set;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.stream.Collectors;
-
-import static com.facebook.airlift.concurrent.Threads.threadsNamed;
-import static com.facebook.presto.execution.TaskManagerConfig.TaskPriorityTracking.TASK_FAIR;
-import static com.facebook.presto.execution.executor.Histogram.fromContinuous;
-import static com.facebook.presto.execution.executor.Histogram.fromDiscrete;
-import static com.facebook.presto.execution.executor.SimulationController.TaskSpecification.Type.INTERMEDIATE;
-import static com.facebook.presto.execution.executor.SimulationController.TaskSpecification.Type.LEAF;
-import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator;
-import static io.airlift.units.Duration.nanosSince;
-import static io.airlift.units.Duration.succinctNanos;
-import static java.util.concurrent.Executors.newCachedThreadPool;
-import static java.util.concurrent.Executors.newScheduledThreadPool;
-import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor;
-import static java.util.concurrent.TimeUnit.DAYS;
-import static java.util.concurrent.TimeUnit.HOURS;
-import static java.util.concurrent.TimeUnit.MILLISECONDS;
-import static java.util.concurrent.TimeUnit.MINUTES;
-import static java.util.concurrent.TimeUnit.SECONDS;
-import static java.util.function.Function.identity;
-
-public class TaskExecutorSimulator
- implements Closeable
-{
- public static void main(String[] args)
- throws Exception
- {
- try (TaskExecutorSimulator simulator = new TaskExecutorSimulator()) {
- simulator.run();
- }
- }
-
- private final ListeningExecutorService submissionExecutor = listeningDecorator(newCachedThreadPool(threadsNamed(getClass().getSimpleName() + "-%s")));
- private final ScheduledExecutorService overallStatusPrintExecutor = newSingleThreadScheduledExecutor();
- private final ScheduledExecutorService runningSplitsPrintExecutor = newSingleThreadScheduledExecutor();
- private final ScheduledExecutorService wakeupExecutor = newScheduledThreadPool(32);
-
- private final TaskExecutor taskExecutor;
- private final MultilevelSplitQueue splitQueue;
-
- private TaskExecutorSimulator()
- {
- splitQueue = new MultilevelSplitQueue(2);
- taskExecutor = new TaskExecutor(36, 72, 3, 8, TASK_FAIR, splitQueue, Ticker.systemTicker());
- taskExecutor.start();
- }
-
- @Override
- public void close()
- {
- submissionExecutor.shutdownNow();
- overallStatusPrintExecutor.shutdownNow();
- runningSplitsPrintExecutor.shutdownNow();
- wakeupExecutor.shutdownNow();
- taskExecutor.stop();
- }
-
- public void run()
- throws Exception
- {
- long start = System.nanoTime();
- scheduleStatusPrinter(start);
-
- SimulationController controller = new SimulationController(taskExecutor, TaskExecutorSimulator::printSummaryStats);
-
- // Uncomment one of these:
- // runExperimentOverloadedCluster(controller);
- // runExperimentMisbehavingQuanta(controller);
- // runExperimentStarveSlowSplits(controller);
- runExperimentWithinLevelFairness(controller);
-
- System.out.println("Stopped scheduling new tasks. Ending simulation..");
- controller.stop();
- close();
-
- SECONDS.sleep(5);
-
- System.out.println();
- System.out.println("Simulation finished at " + DateTime.now() + ". Runtime: " + nanosSince(start));
- System.out.println();
-
- printSummaryStats(controller, taskExecutor);
- }
-
- private void runExperimentOverloadedCluster(SimulationController controller)
- throws InterruptedException
- {
- /*
- Designed to simulate a somewhat overloaded Hive cluster.
- The following data is a point-in-time snapshot representative production cluster:
- - 60 running queries => 45 queries/node
- - 80 tasks/node
- - 600 splits scheduled/node (80% intermediate => ~480, 20% leaf => 120)
- - Only 60% intermediate splits will ever get data (~300)
-
- Desired result:
- This experiment should demonstrate the trade-offs that will be made during periods when a
- node is under heavy load. Ideally, the different classes of tasks should each accumulate
- scheduled time, and not spend disproportionately long waiting.
- */
-
- System.out.println("Overload experiment started.");
- TaskSpecification leafSpec = new TaskSpecification(LEAF, "leaf", OptionalInt.empty(), 16, 30, new AggregatedLeafSplitGenerator());
- controller.addTaskSpecification(leafSpec);
-
- TaskSpecification slowLeafSpec = new TaskSpecification(LEAF, "slow_leaf", OptionalInt.empty(), 16, 10, new SlowLeafSplitGenerator());
- controller.addTaskSpecification(slowLeafSpec);
-
- TaskSpecification intermediateSpec = new TaskSpecification(INTERMEDIATE, "intermediate", OptionalInt.empty(), 8, 40, new IntermediateSplitGenerator(wakeupExecutor));
- controller.addTaskSpecification(intermediateSpec);
-
- controller.enableSpecification(leafSpec);
- controller.enableSpecification(slowLeafSpec);
- controller.enableSpecification(intermediateSpec);
- controller.run();
-
- SECONDS.sleep(30);
-
- // this gets the executor into a more realistic point-in-time state, where long running tasks start to make progress
- for (int i = 0; i < 20; i++) {
- controller.clearPendingQueue();
- MINUTES.sleep(1);
- }
-
- System.out.println("Overload experiment completed.");
- }
-
- private void runExperimentStarveSlowSplits(SimulationController controller)
- throws InterruptedException
- {
- /*
- Designed to simulate how higher level admission control affects short-term scheduling decisions.
- A fixed, large number of tasks (120) are submitted at approximately the same time.
-
- Desired result:
- Presto is designed to prioritize fast, short tasks at the expense of longer slower tasks.
- This experiment allows us to quantify exactly how this preference manifests itself. It is
- expected that shorter tasks will complete faster, however, longer tasks should not starve
- for more than a couple of minutes at a time.
- */
-
- System.out.println("Starvation experiment started.");
- TaskSpecification slowLeafSpec = new TaskSpecification(LEAF, "slow_leaf", OptionalInt.of(600), 40, 4, new SlowLeafSplitGenerator());
- controller.addTaskSpecification(slowLeafSpec);
-
- TaskSpecification intermediateSpec = new TaskSpecification(INTERMEDIATE, "intermediate", OptionalInt.of(400), 40, 8, new IntermediateSplitGenerator(wakeupExecutor));
- controller.addTaskSpecification(intermediateSpec);
-
- TaskSpecification fastLeafSpec = new TaskSpecification(LEAF, "fast_leaf", OptionalInt.of(600), 40, 4, new FastLeafSplitGenerator());
- controller.addTaskSpecification(fastLeafSpec);
-
- controller.enableSpecification(slowLeafSpec);
- controller.enableSpecification(fastLeafSpec);
- controller.enableSpecification(intermediateSpec);
-
- controller.run();
-
- for (int i = 0; i < 60; i++) {
- SECONDS.sleep(20);
- controller.clearPendingQueue();
- }
-
- System.out.println("Starvation experiment completed.");
- }
-
- private void runExperimentMisbehavingQuanta(SimulationController controller)
- throws InterruptedException
- {
- /*
- Designed to simulate how Presto allocates resources in scenarios where there is variance in
- quanta run-time between tasks.
-
- Desired result:
- Variance in quanta run time should not affect total accrued scheduled time. It is
- acceptable, however, to penalize tasks that use extremely short quanta, as each quanta
- incurs scheduling overhead.
- */
-
- System.out.println("Misbehaving quanta experiment started.");
-
- TaskSpecification slowLeafSpec = new TaskSpecification(LEAF, "good_leaf", OptionalInt.empty(), 16, 4, new L4LeafSplitGenerator());
- controller.addTaskSpecification(slowLeafSpec);
-
- TaskSpecification misbehavingLeafSpec = new TaskSpecification(LEAF, "bad_leaf", OptionalInt.empty(), 16, 4, new QuantaExceedingSplitGenerator());
- controller.addTaskSpecification(misbehavingLeafSpec);
-
- controller.enableSpecification(slowLeafSpec);
- controller.enableSpecification(misbehavingLeafSpec);
-
- controller.run();
-
- for (int i = 0; i < 120; i++) {
- controller.clearPendingQueue();
- SECONDS.sleep(20);
- }
-
- System.out.println("Misbehaving quanta experiment completed.");
- }
-
- private void runExperimentWithinLevelFairness(SimulationController controller)
- throws InterruptedException
- {
- /*
- Designed to simulate how Presto allocates resources to tasks at the same level of the
- feedback queue when there is large variance in accrued scheduled time.
-
- Desired result:
- Scheduling within levels should be fair - total accrued time should not affect what
- fraction of resources tasks are allocated as long as they are in the same level.
- */
-
- System.out.println("Level fairness experiment started.");
-
- TaskSpecification longLeafSpec = new TaskSpecification(INTERMEDIATE, "l4_long", OptionalInt.empty(), 2, 16, new SimpleLeafSplitGenerator(MINUTES.toNanos(4), SECONDS.toNanos(1)));
- controller.addTaskSpecification(longLeafSpec);
-
- TaskSpecification shortLeafSpec = new TaskSpecification(INTERMEDIATE, "l4_short", OptionalInt.empty(), 2, 16, new SimpleLeafSplitGenerator(MINUTES.toNanos(2), SECONDS.toNanos(1)));
- controller.addTaskSpecification(shortLeafSpec);
-
- controller.enableSpecification(longLeafSpec);
- controller.run();
-
- // wait until long tasks are all well into L4
- MINUTES.sleep(1);
- controller.runCallback();
-
- // start short leaf tasks
- controller.enableSpecification(shortLeafSpec);
-
- // wait until short tasks hit L4
- SECONDS.sleep(25);
- controller.runCallback();
-
- // now watch for L4 fairness at this point
- MINUTES.sleep(2);
-
- System.out.println("Level fairness experiment completed.");
- }
-
- private void scheduleStatusPrinter(long start)
- {
- overallStatusPrintExecutor.scheduleAtFixedRate(() -> {
- try {
- System.out.printf(
- "%6s -- %4s splits (R: %2s L: %3s I: %3s B: %3s W: %3s C: %5s) | %3s tasks (%3s %3s %3s %3s %3s) | Selections: %4s %4s %4s %4s %3s\n",
- nanosSince(start),
- taskExecutor.getTotalSplits(),
- taskExecutor.getRunningSplits(),
- taskExecutor.getTotalSplits() - taskExecutor.getIntermediateSplits(),
- taskExecutor.getIntermediateSplits(),
- taskExecutor.getBlockedSplits(),
- taskExecutor.getWaitingSplits(),
- taskExecutor.getCompletedSplitsLevel0() + taskExecutor.getCompletedSplitsLevel1() + taskExecutor.getCompletedSplitsLevel2() + taskExecutor.getCompletedSplitsLevel3() + taskExecutor.getCompletedSplitsLevel4(),
- taskExecutor.getTasks(),
- taskExecutor.getRunningTasksLevel0(),
- taskExecutor.getRunningTasksLevel1(),
- taskExecutor.getRunningTasksLevel2(),
- taskExecutor.getRunningTasksLevel3(),
- taskExecutor.getRunningTasksLevel4(),
- (int) splitQueue.getSelectedCountLevel0().getOneMinute().getRate(),
- (int) splitQueue.getSelectedCountLevel1().getOneMinute().getRate(),
- (int) splitQueue.getSelectedCountLevel2().getOneMinute().getRate(),
- (int) splitQueue.getSelectedCountLevel3().getOneMinute().getRate(),
- (int) splitQueue.getSelectedCountLevel4().getOneMinute().getRate());
- }
- catch (Exception ignored) {
- }
- }, 1, 1, SECONDS);
- }
-
- private static void printSummaryStats(SimulationController controller, TaskExecutor taskExecutor)
- {
- Map specEnabled = controller.getSpecificationEnabled();
-
- ListMultimap completedTasks = controller.getCompletedTasks();
- ListMultimap runningTasks = controller.getRunningTasks();
- Set allTasks = ImmutableSet.builder().addAll(completedTasks.values()).addAll(runningTasks.values()).build();
-
- long completedSplits = completedTasks.values().stream().mapToInt(t -> t.getCompletedSplits().size()).sum();
- long runningSplits = runningTasks.values().stream().mapToInt(t -> t.getCompletedSplits().size()).sum();
-
- System.out.println("Completed tasks : " + completedTasks.size());
- System.out.println("Remaining tasks : " + runningTasks.size());
- System.out.println("Completed splits: " + completedSplits);
- System.out.println("Remaining splits: " + runningSplits);
- System.out.println();
- System.out.println("Completed tasks L0: " + taskExecutor.getCompletedTasksLevel0());
- System.out.println("Completed tasks L1: " + taskExecutor.getCompletedTasksLevel1());
- System.out.println("Completed tasks L2: " + taskExecutor.getCompletedTasksLevel2());
- System.out.println("Completed tasks L3: " + taskExecutor.getCompletedTasksLevel3());
- System.out.println("Completed tasks L4: " + taskExecutor.getCompletedTasksLevel4());
- System.out.println();
- System.out.println("Completed splits L0: " + taskExecutor.getCompletedSplitsLevel0());
- System.out.println("Completed splits L1: " + taskExecutor.getCompletedSplitsLevel1());
- System.out.println("Completed splits L2: " + taskExecutor.getCompletedSplitsLevel2());
- System.out.println("Completed splits L3: " + taskExecutor.getCompletedSplitsLevel3());
- System.out.println("Completed splits L4: " + taskExecutor.getCompletedSplitsLevel4());
-
- Histogram levelsHistogram = fromContinuous(ImmutableList.of(
- MILLISECONDS.toNanos(0L),
- MILLISECONDS.toNanos(1_000),
- MILLISECONDS.toNanos(10_000L),
- MILLISECONDS.toNanos(60_000L),
- MILLISECONDS.toNanos(300_000L),
- HOURS.toNanos(1),
- DAYS.toNanos(1)));
-
- System.out.println();
- System.out.println("Levels - Completed Task Processed Time");
- levelsHistogram.printDistribution(
- completedTasks.values().stream().filter(t -> t.getSpecification().getType() == LEAF).collect(Collectors.toList()),
- SimulationTask::getScheduledTimeNanos,
- SimulationTask::getProcessedTimeNanos,
- Duration::succinctNanos,
- TaskExecutorSimulator::formatNanos);
-
- System.out.println();
- System.out.println("Levels - Running Task Processed Time");
- levelsHistogram.printDistribution(
- runningTasks.values().stream().filter(t -> t.getSpecification().getType() == LEAF).collect(Collectors.toList()),
- SimulationTask::getScheduledTimeNanos,
- SimulationTask::getProcessedTimeNanos,
- Duration::succinctNanos,
- TaskExecutorSimulator::formatNanos);
-
- System.out.println();
- System.out.println("Levels - All Task Wait Time");
- levelsHistogram.printDistribution(
- runningTasks.values().stream().filter(t -> t.getSpecification().getType() == LEAF).collect(Collectors.toList()),
- SimulationTask::getScheduledTimeNanos,
- SimulationTask::getTotalWaitTimeNanos,
- Duration::succinctNanos,
- TaskExecutorSimulator::formatNanos);
-
- System.out.println();
- System.out.println("Specification - Processed time");
- Set specifications = runningTasks.values().stream().map(t -> t.getSpecification().getName()).collect(Collectors.toSet());
- fromDiscrete(specifications).printDistribution(
- allTasks,
- t -> t.getSpecification().getName(),
- SimulationTask::getProcessedTimeNanos,
- identity(),
- TaskExecutorSimulator::formatNanos);
-
- System.out.println();
- System.out.println("Specification - Wait time");
- fromDiscrete(specifications).printDistribution(
- allTasks,
- t -> t.getSpecification().getName(),
- SimulationTask::getTotalWaitTimeNanos,
- identity(),
- TaskExecutorSimulator::formatNanos);
-
- System.out.println();
- System.out.println("Breakdown by specification");
- System.out.println("##########################");
- for (TaskSpecification specification : specEnabled.keySet()) {
- List allSpecificationTasks = ImmutableList.builder()
- .addAll(completedTasks.get(specification))
- .addAll(runningTasks.get(specification))
- .build();
-
- System.out.println(specification.getName());
- System.out.println("=============================");
- System.out.println("Completed tasks : " + completedTasks.get(specification).size());
- System.out.println("In-progress tasks : " + runningTasks.get(specification).size());
- System.out.println("Total tasks : " + specification.getTotalTasks());
- System.out.println("Splits/task : " + specification.getNumSplitsPerTask());
- System.out.println("Current required time : " + succinctNanos(allSpecificationTasks.stream().mapToLong(SimulationTask::getScheduledTimeNanos).sum()));
- System.out.println("Completed scheduled time : " + succinctNanos(allSpecificationTasks.stream().mapToLong(SimulationTask::getProcessedTimeNanos).sum()));
- System.out.println("Total wait time : " + succinctNanos(allSpecificationTasks.stream().mapToLong(SimulationTask::getTotalWaitTimeNanos).sum()));
-
- System.out.println();
- System.out.println("All Tasks by Scheduled time - Processed Time");
- levelsHistogram.printDistribution(
- allSpecificationTasks,
- SimulationTask::getScheduledTimeNanos,
- SimulationTask::getProcessedTimeNanos,
- Duration::succinctNanos,
- TaskExecutorSimulator::formatNanos);
-
- System.out.println();
- System.out.println("All Tasks by Scheduled time - Wait Time");
- levelsHistogram.printDistribution(
- allSpecificationTasks,
- SimulationTask::getScheduledTimeNanos,
- SimulationTask::getTotalWaitTimeNanos,
- Duration::succinctNanos,
- TaskExecutorSimulator::formatNanos);
-
- System.out.println();
- System.out.println("Complete Tasks by Scheduled time - Wait Time");
- levelsHistogram.printDistribution(
- completedTasks.get(specification),
- SimulationTask::getScheduledTimeNanos,
- SimulationTask::getTotalWaitTimeNanos,
- Duration::succinctNanos,
- TaskExecutorSimulator::formatNanos);
- }
- }
-
- private static String formatNanos(List list)
- {
- LongSummaryStatistics stats = list.stream().mapToLong(Long::new).summaryStatistics();
- return String.format("Min: %8s Max: %8s Avg: %8s Sum: %8s",
- succinctNanos(stats.getMin() == Long.MAX_VALUE ? 0 : stats.getMin()),
- succinctNanos(stats.getMax() == Long.MIN_VALUE ? 0 : stats.getMax()),
- succinctNanos((long) stats.getAverage()),
- succinctNanos(stats.getSum()));
- }
-}
diff --git a/presto-main/src/test/java/com/facebook/presto/operator/scalar/TestIpPrefixFunctions.java b/presto-main/src/test/java/com/facebook/presto/operator/scalar/TestIpPrefixFunctions.java
index a895fdd7e1c50..21f2b615887c9 100644
--- a/presto-main/src/test/java/com/facebook/presto/operator/scalar/TestIpPrefixFunctions.java
+++ b/presto-main/src/test/java/com/facebook/presto/operator/scalar/TestIpPrefixFunctions.java
@@ -287,4 +287,37 @@ public void testIsPrivateIpNull()
{
assertFunction("IS_PRIVATE_IP(NULL)", BOOLEAN, null);
}
+
+ @Test
+ public void testIpPrefixSubnets()
+ {
+ assertFunction("IP_PREFIX_SUBNETS(IPPREFIX '192.168.1.0/24', 25)", new ArrayType(IPPREFIX), ImmutableList.of("192.168.1.0/25", "192.168.1.128/25"));
+ assertFunction("IP_PREFIX_SUBNETS(IPPREFIX '192.168.0.0/24', 26)", new ArrayType(IPPREFIX), ImmutableList.of("192.168.0.0/26", "192.168.0.64/26", "192.168.0.128/26", "192.168.0.192/26"));
+ assertFunction("IP_PREFIX_SUBNETS(IPPREFIX '2A03:2880:C000::/34', 37)",
+ new ArrayType(IPPREFIX),
+ ImmutableList.of("2a03:2880:c000::/37", "2a03:2880:c800::/37", "2a03:2880:d000::/37", "2a03:2880:d800::/37", "2a03:2880:e000::/37", "2a03:2880:e800::/37", "2a03:2880:f000::/37", "2a03:2880:f800::/37"));
+ }
+
+ @Test
+ public void testIpPrefixSubnetsReturnSelf()
+ {
+ assertFunction("IP_PREFIX_SUBNETS(IPPREFIX '192.168.1.0/24', 24)", new ArrayType(IPPREFIX), ImmutableList.of("192.168.1.0/24"));
+ assertFunction("IP_PREFIX_SUBNETS(IPPREFIX '2804:431:b000::/38', 38)", new ArrayType(IPPREFIX), ImmutableList.of("2804:431:b000::/38"));
+ }
+
+ @Test
+ public void testIpPrefixSubnetsNewPrefixLengthLongerReturnsEmpty()
+ {
+ assertFunction("IP_PREFIX_SUBNETS(IPPREFIX '192.168.0.0/24', 23)", new ArrayType(IPPREFIX), ImmutableList.of());
+ assertFunction("IP_PREFIX_SUBNETS(IPPREFIX '64:ff9b::17/64', 48)", new ArrayType(IPPREFIX), ImmutableList.of());
+ }
+
+ @Test
+ public void testIpPrefixSubnetsInvalidPrefixLengths()
+ {
+ assertInvalidFunction("IP_PREFIX_SUBNETS(IPPREFIX '192.168.0.0/24', -1)", "Invalid prefix length for IPv4: -1");
+ assertInvalidFunction("IP_PREFIX_SUBNETS(IPPREFIX '192.168.0.0/24', 33)", "Invalid prefix length for IPv4: 33");
+ assertInvalidFunction("IP_PREFIX_SUBNETS(IPPREFIX '64:ff9b::17/64', -1)", "Invalid prefix length for IPv6: -1");
+ assertInvalidFunction("IP_PREFIX_SUBNETS(IPPREFIX '64:ff9b::17/64', 129)", "Invalid prefix length for IPv6: 129");
+ }
}
diff --git a/presto-main/src/test/java/com/facebook/presto/operator/scalar/sql/TestArraySqlFunctions.java b/presto-main/src/test/java/com/facebook/presto/operator/scalar/sql/TestArraySqlFunctions.java
index bcc96309d96ad..2e0e865f13be4 100644
--- a/presto-main/src/test/java/com/facebook/presto/operator/scalar/sql/TestArraySqlFunctions.java
+++ b/presto-main/src/test/java/com/facebook/presto/operator/scalar/sql/TestArraySqlFunctions.java
@@ -196,9 +196,6 @@ public void testArrayHasDuplicates()
assertFunction("array_has_duplicates(array[0, null])", BOOLEAN, false);
assertFunction("array_has_duplicates(array[0, null, null])", BOOLEAN, true);
- // Test legacy name.
- assertFunction("array_has_dupes(array[varchar 'a', varchar 'b', varchar 'a'])", BOOLEAN, true);
-
assertFunction("array_has_duplicates(array[array[1], array[2], array[]])", BOOLEAN, false);
assertFunction("array_has_duplicates(array[array[1], array[2], array[2]])", BOOLEAN, true);
assertFunction("array_has_duplicates(array[(1, 2), (1, 2)])", BOOLEAN, true);
@@ -224,9 +221,6 @@ public void testArrayDuplicates()
assertFunction("array_duplicates(array[0, null])", new ArrayType(INTEGER), ImmutableList.of());
assertFunction("array_duplicates(array[0, null, null])", new ArrayType(INTEGER), singletonList(null));
- // Test legacy name.
- assertFunction("array_dupes(array[1, 2, 1])", new ArrayType(INTEGER), ImmutableList.of(1));
-
RowType rowType = RowType.from(ImmutableList.of(RowType.field(INTEGER), RowType.field(INTEGER)));
assertFunction("array_duplicates(array[array[1], array[2], array[]])", new ArrayType(new ArrayType(INTEGER)), ImmutableList.of());
assertFunction("array_duplicates(array[array[1], array[2], array[2]])", new ArrayType(new ArrayType(INTEGER)), ImmutableList.of(ImmutableList.of(2)));
diff --git a/presto-main/src/test/java/com/facebook/presto/security/TestAccessControlManager.java b/presto-main/src/test/java/com/facebook/presto/security/TestAccessControlManager.java
index e41cb1d588cad..7039ba042697d 100644
--- a/presto-main/src/test/java/com/facebook/presto/security/TestAccessControlManager.java
+++ b/presto-main/src/test/java/com/facebook/presto/security/TestAccessControlManager.java
@@ -82,7 +82,7 @@ public void testInitializing()
AccessControlManager accessControlManager = new AccessControlManager(createTestTransactionManager());
accessControlManager.checkCanSetUser(
new Identity(USER_NAME, Optional.of(PRINCIPAL)),
- new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats()),
+ new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty()),
Optional.empty(),
"foo");
}
@@ -94,7 +94,7 @@ public void testNoneSystemAccessControl()
accessControlManager.setSystemAccessControl(AllowAllSystemAccessControl.NAME, ImmutableMap.of());
accessControlManager.checkCanSetUser(
new Identity(USER_NAME, Optional.of(PRINCIPAL)),
- new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats()),
+ new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty()),
Optional.empty(),
USER_NAME);
}
@@ -106,7 +106,7 @@ public void testReadOnlySystemAccessControl()
QualifiedObjectName tableName = new QualifiedObjectName("catalog", "schema", "table");
TransactionManager transactionManager = createTestTransactionManager();
AccessControlManager accessControlManager = new AccessControlManager(transactionManager);
- AccessControlContext context = new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats());
+ AccessControlContext context = new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty());
accessControlManager.setSystemAccessControl(ReadOnlySystemAccessControl.NAME, ImmutableMap.of());
accessControlManager.checkCanSetUser(identity, context, Optional.of(PRINCIPAL), USER_NAME);
@@ -149,7 +149,7 @@ public void testSetAccessControl()
accessControlManager.checkCanSetUser(
new Identity(USER_NAME, Optional.of(PRINCIPAL)),
- new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats()),
+ new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty()),
Optional.of(PRINCIPAL),
USER_NAME);
assertEquals(accessControlFactory.getCheckedUserName(), USER_NAME);
@@ -160,7 +160,7 @@ public void testSetAccessControl()
public void testCheckQueryIntegrity()
{
AccessControlManager accessControlManager = new AccessControlManager(createTestTransactionManager());
- AccessControlContext context = new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats());
+ AccessControlContext context = new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty());
TestSystemAccessControlFactory accessControlFactory = new TestSystemAccessControlFactory("test");
accessControlManager.addSystemAccessControlFactory(accessControlFactory);
@@ -210,7 +210,7 @@ public void testNoCatalogAccessControl()
transaction(transactionManager, accessControlManager)
.execute(transactionId -> {
accessControlManager.checkCanSelectFromColumns(transactionId, new Identity(USER_NAME, Optional.of(PRINCIPAL)),
- new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats()),
+ new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty()),
new QualifiedObjectName("catalog", "schema", "table"), ImmutableSet.of(new Subfield("column")));
});
}
@@ -232,7 +232,7 @@ public void testDenyCatalogAccessControl()
transaction(transactionManager, accessControlManager)
.execute(transactionId -> {
accessControlManager.checkCanSelectFromColumns(transactionId, new Identity(USER_NAME, Optional.of(PRINCIPAL)),
- new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats()),
+ new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty()),
new QualifiedObjectName("catalog", "schema", "table"), ImmutableSet.of(new Subfield("column")));
});
}
@@ -254,7 +254,7 @@ public void testDenySystemAccessControl()
transaction(transactionManager, accessControlManager)
.execute(transactionId -> {
accessControlManager.checkCanSelectFromColumns(transactionId, new Identity(USER_NAME, Optional.of(PRINCIPAL)),
- new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats()),
+ new AccessControlContext(new QueryId(QUERY_ID), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty()),
new QualifiedObjectName("secured_catalog", "schema", "table"), ImmutableSet.of(new Subfield("column")));
});
}
diff --git a/presto-main/src/test/java/com/facebook/presto/security/TestFileBasedSystemAccessControl.java b/presto-main/src/test/java/com/facebook/presto/security/TestFileBasedSystemAccessControl.java
index 5bd759aef1863..9d0d574e7805d 100644
--- a/presto-main/src/test/java/com/facebook/presto/security/TestFileBasedSystemAccessControl.java
+++ b/presto-main/src/test/java/com/facebook/presto/security/TestFileBasedSystemAccessControl.java
@@ -70,7 +70,7 @@ public class TestFileBasedSystemAccessControl
private static final QualifiedObjectName aliceTable = new QualifiedObjectName("alice-catalog", "schema", "table");
private static final QualifiedObjectName aliceView = new QualifiedObjectName("alice-catalog", "schema", "view");
private static final CatalogSchemaName aliceSchema = new CatalogSchemaName("alice-catalog", "schema");
- private static final AccessControlContext context = new AccessControlContext(new QueryId("query_id"), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats());
+ private static final AccessControlContext context = new AccessControlContext(new QueryId("query_id"), Optional.empty(), Collections.emptySet(), Optional.empty(), WarningCollector.NOOP, new RuntimeStats(), Optional.empty());
@Test
public void testCanSetUserOperations() throws IOException
{
diff --git a/presto-main/src/test/java/com/facebook/presto/server/TestSessionPropertyDefaults.java b/presto-main/src/test/java/com/facebook/presto/server/TestSessionPropertyDefaults.java
index 964183375bdb2..034d81caaff7c 100644
--- a/presto-main/src/test/java/com/facebook/presto/server/TestSessionPropertyDefaults.java
+++ b/presto-main/src/test/java/com/facebook/presto/server/TestSessionPropertyDefaults.java
@@ -28,6 +28,7 @@
import java.util.Optional;
+import static com.facebook.presto.Session.SessionBuilder;
import static com.facebook.presto.SystemSessionProperties.HASH_PARTITION_COUNT;
import static com.facebook.presto.SystemSessionProperties.JOIN_DISTRIBUTION_TYPE;
import static com.facebook.presto.SystemSessionProperties.QUERY_MAX_MEMORY;
@@ -59,33 +60,32 @@ public void testApplyDefaultProperties()
sessionPropertyDefaults.addConfigurationManagerFactory(factory);
sessionPropertyDefaults.setConfigurationManager(factory.getName(), ImmutableMap.of());
- Session session = Session.builder(new SessionPropertyManager())
+ SessionBuilder sessionBuilder = Session.builder(new SessionPropertyManager())
.setQueryId(new QueryId("test_query_id"))
.setIdentity(new Identity("testUser", Optional.empty()))
.setSystemProperty(QUERY_MAX_MEMORY, "1GB")
.setSystemProperty(JOIN_DISTRIBUTION_TYPE, "partitioned")
.setSystemProperty(HASH_PARTITION_COUNT, "43")
.setSystemProperty("override", "should be overridden")
- .setCatalogSessionProperty("testCatalog", "explicit_set", "explicit_set")
- .build();
+ .setCatalogSessionProperty("testCatalog", "explicit_set", "explicit_set");
- assertEquals(session.getSystemProperties(), ImmutableMap.builder()
+ assertEquals(sessionBuilder.getSystemProperties(), ImmutableMap.builder()
.put(QUERY_MAX_MEMORY, "1GB")
.put(JOIN_DISTRIBUTION_TYPE, "partitioned")
.put(HASH_PARTITION_COUNT, "43")
.put("override", "should be overridden")
.build());
assertEquals(
- session.getUnprocessedCatalogProperties(),
+ sessionBuilder.getUnprocessedCatalogProperties(),
ImmutableMap.of(
"testCatalog",
ImmutableMap.builder()
.put("explicit_set", "explicit_set")
.build()));
- session = sessionPropertyDefaults.newSessionWithDefaultProperties(session, Optional.empty(), Optional.of(TEST_RESOURCE_GROUP_ID));
+ sessionPropertyDefaults.applyDefaultProperties(sessionBuilder, Optional.empty(), Optional.of(TEST_RESOURCE_GROUP_ID));
- assertEquals(session.getSystemProperties(), ImmutableMap.builder()
+ assertEquals(sessionBuilder.getSystemProperties(), ImmutableMap.builder()
.put(QUERY_MAX_MEMORY, "1GB")
.put(JOIN_DISTRIBUTION_TYPE, "partitioned")
.put(HASH_PARTITION_COUNT, "43")
@@ -93,7 +93,7 @@ public void testApplyDefaultProperties()
.put("override", "overridden")
.build());
assertEquals(
- session.getUnprocessedCatalogProperties(),
+ sessionBuilder.getUnprocessedCatalogProperties(),
ImmutableMap.of(
"testCatalog",
ImmutableMap.builder()
diff --git a/presto-main/src/test/java/com/facebook/presto/sql/planner/TestEffectivePredicateExtractor.java b/presto-main/src/test/java/com/facebook/presto/sql/planner/TestEffectivePredicateExtractor.java
index a6d672435af38..4e76ddd5d271c 100644
--- a/presto-main/src/test/java/com/facebook/presto/sql/planner/TestEffectivePredicateExtractor.java
+++ b/presto-main/src/test/java/com/facebook/presto/sql/planner/TestEffectivePredicateExtractor.java
@@ -238,6 +238,26 @@ public void testProject()
equals(DV, EV)));
}
+ @Test
+ public void testProjectOverFilterWithNoReferencedAssignments()
+ {
+ PlanNode node = new ProjectNode(newId(),
+ filter(baseTableScan,
+ and(
+ equals(call("mod",
+ metadata.getFunctionAndTypeManager().lookupFunction("mod", fromTypes(BIGINT, BIGINT)),
+ BIGINT,
+ ImmutableList.of(CV, bigintLiteral(5L))), bigintLiteral(-1L)),
+ equals(CV, bigintLiteral(10L)))),
+ assignment(DV, AV));
+
+ RowExpression effectivePredicate = effectivePredicateExtractor.extract(node);
+
+ // The filter predicate is reduced to `CV = 10 AND mod(10,5) = -1`
+ // Since we have no references to `CV` in the assignments however, neither of these conjuncts is pulled up through the Project
+ assertEquals(effectivePredicate, TRUE_CONSTANT);
+ }
+
@Test
public void testTopN()
{
diff --git a/presto-native-execution/presto_cpp/main/PrestoServer.cpp b/presto-native-execution/presto_cpp/main/PrestoServer.cpp
index 6716546e8324f..91e3a39ae13b4 100644
--- a/presto-native-execution/presto_cpp/main/PrestoServer.cpp
+++ b/presto-native-execution/presto_cpp/main/PrestoServer.cpp
@@ -739,7 +739,7 @@ void PrestoServer::initializeThreadPools() {
#ifdef __linux__
threadFactory = std::make_shared("Driver");
#else
- VELOX_FAIL("Batch scheduling policy can only be enabled on Linux")
+ VELOX_FAIL("Batch scheduling policy can only be enabled on Linux");
#endif
} else {
threadFactory = std::make_shared("Driver");
@@ -834,8 +834,6 @@ void PrestoServer::initializeVeloxMemory() {
systemConfig->sharedArbitratorMemoryPoolInitialCapacity()},
{std::string(SharedArbitratorConfig::kMemoryPoolReservedCapacity),
systemConfig->sharedArbitratorMemoryPoolReservedCapacity()},
- {std::string(SharedArbitratorConfig::kMemoryPoolTransferCapacity),
- systemConfig->sharedArbitratorMemoryPoolTransferCapacity()},
{std::string(SharedArbitratorConfig::kMemoryReclaimMaxWaitTime),
systemConfig->sharedArbitratorMemoryReclaimWaitTime()},
{std::string(SharedArbitratorConfig::kMemoryPoolMinFreeCapacity),
diff --git a/presto-native-execution/presto_cpp/main/PrestoTask.cpp b/presto-native-execution/presto_cpp/main/PrestoTask.cpp
index ae84bff7ded87..44fac67de0785 100644
--- a/presto-native-execution/presto_cpp/main/PrestoTask.cpp
+++ b/presto-native-execution/presto_cpp/main/PrestoTask.cpp
@@ -590,10 +590,20 @@ void PrestoTask::updateExecutionInfoLocked(
prestoTaskStats.outputPositions = 0;
prestoTaskStats.outputDataSizeInBytes = 0;
- prestoTaskStats.queuedDrivers = veloxTaskStats.numQueuedDrivers;
- prestoTaskStats.totalDrivers = veloxTaskStats.numTotalDrivers;
+ // Presto Java reports number of drivers to number of splits in Presto UI
+ // because split and driver are 1 to 1 mapping relationship. This is not true
+ // in Prestissimo where 1 driver handles many splits. In order to quickly
+ // unblock developers from viewing the correct progress of splits in
+ // Prestissimo's coordinator UI, we put number of splits in total, queued, and
+ // finished to indicate the progress of the query. Number of running drivers
+ // are passed as it is to have a proper running drivers count in UI.
+ //
+ // TODO: We should really extend the API (protocol::TaskStats and Presto
+ // coordinator UI) to have splits information as a proper fix.
+ prestoTaskStats.totalDrivers = veloxTaskStats.numTotalSplits;
+ prestoTaskStats.queuedDrivers = veloxTaskStats.numQueuedSplits;
prestoTaskStats.runningDrivers = veloxTaskStats.numRunningDrivers;
- prestoTaskStats.completedDrivers = veloxTaskStats.numCompletedDrivers;
+ prestoTaskStats.completedDrivers = veloxTaskStats.numFinishedSplits;
prestoTaskStats.pipelines.resize(veloxTaskStats.pipelineStats.size());
for (int i = 0; i < veloxTaskStats.pipelineStats.size(); ++i) {
diff --git a/presto-native-execution/presto_cpp/main/SessionProperties.cpp b/presto-native-execution/presto_cpp/main/SessionProperties.cpp
index c2413ac24d372..338d17766e722 100644
--- a/presto-native-execution/presto_cpp/main/SessionProperties.cpp
+++ b/presto-native-execution/presto_cpp/main/SessionProperties.cpp
@@ -244,6 +244,16 @@ SessionProperties::SessionProperties() {
QueryConfig::kDebugDisableExpressionWithLazyInputs,
boolToString(c.debugDisableExpressionsWithLazyInputs()));
+ addSessionProperty(
+ kSelectiveNimbleReaderEnabled,
+ "Temporary flag to control whether selective Nimble reader should be "
+ "used in this query or not. Will be removed after the selective Nimble "
+ "reader is fully rolled out.",
+ BOOLEAN(),
+ false,
+ QueryConfig::kSelectiveNimbleReaderEnabled,
+ boolToString(c.selectiveNimbleReaderEnabled()));
+
// If `legacy_timestamp` is true, the coordinator expects timestamp
// conversions without a timezone to be converted to the user's
// session_timezone.
diff --git a/presto-native-execution/presto_cpp/main/SessionProperties.h b/presto-native-execution/presto_cpp/main/SessionProperties.h
index bf43c21e2c781..50f9869267e62 100644
--- a/presto-native-execution/presto_cpp/main/SessionProperties.h
+++ b/presto-native-execution/presto_cpp/main/SessionProperties.h
@@ -165,6 +165,12 @@ class SessionProperties {
static constexpr const char* kDebugDisableExpressionWithLazyInputs =
"native_debug_disable_expression_with_lazy_inputs";
+ /// Temporary flag to control whether selective Nimble reader should be used
+ /// in this query or not. Will be removed after the selective Nimble reader
+ /// is fully rolled out.
+ static constexpr const char* kSelectiveNimbleReaderEnabled =
+ "native_selective_nimble_reader_enabled";
+
/// Enable timezone-less timestamp conversions.
static constexpr const char* kLegacyTimestamp = "legacy_timestamp";
diff --git a/presto-native-execution/presto_cpp/main/common/Configs.cpp b/presto-native-execution/presto_cpp/main/common/Configs.cpp
index 5ff693d88518b..2fd1522427618 100644
--- a/presto-native-execution/presto_cpp/main/common/Configs.cpp
+++ b/presto-native-execution/presto_cpp/main/common/Configs.cpp
@@ -192,7 +192,6 @@ SystemConfig::SystemConfig() {
STR_PROP(kSharedArbitratorReservedCapacity, "4GB"),
STR_PROP(kSharedArbitratorMemoryPoolInitialCapacity, "128MB"),
STR_PROP(kSharedArbitratorMemoryPoolReservedCapacity, "64MB"),
- STR_PROP(kSharedArbitratorMemoryPoolTransferCapacity, "32MB"),
STR_PROP(kSharedArbitratorMemoryReclaimMaxWaitTime, "5m"),
STR_PROP(kSharedArbitratorGlobalArbitrationEnabled, "false"),
NUM_PROP(kLargestSizeClassPages, 256),
@@ -553,15 +552,6 @@ std::string SystemConfig::sharedArbitratorMemoryPoolReservedCapacity() const {
std::string(kSharedArbitratorMemoryPoolReservedCapacityDefault));
}
-std::string SystemConfig::sharedArbitratorMemoryPoolTransferCapacity() const {
- static constexpr std::string_view
- kSharedArbitratorMemoryPoolTransferCapacityDefault = "32MB";
- return optionalProperty(
- kSharedArbitratorMemoryPoolTransferCapacity)
- .value_or(
- std::string(kSharedArbitratorMemoryPoolTransferCapacityDefault));
-}
-
std::string SystemConfig::sharedArbitratorMemoryReclaimWaitTime() const {
static constexpr std::string_view
kSharedArbitratorMemoryReclaimMaxWaitTimeDefault = "5m";
diff --git a/presto-native-execution/presto_cpp/main/common/Configs.h b/presto-native-execution/presto_cpp/main/common/Configs.h
index 8f38f89d6af44..2466698ef0d18 100644
--- a/presto-native-execution/presto_cpp/main/common/Configs.h
+++ b/presto-native-execution/presto_cpp/main/common/Configs.h
@@ -419,11 +419,6 @@ class SystemConfig : public ConfigBase {
static constexpr std::string_view kSharedArbitratorMemoryPoolReservedCapacity{
"shared-arbitrator.memory-pool-reserved-capacity"};
- /// The minimal memory capacity in bytes transferred between memory pools
- /// during memory arbitration.
- static constexpr std::string_view kSharedArbitratorMemoryPoolTransferCapacity{
- "shared-arbitrator.memory-pool-transfer-capacity"};
-
/// Specifies the max time to wait for memory reclaim by arbitration. The
/// memory reclaim might fail if the max wait time has exceeded. If it is
/// zero, then there is no timeout.
@@ -790,8 +785,6 @@ class SystemConfig : public ConfigBase {
std::string sharedArbitratorMemoryPoolReservedCapacity() const;
- std::string sharedArbitratorMemoryPoolTransferCapacity() const;
-
std::string sharedArbitratorMemoryReclaimWaitTime() const;
std::string sharedArbitratorMemoryPoolInitialCapacity() const;
diff --git a/presto-native-execution/presto_cpp/main/http/HttpClient.cpp b/presto-native-execution/presto_cpp/main/http/HttpClient.cpp
index 59e8144e59f0d..4504d9ae83b15 100644
--- a/presto-native-execution/presto_cpp/main/http/HttpClient.cpp
+++ b/presto-native-execution/presto_cpp/main/http/HttpClient.cpp
@@ -145,7 +145,7 @@ std::unique_ptr HttpResponse::consumeBody(
void HttpResponse::freeBuffers() {
if (pool_ != nullptr) {
- for (auto& iobuf : bodyChain_) {
+ for (const auto& iobuf : bodyChain_) {
if (iobuf != nullptr) {
pool_->free(iobuf->writableData(), iobuf->capacity());
}
@@ -170,7 +170,7 @@ std::string HttpResponse::dumpBodyChain() const {
std::string responseBody;
if (!bodyChain_.empty()) {
std::ostringstream oss;
- for (auto& buf : bodyChain_) {
+ for (const auto& buf : bodyChain_) {
oss << std::string((const char*)buf->data(), buf->length());
}
responseBody = oss.str();
diff --git a/presto-native-execution/presto_cpp/main/http/HttpServer.cpp b/presto-native-execution/presto_cpp/main/http/HttpServer.cpp
index 205d95b480ab0..8d22b3fb0e2d0 100644
--- a/presto-native-execution/presto_cpp/main/http/HttpServer.cpp
+++ b/presto-native-execution/presto_cpp/main/http/HttpServer.cpp
@@ -21,9 +21,7 @@
namespace facebook::presto::http {
void sendOkResponse(proxygen::ResponseHandler* downstream) {
- proxygen::ResponseBuilder(downstream)
- .status(http::kHttpOk, "OK")
- .sendWithEOM();
+ proxygen::ResponseBuilder(downstream).status(http::kHttpOk, "").sendWithEOM();
}
void sendOkResponse(proxygen::ResponseHandler* downstream, const json& body) {
@@ -49,7 +47,7 @@ void sendOkResponse(
proxygen::ResponseHandler* downstream,
const std::string& body) {
proxygen::ResponseBuilder(downstream)
- .status(http::kHttpOk, "OK")
+ .status(http::kHttpOk, "")
.header(
proxygen::HTTP_HEADER_CONTENT_TYPE, http::kMimeTypeApplicationJson)
.body(body)
@@ -60,7 +58,7 @@ void sendOkThriftResponse(
proxygen::ResponseHandler* downstream,
const std::string& body) {
proxygen::ResponseBuilder(downstream)
- .status(http::kHttpOk, "OK")
+ .status(http::kHttpOk, "")
.header(
proxygen::HTTP_HEADER_CONTENT_TYPE, http::kMimeTypeApplicationThrift)
.body(body)
@@ -71,19 +69,8 @@ void sendErrorResponse(
proxygen::ResponseHandler* downstream,
const std::string& error,
uint16_t status) {
- static const size_t kMaxStatusSize = 1024;
-
- // Use a prefix of the 'error' as status message. Make sure it doesn't include
- // new lines. See https://www.w3.org/Protocols/rfc2616/rfc2616-sec6.html
-
- size_t statusSize = kMaxStatusSize;
- auto pos = error.find('\n');
- if (pos != std::string::npos && pos < statusSize) {
- statusSize = pos;
- }
-
proxygen::ResponseBuilder(downstream)
- .status(status, error.substr(0, statusSize))
+ .status(status, "")
.body(error)
.sendWithEOM();
}
diff --git a/presto-native-execution/presto_cpp/main/runtime-metrics/PrometheusStatsReporter.cpp b/presto-native-execution/presto_cpp/main/runtime-metrics/PrometheusStatsReporter.cpp
index 2cedf64fb86f1..30e361a6bc4d4 100644
--- a/presto-native-execution/presto_cpp/main/runtime-metrics/PrometheusStatsReporter.cpp
+++ b/presto-native-execution/presto_cpp/main/runtime-metrics/PrometheusStatsReporter.cpp
@@ -169,17 +169,23 @@ void PrometheusStatsReporter::addMetricValue(const char* key, size_t value)
auto statsInfo = metricIterator->second;
switch (statsInfo.statType) {
case velox::StatType::COUNT: {
- auto counter =
+ auto* counter =
reinterpret_cast<::prometheus::Counter*>(statsInfo.metricPtr);
- counter->Increment(value);
- } break;
- case velox::StatType::SUM:
+ counter->Increment(static_cast(value));
+ break;
+ }
+ case velox::StatType::SUM: {
+ auto* gauge = reinterpret_cast<::prometheus::Gauge*>(statsInfo.metricPtr);
+ gauge->Increment(static_cast(value));
+ break;
+ }
case velox::StatType::AVG:
case velox::StatType::RATE: {
// Overrides the existing state.
- auto gauge = reinterpret_cast<::prometheus::Gauge*>(statsInfo.metricPtr);
- gauge->Set(value);
- } break;
+ auto* gauge = reinterpret_cast<::prometheus::Gauge*>(statsInfo.metricPtr);
+ gauge->Set(static_cast(value));
+ break;
+ }
default:
VELOX_UNSUPPORTED(
"Unsupported metric type {}",
diff --git a/presto-native-execution/presto_cpp/main/runtime-metrics/tests/PrometheusReporterTest.cpp b/presto-native-execution/presto_cpp/main/runtime-metrics/tests/PrometheusReporterTest.cpp
index e2b7ffa178e87..c6b6d635d1a97 100644
--- a/presto-native-execution/presto_cpp/main/runtime-metrics/tests/PrometheusReporterTest.cpp
+++ b/presto-native-execution/presto_cpp/main/runtime-metrics/tests/PrometheusReporterTest.cpp
@@ -22,6 +22,7 @@ class PrometheusReporterTest : public testing::Test {
void SetUp() override {
reporter = std::make_shared(testLabels);
}
+
void verifySerializedResult(
const std::string& fullSerializedResult,
std::vector& expected) {
@@ -32,6 +33,7 @@ class PrometheusReporterTest : public testing::Test {
EXPECT_EQ(line, expected[i++]);
}
}
+
const std::map testLabels = {
{"cluster", "test_cluster"},
{"worker", "test_worker_pod"}};
@@ -62,24 +64,29 @@ TEST_F(PrometheusReporterTest, testCountAndGauge) {
facebook::velox::StatType::RATE,
reporter->registeredMetricsMap_.find("test.key4")->second.statType);
- std::vector testData = {10, 11, 15};
+ std::vector testData = {10, 12, 14};
for (auto i : testData) {
reporter->addMetricValue("test.key1", i);
reporter->addMetricValue("test.key2", i + 1000);
+ reporter->addMetricValue("test.key3", i + 2000);
+ reporter->addMetricValue("test.key4", i + 3000);
}
+
// Uses default value of 1 for second parameter.
reporter->addMetricValue("test.key1");
+ reporter->addMetricValue("test.key3");
+
auto fullSerializedResult = reporter->fetchMetrics();
std::vector expected = {
"# TYPE test_key1 counter",
"test_key1{" + labelsSerialized + "} 37",
"# TYPE test_key2 gauge",
- "test_key2{" + labelsSerialized + "} 1015",
+ "test_key2{" + labelsSerialized + "} 1014",
"# TYPE test_key3 gauge",
- "test_key3{" + labelsSerialized + "} 0",
+ "test_key3{" + labelsSerialized + "} 6037",
"# TYPE test_key4 gauge",
- "test_key4{" + labelsSerialized + "} 0"};
+ "test_key4{" + labelsSerialized + "} 3014"};
verifySerializedResult(fullSerializedResult, expected);
};
diff --git a/presto-native-execution/presto_cpp/main/tests/QueryContextManagerTest.cpp b/presto-native-execution/presto_cpp/main/tests/QueryContextManagerTest.cpp
index 43bd39074e76d..75730cca8aeb7 100644
--- a/presto-native-execution/presto_cpp/main/tests/QueryContextManagerTest.cpp
+++ b/presto-native-execution/presto_cpp/main/tests/QueryContextManagerTest.cpp
@@ -56,6 +56,7 @@ TEST_F(QueryContextManagerTest, nativeSessionProperties) {
{"native_debug_disable_common_sub_expressions", "true"},
{"native_debug_disable_expression_with_memoization", "true"},
{"native_debug_disable_expression_with_lazy_inputs", "true"},
+ {"native_selective_nimble_reader_enabled", "true"},
{"aggregation_spill_all", "true"}}};
auto queryCtx = taskManager_->getQueryContextManager()->findOrCreateQueryCtx(
taskId, session);
@@ -67,6 +68,7 @@ TEST_F(QueryContextManagerTest, nativeSessionProperties) {
EXPECT_TRUE(queryCtx->queryConfig().debugDisableCommonSubExpressions());
EXPECT_TRUE(queryCtx->queryConfig().debugDisableExpressionsWithMemoization());
EXPECT_TRUE(queryCtx->queryConfig().debugDisableExpressionsWithLazyInputs());
+ EXPECT_TRUE(queryCtx->queryConfig().selectiveNimbleReaderEnabled());
EXPECT_EQ(queryCtx->queryConfig().spillWriteBufferSize(), 1024);
}
diff --git a/presto-native-execution/presto_cpp/main/tests/TaskManagerTest.cpp b/presto-native-execution/presto_cpp/main/tests/TaskManagerTest.cpp
index 6602ad1045104..8bd485bc20518 100644
--- a/presto-native-execution/presto_cpp/main/tests/TaskManagerTest.cpp
+++ b/presto-native-execution/presto_cpp/main/tests/TaskManagerTest.cpp
@@ -29,6 +29,8 @@
#include "velox/dwio/common/FileSink.h"
#include "velox/dwio/common/WriterFactory.h"
#include "velox/dwio/common/tests/utils/BatchMaker.h"
+#include "velox/dwio/dwrf/RegisterDwrfReader.h"
+#include "velox/dwio/dwrf/RegisterDwrfWriter.h"
#include "velox/dwio/dwrf/writer/Writer.h"
#include "velox/exec/Exchange.h"
#include "velox/exec/Values.h"
@@ -198,7 +200,10 @@ class TaskManagerTest : public testing::Test {
velox::memory::MemoryManagerOptions options;
options.allocatorCapacity = 8L << 30;
options.arbitratorCapacity = 6L << 30;
- options.memoryPoolInitCapacity = 512 << 20;
+ options.extraArbitratorConfigs = {
+ {std::string(velox::memory::SharedArbitrator::ExtraConfig::
+ kMemoryPoolInitialCapacity),
+ "512MB"}};
options.arbitratorKind = "SHARED";
options.checkUsageLeak = true;
options.arbitrationStateCheckCb = memoryArbitrationStateCheck;
@@ -212,6 +217,8 @@ class TaskManagerTest : public testing::Test {
functions::prestosql::registerAllScalarFunctions();
aggregate::prestosql::registerAllAggregateFunctions();
parse::registerTypeResolver();
+ dwrf::registerDwrfWriterFactory();
+ dwrf::registerDwrfReaderFactory();
exec::ExchangeSource::registerFactory(
[cpuExecutor = exchangeCpuExecutor_,
ioExecutor = exchangeIoExecutor_,
@@ -286,6 +293,8 @@ class TaskManagerTest : public testing::Test {
connector::unregisterConnector(kHiveConnectorId);
unregisterPrestoToVeloxConnector(
connector::hive::HiveConnectorFactory::kHiveConnectorName);
+ dwrf::unregisterDwrfWriterFactory();
+ dwrf::unregisterDwrfReaderFactory();
}
std::vector makeVectors(int count, int rowsPerVector) {
diff --git a/presto-native-execution/presto_cpp/main/types/PrestoToVeloxConnector.cpp b/presto-native-execution/presto_cpp/main/types/PrestoToVeloxConnector.cpp
index faa4f93eaf67a..a9a6f21ed25f5 100644
--- a/presto-native-execution/presto_cpp/main/types/PrestoToVeloxConnector.cpp
+++ b/presto-native-execution/presto_cpp/main/types/PrestoToVeloxConnector.cpp
@@ -1109,13 +1109,15 @@ HivePrestoToVeloxConnector::toVeloxSplit(
for (const auto& [key, value] : hiveSplit->storage.serdeParameters) {
serdeParameters[key] = value;
}
- std::unordered_map infoColumns;
- infoColumns.reserve(2);
- infoColumns.insert(
- {"$file_size", std::to_string(hiveSplit->fileSplit.fileSize)});
- infoColumns.insert(
+ std::unordered_map infoColumns = {
+ {"$path", hiveSplit->fileSplit.path},
+ {"$file_size", std::to_string(hiveSplit->fileSplit.fileSize)},
{"$file_modified_time",
- std::to_string(hiveSplit->fileSplit.fileModifiedTime)});
+ std::to_string(hiveSplit->fileSplit.fileModifiedTime)},
+ };
+ if (hiveSplit->tableBucketNumber) {
+ infoColumns["$bucket"] = std::to_string(*hiveSplit->tableBucketNumber);
+ }
auto veloxSplit =
std::make_unique(
catalogId,
diff --git a/presto-native-execution/presto_cpp/main/types/PrestoToVeloxQueryPlan.cpp b/presto-native-execution/presto_cpp/main/types/PrestoToVeloxQueryPlan.cpp
index 9d32452444625..28ca974c5b6ad 100644
--- a/presto-native-execution/presto_cpp/main/types/PrestoToVeloxQueryPlan.cpp
+++ b/presto-native-execution/presto_cpp/main/types/PrestoToVeloxQueryPlan.cpp
@@ -606,6 +606,29 @@ core::PlanNodePtr VeloxQueryPlanConverterBase::toVeloxQueryPlan(
left->outputType()));
}
+  // For ScanFilter and ScanFilterProject, the planner sometimes puts the
+ // remaining filter in a FilterNode after the TableScan. We need to put it
+ // back to TableScan so that Velox can leverage it to do stripe level
+ // skipping. Otherwise we only get row level skipping and lose some
+ // optimization opportunity in case of very low selectivity.
+ if (auto tableScan = std::dynamic_pointer_cast(
+ node->source)) {
+ if (auto* tableLayout = dynamic_cast(
+ tableScan->table.connectorTableLayout.get())) {
+ auto remainingFilter =
+ exprConverter_.toVeloxExpr(tableLayout->remainingPredicate);
+ if (auto* constant = dynamic_cast(
+ remainingFilter.get())) {
+ bool value = constant->value().value();
+        // We should get an empty values node instead of a table scan if the
+        // remaining filter is constantly false.
+ VELOX_CHECK(value, "Unexpected always-false remaining predicate");
+ tableLayout->remainingPredicate = node->predicate;
+ return toVeloxQueryPlan(tableScan, tableWriteInfo, taskId);
+ }
+ }
+ }
+
return std::make_shared(
node->id,
exprConverter_.toVeloxExpr(node->predicate),
diff --git a/presto-native-execution/presto_cpp/main/types/tests/PlanConverterTest.cpp b/presto-native-execution/presto_cpp/main/types/tests/PlanConverterTest.cpp
index b72814044fb0f..a29db9f97b17c 100644
--- a/presto-native-execution/presto_cpp/main/types/tests/PlanConverterTest.cpp
+++ b/presto-native-execution/presto_cpp/main/types/tests/PlanConverterTest.cpp
@@ -143,6 +143,10 @@ TEST_F(PlanConverterTest, scanAgg) {
ASSERT_EQ(
tableHandle->dataColumns()->toString(),
"ROW>>,comment:VARCHAR>");
+ ASSERT_TRUE(tableHandle->remainingFilter());
+ ASSERT_EQ(
+ tableHandle->remainingFilter()->toString(),
+ "presto.default.lt(presto.default.rand(),0.0001)");
auto tableParameters = tableHandle->tableParameters();
ASSERT_EQ(tableParameters.size(), 6);
diff --git a/presto-native-execution/presto_cpp/main/types/tests/PrestoToVeloxSplitTest.cpp b/presto-native-execution/presto_cpp/main/types/tests/PrestoToVeloxSplitTest.cpp
index 5e3a8f98c295a..bdb08da2d6eb7 100644
--- a/presto-native-execution/presto_cpp/main/types/tests/PrestoToVeloxSplitTest.cpp
+++ b/presto-native-execution/presto_cpp/main/types/tests/PrestoToVeloxSplitTest.cpp
@@ -157,6 +157,8 @@ TEST_F(PrestoToVeloxSplitTest, bucketConversion) {
ASSERT_EQ(veloxHiveSplit.bucketConversion->tableBucketCount, 4096);
ASSERT_EQ(veloxHiveSplit.bucketConversion->partitionBucketCount, 512);
ASSERT_EQ(veloxHiveSplit.bucketConversion->bucketColumnHandles.size(), 1);
+ ASSERT_EQ(veloxHiveSplit.infoColumns.at("$path"), hiveSplit.fileSplit.path);
+ ASSERT_EQ(veloxHiveSplit.infoColumns.at("$bucket"), "42");
auto& veloxColumn = veloxHiveSplit.bucketConversion->bucketColumnHandles[0];
ASSERT_EQ(veloxColumn->name(), "c0");
ASSERT_EQ(*veloxColumn->dataType(), *BIGINT());
diff --git a/presto-native-execution/presto_cpp/main/types/tests/data/ScanAgg.json b/presto-native-execution/presto_cpp/main/types/tests/data/ScanAgg.json
index cdda8bdb383d2..1033e8a2617ff 100644
--- a/presto-native-execution/presto_cpp/main/types/tests/data/ScanAgg.json
+++ b/presto-native-execution/presto_cpp/main/types/tests/data/ScanAgg.json
@@ -7,166 +7,209 @@
"@type":".ProjectNode",
"id":"1",
"source":{
- "@type":".TableScanNode",
- "id":"0",
- "table":{
- "connectorId":"hive",
- "connectorHandle":{
- "@type":"hive",
- "schemaName":"tpch",
- "tableName":"nation"
- },
- "transaction":{
- "@type":"hive",
- "uuid":"7cc96264-a0fa-45e4-9042-62754ac3a5a0"
- },
- "connectorTableLayout":{
- "@type":"hive",
- "schemaTableName":{
- "schema":"tpch",
- "table":"nation"
+ "@type" : ".FilterNode",
+ "id" : "449",
+ "source":{
+ "@type":".TableScanNode",
+ "id":"0",
+ "table":{
+ "connectorId":"hive",
+ "connectorHandle":{
+ "@type":"hive",
+ "schemaName":"tpch",
+ "tableName":"nation"
+ },
+ "transaction":{
+ "@type":"hive",
+ "uuid":"7cc96264-a0fa-45e4-9042-62754ac3a5a0"
},
- "tablePath":"a/path/to/a/table",
- "partitionColumns":[
+ "connectorTableLayout":{
+ "@type":"hive",
+ "schemaTableName":{
+ "schema":"tpch",
+ "table":"nation"
+ },
+ "tablePath":"a/path/to/a/table",
+ "partitionColumns":[
- ],
- "dataColumns":[
- {
- "name":"nationkey",
- "type":"bigint"
+ ],
+ "dataColumns":[
+ {
+ "name":"nationkey",
+ "type":"bigint"
+ },
+ {
+ "name":"name",
+ "type":"varchar(25)"
+ },
+ {
+ "name":"regionkey",
+ "type":"bigint"
+ },
+ {
+ "name":"complex_type",
+ "type":"array