From 32aab4a0cbc28a73e1d95b3b1eb153965aa9ac1b Mon Sep 17 00:00:00 2001 From: Subash Canapathy Date: Tue, 25 May 2021 16:27:49 -0700 Subject: [PATCH] Airflow 2.0.2 Local Runner Support --- VERSION | 2 +- docker/Dockerfile | 2 +- docker/config/airflow.cfg | 778 ++++++++++++--------------- docker/config/constraints.txt | 559 ++++++++++++------- docker/config/requirements.txt | 144 ++--- docker/docker-compose-local.yml | 2 +- docker/docker-compose-resetdb.yml | 2 +- docker/docker-compose-sequential.yml | 2 +- docker/script/bootstrap.sh | 16 +- docker/script/entrypoint.sh | 8 +- docker/script/systemlibs.sh | 2 +- mwaa-local-env | 4 +- 12 files changed, 824 insertions(+), 697 deletions(-) diff --git a/VERSION b/VERSION index afaf360d3..f93ea0ca3 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.0.0 \ No newline at end of file +2.0.2 \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index 502d920e4..7f246473d 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -7,7 +7,7 @@ FROM amazonlinux LABEL maintainer="amazon" # Airflow -ARG AIRFLOW_VERSION=1.10.12 +ARG AIRFLOW_VERSION=2.0.2 ARG AIRFLOW_USER_HOME=/usr/local/airflow ARG AIRFLOW_DEPS="" ARG PYTHON_DEPS="" diff --git a/docker/config/airflow.cfg b/docker/config/airflow.cfg index b6d11b55f..c004661b8 100644 --- a/docker/config/airflow.cfg +++ b/docker/config/airflow.cfg @@ -1,77 +1,42 @@ [core] # The folder where your airflow pipelines live, most likely a -# subfolder in a code repository -# This path must be absolute +# subfolder in a code repository. This path must be absolute. dags_folder = /usr/local/airflow/dags -# The folder where airflow should store its log files -# This path must be absolute -base_log_folder = /usr/local/airflow/logs - -# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search. -# Set this to True if you want to enable remote logging. -remote_logging = False - -# Logging level -logging_level = INFO - -# Logging level for Flask-appbuilder UI -fab_logging_level = WARN - -# Logging class -# Specify the class that will specify the logging configuration -# This class has to be on the python classpath -# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG -logging_config_class = - -# Flag to enable/disable Colored logs in Console -# Colour the logs when the controlling terminal is a TTY. -colored_console_log = True - -# Log format for when Colored logs is enabled -colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {{%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d}} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s -colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter - -# Format of Log line -log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s -simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s - -# Log filename format -# we need to escape the curly braces by adding an additional curly brace -log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log -log_processor_filename_template = {{ filename }}.log -dag_processor_manager_log_location = /usr/local/airflow/logs/dag_processor_manager/dag_processor_manager.log - -# Name of handler to read task instance logs. -# Default to use task handler. -task_log_reader = task - # Hostname by providing a path to a callable, which will resolve the hostname. -# The format is "package:function". +# The format is "package.function". 
# -# For example, default value "socket:getfqdn" means that result from getfqdn() of "socket" +# For example, default value "socket.getfqdn" means that result from getfqdn() of "socket" # package will be used as hostname. # # No argument should be required in the function specified. -# If using IP address as hostname is preferred, use value ``airflow.utils.net:get_host_ip_address`` -hostname_callable = socket:getfqdn +# If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address`` +hostname_callable = socket.getfqdn # Default timezone in case supplied date times are naive # can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam) default_timezone = utc # The executor class that airflow should use. Choices include -# SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor, KubernetesExecutor +# ``SequentialExecutor``, ``LocalExecutor``, ``CeleryExecutor``, ``DaskExecutor``, +# ``KubernetesExecutor``, ``CeleryKubernetesExecutor`` or the +# full import path to the class when using a custom executor. executor = SequentialExecutor # The SqlAlchemy connection string to the metadata database. # SqlAlchemy supports many different database engine, more information # their website -# sql_alchemy_conn = sqlite:////tmp/airflow.db +#sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db # The encoding for the databases sql_engine_encoding = utf-8 +# Collation for ``dag_id``, ``task_id``, ``key`` columns in case they have different encoding. +# This is particularly useful in case of mysql with utf8mb4 encoding because +# primary keys for XCom table has too big size and ``sql_engine_collation_for_ids`` should +# be set to ``utf8mb3_general_ci``. +# sql_engine_collation_for_ids = + # If SqlAlchemy should pool database connections. sql_alchemy_pool_enabled = False @@ -86,8 +51,8 @@ sql_alchemy_pool_size = 5 # It follows then that the total number of simultaneous connections the pool will allow # is pool_size + max_overflow, # and the total number of "sleeping" connections the pool will allow is pool_size. -# max_overflow can be set to -1 to indicate no overflow limit; -# no limit will be placed on the total number of concurrent connections. Defaults to 10. +# max_overflow can be set to ``-1`` to indicate no overflow limit; +# no limit will be placed on the total number of concurrent connections. Defaults to ``10``. sql_alchemy_max_overflow = 10 # The SqlAlchemy pool recycle is the number of seconds a connection @@ -106,12 +71,19 @@ sql_alchemy_pool_pre_ping = True # SqlAlchemy supports databases with the concept of multiple schemas. sql_alchemy_schema = +# Import path for connect args in SqlAlchemy. Defaults to an empty dict. +# This is useful when you want to configure db engine args that SqlAlchemy won't parse +# in connection string. +# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args +# sql_alchemy_connect_args = + # The amount of parallelism as a setting to the executor. This defines # the max number of task instances that should run simultaneously # on this airflow installation parallelism = 32 # The number of task instances allowed to run concurrently by the scheduler +# in one DAG. Can be overridden by ``concurrency`` on DAG level. dag_concurrency = 16 # Are DAGs paused by default at creation @@ -121,18 +93,23 @@ dags_are_paused_at_creation = True max_active_runs_per_dag = 16 # Whether to load the DAG examples that ship with Airflow. 
It's good to -# get started, but you probably want to set this to False in a production +# get started, but you probably want to set this to ``False`` in a production # environment load_examples = True # Whether to load the default connections that ship with Airflow. It's good to -# get started, but you probably want to set this to False in a production +# get started, but you probably want to set this to ``False`` in a production # environment load_default_connections = True -# Where your Airflow plugins are stored +# Path to the folder containing Airflow plugins plugins_folder = /usr/local/airflow/plugins +# Should tasks be executed via forking of the parent process ("False", +# the speedier option) or by spawning a new python process ("True" slow, +# but means plugin changes picked up by tasks straight away) +execute_tasks_new_python_interpreter = False + # Secret key to save connection passwords in the db fernet_key = $FERNET_KEY @@ -140,12 +117,21 @@ fernet_key = $FERNET_KEY donot_pickle = False # How long before timing out a python file import -dagbag_import_timeout = 30 +dagbag_import_timeout = 30.0 + +# Should a traceback be shown in the UI for dagbag import errors, +# instead of just the exception message +dagbag_import_error_tracebacks = True + +# If tracebacks are shown, how many entries from the traceback should be shown +dagbag_import_error_traceback_depth = 2 # How long before timing out a DagFileProcessor, which processes a dag file dag_file_processor_timeout = 50 -# The class to use for running task instances in a subprocess +# The class to use for running task instances in a subprocess. +# Choices include StandardTaskRunner, CgroupTaskRunner or the full import path to the class +# when using a custom task runner. task_runner = StandardTaskRunner # If set, tasks without a ``run_as_user`` argument will be run with this user @@ -155,16 +141,12 @@ default_impersonation = # What security module to use (for example kerberos) security = -# If set to False enables some unsecure features like Charts and Ad Hoc Queries. -# In 2.0 will default to True. -secure_mode = True - # Turn unit test mode on (overwrites many configuration options with test # values at runtime) unit_test_mode = False # Whether to enable pickling for xcom (note that this is insecure and allows for -# RCE exploits). This will be deprecated in Airflow 2.0 (be forced to False). +# RCE exploits). enable_xcom_pickling = False # When a task is killed forcefully, this is the amount of time in seconds that @@ -176,15 +158,13 @@ killed_task_cleanup_time = 60 # ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params. dag_run_conf_overrides_params = False -# Worker initialisation check to validate Metadata Database connection -worker_precheck = False - # When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``. dag_discovery_safe_mode = True # The number of retries each task is going to have by default. Can be overridden at dag or task level. default_task_retries = 0 + # Whether to serialise DAGs and persist them in DB. # If set to True, Webserver reads from DB instead of parsing DAG files # More details: https://airflow.apache.org/docs/stable/dag-serialization.html @@ -193,16 +173,19 @@ store_serialized_dags = True # Updating serialized DAG can not be faster than a minimum interval to reduce database write rate. 
min_serialized_dag_update_interval = 30 +# Fetching serialized DAG can not be faster than a minimum interval to reduce database +# read rate. This config controls when your DAGs are updated in the Webserver +min_serialized_dag_fetch_interval = 10 + # Whether to persist DAG files code in DB. # If set to True, Webserver reads file contents from DB instead of -# trying to access files in a DAG folder. Defaults to same as the -# ``store_serialized_dags`` setting. -store_dag_code = %(store_serialized_dags)s +# trying to access files in a DAG folder. +# Example: store_dag_code = False +# store_dag_code = # Maximum number of Rendered Task Instance Fields (Template Fields) per task to store # in the Database. -# When Dag Serialization is enabled (``store_serialized_dags=True``), all the template_fields -# for each of Task Instance are stored in the Database. +# All the template_fields for each of Task Instance are stored in the Database. # Keeping this number small may cause an error when you try to view ``Rendered`` tab in # TaskInstance view for older tasks. max_num_rendered_ti_fields_per_task = 30 @@ -210,9 +193,114 @@ max_num_rendered_ti_fields_per_task = 30 # On each dagrun check against defined SLAs check_slas = True +# Path to custom XCom class that will be used to store and resolve operators results +# Example: xcom_backend = path.to.CustomXCom +xcom_backend = airflow.models.xcom.BaseXCom + +# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``, +# if you want to load plugins whenever 'airflow' is invoked via cli or loaded from module. +lazy_load_plugins = True + +# By default Airflow providers are lazily-discovered (discovery and imports happen only when required). +# Set it to False, if you want to discover providers whenever 'airflow' is invoked via cli or +# loaded from module. +lazy_discover_providers = True + +# Number of times the code should be retried in case of DB Operational Errors. +# Not all transactions will be retried as it can cause undesired state. +# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``. +max_db_retries = 3 + +[logging] +# The folder where airflow should store its log files +# This path must be absolute +base_log_folder = /usr/local/airflow/logs + +# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search. +# Set this to True if you want to enable remote logging. +remote_logging = False + +# Use server-side encryption for logs stored in S3 +encrypt_s3_logs = False + +# Logging level +logging_level = INFO + +# Logging level for Flask-appbuilder UI +fab_logging_level = WARN + +# Logging class +# Specify the class that will specify the logging configuration +# This class has to be on the python classpath +# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG +logging_config_class = + +# Flag to enable/disable Colored logs in Console +# Colour the logs when the controlling terminal is a TTY. 
+colored_console_log = True + +# Log format for when Colored logs is enabled +colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {{%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d}} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s +colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter + +# Format of Log line +log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s +simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s + +# Log filename format +# we need to escape the curly braces by adding an additional curly brace +log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log +log_processor_filename_template = {{ filename }}.log +dag_processor_manager_log_location = /usr/local/airflow/logs/dag_processor_manager/dag_processor_manager.log + +# Specify prefix pattern like mentioned below with stream handler TaskHandlerWithCustomFormatter +# Example: task_log_prefix_template = {{ti.dag_id}}-{{ti.task_id}}-{{execution_date}}-{{try_number}} +task_log_prefix_template = + +# Name of handler to read task instance logs. +# Defaults to use ``task`` handler. +task_log_reader = task + +# A comma\-separated list of third-party logger names that will be configured to print messages to +# consoles\. +# Example: extra_loggers = connexion,sqlalchemy +extra_loggers = + +[metrics] + +# StatsD (https://github.com/etsy/statsd) integration settings. +# Enables sending metrics to StatsD. +statsd_on = True +statsd_host = localhost +statsd_port = 8125 +statsd_prefix = airflow + +# If you want to avoid sending all the available metrics to StatsD, +# you can configure an allow list of prefixes (comma separated) to send only the metrics that +# start with the elements of the list (e.g: "scheduler,executor,dagrun") +statsd_allow_list = + +# A function that validate the statsd stat name, apply changes to the stat name if necessary and return +# the transformed stat name. +# +# The function should have the following signature: +# def func_name(stat_name: str) -> str: +stat_name_handler = + +# To enable datadog integration to send airflow metrics. +statsd_datadog_enabled = False + +# List of datadog tags attached to all metrics(e.g: key1:value1,key2:value2) +statsd_datadog_tags = + +# If you want to utilise your own custom Statsd client set the relevant +# module path below. +# Note: The module path must exist on your PYTHONPATH for Airflow to pick it up +# statsd_custom_client_path = + [secrets] # Full class name of secrets backend to enable (will precede env vars and metastore in search path) -# Example: backend = airflow.contrib.secrets.aws_systems_manager.SystemsManagerParameterStoreBackend +# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend backend = # The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class. @@ -233,12 +321,15 @@ api_client = airflow.api.client.local_client endpoint_url = http://localhost:8080 [debug] -# Used only with DebugExecutor. If set to True DAG will fail with first +# Used only with ``DebugExecutor``. If set to ``True`` DAG will fail with first # failed task. Helpful for debugging purposes. fail_fast = False [api] -# How to authenticate users of the API + +# How to authenticate users of the API. See +# https://airflow.apache.org/docs/stable/security.html for possible values. 
+# ("airflow.api.auth.backend.default" allows all requests for historic reasons) auth_backend = airflow.api.auth.backend.deny_all [lineage] @@ -261,17 +352,29 @@ default_ram = 512 default_disk = 512 default_gpus = 0 +# Is allowed to pass additional/unused arguments (args, kwargs) to the BaseOperator operator. +# If set to False, an exception will be thrown, otherwise only the console message will be displayed. +allow_illegal_arguments = False + [hive] # Default mapreduce queue for HiveOperator tasks default_hive_mapred_queue = +[aws_mwaa] +redirect_url = https://console.aws.amazon.com/ +session_duration_minutes = 720 + +# Template for mapred_job_name in HiveOperator, supports the following named parameters +# hostname, dag_id, task_id, execution_date +# mapred_job_name_template = + [webserver] # The base url of your website as airflow cannot guess what domain or # cname you are using. This is used in automated emails that # airflow sends to point links to the right web server base_url = http://localhost:8080 -# Default timezone to display all dates in the RBAC UI, can be UTC, system, or +# Default timezone to display all dates in the UI, can be UTC, system, or # any IANA timezone string (e.g. Europe/Amsterdam). If left empty the # default value of core/default_timezone will be used # Example: default_ui_timezone = America/New_York @@ -305,6 +408,10 @@ worker_refresh_batch_size = 1 # Number of seconds to wait before refreshing a batch of workers. worker_refresh_interval = 30 +# If set to True, Airflow will track files in plugins_folder directory. When it detects changes, +# then reload the gunicorn. +reload_on_plugin_change = False + # Secret key used to run your flask app # It should be as random as possible secret_key = $SECRET_KEY @@ -322,6 +429,11 @@ access_logfile = - # Log files for the gunicorn webserver. '-' means log to stderr. error_logfile = - +# Access log format for gunicorn webserver. +# default format is %%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s" +# documentation - https://docs.gunicorn.org/en/stable/settings.html#access-log-format +access_logformat = + # Expose the configuration file in the web server expose_config = False @@ -331,26 +443,11 @@ expose_hostname = False # Expose stacktrace in the web server expose_stacktrace = False -# Set to true to turn on authentication: -# https://airflow.apache.org/security.html#web-authentication -authenticate = False - -# Filter the list of dags by owner name (requires authentication to be enabled) -filter_by_owner = False - -# Filtering mode. Choices include user (default) and ldapgroup. -# Ldap group filtering requires using the ldap backend -# -# Note that the ldap server needs the "memberOf" overlay to be set up -# in order to user the ldapgroup mode. -owner_mode = user - -# Default DAG view. Valid values are: -# tree, graph, duration, gantt, landing_times +# Default DAG view. Valid values are: ``tree``, ``graph``, ``duration``, ``gantt``, ``landing_times`` dag_default_view = tree -# "Default DAG orientation. Valid values are:" -# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top) +# Default DAG orientation. 
Valid values are: +# ``LR`` (Left->Right), ``TB`` (Top->Bottom), ``RL`` (Right->Left), ``BT`` (Bottom->Top) dag_orientation = LR # Puts the webserver in demonstration mode; blurs the names of Operators for @@ -377,17 +474,14 @@ hide_paused_dags_by_default = False # Consistent page size across all listing views in the UI page_size = 100 -# Use FAB-based webserver with RBAC feature -rbac = True - # Define the color of navigation bar -navbar_color = #007A87 +navbar_color = #fff # Default dagrun to show in UI default_dag_run_display_number = 25 # Set secure flag on session cookie -cookie_secure = False +cookie_secure = True # Set samesite policy on session cookie cookie_samesite = Lax @@ -405,18 +499,24 @@ x_frame_enabled = False # Unique ID of your account in the analytics tool # analytics_id = +# 'Recent Tasks' stats will show for old DagRuns if set +show_recent_stats_for_completed_runs = True + # Update FAB permissions and sync security manager roles # on webserver startup update_fab_perms = True -# Minutes of non-activity before logged out from UI -# 0 means never get forcibly logged out -force_log_out_after = 0 - # The UI cookie lifetime in days -session_lifetime_days = 1 +session_lifetime_minutes = 1440 + +# Sets a custom page title for the DAGs overview page and site title for all pages +# instance_name = [email] + +# Configuration email backend and whether to +# send email alerts on retry or failure +# Email backend to use email_backend = airflow.utils.email.send_email_smtp [smtp] @@ -433,12 +533,30 @@ smtp_ssl = False # smtp_password = smtp_port = 25 smtp_mail_from = airflow@example.com +smtp_timeout = 30 +smtp_retry_limit = 5 [sentry] -# Sentry (https://docs.sentry.io) integration +# Sentry (https://docs.sentry.io) integration. Here you can supply +# additional configuration options based on the Python platform. See: +# https://docs.sentry.io/error-reporting/configuration/?platform=python. +# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``, +# ``ignore_errors``, ``before_breadcrumb``, ``before_send``, ``transport``. +# Enable error reporting to Sentry +sentry_on = false sentry_dsn = +[celery_kubernetes_executor] + +# This section only applies if you are using the ``CeleryKubernetesExecutor`` in +# ``[core]`` section above +# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``. +# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``), +# the task is executed via ``KubernetesExecutor``, +# otherwise via ``CeleryExecutor`` +kubernetes_queue = kubernetes + [celery] # This section only applies if you are using the CeleryExecutor in @@ -461,6 +579,16 @@ worker_concurrency = 16 # Example: worker_autoscale = 16,12 # worker_autoscale = +# Used to increase the number of tasks that a worker prefetches which can improve performance. +# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks +# that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily +# blocked if there are multiple workers and one worker prefetches tasks that sit behind long +# running tasks while another worker has unutilized processes that are unable to process the already +# claimed blocked tasks. 
+# https://docs.celeryproject.org/en/stable/userguide/optimizing.html#prefetch-limits +# Example: worker_prefetch_multiplier = 1 +# worker_prefetch_multiplier = + # When you start an airflow worker, airflow starts a tiny web server # subprocess to serve the workers local log files to the airflow main # web server, who then builds pages and sends them to users. This defines @@ -468,22 +596,28 @@ worker_concurrency = 16 # visible from the main web server to connect into the workers. worker_log_server_port = 8793 +# Umask that will be used when starting workers with the ``airflow celery worker`` +# in daemon mode. This control the file-creation mode mask which determines the initial +# value of file permission bits for newly created files. +worker_umask = 0o077 + # The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally -# a sqlalchemy database. Refer to the Celery documentation for more -# information. -# http://docs.celeryproject.org/en/latest/userguide/configuration.html#broker-settings +# a sqlalchemy database. Refer to the Celery documentation for more information. +# broker_url = redis://redis:6379/0 broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow + # The Celery result_backend. When a job finishes, it needs to update the # metadata of the job. Therefore it will post a message on a message bus, # or insert it into a database (depending of the backend) # This status is used by the scheduler to update the state of the task # The use of a database is highly recommended # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings +# result_backend = db+postgresql://postgres:airflow@postgres/airflow result_backend = db+mysql://airflow:airflow@localhost:3306/airflow # Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start -# it ``airflow flower``. This defines the IP that Celery Flower runs on +# it ``airflow celery flower``. This defines the IP that Celery Flower runs on flower_host = 0.0.0.0 # The root URL for Flower @@ -507,15 +641,13 @@ sync_parallelism = 0 # Import path for celery configuration options celery_config_options = celery_config.CUSTOM_CELERY_CONFIG - -# In case of using SSL ssl_active = False ssl_key = ssl_cert = ssl_cacert = # Celery Pool implementation. -# Choices include: prefork (default), eventlet, gevent or solo. +# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``. # See: # https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency # https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html @@ -523,7 +655,23 @@ pool = prefork # The number of seconds to wait before timing out ``send_task_to_executor`` or # ``fetch_celery_task_state`` operations. -operation_timeout = 2 +operation_timeout = 1.0 + +# Celery task will report its status as 'started' when the task is executed by a worker. +# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted +# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob. +task_track_started = True + +# Time in seconds after which Adopted tasks are cleared by CeleryExecutor. This is helpful to clear +# stalled tasks. +task_adoption_timeout = 600 + +# The Maximum number of retries for publishing task messages to the broker when failing +# due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed. 
+task_publish_max_retries = 3 + +# Worker initialisation check to validate Metadata Database connection +worker_precheck = False [celery_broker_transport_options] @@ -561,15 +709,15 @@ tls_key = # listen (in seconds). job_heartbeat_sec = 5 +# How often (in seconds) to check and tidy up 'running' TaskInstancess +# that no longer have a matching DagRun +clean_tis_without_dagrun_interval = 15.0 + # The scheduler constantly tries to trigger new tasks (look at the # scheduler section in the docs for more information). This defines # how often the scheduler should run (in seconds). scheduler_heartbeat_sec = 5 -# After how much time should the scheduler terminate in seconds -# -1 indicates to run continuously (see also num_runs) -run_duration = -1 - # The number of times to try to schedule each DAG file # -1 indicates unlimited number num_runs = -1 @@ -577,20 +725,27 @@ num_runs = -1 # The number of seconds to wait between consecutive DAG file processing processor_poll_interval = 1 -# after how much time (seconds) a new DAGs should be picked up from the filesystem -min_file_process_interval = 0 +# Number of seconds after which a DAG file is parsed. The DAG file is parsed every +# ``min_file_process_interval`` number of seconds. Updates to DAGs are reflected after +# this interval. Keeping this number low will increase CPU usage. +min_file_process_interval = 30 # How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes. -dag_dir_list_interval = 30 +dag_dir_list_interval = 300 # How often should stats be printed to the logs. Setting to 0 will disable printing stats print_stats_interval = 30 +# How often (in seconds) should pool usage stats be sent to statsd (if statsd_on is enabled) +pool_metrics_interval = 5.0 + # If the last scheduler heartbeat happened more than scheduler_health_check_threshold # ago (in seconds), scheduler is considered unhealthy. # This is used by the health check in the "/health" endpoint scheduler_health_check_threshold = 30 +# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs +orphaned_tasks_check_interval = 300.0 child_process_log_directory = /usr/local/airflow/logs/scheduler # Local task jobs periodically heartbeat to the DB. If the job has @@ -598,13 +753,13 @@ child_process_log_directory = /usr/local/airflow/logs/scheduler # associated task instance as failed and will re-schedule the task. scheduler_zombie_task_threshold = 300 -# Turn off scheduler catchup by setting this to False. +# Turn off scheduler catchup by setting this to ``False``. # Default behavior is unchanged and # Command Line Backfills still work, but the scheduler -# will not do scheduler catchup if this is False, +# will not do scheduler catchup if this is ``False``, # however it can be set on a per DAG basis in the # DAG definition (catchup) -catchup_by_default = True +catchup_by_default = False # This changes the batch size of queries in the scheduling main loop. # If this is too high, SQL query performance may be impacted by one @@ -616,21 +771,32 @@ catchup_by_default = True # Set this to 0 for no limit (not advised) max_tis_per_query = 512 -# Statsd (https://github.com/etsy/statsd) integration settings -statsd_on = True -statsd_host = localhost -statsd_port = 8125 -statsd_prefix = airflow +# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries. 
+# If this is set to False then you should not run more than a single +# scheduler at once +use_row_level_locking = True -# If you want to avoid send all the available metrics to StatsD, -# you can configure an allow list of prefixes to send only the metrics that -# start with the elements of the list (e.g: scheduler,executor,dagrun) -statsd_allow_list = +# Max number of DAGs to create DagRuns for per scheduler loop +# +# Default: 10 +# max_dagruns_to_create_per_loop = + +# How many DagRuns should a scheduler examine (and lock) when scheduling +# and queuing tasks. +# +# Default: 20 +# max_dagruns_per_loop_to_schedule = -# The scheduler can run multiple threads in parallel to schedule dags. -# This defines how many threads will run. -max_threads = 2 -authenticate = False +# Should the Task supervisor process perform a "mini scheduler" to attempt to schedule more tasks of the +# same DAG. Leaving this on will mean tasks in the same DAG execute quicker, but might starve out other +# dags in some circumstances +# +# Default: True +# schedule_after_task_execution = + +# The scheduler can run multiple processes in parallel to parse dags. +# This defines how many processes will run. +parsing_processes = 2 # Turn off scheduler use of cron intervals by setting this to False. # DAGs submitted manually in the web UI or with trigger_dag will still run. @@ -640,70 +806,6 @@ use_job_schedule = True # Only has effect if schedule_interval is set to None in DAG allow_trigger_in_future = False -[ldap] -# set this to ldaps://: -uri = -user_filter = objectClass=* -user_name_attr = uid -group_member_attr = memberOf -superuser_filter = -data_profiler_filter = -bind_user = cn=Manager,dc=example,dc=com -bind_password = insecure -basedn = dc=example,dc=com -cacert = /etc/ca/ldap_ca.crt -search_scope = LEVEL - -# This setting allows the use of LDAP servers that either return a -# broken schema, or do not return a schema. -ignore_malformed_schema = False - -[mesos] -# Mesos master address which MesosExecutor will connect to. -master = localhost:5050 - -# The framework name which Airflow scheduler will register itself as on mesos -framework_name = Airflow - -# Number of cpu cores required for running one task instance using -# 'airflow run --local -p ' -# command on a mesos slave -task_cpu = 1 - -# Memory in MB required for running one task instance using -# 'airflow run --local -p ' -# command on a mesos slave -task_memory = 256 - -# Enable framework checkpointing for mesos -# See http://mesos.apache.org/documentation/latest/slave-recovery/ -checkpoint = False - -# Failover timeout in milliseconds. -# When checkpointing is enabled and this option is set, Mesos waits -# until the configured timeout for -# the MesosExecutor framework to re-register after a failover. Mesos -# shuts down running tasks if the -# MesosExecutor framework fails to re-register within this timeframe. -# Example: failover_timeout = 604800 -# failover_timeout = - -# Enable framework authentication for mesos -# See http://mesos.apache.org/documentation/latest/configuration/ -authenticate = False - -# Mesos credentials, if authentication is enabled -# Example: default_principal = admin -# default_principal = -# Example: default_secret = admin -# default_secret = - -# Optional Docker Image to run on slave before running the command -# This image should be accessible from mesos slave i.e mesos slave -# should be able to pull this docker image before executing the command. 
-# Example: docker_image_slave = puckel/docker-airflow -# docker_image_slave = - [kerberos] ccache = /tmp/airflow_krb5_ccache @@ -720,6 +822,9 @@ api_rev = v3 # UI to hide sensitive variable fields when set to True hide_sensitive_variable_fields = True +# A comma-separated list of sensitive keywords to look for in variables names. +sensitive_variable_fields = + [elasticsearch] # Elasticsearch host host = @@ -749,10 +854,17 @@ use_ssl = False verify_certs = True [kubernetes] -# The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run +# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored. +pod_template_file = + +# The repository of the Kubernetes Image for the Worker to Run worker_container_repository = + +# The tag of the Kubernetes Image for the Worker to Run worker_container_tag = -worker_container_image_pull_policy = IfNotPresent + +# The Kubernetes namespace where airflow workers should be created. Defaults to ``default`` +namespace = default # If True, all worker pods will be deleted upon termination delete_worker_pods = True @@ -761,167 +873,16 @@ delete_worker_pods = True # failed worker pods will not be deleted so users can investigate them. delete_worker_pods_on_failure = False -# Number of Kubernetes Worker Pod creation calls per scheduler loop +# Number of Kubernetes Worker Pod creation calls per scheduler loop. +# Note that the current default of "1" will only launch a single pod +# per-heartbeat. It is HIGHLY recommended that users increase this +# number to match the tolerance of their kubernetes cluster for +# better performance. worker_pods_creation_batch_size = 1 -# The Kubernetes namespace where airflow workers should be created. Defaults to ``default`` -namespace = default - -# The name of the Kubernetes ConfigMap containing the Airflow Configuration (this file) -# Example: airflow_configmap = airflow-configmap -airflow_configmap = - -# The name of the Kubernetes ConfigMap containing ``airflow_local_settings.py`` file. -# -# For example: -# -# ``airflow_local_settings_configmap = "airflow-configmap"`` if you have the following ConfigMap. -# -# ``airflow-configmap.yaml``: -# -# .. code-block:: yaml -# -# --- -# apiVersion: v1 -# kind: ConfigMap -# metadata: -# name: airflow-configmap -# data: -# airflow_local_settings.py: | -# def pod_mutation_hook(pod): -# ... -# airflow.cfg: | -# ... -# Example: airflow_local_settings_configmap = airflow-configmap -airflow_local_settings_configmap = - -# For docker image already contains DAGs, this is set to ``True``, and the worker will -# search for dags in dags_folder, -# otherwise use git sync or dags volume claim to mount DAGs -dags_in_image = False - -# For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs -dags_volume_subpath = - -# For DAGs mounted via a volume claim (mutually exclusive with git-sync and host path) -dags_volume_claim = - -# For volume mounted logs, the worker will look in this subpath for logs -logs_volume_subpath = - -# A shared volume claim for the logs -logs_volume_claim = - -# For DAGs mounted via a hostPath volume (mutually exclusive with volume claim and git-sync) -# Useful in local environment, discouraged in production -dags_volume_host = - -# A hostPath volume for the logs -# Useful in local environment, discouraged in production -logs_volume_host = - -# A list of configMapsRefs to envFrom. 
If more than one configMap is -# specified, provide a comma separated list: configmap_a,configmap_b -env_from_configmap_ref = - -# A list of secretRefs to envFrom. If more than one secret is -# specified, provide a comma separated list: secret_a,secret_b -env_from_secret_ref = - -# Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim) -git_repo = -git_branch = -git_subpath = - -# The specific rev or hash the git_sync init container will checkout -# This becomes GIT_SYNC_REV environment variable in the git_sync init container for worker pods -git_sync_rev = - -# Use git_user and git_password for user authentication or git_ssh_key_secret_name -# and git_ssh_key_secret_key for SSH authentication -git_user = -git_password = -git_sync_root = /git -git_sync_dest = repo - -# Mount point of the volume if git-sync is being used. -# i.e. {AIRFLOW_HOME}/dags -git_dags_folder_mount_point = - -# To get Git-sync SSH authentication set up follow this format -# -# ``airflow-secrets.yaml``: -# -# .. code-block:: yaml -# -# --- -# apiVersion: v1 -# kind: Secret -# metadata: -# name: airflow-secrets -# data: -# # key needs to be gitSshKey -# gitSshKey: -# Example: git_ssh_key_secret_name = airflow-secrets -git_ssh_key_secret_name = - -# To get Git-sync SSH authentication set up follow this format -# -# ``airflow-configmap.yaml``: -# -# .. code-block:: yaml -# -# --- -# apiVersion: v1 -# kind: ConfigMap -# metadata: -# name: airflow-configmap -# data: -# known_hosts: | -# github.com ssh-rsa <...> -# airflow.cfg: | -# ... -# Example: git_ssh_known_hosts_configmap_name = airflow-configmap -git_ssh_known_hosts_configmap_name = - -# To give the git_sync init container credentials via a secret, create a secret -# with two fields: GIT_SYNC_USERNAME and GIT_SYNC_PASSWORD (example below) and -# add ``git_sync_credentials_secret = `` to your airflow config under the -# ``kubernetes`` section -# -# Secret Example: -# -# .. code-block:: yaml -# -# --- -# apiVersion: v1 -# kind: Secret -# metadata: -# name: git-credentials -# data: -# GIT_SYNC_USERNAME: -# GIT_SYNC_PASSWORD: -git_sync_credentials_secret = - -# For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync -git_sync_container_repository = k8s.gcr.io/git-sync -git_sync_container_tag = v3.1.1 -git_sync_init_container_name = git-sync-clone -git_sync_run_as_user = 65533 - -# The name of the Kubernetes service account to be associated with airflow workers, if any. -# Service accounts are required for workers that require access to secrets or cluster resources. -# See the Kubernetes RBAC documentation for more: -# https://kubernetes.io/docs/admin/authorization/rbac/ -worker_service_account_name = - -# Any image pull secrets to be given to worker pods, If more than one secret is -# required, provide a comma separated list: secret_a,secret_b -image_pull_secrets = - -# GCP Service Account Keys to be provided to tasks run on Kubernetes Executors -# Should be supplied in the format: key-name-1:key-path-1,key-name-2:key-path-2 -gcp_service_account_keys = +# Allows users to launch pods in multiple namespaces. +# Will require creating a cluster-role for the scheduler +multi_namespace_mode = False # Use the service account kubernetes gives to pods to connect to kubernetes cluster. # It's intended for clients that expect to be running inside a pod running on kubernetes. 
@@ -931,81 +892,56 @@ in_cluster = True # When running with in_cluster=False change the default cluster_context or config_file # options to Kubernetes client. Leave blank these to use default behaviour like ``kubectl`` has. # cluster_context = -# config_file = - -# Affinity configuration as a single line formatted JSON object. -# See the affinity model for top-level key names (e.g. ``nodeAffinity``, etc.): -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#affinity-v1-core -affinity = -# A list of toleration objects as a single line formatted JSON array -# See: -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core -tolerations = +# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False +# config_file = # Keyword parameters to pass while calling a kubernetes client core_v1_api methods # from Kubernetes Executor provided as a single line formatted JSON dictionary string. # List of supported params are similar for all core_v1_apis, hence a single config -# variable for all apis. -# See: -# https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py -# Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely -# for kubernetes api responses, which will cause the scheduler to hang. -# The timeout is specified as [connect timeout, read timeout] +# variable for all apis. See: +# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py kube_client_request_args = -# Specifies the uid to run the first process of the worker pods containers as -run_as_user = +# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client +# ``core_v1_api`` method when using the Kubernetes Executor. +# This should be an object and can contain any of the options listed in the ``v1DeleteOptions`` +# class defined here: +# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19 +# Example: delete_option_kwargs = {{"grace_period_seconds": 10}} +delete_option_kwargs = -# Specifies a gid to associate with all containers in the worker pods -# if using a git_ssh_key_secret_name use an fs_group -# that allows for the key to be read, e.g. 65533 -fs_group = +# Enables TCP keepalive mechanism. This prevents Kubernetes API requests to hang indefinitely +# when idle connection is time-outed on services like cloud load balancers or firewalls. +enable_tcp_keepalive = False -[kubernetes_node_selectors] +# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has +# been idle for `tcp_keep_idle` seconds. +tcp_keep_idle = 120 -# The Key-value pairs to be given to worker pods. -# The worker pods will be scheduled to the nodes of the specified key-value pairs. -# Should be supplied in the format: key = value +# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond +# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds. +tcp_keep_intvl = 30 -[kubernetes_annotations] +# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond +# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt number` of times before +# a connection is considered to be broken. +tcp_keep_cnt = 6 -# The Key-value annotations pairs to be given to worker pods. 
-# Should be supplied in the format: key = value +# Set this to false to skip verifying SSL certificate of Kubernetes python client. +verify_ssl = True -[kubernetes_environment_variables] +[smart_sensor] +# When `use_smart_sensor` is True, Airflow redirects multiple qualified sensor tasks to +# smart sensor task. +use_smart_sensor = False -# The scheduler sets the following environment variables into your workers. You may define as -# many environment variables as needed and the kubernetes launcher will set them in the launched workers. -# Environment variables in this section are defined as follows -# `` = `` -# -# For example if you wanted to set an environment variable with value `prod` and key -# ``ENVIRONMENT`` you would follow the following format: -# ENVIRONMENT = prod -# -# Additionally you may override worker airflow settings with the ``AIRFLOW__
__`` -# formatting as supported by airflow normally. - -[kubernetes_secrets] - -# The scheduler mounts the following secrets into your workers as they are launched by the -# scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the -# defined secrets and mount them as secret environment variables in the launched workers. -# Secrets in this section are defined as follows -# `` = =`` -# -# For example if you wanted to mount a kubernetes secret key named ``postgres_password`` from the -# kubernetes secret object ``airflow-secret`` as the environment variable ``POSTGRES_PASSWORD`` into -# your workers you would follow the following format: -# ``POSTGRES_PASSWORD = airflow-secret=postgres_credentials`` -# -# Additionally you may override worker airflow settings with the ``AIRFLOW__
__`` -# formatting as supported by airflow normally. +# `shard_code_upper_limit` is the upper limit of `shard_code` value. The `shard_code` is generated +# by `hashcode % shard_code_upper_limit`. +shard_code_upper_limit = 10000 -[kubernetes_labels] +# The number of running smart sensor processes for each service. +shards = 5 -# The Key-value pairs to be given to worker pods. -# The worker pods will be given these static labels, as well as some additional dynamic labels -# to identify the task. -# Should be supplied in the format: ``key = value`` +# comma separated sensor classes support in smart_sensor. +sensors_enabled = NamedHivePartitionSensor \ No newline at end of file diff --git a/docker/config/constraints.txt b/docker/config/constraints.txt index 9574a37d0..c58ac3792 100644 --- a/docker/config/constraints.txt +++ b/docker/config/constraints.txt @@ -1,295 +1,446 @@ -# downloaded from https://raw.githubusercontent.com/apache/airflow/constraints-1.10.12/constraints-3.7.txt -# Editable install with no version control (apache-airflow==1.10.12) -Babel==2.8.0 -Flask-Admin==1.5.4 -Flask-AppBuilder==2.3.4 +# downloaded from https://raw.githubusercontent.com/apache/airflow/constraints-2-0/constraints-3.7.txt +# Editable install with no version control (apache-airflow==2.0.2) +APScheduler==3.6.3 +Authlib==0.15.3 +Babel==2.9.0 Flask-Babel==1.0.0 Flask-Bcrypt==0.7.1 -Flask-Caching==1.3.3 -Flask-JWT-Extended==3.24.1 +Flask-Caching==1.10.1 +Flask-JWT-Extended==3.25.1 Flask-Login==0.4.1 +Flask-OAuthlib==0.9.5 Flask-OpenID==1.2.5 -Flask-SQLAlchemy==2.4.4 +Flask-SQLAlchemy==2.5.1 Flask-WTF==0.14.3 Flask==1.1.2 -JPype1==0.7.1 +GitPython==3.1.15 +HeapDict==1.0.1 +JPype1==1.2.1 JayDeBeApi==1.2.3 -Jinja2==2.11.2 -Mako==1.1.3 -Markdown==2.6.11 +Jinja2==2.11.3 +Mako==1.1.4 +Markdown==3.3.4 MarkupSafe==1.1.1 PyHive==0.6.3 PyJWT==1.7.1 PyNaCl==1.4.0 PySmbClient==0.1.5 -PyYAML==5.3.1 -Pygments==2.6.1 -SQLAlchemy-JSONField==0.9.0 -SQLAlchemy-Utils==0.36.8 -SQLAlchemy==1.3.18 -Sphinx==3.2.1 -Unidecode==1.1.1 +Pygments==2.8.1 +SQLAlchemy-JSONField==1.0.0 +SQLAlchemy-Utils==0.37.0 +SQLAlchemy==1.3.24 +Sphinx==3.4.3 +Unidecode==1.2.0 WTForms==2.3.3 -Werkzeug==0.16.1 -adal==1.2.4 +Werkzeug==1.0.1 +adal==1.2.7 +aiohttp==3.7.4.post0 alabaster==0.7.12 -alembic==1.4.2 +alembic==1.5.8 amqp==2.6.1 analytics-python==1.2.9 ansiwrap==0.8.4 +apache-airflow-providers-airbyte==1.0.0 +apache-airflow-providers-amazon==1.3.0 +apache-airflow-providers-apache-beam==1.0.1 +apache-airflow-providers-apache-cassandra==1.0.1 +apache-airflow-providers-apache-druid==1.1.0 +apache-airflow-providers-apache-hdfs==1.0.1 +apache-airflow-providers-apache-hive==1.0.3 +apache-airflow-providers-apache-kylin==1.0.1 +apache-airflow-providers-apache-livy==1.1.0 +apache-airflow-providers-apache-pig==1.0.1 +apache-airflow-providers-apache-pinot==1.0.1 +apache-airflow-providers-apache-spark==1.0.2 +apache-airflow-providers-apache-sqoop==1.0.1 +apache-airflow-providers-celery==1.0.1 +apache-airflow-providers-cloudant==1.0.1 +apache-airflow-providers-cncf-kubernetes==1.1.0 +apache-airflow-providers-databricks==1.0.1 +apache-airflow-providers-datadog==1.0.1 +apache-airflow-providers-dingding==1.0.2 +apache-airflow-providers-discord==1.0.1 +apache-airflow-providers-docker==1.1.0 +apache-airflow-providers-elasticsearch==1.0.3 +apache-airflow-providers-exasol==1.1.1 +apache-airflow-providers-facebook==1.1.0 +apache-airflow-providers-ftp==1.0.1 +apache-airflow-providers-google==2.2.0 +apache-airflow-providers-grpc==1.1.0 
+apache-airflow-providers-hashicorp==1.0.2 +apache-airflow-providers-http==1.1.1 +apache-airflow-providers-imap==1.0.1 +apache-airflow-providers-jdbc==1.0.1 +apache-airflow-providers-jenkins==1.1.0 +apache-airflow-providers-jira==1.0.1 +apache-airflow-providers-microsoft-azure==1.3.0 +apache-airflow-providers-microsoft-mssql==1.0.1 +apache-airflow-providers-microsoft-winrm==1.1.0 +apache-airflow-providers-mongo==1.0.1 +apache-airflow-providers-mysql==1.1.0 +apache-airflow-providers-neo4j==1.0.1 +apache-airflow-providers-odbc==1.0.1 +apache-airflow-providers-openfaas==1.1.1 +apache-airflow-providers-opsgenie==1.0.2 +apache-airflow-providers-oracle==1.1.0 +apache-airflow-providers-pagerduty==1.0.1 +apache-airflow-providers-papermill==1.0.2 +apache-airflow-providers-plexus==1.0.1 +apache-airflow-providers-postgres==1.0.1 +apache-airflow-providers-presto==1.0.2 +apache-airflow-providers-qubole==1.0.2 +apache-airflow-providers-redis==1.0.1 +apache-airflow-providers-salesforce==2.0.0 +apache-airflow-providers-samba==1.0.1 +apache-airflow-providers-segment==1.0.1 +apache-airflow-providers-sendgrid==1.0.2 +apache-airflow-providers-sftp==1.1.1 +apache-airflow-providers-singularity==1.1.0 +apache-airflow-providers-slack==3.0.0 +apache-airflow-providers-snowflake==1.2.0 +apache-airflow-providers-sqlite==1.0.2 +apache-airflow-providers-ssh==1.3.0 +apache-airflow-providers-tableau==1.0.0 +apache-airflow-providers-telegram==1.0.2 +apache-airflow-providers-trino==1.0.0 +apache-airflow-providers-vertica==1.0.1 +apache-airflow-providers-yandex==1.0.1 +apache-airflow-providers-zendesk==1.0.1 +apache-beam==2.28.0 apipkg==1.5 -apispec==1.3.3 +apispec==3.3.2 appdirs==1.4.4 -argcomplete==1.12.0 +argcomplete==1.12.3 +arrow==1.0.3 asn1crypto==1.4.0 -astroid==2.4.2 +astroid==2.5.3 async-generator==1.10 +async-timeout==3.0.1 atlasclient==1.0.0 -attrs==19.3.0 -aws-sam-translator==1.26.0 -aws-xray-sdk==2.6.0 -azure-common==1.1.25 +attrs==20.3.0 +avro-python3==1.9.2.1 +aws-xray-sdk==2.7.0 +azure-batch==10.0.0 +azure-common==1.1.27 +azure-core==1.13.0 azure-cosmos==3.2.0 -azure-datalake-store==0.0.49 +azure-datalake-store==0.0.52 +azure-identity==1.5.0 +azure-keyvault-certificates==4.2.1 +azure-keyvault-keys==4.3.1 +azure-keyvault-secrets==4.2.0 +azure-keyvault==4.1.0 +azure-kusto-data==0.0.45 azure-mgmt-containerinstance==1.5.0 -azure-mgmt-resource==10.2.0 +azure-mgmt-core==1.2.2 +azure-mgmt-datafactory==1.1.0 +azure-mgmt-datalake-nspkg==3.0.1 +azure-mgmt-datalake-store==0.5.0 +azure-mgmt-nspkg==3.0.2 +azure-mgmt-resource==16.1.0 azure-nspkg==3.0.2 -azure-storage-blob==2.1.0 +azure-storage-blob==12.8.0 azure-storage-common==2.1.0 -azure-storage==0.36.0 +azure-storage-file==2.1.0 backcall==0.2.0 bcrypt==3.2.0 beautifulsoup4==4.7.1 -billiard==3.6.3.0 -black==19.10b0 +billiard==3.6.4.0 +black==20.8b1 blinker==1.4 -boto3==1.14.43 +boto3==1.17.54 boto==2.49.0 -botocore==1.17.43 -cached-property==1.5.1 -cachetools==4.1.1 +botocore==1.20.54 +bowler==0.9.0 +cached-property==1.5.2 +cachetools==4.2.1 cassandra-driver==3.20.2 -cattrs==1.0.0 +cattrs==1.5.0 celery==4.4.7 -certifi==2020.6.20 -cffi==1.14.2 +certifi==2020.12.5 +cffi==1.14.5 cfgv==3.2.0 -cfn-lint==0.35.0 cgroupspy==0.1.6 chardet==3.0.4 -click==6.7 -cloudant==0.5.10 -colorama==0.4.3 -colorlog==4.0.2 -configparser==3.5.3 -coverage==5.2.1 -croniter==0.3.34 -cryptography==3.0 -cx-Oracle==8.0.0 -datadog==0.38.0 -decorator==4.4.2 -defusedxml==0.6.0 +click==7.1.2 +clickclick==20.10.2 +cloudant==2.14.0 +cloudpickle==1.4.1 +colorama==0.4.4 +colorlog==5.0.1 +commonmark==0.9.1 
+connexion==2.7.0 +coverage==5.5 +crcmod==1.7 +croniter==0.3.37 +cryptography==3.4.7 +curlify==2.2.1 +cx-Oracle==8.1.0 +dask==2021.4.0 +datadog==0.41.0 +decorator==5.0.7 +defusedxml==0.7.1 dill==0.3.2 distlib==0.3.1 +distributed==2.19.0 dnspython==1.16.0 docker-pycreds==0.4.0 docker==3.7.3 docopt==0.6.2 -docutils==0.16 -ecdsa==0.15 -elasticsearch-dsl==5.4.0 -elasticsearch==5.5.3 -email-validator==1.1.1 +ecdsa==0.14.1 +elasticsearch-dbapi==0.1.0 +elasticsearch-dsl==7.3.0 +elasticsearch==7.5.1 +email-validator==1.1.2 entrypoints==0.3 -execnet==1.7.1 -fastavro==0.24.1 +eventlet==0.30.2 +execnet==1.8.0 +facebook-business==10.0.0 +fastavro==1.4.0 +fasteners==0.16 filelock==3.0.12 -flake8-colors==0.1.6 -flake8==3.8.3 +fissix==20.8.0 +flake8-colors==0.1.9 +flake8==3.9.1 flaky==3.7.0 -flask-swagger==0.2.14 -flower==0.9.5 -freezegun==0.3.15 -fsspec==0.8.0 -funcsigs==1.0.2 -future-fstrings==1.2.0 +flower==0.9.7 +freezegun==1.1.0 +fsspec==2021.4.0 future==0.18.2 -gcsfs==0.6.2 -google-api-core==1.22.1 -google-api-python-client==1.10.0 -google-auth-httplib2==0.0.4 -google-auth-oauthlib==0.4.1 -google-auth==1.20.1 -google-cloud-bigquery==1.26.1 -google-cloud-bigtable==1.4.0 +gcsfs==0.8.0 +gevent==21.1.2 +gitdb==4.0.7 +github3.py==2.0.0 +google-ads==7.0.0 +google-api-core==1.26.3 +google-api-python-client==1.12.8 +google-apitools==0.5.31 +google-auth-httplib2==0.1.0 +google-auth-oauthlib==0.4.4 +google-auth==1.29.0 +google-cloud-automl==2.3.0 +google-cloud-bigquery-datatransfer==3.1.1 +google-cloud-bigquery-storage==2.4.0 +google-cloud-bigquery==1.28.0 +google-cloud-bigtable==1.7.0 +google-cloud-build==2.0.0 google-cloud-container==1.0.1 -google-cloud-core==1.4.1 +google-cloud-core==1.6.0 +google-cloud-datacatalog==3.1.1 +google-cloud-dataproc==2.3.1 +google-cloud-datastore==1.15.3 google-cloud-dlp==1.0.0 +google-cloud-kms==2.2.0 google-cloud-language==1.3.0 +google-cloud-logging==2.3.1 +google-cloud-memcache==0.3.0 +google-cloud-monitoring==2.2.1 +google-cloud-os-login==2.1.0 +google-cloud-pubsub==2.4.1 +google-cloud-redis==2.1.0 google-cloud-secret-manager==1.0.0 -google-cloud-spanner==1.17.1 +google-cloud-spanner==1.19.1 google-cloud-speech==1.3.2 -google-cloud-storage==1.30.0 +google-cloud-storage==1.37.1 +google-cloud-tasks==2.2.0 google-cloud-texttospeech==1.0.1 google-cloud-translate==1.7.0 -google-cloud-videointelligence==1.15.0 +google-cloud-videointelligence==1.16.1 google-cloud-vision==1.0.0 -google-crc32c==0.1.0 -google-resumable-media==0.7.1 -googleapis-common-protos==1.52.0 -graphviz==0.14.1 +google-cloud-workflows==0.2.0 +google-crc32c==1.1.2 +google-resumable-media==1.2.0 +googleapis-common-protos==1.53.0 +graphviz==0.16 +greenlet==1.0.0 grpc-google-iam-v1==0.12.3 grpcio-gcp==0.2.2 -grpcio==1.31.0 -gunicorn==20.0.4 -hdfs==2.5.8 +grpcio==1.37.0 +gunicorn==19.10.0 +hdfs==2.6.0 hmsclient==0.1.1 -httplib2==0.18.1 -humanize==2.6.0 -hvac==0.10.5 -identify==1.4.28 +httplib2==0.17.4 +humanize==3.4.1 +hvac==0.10.9 +identify==2.2.4 idna==2.10 imagesize==1.2.0 importlib-metadata==1.7.0 -inflection==0.5.0 -ipdb==0.13.3 +importlib-resources==1.5.0 +inflection==0.5.1 +iniconfig==1.1.1 +ipdb==0.13.7 ipython-genutils==0.2.0 -ipython==7.17.0 -iso8601==0.1.12 +ipython==7.22.0 +iso8601==0.1.14 isodate==0.6.0 +isort==5.8.0 itsdangerous==1.1.0 -jedi==0.17.2 +jedi==0.18.0 jira==2.0.0 jmespath==0.10.0 json-merge-patch==0.2 -jsondiff==1.1.2 -jsonpatch==1.26 -jsonpickle==1.4.1 -jsonpointer==2.0 +jsondiff==1.3.0 +jsonpath-ng==1.5.2 jsonschema==3.2.0 -junit-xml==1.9 -jupyter-client==6.1.6 -jupyter-core==4.6.3 
+jupyter-client==6.1.12 +jupyter-core==4.7.1 +jwcrypto==0.8 kombu==4.6.11 kubernetes==11.0.0 -lazy-object-proxy==1.5.1 -ldap3==2.8 +kylinpy==2.8.4 +lazy-object-proxy==1.4.3 +ldap3==2.9 +libcst==0.3.18 +locket==0.2.1 lockfile==0.12.2 marshmallow-enum==1.5.1 +marshmallow-oneofschema==2.1.0 marshmallow-sqlalchemy==0.23.1 -marshmallow==2.21.0 +marshmallow==3.11.1 mccabe==0.6.1 -mock==4.0.2 -mongomock==3.20.0 -more-itertools==8.4.0 -moto==1.3.14 -msrest==0.6.18 +mock==2.0.0 +mongomock==3.22.1 +more-itertools==8.7.0 +moreorless==0.4.0 +moto==2.0.5 +msal-extensions==0.3.0 +msal==1.11.0 +msgpack==1.0.2 +msrest==0.6.21 msrestazure==0.6.4 multi-key-dict==2.0.3 +multidict==5.1.0 mypy-extensions==0.4.3 -mypy==0.720 -mysqlclient==1.3.14 -natsort==7.0.1 -nbclient==0.4.1 -nbformat==5.0.7 -nest-asyncio==1.4.0 -networkx==2.4 -nodeenv==1.4.0 -nteract-scrapbook==0.4.1 +mypy==0.770 +mysql-connector-python==8.0.22 +mysqlclient==2.0.3 +natsort==7.1.1 +nbclient==0.5.3 +nbformat==5.1.3 +neo4j==4.2.1 +nest-asyncio==1.5.1 +nodeenv==1.6.0 +nteract-scrapbook==0.4.2 ntlm-auth==1.5.0 -numpy==1.19.1 -oauthlib==3.1.0 +numpy==1.20.2 +oauth2client==4.1.3 +oauthlib==2.1.0 +openapi-schema-validator==0.1.5 +openapi-spec-validator==0.3.0 oscrypto==1.2.1 -packaging==20.4 -pandas-gbq==0.13.2 -pandas==1.1.0 -papermill==2.1.2 -parameterized==0.7.4 -paramiko==2.7.1 -parso==0.7.1 -pathspec==0.8.0 -pbr==5.4.5 -pendulum==1.4.4 +packaging==20.9 +pandas-gbq==0.14.1 +pandas==1.2.4 +papermill==2.3.3 +parameterized==0.8.1 +paramiko==2.7.2 +parso==0.8.2 +partd==1.2.0 +pathspec==0.8.1 +pbr==5.5.1 +pdpyras==4.1.4 +pendulum==2.1.2 pexpect==4.8.0 pickleshare==0.7.5 -pinotdb==0.1.1 +pinotdb==0.3.3 +pipdeptree==2.0.0 pluggy==0.13.1 -pre-commit==2.6.0 +ply==3.11 +plyvel==1.3.0 +portalocker==1.7.1 +pre-commit==2.12.1 presto-python-client==0.7.0 prison==0.1.3 prometheus-client==0.8.0 -prompt-toolkit==3.0.6 -protobuf==3.13.0 -psutil==5.7.2 -psycopg2-binary==2.8.5 -ptyprocess==0.6.0 -py==1.9.0 +prompt-toolkit==3.0.18 +proto-plus==1.18.1 +protobuf==3.15.8 +psutil==5.8.0 +psycopg2-binary==2.8.6 +ptyprocess==0.7.0 +py4j==0.10.9 +py==1.10.0 pyOpenSSL==19.1.0 -pyarrow==0.17.1 +pyarrow==2.0.0 pyasn1-modules==0.2.8 pyasn1==0.4.8 -pycodestyle==2.6.0 +pycodestyle==2.7.0 +pycountry==20.7.3 pycparser==2.20 -pycryptodomex==3.9.8 +pycryptodomex==3.10.1 pydata-google-auth==1.1.0 -pydruid==0.5.8 -pyflakes==2.2.0 +pydot==1.4.2 +pydruid==0.6.2 +pyenchant==3.2.0 +pyexasol==0.18.1 +pyflakes==2.3.1 pykerberos==1.2.1 -pymongo==3.10.1 -pymssql==2.1.4 +pylint==2.7.4 +pymongo==3.11.3 +pymssql==2.2.1 +pyodbc==4.0.30 pyparsing==2.4.7 -pyrsistent==0.16.0 +pyrsistent==0.17.3 pysftp==0.2.9 -pytest-cov==2.10.1 +pyspark==3.1.1 +pytest-cov==2.11.1 pytest-forked==1.3.0 pytest-instafail==0.4.2 -pytest-rerunfailures==9.0 -pytest-timeout==1.4.2 -pytest-xdist==2.0.0 -pytest==5.4.3 -# 2.2.4 -> 2.2.3 https://t.corp.amazon.com/V304351598 -python-daemon==2.2.3 +pytest-rerunfailures==9.1.1 +pytest-timeouts==1.2.1 +pytest-xdist==2.2.1 +pytest==6.2.3 +python-daemon==2.3.0 python-dateutil==2.8.1 python-editor==1.0.4 -python-http-client==3.2.7 +python-http-client==3.3.2 python-jenkins==1.7.0 python-jose==3.2.0 +python-ldap==3.3.1 python-nvd3==0.15.0 python-slugify==4.0.1 +python-telegram-bot==13.0 python3-openid==3.2.0 -pytz==2020.1 +pytz==2021.1 pytzdata==2020.1 pywinrm==0.4.1 -pyzmq==19.0.2 -qds-sdk==1.16.0 +pyzmq==22.0.3 +qds-sdk==1.16.1 redis==3.5.3 -regex==2020.7.14 -requests-futures==0.9.4 +regex==2021.4.4 requests-kerberos==0.12.0 requests-mock==1.8.0 requests-ntlm==1.1.0 
-requests-oauthlib==1.3.0 +requests-oauthlib==1.1.0 requests-toolbelt==0.9.1 -requests==2.24.0 -responses==0.10.16 -rsa==4.6 -s3transfer==0.3.3 +requests==2.25.1 +responses==0.13.2 +rich==9.2.0 +rsa==4.7.2 +s3transfer==0.4.0 sasl==0.2.1 -sendgrid==5.6.0 +semver==2.13.0 +sendgrid==6.6.0 sentinels==1.0.0 -sentry-sdk==0.16.5 -setproctitle==1.1.10 +sentry-sdk==1.0.0 +setproctitle==1.2.2 +simple-salesforce==1.11.1 six==1.15.0 -slackclient==1.3.2 -snowballstemmer==2.0.0 -snowflake-connector-python==2.2.10 -snowflake-sqlalchemy==1.2.3 -soupsieve==2.0.1 +slack-sdk==3.5.0 +smmap==4.0.0 +snakebite-py3==3.0.5 +snowballstemmer==2.1.0 +snowflake-connector-python==2.4.2 +snowflake-sqlalchemy==1.2.4 +sortedcontainers==2.3.0 +soupsieve==2.2.1 +sphinx-airflow-theme==0.0.2 sphinx-argparse==0.2.5 sphinx-autoapi==1.0.0 -sphinx-copybutton==0.3.0 +sphinx-copybutton==0.3.1 sphinx-jinja==1.1.1 -sphinx-rtd-theme==0.5.0 +sphinx-rtd-theme==0.5.2 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-dotnetdomain==0.4 @@ -298,33 +449,51 @@ sphinxcontrib-htmlhelp==1.0.3 sphinxcontrib-httpdomain==1.7.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-redoc==1.6.0 sphinxcontrib-serializinghtml==1.1.4 -sshpubkeys==3.1.0 +sphinxcontrib-spelling==5.2.1 +spython==0.1.13 sshtunnel==0.1.5 -tabulate==0.8.7 -tenacity==4.12.0 +starkbank-ecdsa==1.1.0 +statsd==3.3.0 +swagger-ui-bundle==0.0.8 +tableauserverclient==0.15.0 +tabulate==0.8.9 +tblib==1.7.0 +tenacity==6.2.0 +termcolor==1.1.0 text-unidecode==1.3 textwrap3==0.9.2 thrift-sasl==0.4.2 thrift==0.13.0 -toml==0.10.1 -tornado==5.1.1 -tqdm==4.48.2 -traitlets==4.3.3 -typed-ast==1.4.1 -typing-extensions==3.7.4.2 -tzlocal==1.5.1 +toml==0.10.2 +toolz==0.11.1 +tornado==6.1 +tqdm==4.60.0 +traitlets==5.0.5 +trino==0.305.0 +typed-ast==1.4.3 +typing-extensions==3.7.4.3 +typing-inspect==0.6.0 +tzlocal==2.1 +ujson==4.0.2 unicodecsv==0.14.1 uritemplate==3.0.1 -urllib3==1.25.10 -vertica-python==0.11.0 +urllib3==1.25.11 +vertica-python==1.0.1 vine==1.3.0 -virtualenv==20.0.30 +virtualenv==20.4.4 +volatile==2.1.0 +watchtower==0.7.3 wcwidth==0.2.5 -websocket-client==0.57.0 +websocket-client==0.58.0 wrapt==1.12.1 xmltodict==0.12.0 -yamllint==1.24.2 +yamllint==1.26.1 +yandexcloud==0.81.0 +yarl==1.6.3 zdesk==2.7.1 -zipp==3.1.0 -zope.deprecation==4.4.0 \ No newline at end of file +zict==2.0.0 +zipp==3.4.1 +zope.event==4.5.0 +zope.interface==5.4.0 \ No newline at end of file diff --git a/docker/config/requirements.txt b/docker/config/requirements.txt index d5deb56d1..116097172 100644 --- a/docker/config/requirements.txt +++ b/docker/config/requirements.txt @@ -1,107 +1,117 @@ # Replacement for the local package installs from CodeArtifact -alembic==1.4.2 +alembic==1.5.8 amqp==2.6.1 -apache-airflow==1.10.12 -apispec==1.3.3 -argcomplete==1.12.0 -attrs==19.3.0 -Babel==2.8.0 -billiard==3.6.3.0 -boto3==1.17.16 -botocore==1.20.16 -cached-property==1.5.1 -cattrs==1.0.0 +apache-airflow==2.0.2 +apache-airflow-providers-amazon==1.3.0 +apache-airflow-providers-celery==1.0.1 +apache-airflow-providers-ftp==1.0.1 +apache-airflow-providers-http==1.1.1 +apache-airflow-providers-imap==1.0.1 +apache-airflow-providers-sqlite==1.0.2 +apispec==3.3.2 +argcomplete==1.12.3 +attrs==20.3.0 +Babel==2.9.0 +billiard==3.6.4.0 +blinker==1.4 +boto3==1.17.53 +botocore==1.20.53 +cached-property==1.5.2 +cattrs==1.5.0 celery==4.4.7 -certifi==2020.6.20 -cffi==1.14.2 +certifi==2020.12.5 +cffi==1.14.5 chardet==3.0.4 -click==6.7 -colorama==0.4.3 -colorlog==4.0.2 -configparser==3.5.3 -croniter==0.3.34 
-cryptography==3.0 -defusedxml==0.6.0 +click==7.1.2 +clickclick==20.10.2 +colorama==0.4.4 +colorlog==5.0.1 +commonmark==0.9.1 +connexion==2.7.0 +croniter==0.3.37 +cryptography==3.4.7 +defusedxml==0.7.1 dill==0.3.2 dnspython==1.16.0 docutils==0.16 -email-validator==1.1.1 +email-validator==1.1.2 Flask==1.1.2 -Flask-Admin==1.5.4 -Flask-AppBuilder==2.3.4 +Flask-AppBuilder==3.2.3 Flask-Babel==1.0.0 -Flask-Caching==1.3.3 -Flask-JWT-Extended==3.24.1 +Flask-Caching==1.10.1 +Flask-JWT-Extended==3.25.1 Flask-Login==0.4.1 Flask-OpenID==1.2.5 -Flask-SQLAlchemy==2.4.4 -flask-swagger==0.2.14 +Flask-SQLAlchemy==2.5.1 Flask-WTF==0.14.3 -flower==0.9.5 -funcsigs==1.0.2 -future==0.18.2 -graphviz==0.14.1 -gunicorn==20.0.4 -humanize==2.6.0 +flower==0.9.7 +graphviz==0.16 +gunicorn==19.10.0 +humanize==3.4.1 idna==2.10 importlib-metadata==1.7.0 -iso8601==0.1.12 +importlib-resources==1.5.0 +inflection==0.5.1 +iso8601==0.1.14 +isodate==0.6.0 itsdangerous==1.1.0 -Jinja2==2.11.2 +Jinja2==2.11.3 jmespath==0.10.0 -json-merge-patch==0.2 jsonschema==3.2.0 kombu==4.6.11 -lazy-object-proxy==1.5.1 +lazy-object-proxy==1.4.3 lockfile==0.12.2 -Mako==1.1.3 -Markdown==2.6.11 +Mako==1.1.4 +Markdown==3.3.4 MarkupSafe==1.1.1 -marshmallow==2.21.0 +marshmallow==3.11.1 marshmallow-enum==1.5.1 +marshmallow-oneofschema==2.1.0 marshmallow-sqlalchemy==0.23.1 -natsort==7.0.1 -numpy==1.19.1 -pandas==1.1.0 -pendulum==1.4.4 +natsort==7.1.1 +numpy==1.20.2 +openapi-schema-validator==0.1.5 +openapi-spec-validator==0.3.0 +pandas==1.2.4 +pendulum==2.1.2 prison==0.1.3 prometheus-client==0.8.0 -psutil==5.7.2 +psutil==5.8.0 psycopg2==2.8.6 pycparser==2.20 pycurl==7.43.0.5 -Pygments==2.6.1 +Pygments==2.8.1 PyJWT==1.7.1 -pyrsistent==0.16.0 -python-daemon==2.2.4 +pyrsistent==0.17.3 +python-daemon==2.3.0 python-dateutil==2.8.1 python-editor==1.0.4 python-nvd3==0.15.0 python-slugify==4.0.1 python3-openid==3.2.0 -pytz==2020.1 +pytz==2021.1 pytzdata==2020.1 -PyYAML==5.3.1 -requests==2.24.0 -s3transfer==0.3.4 -setproctitle==1.1.10 +PyYAML==5.4.1 +requests==2.25.1 +rich==9.2.0 +s3transfer==0.3.7 +setproctitle==1.2.2 six==1.15.0 -SQLAlchemy==1.3.18 -SQLAlchemy-JSONField==0.9.0 -SQLAlchemy-Utils==0.36.8 +SQLAlchemy==1.3.24 +SQLAlchemy-JSONField==1.0.0 +SQLAlchemy-Utils==0.37.0 statsd==3.3.0 -tabulate==0.8.7 -tenacity==4.12.0 +swagger-ui-bundle==0.0.8 +tabulate==0.8.9 +tenacity==6.2.0 +termcolor==1.1.0 text-unidecode==1.3 -thrift==0.13.0 -tornado==5.1.1 -typing-extensions==3.7.4.2 -tzlocal==1.5.1 +tornado==6.1 +typing-extensions==3.7.4.3 unicodecsv==0.14.1 -urllib3==1.25.10 +urllib3==1.25.11 vine==1.3.0 -watchtower==1.0.1 -Werkzeug==0.16.1 +watchtower==0.7.3 +Werkzeug==1.0.1 WTForms==2.3.3 -zipp==3.1.0 -zope.deprecation==4.4.0 \ No newline at end of file +zipp==3.4.1 \ No newline at end of file diff --git a/docker/docker-compose-local.yml b/docker/docker-compose-local.yml index be440fa76..78fd19796 100644 --- a/docker/docker-compose-local.yml +++ b/docker/docker-compose-local.yml @@ -14,7 +14,7 @@ services: - "${PWD}/db-data:/var/lib/postgresql/data" local-runner: - image: amazon/mwaa-local:1.10 + image: amazon/mwaa-local:2.0 restart: always depends_on: - postgres diff --git a/docker/docker-compose-resetdb.yml b/docker/docker-compose-resetdb.yml index a8ac24ee7..bcc9fe3f2 100644 --- a/docker/docker-compose-resetdb.yml +++ b/docker/docker-compose-resetdb.yml @@ -14,7 +14,7 @@ services: - "${PWD}/db-data:/var/lib/postgresql/data" resetdb: - image: amazon/mwaa-local:1.10 + image: amazon/mwaa-local:2.0 depends_on: - postgres environment: diff --git 
a/docker/docker-compose-sequential.yml b/docker/docker-compose-sequential.yml index 5b7b22932..d25ce63cd 100644 --- a/docker/docker-compose-sequential.yml +++ b/docker/docker-compose-sequential.yml @@ -1,7 +1,7 @@ version: '3.7' services: webserver: - image: amazon/mwaa-local:1.10 + image: amazon/mwaa-local:2.0 restart: always environment: - LOAD_EX=n diff --git a/docker/script/bootstrap.sh b/docker/script/bootstrap.sh index 26909e6f6..f7ebd56af 100644 --- a/docker/script/bootstrap.sh +++ b/docker/script/bootstrap.sh @@ -2,6 +2,12 @@ set -e +# Upgrade pip version to latest +python3 -m pip install --upgrade pip + +# Install wheel to avoid legacy setup.py install +pip3 install wheel + # On RHL and Centos based linux, openssl needs to be set as Python Curl SSL library export PYCURL_SSL_LIBRARY=openssl pip3 install $PIP_OPTION --compile pycurl @@ -22,6 +28,9 @@ adduser -s /bin/bash -d "${AIRFLOW_USER_HOME}" airflow # install watchtower for Cloudwatch logging pip3 install $PIP_OPTION watchtower==1.0.1 +# Install default providers +pip3 install apache-airflow-providers-amazon + # Use symbolic link to ensure Airflow 2.0's backport packages are in the same namespace as Airflow itself # see https://airflow.apache.org/docs/apache-airflow/stable/backport-providers.html#troubleshooting-installing-backport-packages ln -s /usr/local/airflow/.local/lib/python3.7/site-packages/airflow/providers /usr/local/lib/python3.7/site-packages/airflow/providers @@ -42,6 +51,9 @@ then pip3 freeze > /requirements.txt else # flask-swagger depends on PyYAML that are known to be vulnerable - # even though Airflow names flask-swagger as a dependency, it doesn't seem to use it. - pip3 uninstall -y flask-swagger + # even though Airflow 1.10 names flask-swagger as a dependency, it doesn't seem to use it. + if [ "$AIRFLOW_VERSION" = "1.10.12" ] + then + pip3 uninstall -y flask-swagger + fi fi diff --git a/docker/script/entrypoint.sh b/docker/script/entrypoint.sh index 0037aef73..434447164 100644 --- a/docker/script/entrypoint.sh +++ b/docker/script/entrypoint.sh @@ -69,19 +69,19 @@ fi case "$1" in local-runner) install_requirements - airflow initdb + airflow db init if [ "$AIRFLOW__CORE__EXECUTOR" = "LocalExecutor" ] || [ "$AIRFLOW__CORE__EXECUTOR" = "SequentialExecutor" ]; then # With the "Local" and "Sequential" executors it should all run in one container. 
airflow scheduler & sleep 2 fi - airflow create_user -r Admin -u admin -e admin@example.com -f admin -l user -p test + airflow users create -r Admin -u admin -e admin@example.com -f admin -l user -p test exec airflow webserver ;; resetdb) - airflow resetdb -y + airflow db reset -y sleep 2 - airflow initdb + airflow db init ;; test-requirements) install_requirements diff --git a/docker/script/systemlibs.sh b/docker/script/systemlibs.sh index e86f267b8..48ef0caa7 100644 --- a/docker/script/systemlibs.sh +++ b/docker/script/systemlibs.sh @@ -6,7 +6,7 @@ yum update -y # install basic python environment yum install -y python37 gcc gcc-g++ python3-devel -# JDBC and Java dependencies +# JDBC and PyODBC dependencies yum install -y java-1.8.0-openjdk unixODBC-devel # Database clients diff --git a/mwaa-local-env b/mwaa-local-env index 18271c102..74ffed938 100755 --- a/mwaa-local-env +++ b/mwaa-local-env @@ -1,6 +1,6 @@ #!/bin/bash -AIRFLOW_VERSION=1.10 +AIRFLOW_VERSION=2.0 display_help() { # Display Help @@ -50,7 +50,7 @@ validate_prereqs() { } build_image() { - docker build --rm --compress -t amazon/mwaa-local:1.10 ./docker + docker build --rm --compress -t amazon/mwaa-local:2.0 ./docker } case "$1" in
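Note on the entrypoint.sh changes above: the renamed commands follow the Airflow 2.0 CLI restructuring, in which the flat 1.10 commands were grouped under subcommands. A minimal mapping, using only the commands that appear in this patch (the users-create arguments are the local-runner defaults, not production credentials):

    airflow db init            # replaces: airflow initdb
    airflow db reset -y        # replaces: airflow resetdb -y
    airflow users create -r Admin -u admin -e admin@example.com -f admin -l user -p test
                               # replaces: airflow create_user -r Admin -u admin ...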
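Note on the image tag bump: mwaa-local-env and the three docker-compose files now reference amazon/mwaa-local:2.0, so the image must be rebuilt before the compose stacks can start. A minimal sketch, assuming Docker is available and the repository root is the working directory; the build line mirrors build_image() above, and the docker images call is only a convenience check:

    docker build --rm --compress -t amazon/mwaa-local:2.0 ./docker
    docker images amazon/mwaa-local    # confirm the 2.0 tag is present locally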
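Note on providers: bootstrap.sh installs apache-airflow-providers-amazon, and requirements.txt pins the celery, ftp, http, imap and sqlite provider packages that back the core Airflow 2.0.2 hooks and operators. One way to confirm they are registered inside a running local-runner container is the Airflow 2.0 providers command; the container name below is a placeholder that depends on how docker-compose names the service:

    docker exec <local-runner-container> airflow providers list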