# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
ARG BASE_CONTAINER=jupyter/scipy-notebook
FROM $BASE_CONTAINER

LABEL maintainer="Jupyter Project <jupyter@googlegroups.com>"

# Fix DL4006
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

USER root

# Spark dependencies
ENV APACHE_SPARK_VERSION=3.0.0 \
    HADOOP_VERSION=3.2

RUN apt-get -y update && \
    apt-get install --no-install-recommends -y openjdk-11-jre-headless ca-certificates-java && \
    rm -rf /var/lib/apt/lists/*

# Using the preferred mirror to download Spark
WORKDIR /tmp
# hadolint ignore=SC2046
RUN wget -q $(wget -qO- https://www.apache.org/dyn/closer.lua/spark/spark-${APACHE_SPARK_VERSION}/spark-${APACHE_SPARK_VERSION}-bin-hadoop${HADOOP_VERSION}.tgz\?as_json | \
    python -c "import sys, json; content=json.load(sys.stdin); print(content['preferred']+content['path_info'])") && \
    echo "BFE45406C67CC4AE00411AD18CC438F51E7D4B6F14EB61E7BF6B5450897C2E8D3AB020152657C0239F253735C263512FFABF538AC5B9FFFA38B8295736A9C387 *spark-${APACHE_SPARK_VERSION}-bin-hadoop${HADOOP_VERSION}.tgz" | sha512sum -c - && \
    tar xzf "spark-${APACHE_SPARK_VERSION}-bin-hadoop${HADOOP_VERSION}.tgz" -C /usr/local --owner root --group root --no-same-owner && \
    rm "spark-${APACHE_SPARK_VERSION}-bin-hadoop${HADOOP_VERSION}.tgz"

WORKDIR /usr/local
RUN ln -s "spark-${APACHE_SPARK_VERSION}-bin-hadoop${HADOOP_VERSION}" spark

# Configure Spark
ENV SPARK_HOME=/usr/local/spark
ENV PYTHONPATH=$SPARK_HOME/python:$SPARK_HOME/python/lib/py4j-0.10.9-src.zip \
    SPARK_OPTS="--driver-java-options=-Xms1024M --driver-java-options=-Xmx4096M --driver-java-options=-Dlog4j.logLevel=info" \
    PATH=$PATH:$SPARK_HOME/bin

USER $NB_UID

# Install pyarrow
RUN conda install --quiet -y 'pyarrow' && \
    conda clean --all -f -y && \
    fix-permissions "${CONDA_DIR}" && \
    fix-permissions "/home/${NB_USER}"

WORKDIR $HOME
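
# Usage (a minimal sketch; the image tag "pyspark-notebook" below is an
# assumption for illustration, not defined by this Dockerfile):
#   docker build -t pyspark-notebook .
#   docker run -it --rm -p 8888:8888 pyspark-notebook
#
# Once Jupyter is up, a quick PySpark smoke test in a notebook cell:
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.appName("smoke-test").getOrCreate()
#   spark.range(10).count()  # expected result: 10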