# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
ARG REGISTRY=quay.io
ARG OWNER=jupyter
ARG BASE_IMAGE=$REGISTRY/$OWNER/scipy-notebook
FROM $BASE_IMAGE

LABEL maintainer="Jupyter Project <jupyter@googlegroups.com>"

# Fix: https://github.com/hadolint/hadolint/wiki/DL4006
# Fix: https://github.com/koalaman/shellcheck/wiki/SC3014
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

USER root

# Spark dependencies
# Default values can be overridden at build time
# (ARGS are in lowercase to distinguish them from ENV)
ARG openjdk_version="17"

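# Example (value illustrative): override the JRE major version at build time with
#   docker build --build-arg openjdk_version=21 .
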
RUN apt-get update --yes && \
    apt-get install --yes --no-install-recommends \
    "openjdk-${openjdk_version}-jre-headless" \
    ca-certificates-java && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

# If spark_version is not set, the latest Spark will be installed
ARG spark_version
ARG hadoop_version="3"
# If scala_version is not set, Spark built against the default Scala version will be installed
ARG scala_version
# URL to use for Spark downloads
# To download older Spark versions, you need to use https://archive.apache.org/dist/spark/
# It tends to be slower, though, which is why we default to the recommended download site
ARG spark_download_url="https://dlcdn.apache.org/spark/"

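# Example (versions illustrative): pin a specific Spark build at build time with
#   docker build \
#     --build-arg spark_version=3.5.3 \
#     --build-arg scala_version=2.13 \
#     --build-arg spark_download_url="https://archive.apache.org/dist/spark/" \
#     .
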
ENV SPARK_HOME=/usr/local/spark
ENV PATH="${PATH}:${SPARK_HOME}/bin"
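# Default driver JVM options (1 GiB initial heap, 4 GiB max heap, info-level
# log4j logging) for kernels and tools that consult SPARK_OPTS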
ENV SPARK_OPTS="--driver-java-options=-Xms1024M --driver-java-options=-Xmx4096M --driver-java-options=-Dlog4j.logLevel=info"

COPY setup_spark.py /opt/setup-scripts/

# Setup Spark
RUN /opt/setup-scripts/setup_spark.py \
    --spark-version="${spark_version}" \
    --hadoop-version="${hadoop_version}" \
    --scala-version="${scala_version}" \
    --spark-download-url="${spark_download_url}"

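# While iterating on this image, a quick sanity check can be added here
# (illustrative, not part of the official build):
# RUN spark-submit --version
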
# Configure IPython system-wide
COPY ipython_kernel_config.py "/etc/ipython/"
RUN fix-permissions "/etc/ipython/"

# macOS Rosetta virtualization creates a junk directory which ends up owned by root further up the build.
# It'll get re-created after the USER directive below, owned by the notebook user, so it should not cause permission issues.
#
# More info: https://github.com/jupyter/docker-stacks/issues/2296
# hadolint ignore=DL3059
RUN rm -rf "/home/${NB_USER}/.cache/"

USER ${NB_UID}

# Install pyarrow
# NOTE: It's important to ensure compatibility between Pandas versions.
# The pandas version in this Dockerfile should match the version
# on which the Pandas API for Spark is built.
# To find the right version, check the pandas version being installed here:
# https://github.com/apache/spark/blob/<SPARK_VERSION>/dev/infra/Dockerfile
RUN mamba install --yes \
    'grpcio-status' \
    'grpcio' \
    'pandas=2.2.3' \
    'pyarrow' && \
    mamba clean --all -f -y && \
    fix-permissions "${CONDA_DIR}" && \
    fix-permissions "/home/${NB_USER}"

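# As an illustration of the check described above (URL shape only; substitute
# the tag matching this image's Spark version):
#   https://github.com/apache/spark/blob/v4.0.0/dev/infra/Dockerfile
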
WORKDIR "${HOME}"
EXPOSE 4040
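# Example run (image tag illustrative; use whatever tag you built):
#   docker run -it --rm -p 8888:8888 -p 4040:4040 my-pyspark-notebook
# Port 4040 serves the Spark application UI once a SparkContext is active.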