Fix Spark installation for Java 11 and Arrow
all-spark-notebook/test/data/issue_1168.ipynb (new file, 72 lines)
@@ -0,0 +1,72 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This is a test for the issue [#1168](https://github.com/jupyter/docker-stacks/issues/1168)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from pyspark.sql import SparkSession\n",
+    "from pyspark.sql.functions import pandas_udf\n",
+    "\n",
+    "# Spark session & context\n",
+    "spark = SparkSession.builder.master('local').getOrCreate()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "+---+---+\n",
+      "| id|age|\n",
+      "+---+---+\n",
+      "|  1| 21|\n",
+      "+---+---+\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "df = spark.createDataFrame([(1, 21), (2, 30)], (\"id\", \"age\"))\n",
+    "def filter_func(iterator):\n",
+    "    for pdf in iterator:\n",
+    "        yield pdf[pdf.id == 1]\n",
+    "\n",
+    "df.mapInPandas(filter_func, df.schema).show()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
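For context: the notebook reproduces issue #1168. `df.mapInPandas` ships rows between the JVM and the Python workers through Apache Arrow, and Arrow's Netty-based allocator needs reflective access to `java.nio.DirectByteBuffer`, which Java 9+ denies by default; the `-Dio.netty.tryReflectionSetAccessible=true` flag added by this commit re-enables it. A standalone sketch of the same check (a script form of the cells above, not part of the commit):

```python
# Sketch of the notebook's check. Without the Netty reflection flag on
# Java 11, the Arrow-backed mapInPandas exchange fails with an error along
# the lines of "sun.misc.Unsafe or java.nio.DirectByteBuffer.<init>(long, int)
# not available".
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local").getOrCreate()

df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))

def filter_func(iterator):
    # Each element is a pandas DataFrame chunk; keep only rows with id == 1.
    for pdf in iterator:
        yield pdf[pdf.id == 1]

df.mapInPandas(filter_func, df.schema).show()
spark.stop()
```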
all-spark-notebook/test/test_spark_notebooks.py

@@ -12,7 +12,7 @@ LOGGER = logging.getLogger(__name__)
 @pytest.mark.parametrize(
     "test_file",
     # TODO: add local_sparklyr
-    ["local_pyspark", "local_spylon", "local_sparkR"],
+    ["local_pyspark", "local_spylon", "local_sparkR", "issue_1168"],
 )
 def test_nbconvert(container, test_file):
     """Check if Spark notebooks can be executed"""
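The parametrization now executes `issue_1168.ipynb` alongside the other Spark smoke tests. `test_nbconvert` runs `jupyter nbconvert` inside the container; as a rough local approximation (the paths and flags here are illustrative, not the test's actual invocation):

```python
# Minimal local approximation of what the test does: execute the notebook
# headlessly and treat any raised cell as a failure.
import subprocess

result = subprocess.run(
    [
        "jupyter", "nbconvert", "--to", "notebook", "--execute",
        "--stdout", "all-spark-notebook/test/data/issue_1168.ipynb",
    ],
    capture_output=True,
    text=True,
)
# A non-zero return code means a cell raised, i.e. the notebook is broken.
assert result.returncode == 0, result.stderr
```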
pyspark-notebook/Dockerfile

@@ -49,6 +49,12 @@ RUN ln -s "spark-${APACHE_SPARK_VERSION}-bin-hadoop${HADOOP_VERSION}" spark && \
     mkdir -p /usr/local/bin/before-notebook.d && \
     ln -s "${SPARK_HOME}/sbin/spark-config.sh" /usr/local/bin/before-notebook.d/spark-config.sh
 
+# Fix Spark installation for Java 11 and Apache Arrow library
+# see: https://github.com/apache/spark/pull/27356, https://spark.apache.org/docs/latest/#downloading
+RUN cp -p "$SPARK_HOME/conf/spark-defaults.conf.template" "$SPARK_HOME/conf/spark-defaults.conf" && \
+    echo 'spark.driver.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true"' >> $SPARK_HOME/conf/spark-defaults.conf && \
+    echo 'spark.executor.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true"' >> $SPARK_HOME/conf/spark-defaults.conf
+
 USER $NB_UID
 
 # Install pyarrow
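Appending both `extraJavaOptions` entries to `spark-defaults.conf` makes every driver and executor JVM start with the Netty reflection flag, which is what lets Arrow allocate direct buffers on Java 11. A minimal sketch (not part of the commit) to verify that a new session picks the defaults up:

```python
# Sanity check: confirm the spark-defaults.conf entries are applied to a
# freshly started session inside the image.
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local").getOrCreate()
conf = spark.sparkContext.getConf()

for key in ("spark.driver.extraJavaOptions", "spark.executor.extraJavaOptions"):
    # get(key, default) returns the default instead of raising when unset.
    value = conf.get(key, "")
    print(key, "=", value)
    assert "-Dio.netty.tryReflectionSetAccessible=true" in value

spark.stop()
```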