Mirror of https://github.com/jupyter/docker-stacks.git, synced 2025-10-10 11:32:57 +00:00

* Tests added for all kernels
* Same examples as provided in the documentation (`specifics.md`)
* Used the same use case for all examples: the sum of the first 100 whole numbers

Note: I have not automatically tested `local_sparklyr.ipynb`, since by default it creates the `metastore_db` directory and the `derby.log` file in the working directory. Because I mount that directory read-only (`RO`), the test fails. I'm struggling to set it elsewhere... (one possible direction is sketched below).
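For reference, here is a minimal sketch of that direction, written against the Scala/spylon stack rather than sparklyr (whose R-side configuration is not shown here). `derby.system.home` and `spark.sql.warehouse.dir` are standard Derby/Spark settings, but the `/tmp` paths, the object name, and whether sparklyr picks these up the same way are assumptions, not something tested in this PR.

```scala
// Sketch only: standard Derby/Spark knobs for moving metastore_db and
// derby.log out of a read-only working directory. Paths are placeholders.
import org.apache.spark.sql.SparkSession

object DerbyElsewhere {
  def main(args: Array[String]): Unit = {
    // Derby resolves derby.log and relative database names (e.g. metastore_db)
    // against derby.system.home, so point it at a writable scratch directory.
    // The directory must already exist.
    new java.io.File("/tmp/derby").mkdirs()
    System.setProperty("derby.system.home", "/tmp/derby")

    val spark = SparkSession.builder()
      .master("local")
      .appName("first-100-whole-numbers")
      // Keep the SQL warehouse out of the read-only mount as well.
      .config("spark.sql.warehouse.dir", "/tmp/spark-warehouse")
      // Needs spark-hive on the classpath; Hive support is what creates metastore_db.
      .enableHiveSupport()
      .getOrCreate()

    // Same use case as the notebooks: sum of the first 100 whole numbers.
    println(spark.sparkContext.parallelize(0 to 100).sum()) // 5050.0
    spark.stop()
  }
}
```

Inside the notebooks themselves, the equivalent Spark options would presumably go through each kernel's session initialisation (for spylon, the `%%init_spark` launcher config shown below); that is left untouched here.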
63 lines
1.3 KiB
Plaintext
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%init_spark\n",
    "# Spark session & context\n",
    "launcher.master = \"local\"\n",
    "launcher.conf.spark.executor.cores = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "rdd: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[8] at parallelize at <console>:28\n",
       "res4: Double = 5050.0\n"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "// Sum of the first 100 whole numbers\n",
    "val rdd = sc.parallelize(0 to 100)\n",
    "rdd.sum()\n",
    "// 5050"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "spylon-kernel",
   "language": "scala",
   "name": "spylon-kernel"
  },
  "language_info": {
   "codemirror_mode": "text/x-scala",
   "file_extension": ".scala",
   "help_links": [
    {
     "text": "MetaKernel Magics",
     "url": "https://metakernel.readthedocs.io/en/latest/source/README.html"
    }
   ],
   "mimetype": "text/x-scala",
   "name": "scala",
   "pygments_lexer": "scala",
   "version": "0.4.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}