Mirror of https://github.com/jupyterhub/jupyterhub.git, synced 2025-10-07 18:14:10 +00:00
Compare commits: 338 commits (30b8bc3664 … bf73e6f7b7)
@@ -1,33 +0,0 @@
# Python CircleCI 2.0 configuration file
# Updating CircleCI configuration from v1 to v2
# Check https://circleci.com/docs/2.0/language-python/ for more details
#
version: 2
jobs:
  build:
    machine: true
    steps:
      - checkout
      - run:
          name: build images
          command: |
            docker build -t jupyterhub/jupyterhub .
            docker build -t jupyterhub/jupyterhub-onbuild onbuild
            docker build -t jupyterhub/jupyterhub:alpine -f dockerfiles/Dockerfile.alpine .
            docker build -t jupyterhub/singleuser singleuser
      - run:
          name: smoke test jupyterhub
          command: |
            docker run --rm -it jupyterhub/jupyterhub jupyterhub --help
      - run:
          name: verify static files
          command: |
            docker run --rm -it -v $PWD/dockerfiles:/io jupyterhub/jupyterhub python3 /io/test.py

# Tell CircleCI to use this workflow when it builds the site
workflows:
  version: 2
  default:
    jobs:
      - build
.github/workflows/release.yml (new file, 185 lines)
@@ -0,0 +1,185 @@
|
||||
# Build releases and (on tags) publish to PyPI
|
||||
name: Release
|
||||
|
||||
# always build releases (to make sure wheel-building works)
|
||||
# but only publish to PyPI on tags
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
build-release:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
- uses: actions/setup-node@v1
|
||||
with:
|
||||
node-version: "14"
|
||||
|
||||
- name: install build package
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install build
|
||||
pip freeze
|
||||
|
||||
- name: build release
|
||||
run: |
|
||||
python -m build --sdist --wheel .
|
||||
ls -l dist
|
||||
|
||||
- name: verify wheel
|
||||
run: |
|
||||
cd dist
|
||||
pip install ./*.whl
|
||||
# verify data-files are installed where they are found
|
||||
cat <<EOF | python
|
||||
import os
|
||||
from jupyterhub._data import DATA_FILES_PATH
|
||||
print(f"DATA_FILES_PATH={DATA_FILES_PATH}")
|
||||
assert os.path.exists(DATA_FILES_PATH), DATA_FILES_PATH
|
||||
for subpath in (
|
||||
"templates/page.html",
|
||||
"static/css/style.min.css",
|
||||
"static/components/jquery/dist/jquery.js",
|
||||
):
|
||||
path = os.path.join(DATA_FILES_PATH, subpath)
|
||||
assert os.path.exists(path), path
|
||||
print("OK")
|
||||
EOF
|
||||
|
||||
# ref: https://github.com/actions/upload-artifact#readme
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: jupyterhub-${{ github.sha }}
|
||||
path: "dist/*"
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Publish to PyPI
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
env:
|
||||
TWINE_USERNAME: __token__
|
||||
TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
|
||||
run: |
|
||||
pip install twine
|
||||
twine upload --skip-existing dist/*
|
||||
|
||||
publish-docker:
|
||||
runs-on: ubuntu-20.04
|
||||
|
||||
services:
|
||||
# So that we can test this in PRs/branches
|
||||
local-registry:
|
||||
image: registry:2
|
||||
ports:
|
||||
- 5000:5000
|
||||
|
||||
steps:
|
||||
- name: Should we push this image to a public registry?
|
||||
run: |
|
||||
if [ "${{ startsWith(github.ref, 'refs/tags/') || (github.ref == 'refs/heads/main') }}" = "true" ]; then
|
||||
# Empty => Docker Hub
|
||||
echo "REGISTRY=" >> $GITHUB_ENV
|
||||
else
|
||||
echo "REGISTRY=localhost:5000/" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
# Setup docker to build for multiple platforms, see:
|
||||
# https://github.com/docker/build-push-action/tree/v2.4.0#usage
|
||||
# https://github.com/docker/build-push-action/blob/v2.4.0/docs/advanced/multi-platform.md
|
||||
|
||||
- name: Set up QEMU (for docker buildx)
|
||||
uses: docker/setup-qemu-action@25f0500ff22e406f7191a2a8ba8cda16901ca018 # associated tag: v1.0.2
|
||||
|
||||
- name: Set up Docker Buildx (for multi-arch builds)
|
||||
uses: docker/setup-buildx-action@2a4b53665e15ce7d7049afb11ff1f70ff1610609 # associated tag: v1.1.2
|
||||
with:
|
||||
# Allows pushing to registry on localhost:5000
|
||||
driver-opts: network=host
|
||||
|
||||
- name: Setup push rights to Docker Hub
|
||||
# This was setup by...
|
||||
# 1. Creating a Docker Hub service account "jupyterhubbot"
|
||||
# 2. Creating a access token for the service account specific to this
|
||||
# repository: https://hub.docker.com/settings/security
|
||||
# 3. Making the account part of the "bots" team, and granting that team
|
||||
# permissions to push to the relevant images:
|
||||
# https://hub.docker.com/orgs/jupyterhub/teams/bots/permissions
|
||||
# 4. Registering the username and token as a secret for this repo:
|
||||
# https://github.com/jupyterhub/jupyterhub/settings/secrets/actions
|
||||
if: env.REGISTRY != 'localhost:5000/'
|
||||
run: |
|
||||
docker login -u "${{ secrets.DOCKERHUB_USERNAME }}" -p "${{ secrets.DOCKERHUB_TOKEN }}"
|
||||
|
||||
# https://github.com/jupyterhub/action-major-minor-tag-calculator
|
||||
# If this is a tagged build this will return additional parent tags.
|
||||
# E.g. 1.2.3 is expanded to Docker tags
|
||||
# [{prefix}:1.2.3, {prefix}:1.2, {prefix}:1, {prefix}:latest] unless
|
||||
# this is a backported tag in which case the newer tags aren't updated.
|
||||
# For branches this will return the branch name.
|
||||
# If GITHUB_TOKEN isn't available (e.g. in PRs) returns no tags [].
|
||||
- name: Get list of jupyterhub tags
|
||||
id: jupyterhubtags
|
||||
uses: jupyterhub/action-major-minor-tag-calculator@v1
|
||||
with:
|
||||
githubToken: ${{ secrets.GITHUB_TOKEN }}
|
||||
prefix: "${{ env.REGISTRY }}jupyterhub/jupyterhub:"
|
||||
defaultTag: "${{ env.REGISTRY }}jupyterhub/jupyterhub:noref"
|
||||
|
||||
- name: Build and push jupyterhub
|
||||
uses: docker/build-push-action@e1b7f96249f2e4c8e4ac1519b9608c0d48944a1f # associated tag: v2.4.0
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
# tags parameter must be a string input so convert `gettags` JSON
|
||||
# array into a comma separated list of tags
|
||||
tags: ${{ join(fromJson(steps.jupyterhubtags.outputs.tags)) }}
|
||||
|
||||
# jupyterhub-onbuild
|
||||
|
||||
- name: Get list of jupyterhub-onbuild tags
|
||||
id: onbuildtags
|
||||
uses: jupyterhub/action-major-minor-tag-calculator@v1
|
||||
with:
|
||||
githubToken: ${{ secrets.GITHUB_TOKEN }}
|
||||
prefix: "${{ env.REGISTRY }}jupyterhub/jupyterhub-onbuild:"
|
||||
defaultTag: "${{ env.REGISTRY }}jupyterhub/jupyterhub-onbuild:noref"
|
||||
|
||||
- name: Build and push jupyterhub-onbuild
|
||||
uses: docker/build-push-action@e1b7f96249f2e4c8e4ac1519b9608c0d48944a1f # associated tag: v2.4.0
|
||||
with:
|
||||
build-args: |
|
||||
BASE_IMAGE=${{ fromJson(steps.jupyterhubtags.outputs.tags)[0] }}
|
||||
context: onbuild
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ join(fromJson(steps.onbuildtags.outputs.tags)) }}
|
||||
|
||||
# jupyterhub-demo
|
||||
|
||||
- name: Get list of jupyterhub-demo tags
|
||||
id: demotags
|
||||
uses: jupyterhub/action-major-minor-tag-calculator@v1
|
||||
with:
|
||||
githubToken: ${{ secrets.GITHUB_TOKEN }}
|
||||
prefix: "${{ env.REGISTRY }}jupyterhub/jupyterhub-demo:"
|
||||
defaultTag: "${{ env.REGISTRY }}jupyterhub/jupyterhub-demo:noref"
|
||||
|
||||
- name: Build and push jupyterhub-demo
|
||||
uses: docker/build-push-action@e1b7f96249f2e4c8e4ac1519b9608c0d48944a1f # associated tag: v2.4.0
|
||||
with:
|
||||
build-args: |
|
||||
BASE_IMAGE=${{ fromJson(steps.onbuildtags.outputs.tags)[0] }}
|
||||
context: demo-image
|
||||
# linux/arm64 currently fails:
|
||||
# ERROR: Could not build wheels for argon2-cffi which use PEP 517 and cannot be installed directly
|
||||
# ERROR: executor failed running [/bin/sh -c python3 -m pip install notebook]: exit code: 1
|
||||
platforms: linux/amd64
|
||||
push: true
|
||||
tags: ${{ join(fromJson(steps.demotags.outputs.tags)) }}
|
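The tag-calculator behaviour described above (a release tag expanding into its major/minor parent tags plus `latest`) can be sketched in plain bash. This is only an illustration of the expansion, not the action's actual code:

```bash
# Illustrative only: expand "1.2.3" into the parent tags the workflow pushes.
version="1.2.3"
prefix="jupyterhub/jupyterhub:"
tags=("$prefix$version")
v="$version"
while [[ "$v" == *.* ]]; do
  v="${v%.*}"              # strip the last ".N" component
  tags+=("$prefix$v")
done
tags+=("${prefix}latest")
printf '%s\n' "${tags[@]}"
# jupyterhub/jupyterhub:1.2.3
# jupyterhub/jupyterhub:1.2
# jupyterhub/jupyterhub:1
# jupyterhub/jupyterhub:latest
```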
.github/workflows/test.yml (new file, 246 lines)
@@ -0,0 +1,246 @@
|
||||
# This is a GitHub workflow defining a set of jobs with a set of steps.
|
||||
# ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions
|
||||
#
|
||||
name: Test
|
||||
|
||||
# Trigger the workflow's on all PRs but only on pushed tags or commits to
|
||||
# main/master branch to avoid PRs developed in a GitHub fork's dedicated branch
|
||||
# to trigger.
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
|
||||
defaults:
|
||||
run:
|
||||
# Declare bash be used by default in this workflow's "run" steps.
|
||||
#
|
||||
# NOTE: bash will by default run with:
|
||||
# --noprofile: Ignore ~/.profile etc.
|
||||
# --norc: Ignore ~/.bashrc etc.
|
||||
# -e: Exit directly on errors
|
||||
# -o pipefail: Don't mask errors from a command piped into another command
|
||||
shell: bash
|
||||
|
||||
env:
|
||||
# UTF-8 content may be interpreted as ascii and causes errors without this.
|
||||
LANG: C.UTF-8
|
||||
|
||||
jobs:
|
||||
# Run "pre-commit run --all-files"
|
||||
pre-commit:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 2
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
# ref: https://github.com/pre-commit/action
|
||||
- uses: pre-commit/action@v2.0.0
|
||||
- name: Help message if pre-commit fail
|
||||
if: ${{ failure() }}
|
||||
run: |
|
||||
echo "You can install pre-commit hooks to automatically run formatting"
|
||||
echo "on each commit with:"
|
||||
echo " pre-commit install"
|
||||
echo "or you can run by hand on staged files with"
|
||||
echo " pre-commit run"
|
||||
echo "or after-the-fact on already committed files with"
|
||||
echo " pre-commit run --all-files"
|
||||
|
||||
# Run "pytest jupyterhub/tests" in various configurations
|
||||
pytest:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
|
||||
strategy:
|
||||
# Keep running even if one variation of the job fail
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# We run this job multiple times with different parameterization
|
||||
# specified below, these parameters have no meaning on their own and
|
||||
# gain meaning on how job steps use them.
|
||||
#
|
||||
# subdomain:
|
||||
# Tests everything when JupyterHub is configured to add routes for
|
||||
# users with dedicated subdomains like user1.jupyter.example.com
|
||||
# rather than jupyter.example.com/user/user1.
|
||||
#
|
||||
# db: [mysql/postgres]
|
||||
# Tests everything when JupyterHub works against a dedicated mysql or
|
||||
# postgresql server.
|
||||
#
|
||||
# jupyter_server:
|
||||
# Tests everything when the user instances are started with
|
||||
# jupyter_server instead of notebook.
|
||||
#
|
||||
# ssl:
|
||||
# Tests everything using internal SSL connections instead of
|
||||
# unencrypted HTTP
|
||||
#
|
||||
# main_dependencies:
|
||||
# Tests everything when the we use the latest available dependencies
|
||||
# from: ipytraitlets.
|
||||
#
|
||||
# NOTE: Since only the value of these parameters are presented in the
|
||||
# GitHub UI when the workflow run, we avoid using true/false as
|
||||
# values by instead duplicating the name to signal true.
|
||||
include:
|
||||
- python: "3.6"
|
||||
oldest_dependencies: oldest_dependencies
|
||||
- python: "3.6"
|
||||
subdomain: subdomain
|
||||
- python: "3.7"
|
||||
db: mysql
|
||||
- python: "3.7"
|
||||
ssl: ssl
|
||||
- python: "3.8"
|
||||
db: postgres
|
||||
- python: "3.8"
|
||||
jupyter_server: jupyter_server
|
||||
- python: "3.9"
|
||||
main_dependencies: main_dependencies
|
||||
|
||||
steps:
|
||||
# NOTE: In GitHub workflows, environment variables are set by writing
|
||||
# assignment statements to a file. They will be set in the following
|
||||
# steps as if would used `export MY_ENV=my-value`.
|
||||
- name: Configure environment variables
|
||||
run: |
|
||||
if [ "${{ matrix.subdomain }}" != "" ]; then
|
||||
echo "JUPYTERHUB_TEST_SUBDOMAIN_HOST=http://localhost.jovyan.org:8000" >> $GITHUB_ENV
|
||||
fi
|
||||
if [ "${{ matrix.db }}" == "mysql" ]; then
|
||||
echo "MYSQL_HOST=127.0.0.1" >> $GITHUB_ENV
|
||||
echo "JUPYTERHUB_TEST_DB_URL=mysql+mysqlconnector://root@127.0.0.1:3306/jupyterhub" >> $GITHUB_ENV
|
||||
fi
|
||||
if [ "${{ matrix.ssl }}" == "ssl" ]; then
|
||||
echo "SSL_ENABLED=1" >> $GITHUB_ENV
|
||||
fi
|
||||
if [ "${{ matrix.db }}" == "postgres" ]; then
|
||||
echo "PGHOST=127.0.0.1" >> $GITHUB_ENV
|
||||
echo "PGUSER=test_user" >> $GITHUB_ENV
|
||||
echo "PGPASSWORD=hub[test/:?" >> $GITHUB_ENV
|
||||
echo "JUPYTERHUB_TEST_DB_URL=postgresql://test_user:hub%5Btest%2F%3A%3F@127.0.0.1:5432/jupyterhub" >> $GITHUB_ENV
|
||||
fi
|
||||
if [ "${{ matrix.jupyter_server }}" != "" ]; then
|
||||
echo "JUPYTERHUB_SINGLEUSER_APP=jupyterhub.tests.mockserverapp.MockServerApp" >> $GITHUB_ENV
|
||||
fi
|
||||
- uses: actions/checkout@v2
|
||||
# NOTE: actions/setup-node@v1 make use of a cache within the GitHub base
|
||||
# environment and setup in a fraction of a second.
|
||||
- name: Install Node v14
|
||||
uses: actions/setup-node@v1
|
||||
with:
|
||||
node-version: "14"
|
||||
- name: Install Node dependencies
|
||||
run: |
|
||||
npm install
|
||||
npm install -g configurable-http-proxy
|
||||
npm list
|
||||
|
||||
# NOTE: actions/setup-python@v2 make use of a cache within the GitHub base
|
||||
# environment and setup in a fraction of a second.
|
||||
- name: Install Python ${{ matrix.python }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python }}
|
||||
- name: Install Python dependencies
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install --upgrade . -r dev-requirements.txt
|
||||
|
||||
if [ "${{ matrix.oldest_dependencies }}" != "" ]; then
|
||||
# take any dependencies in requirements.txt such as tornado>=5.0
|
||||
# and transform them to tornado==5.0 so we can run tests with
|
||||
# the earliest-supported versions
|
||||
cat requirements.txt | grep '>=' | sed -e 's@>=@==@g' > oldest-requirements.txt
|
||||
pip install -r oldest-requirements.txt
|
||||
fi
|
||||
|
||||
if [ "${{ matrix.main_dependencies }}" != "" ]; then
|
||||
pip install git+https://github.com/ipython/traitlets#egg=traitlets --force
|
||||
fi
|
||||
if [ "${{ matrix.jupyter_server }}" != "" ]; then
|
||||
pip uninstall notebook --yes
|
||||
pip install jupyter_server
|
||||
fi
|
||||
if [ "${{ matrix.db }}" == "mysql" ]; then
|
||||
pip install mysql-connector-python
|
||||
fi
|
||||
if [ "${{ matrix.db }}" == "postgres" ]; then
|
||||
pip install psycopg2-binary
|
||||
fi
|
||||
|
||||
pip freeze
|
||||
|
||||
# NOTE: If you need to debug this DB setup step, consider the following.
|
||||
#
|
||||
# 1. mysql/postgressql are database servers we start as docker containers,
|
||||
# and we use clients named mysql/psql.
|
||||
#
|
||||
# 2. When we start a database server we need to pass environment variables
|
||||
# explicitly as part of the `docker run` command. These environment
|
||||
# variables are named differently from the similarly named environment
|
||||
# variables used by the clients.
|
||||
#
|
||||
# - mysql server ref: https://hub.docker.com/_/mysql/
|
||||
# - mysql client ref: https://dev.mysql.com/doc/refman/5.7/en/environment-variables.html
|
||||
# - postgres server ref: https://hub.docker.com/_/postgres/
|
||||
# - psql client ref: https://www.postgresql.org/docs/9.5/libpq-envars.html
|
||||
#
|
||||
# 3. When we connect, they should use 127.0.0.1 rather than the
|
||||
# default way of connecting which leads to errors like below both for
|
||||
# mysql and postgresql unless we set MYSQL_HOST/PGHOST to 127.0.0.1.
|
||||
#
|
||||
# - ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2)
|
||||
#
|
||||
- name: Start a database server (${{ matrix.db }})
|
||||
if: ${{ matrix.db }}
|
||||
run: |
|
||||
if [ "${{ matrix.db }}" == "mysql" ]; then
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y mysql-client
|
||||
DB=mysql bash ci/docker-db.sh
|
||||
DB=mysql bash ci/init-db.sh
|
||||
fi
|
||||
if [ "${{ matrix.db }}" == "postgres" ]; then
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y postgresql-client
|
||||
DB=postgres bash ci/docker-db.sh
|
||||
DB=postgres bash ci/init-db.sh
|
||||
fi
|
||||
|
||||
- name: Run pytest
|
||||
# FIXME: --color=yes explicitly set because:
|
||||
# https://github.com/actions/runner/issues/241
|
||||
run: |
|
||||
pytest -v --maxfail=2 --color=yes --cov=jupyterhub jupyterhub/tests
|
||||
- name: Submit codecov report
|
||||
run: |
|
||||
codecov
|
||||
|
||||
docker-build:
|
||||
runs-on: ubuntu-20.04
|
||||
timeout-minutes: 10
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: build images
|
||||
run: |
|
||||
docker build -t jupyterhub/jupyterhub .
|
||||
docker build -t jupyterhub/jupyterhub-onbuild onbuild
|
||||
docker build -t jupyterhub/jupyterhub:alpine -f dockerfiles/Dockerfile.alpine .
|
||||
docker build -t jupyterhub/singleuser singleuser
|
||||
|
||||
- name: smoke test jupyterhub
|
||||
run: |
|
||||
docker run --rm -t jupyterhub/jupyterhub jupyterhub --help
|
||||
|
||||
- name: verify static files
|
||||
run: |
|
||||
docker run --rm -t -v $PWD/dockerfiles:/io jupyterhub/jupyterhub python3 /io/test.py
|
.gitignore (1 line added)
@@ -28,3 +28,4 @@ htmlcov
.pytest_cache
pip-wheel-metadata
docs/source/reference/metrics.rst
oldest-requirements.txt
@@ -1,19 +1,24 @@
repos:
  - repo: https://github.com/asottile/reorder_python_imports
    rev: v1.9.0
    hooks:
      - id: reorder-python-imports
  - repo: https://github.com/psf/black
    rev: 19.10b0
    hooks:
      - id: black
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.4.0
    hooks:
      - id: end-of-file-fixer
      - id: check-json
      - id: check-yaml
      - id: check-case-conflict
      - id: check-executables-have-shebangs
      - id: requirements-txt-fixer
      - id: flake8

  - repo: https://github.com/asottile/reorder_python_imports
    rev: v1.9.0
    hooks:
      - id: reorder-python-imports
  - repo: https://github.com/psf/black
    rev: 20.8b1
    hooks:
      - id: black
  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: v2.2.1
    hooks:
      - id: prettier
  - repo: https://gitlab.com/pycqa/flake8
    rev: "3.8.4"
    hooks:
      - id: flake8
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.4.0
    hooks:
      - id: end-of-file-fixer
      - id: check-case-conflict
      - id: check-executables-have-shebangs
      - id: requirements-txt-fixer
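With the updated hooks installed, individual hooks can also be run by id; for example (hook ids taken from the config above):

```bash
pre-commit run --all-files            # run every configured hook
pre-commit run prettier --all-files   # run only the new prettier hook
pre-commit run black --all-files      # run only black
```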
.prettierignore (new file, 1 line)
@@ -0,0 +1 @@
share/jupyterhub/templates/
.travis.yml (deleted, 120 lines)
@@ -1,120 +0,0 @@
|
||||
dist: bionic
|
||||
language: python
|
||||
cache:
|
||||
- pip
|
||||
env:
|
||||
global:
|
||||
- MYSQL_HOST=127.0.0.1
|
||||
- MYSQL_TCP_PORT=13306
|
||||
|
||||
# request additional services for the jobs to access
|
||||
services:
|
||||
- postgresql
|
||||
- docker
|
||||
|
||||
# install dependencies for running pytest (but not linting)
|
||||
before_install:
|
||||
- set -e
|
||||
- nvm install 6; nvm use 6
|
||||
- npm install
|
||||
- npm install -g configurable-http-proxy
|
||||
- |
|
||||
# setup database
|
||||
if [[ $JUPYTERHUB_TEST_DB_URL == mysql* ]]; then
|
||||
unset MYSQL_UNIX_PORT
|
||||
DB=mysql bash ci/docker-db.sh
|
||||
DB=mysql bash ci/init-db.sh
|
||||
# FIXME: mysql-connector-python 8.0.16 incorrectly decodes bytes to str
|
||||
# ref: https://bugs.mysql.com/bug.php?id=94944
|
||||
pip install 'mysql-connector-python==8.0.11'
|
||||
elif [[ $JUPYTERHUB_TEST_DB_URL == postgresql* ]]; then
|
||||
psql -c "CREATE USER $PGUSER WITH PASSWORD '$PGPASSWORD';" -U postgres
|
||||
DB=postgres bash ci/init-db.sh
|
||||
pip install psycopg2-binary
|
||||
fi
|
||||
|
||||
# install general dependencies
|
||||
install:
|
||||
- pip install --upgrade pip
|
||||
- pip install --upgrade --pre -r dev-requirements.txt .
|
||||
- |
|
||||
if [[ "$MASTER_DEPENDENCIES" == "True" ]]; then
|
||||
pip install git+https://github.com/ipython/traitlets#egg=traitlets --force
|
||||
fi
|
||||
- |
|
||||
if [[ "$TEST" == "jupyter_server" ]]; then
|
||||
pip uninstall notebook --yes
|
||||
pip install jupyter_server
|
||||
fi
|
||||
- pip freeze
|
||||
|
||||
# run tests
|
||||
script:
|
||||
- pytest -v --maxfail=2 --cov=jupyterhub jupyterhub/tests
|
||||
|
||||
# collect test coverage information
|
||||
after_success:
|
||||
- codecov
|
||||
|
||||
# list the jobs
|
||||
jobs:
|
||||
include:
|
||||
- name: autoformatting check
|
||||
python: 3.6
|
||||
# NOTE: It does not suffice to override to: null, [], or [""]. Travis will
|
||||
# fall back to the default if we do.
|
||||
before_install: echo "Do nothing before install."
|
||||
script:
|
||||
- pre-commit run --all-files
|
||||
after_success: echo "Do nothing after success."
|
||||
after_failure:
|
||||
- |
|
||||
echo "You can install pre-commit hooks to automatically run formatting"
|
||||
echo "on each commit with:"
|
||||
echo " pre-commit install"
|
||||
echo "or you can run by hand on staged files with"
|
||||
echo " pre-commit run"
|
||||
echo "or after-the-fact on already committed files with"
|
||||
echo " pre-commit run --all-files"
|
||||
# When we run pytest, we want to run it with python>=3.5 as well as with
|
||||
# various configurations. We increment the python version at the same time
|
||||
# as we test new configurations in order to reduce the number of test jobs.
|
||||
- name: python:3.5 + dist:xenial
|
||||
python: 3.5
|
||||
dist: xenial
|
||||
- name: python:3.6 + subdomain
|
||||
python: 3.6
|
||||
env: JUPYTERHUB_TEST_SUBDOMAIN_HOST=http://localhost.jovyan.org:8000
|
||||
- name: python:3.7 + mysql
|
||||
python: 3.7
|
||||
env:
|
||||
- JUPYTERHUB_TEST_DB_URL=mysql+mysqlconnector://root@127.0.0.1:$MYSQL_TCP_PORT/jupyterhub
|
||||
- name: python:3.8 + postgresql
|
||||
python: 3.8
|
||||
env:
|
||||
- PGUSER=jupyterhub
|
||||
- PGPASSWORD=hub[test/:?
|
||||
# The password in url below is url-encoded with: urllib.parse.quote($PGPASSWORD, safe='')
|
||||
- JUPYTERHUB_TEST_DB_URL=postgresql://jupyterhub:hub%5Btest%2F%3A%3F@127.0.0.1/jupyterhub
|
||||
- name: python:3.8 + master dependencies
|
||||
python: 3.8
|
||||
env:
|
||||
- PGUSER=jupyterhub
|
||||
- PGPASSWORD=hub[test/:?
|
||||
# The password in url below is url-encoded with: urllib.parse.quote($PGPASSWORD, safe='')
|
||||
- JUPYTERHUB_TEST_DB_URL=postgresql://jupyterhub:hub%5Btest%2F%3A%3F@127.0.0.1/jupyterhub
|
||||
- MASTER_DEPENDENCIES=True
|
||||
- name: python:3.8 + jupyter_server
|
||||
python: 3.8
|
||||
env:
|
||||
- TEST=jupyter_server
|
||||
- JUPYTERHUB_SINGLEUSER_APP=jupyterhub.tests.mockserverapp.MockServerApp
|
||||
|
||||
- name: python:nightly
|
||||
python: nightly
|
||||
allow_failures:
|
||||
- name: python:nightly
|
||||
# https://github.com/jupyterhub/jupyterhub/issues/3141
|
||||
# The latest traitlets is close to release so it should not fail
|
||||
# - name: python:3.8 + master dependencies
|
||||
fast_finish: true
|
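The comment above notes that the Postgres password is URL-encoded with `urllib.parse.quote($PGPASSWORD, safe='')` before being embedded in `JUPYTERHUB_TEST_DB_URL`; a quick one-liner reproduces the encoded value used in these configs:

```bash
python3 -c 'from urllib.parse import quote; print(quote("hub[test/:?", safe=""))'
# hub%5Btest%2F%3A%3F
```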
@@ -2,24 +2,24 @@

- [ ] Upgrade Docs prior to Release
  - [ ] Change log
  - [ ] New features documented
  - [ ] Update the contributor list - thank you page

- [ ] Upgrade and test Reference Deployments

- [ ] Release software
  - [ ] Make sure 0 issues in milestone
  - [ ] Follow release process steps
  - [ ] Send builds to PyPI (Warehouse) and Conda Forge

- [ ] Blog post and/or release note

- [ ] Notify users of release
  - [ ] Email Jupyter and Jupyter In Education mailing lists
  - [ ] Tweet (optional)

- [ ] Increment the version number for the next release
@@ -1 +1 @@
Please refer to [Project Jupyter's Code of Conduct](https://github.com/jupyter/governance/blob/master/conduct/code_of_conduct.md).
Please refer to [Project Jupyter's Code of Conduct](https://github.com/jupyter/governance/blob/HEAD/conduct/code_of_conduct.md).
@@ -1,9 +1,9 @@
# Contributing to JupyterHub

Welcome! As a [Jupyter](https://jupyter.org) project,
you can follow the [Jupyter contributor guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html).
you can follow the [Jupyter contributor guide](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html).

Make sure to also follow [Project Jupyter's Code of Conduct](https://github.com/jupyter/governance/blob/master/conduct/code_of_conduct.md)
Make sure to also follow [Project Jupyter's Code of Conduct](https://github.com/jupyter/governance/blob/HEAD/conduct/code_of_conduct.md)
for a friendly and welcoming collaborative environment.

## Setting up a development environment

@@ -18,39 +18,41 @@ JupyterHub requires Python >= 3.5 and nodejs.

As a Python project, a development install of JupyterHub follows standard practices for the basics (steps 1-2).

1. clone the repo

   ```bash
   git clone https://github.com/jupyterhub/jupyterhub
   ```

2. do a development install with pip

   ```bash
   cd jupyterhub
   python3 -m pip install --editable .
   ```

3. install the development requirements,
   which include things like testing tools

   ```bash
   python3 -m pip install -r dev-requirements.txt
   ```

4. install configurable-http-proxy with npm:

   ```bash
   npm install -g configurable-http-proxy
   ```

5. set up pre-commit hooks for automatic code formatting, etc.

   ```bash
   pre-commit install
   ```

   You can also invoke the pre-commit hook manually at any time with

   ```bash
   pre-commit run
   ```

## Contributing

@@ -71,7 +73,7 @@ into your text editor to format code automatically.

If you have already committed files before setting up the pre-commit
hook with `pre-commit install`, you can fix everything up using
`pre-commit run --all-files`. You need to make the fixing commit
yourself after that.

## Testing
@@ -24,7 +24,7 @@ software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
@@ -46,8 +46,8 @@ Jupyter uses a shared copyright model. Each contributor maintains copyright
over their contributions to Jupyter. But, it is important to note that these
contributions are typically only changes to the repositories. Thus, the Jupyter
source code, in its entirety is not the copyright of any single person or
institution. Instead, it is the collective copyright of the entire Jupyter
Development Team. If individual contributors want to maintain a record of what
changes/contributions they have specific copyright on, they should indicate
their copyright in the commit message of the change, when they commit the
change to one of the Jupyter repositories.
@@ -21,7 +21,7 @@
# your jupyterhub_config.py will be added automatically
# from your docker directory.

ARG BASE_IMAGE=ubuntu:focal-20200729@sha256:6f2fb2f9fb5582f8b587837afd6ea8f37d8d1d9e41168c90f410a6ef15fa8ce5
ARG BASE_IMAGE=ubuntu:focal-20200729
FROM $BASE_IMAGE AS builder

USER root
README.md (27 lines changed)
@@ -6,17 +6,15 @@
|
||||
**[License](#license)** |
|
||||
**[Help and Resources](#help-and-resources)**
|
||||
|
||||
|
||||
# [JupyterHub](https://github.com/jupyterhub/jupyterhub)
|
||||
|
||||
|
||||
[](https://pypi.python.org/pypi/jupyterhub)
|
||||
[](https://www.npmjs.com/package/jupyterhub)
|
||||
[](https://jupyterhub.readthedocs.org/en/latest/)
|
||||
[](https://travis-ci.com/jupyterhub/jupyterhub)
|
||||
[](https://github.com/jupyterhub/jupyterhub/actions)
|
||||
[](https://hub.docker.com/r/jupyterhub/jupyterhub/tags)
|
||||
[](https://circleci.com/gh/jupyterhub/jupyterhub)<!-- CircleCI Token: b5b65862eb2617b9a8d39e79340b0a6b816da8cc -->
|
||||
[](https://codecov.io/gh/jupyterhub/jupyterhub)
|
||||
[](https://codecov.io/gh/jupyterhub/jupyterhub)
|
||||
[](https://github.com/jupyterhub/jupyterhub/issues)
|
||||
[](https://discourse.jupyter.org/c/jupyterhub)
|
||||
[](https://gitter.im/jupyterhub/jupyterhub)
|
||||
@@ -48,22 +46,21 @@ Basic principles for operation are:
|
||||
servers.
|
||||
|
||||
JupyterHub also provides a
|
||||
[REST API](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyter/jupyterhub/master/docs/rest-api.yml#/default)
|
||||
[REST API](https://petstore3.swagger.io/?url=https://raw.githubusercontent.com/jupyter/jupyterhub/HEAD/docs/rest-api.yml#/default)
|
||||
for administration of the Hub and its users.
|
||||
|
||||
## Installation
|
||||
|
||||
|
||||
### Check prerequisites
|
||||
|
||||
- A Linux/Unix based system
|
||||
- [Python](https://www.python.org/downloads/) 3.5 or greater
|
||||
- [nodejs/npm](https://www.npmjs.com/)
|
||||
|
||||
* If you are using **`conda`**, the nodejs and npm dependencies will be installed for
|
||||
- If you are using **`conda`**, the nodejs and npm dependencies will be installed for
|
||||
you by conda.
|
||||
|
||||
* If you are using **`pip`**, install a recent version of
|
||||
- If you are using **`pip`**, install a recent version of
|
||||
[nodejs/npm](https://docs.npmjs.com/getting-started/installing-node).
|
||||
For example, install it on Linux (Debian/Ubuntu) using:
|
||||
|
||||
@@ -102,7 +99,7 @@ JupyterHub can be installed with `pip`, and the proxy with `npm`:
|
||||
|
||||
```bash
|
||||
npm install -g configurable-http-proxy
|
||||
python3 -m pip install jupyterhub
|
||||
python3 -m pip install jupyterhub
|
||||
```
|
||||
|
||||
If you plan to run notebook servers locally, you will need to install the
|
||||
@@ -120,10 +117,10 @@ To start the Hub server, run the command:
|
||||
Visit `https://localhost:8000` in your browser, and sign in with your unix
|
||||
PAM credentials.
|
||||
|
||||
*Note*: To allow multiple users to sign into the server, you will need to
|
||||
run the `jupyterhub` command as a *privileged user*, such as root.
|
||||
_Note_: To allow multiple users to sign into the server, you will need to
|
||||
run the `jupyterhub` command as a _privileged user_, such as root.
|
||||
The [wiki](https://github.com/jupyterhub/jupyterhub/wiki/Using-sudo-to-run-JupyterHub-without-root-privileges)
|
||||
describes how to run the server as a *less privileged user*, which requires
|
||||
describes how to run the server as a _less privileged user_, which requires
|
||||
more configuration of the system.
|
||||
|
||||
## Configuration
|
||||
@@ -142,7 +139,7 @@ To generate a default config file with settings and descriptions:
|
||||
|
||||
### Start the Hub
|
||||
|
||||
To start the Hub on a specific url and port ``10.0.1.2:443`` with **https**:
|
||||
To start the Hub on a specific url and port `10.0.1.2:443` with **https**:
|
||||
|
||||
jupyterhub --ip 10.0.1.2 --port 443 --ssl-key my_ssl.key --ssl-cert my_ssl.cert
|
||||
|
||||
@@ -204,7 +201,7 @@ These accounts will be used for authentication in JupyterHub's default configura
|
||||
## Contributing
|
||||
|
||||
If you would like to contribute to the project, please read our
|
||||
[contributor documentation](http://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html)
|
||||
[contributor documentation](https://jupyter.readthedocs.io/en/latest/contributing/content-contributor.html)
|
||||
and the [`CONTRIBUTING.md`](CONTRIBUTING.md). The `CONTRIBUTING.md` file
|
||||
explains how to set up a development installation, how to run the test suite,
|
||||
and how to contribute to documentation.
|
||||
@@ -242,7 +239,7 @@ our JupyterHub [Gitter](https://gitter.im/jupyterhub/jupyterhub) channel.
|
||||
- [Reporting Issues](https://github.com/jupyterhub/jupyterhub/issues)
|
||||
- [JupyterHub tutorial](https://github.com/jupyterhub/jupyterhub-tutorial)
|
||||
- [Documentation for JupyterHub](https://jupyterhub.readthedocs.io/en/latest/) | [PDF (latest)](https://media.readthedocs.org/pdf/jupyterhub/latest/jupyterhub.pdf) | [PDF (stable)](https://media.readthedocs.org/pdf/jupyterhub/stable/jupyterhub.pdf)
|
||||
- [Documentation for JupyterHub's REST API](http://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyter/jupyterhub/master/docs/rest-api.yml#/default)
|
||||
- [Documentation for JupyterHub's REST API](https://petstore3.swagger.io/?url=https://raw.githubusercontent.com/jupyter/jupyterhub/HEAD/docs/rest-api.yml#/default)
|
||||
- [Documentation for Project Jupyter](http://jupyter.readthedocs.io/en/latest/index.html) | [PDF](https://media.readthedocs.org/pdf/jupyter/latest/jupyter.pdf)
|
||||
- [Project Jupyter website](https://jupyter.org)
|
||||
- [Project Jupyter community](https://jupyter.org/community)
|
||||
|
@@ -1,59 +1,60 @@
|
||||
#!/usr/bin/env bash
|
||||
# source this file to setup postgres and mysql
|
||||
# for local testing (as similar as possible to docker)
|
||||
# The goal of this script is to start a database server as a docker container.
|
||||
#
|
||||
# Required environment variables:
|
||||
# - DB: The database server to start, either "postgres" or "mysql".
|
||||
#
|
||||
# - PGUSER/PGPASSWORD: For the creation of a postgresql user with associated
|
||||
# password.
|
||||
|
||||
set -eu
|
||||
|
||||
export MYSQL_HOST=127.0.0.1
|
||||
export MYSQL_TCP_PORT=${MYSQL_TCP_PORT:-13306}
|
||||
export PGHOST=127.0.0.1
|
||||
NAME="hub-test-$DB"
|
||||
DOCKER_RUN="docker run -d --name $NAME"
|
||||
# Stop and remove any existing database container
|
||||
DOCKER_CONTAINER="hub-test-$DB"
|
||||
docker rm -f "$DOCKER_CONTAINER" 2>/dev/null || true
|
||||
|
||||
docker rm -f "$NAME" 2>/dev/null || true
|
||||
# Prepare environment variables to startup and await readiness of either a mysql
|
||||
# or postgresql server.
|
||||
if [[ "$DB" == "mysql" ]]; then
|
||||
# Environment variables can influence both the mysql server in the docker
|
||||
# container and the mysql client.
|
||||
#
|
||||
# ref server: https://hub.docker.com/_/mysql/
|
||||
# ref client: https://dev.mysql.com/doc/refman/5.7/en/setting-environment-variables.html
|
||||
#
|
||||
DOCKER_RUN_ARGS="-p 3306:3306 --env MYSQL_ALLOW_EMPTY_PASSWORD=1 mysql:5.7"
|
||||
READINESS_CHECK="mysql --user root --execute \q"
|
||||
elif [[ "$DB" == "postgres" ]]; then
|
||||
# Environment variables can influence both the postgresql server in the
|
||||
# docker container and the postgresql client (psql).
|
||||
#
|
||||
# ref server: https://hub.docker.com/_/postgres/
|
||||
# ref client: https://www.postgresql.org/docs/9.5/libpq-envars.html
|
||||
#
|
||||
# POSTGRES_USER / POSTGRES_PASSWORD will create a user on startup of the
|
||||
# postgres server, but PGUSER and PGPASSWORD are the environment variables
|
||||
# used by the postgresql client psql, so we configure the user based on how
|
||||
# we want to connect.
|
||||
#
|
||||
DOCKER_RUN_ARGS="-p 5432:5432 --env "POSTGRES_USER=${PGUSER}" --env "POSTGRES_PASSWORD=${PGPASSWORD}" postgres:9.5"
|
||||
READINESS_CHECK="psql --command \q"
|
||||
else
|
||||
echo '$DB must be mysql or postgres'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
case "$DB" in
|
||||
"mysql")
|
||||
RUN_ARGS="-e MYSQL_ALLOW_EMPTY_PASSWORD=1 -p $MYSQL_TCP_PORT:3306 mysql:5.7"
|
||||
CHECK="mysql --host $MYSQL_HOST --port $MYSQL_TCP_PORT --user root -e \q"
|
||||
;;
|
||||
"postgres")
|
||||
RUN_ARGS="-p 5432:5432 postgres:9.5"
|
||||
CHECK="psql --user postgres -c \q"
|
||||
;;
|
||||
*)
|
||||
echo '$DB must be mysql or postgres'
|
||||
exit 1
|
||||
esac
|
||||
|
||||
$DOCKER_RUN $RUN_ARGS
|
||||
# Start the database server
|
||||
docker run --detach --name "$DOCKER_CONTAINER" $DOCKER_RUN_ARGS
|
||||
|
||||
# Wait for the database server to start
|
||||
echo -n "waiting for $DB "
|
||||
for i in {1..60}; do
|
||||
if $CHECK; then
|
||||
echo 'done'
|
||||
break
|
||||
else
|
||||
echo -n '.'
|
||||
sleep 1
|
||||
fi
|
||||
if $READINESS_CHECK; then
|
||||
echo 'done'
|
||||
break
|
||||
else
|
||||
echo -n '.'
|
||||
sleep 1
|
||||
fi
|
||||
done
|
||||
$CHECK
|
||||
|
||||
case "$DB" in
|
||||
"mysql")
|
||||
;;
|
||||
"postgres")
|
||||
# create the user
|
||||
psql --user postgres -c "CREATE USER $PGUSER WITH PASSWORD '$PGPASSWORD';"
|
||||
;;
|
||||
*)
|
||||
esac
|
||||
|
||||
echo -e "
|
||||
Set these environment variables:
|
||||
|
||||
export MYSQL_HOST=127.0.0.1
|
||||
export MYSQL_TCP_PORT=$MYSQL_TCP_PORT
|
||||
export PGHOST=127.0.0.1
|
||||
"
|
||||
$READINESS_CHECK
|
||||
|
@@ -1,27 +1,26 @@
|
||||
#!/usr/bin/env bash
|
||||
# initialize jupyterhub databases for testing
|
||||
# The goal of this script is to initialize a running database server with clean
|
||||
# databases for use during tests.
|
||||
#
|
||||
# Required environment variables:
|
||||
# - DB: The database server to start, either "postgres" or "mysql".
|
||||
|
||||
set -eu
|
||||
|
||||
MYSQL="mysql --user root --host $MYSQL_HOST --port $MYSQL_TCP_PORT -e "
|
||||
PSQL="psql --user postgres -c "
|
||||
|
||||
case "$DB" in
|
||||
"mysql")
|
||||
EXTRA_CREATE='CHARACTER SET utf8 COLLATE utf8_general_ci'
|
||||
SQL="$MYSQL"
|
||||
;;
|
||||
"postgres")
|
||||
SQL="$PSQL"
|
||||
;;
|
||||
*)
|
||||
echo '$DB must be mysql or postgres'
|
||||
exit 1
|
||||
esac
|
||||
# Prepare env vars SQL_CLIENT and EXTRA_CREATE_DATABASE_ARGS
|
||||
if [[ "$DB" == "mysql" ]]; then
|
||||
SQL_CLIENT="mysql --user root --execute "
|
||||
EXTRA_CREATE_DATABASE_ARGS='CHARACTER SET utf8 COLLATE utf8_general_ci'
|
||||
elif [[ "$DB" == "postgres" ]]; then
|
||||
SQL_CLIENT="psql --command "
|
||||
else
|
||||
echo '$DB must be mysql or postgres'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Configure a set of databases in the database server for upgrade tests
|
||||
set -x
|
||||
|
||||
for SUFFIX in '' _upgrade_072 _upgrade_081 _upgrade_094; do
|
||||
$SQL "DROP DATABASE jupyterhub${SUFFIX};" 2>/dev/null || true
|
||||
$SQL "CREATE DATABASE jupyterhub${SUFFIX} ${EXTRA_CREATE:-};"
|
||||
$SQL_CLIENT "DROP DATABASE jupyterhub${SUFFIX};" 2>/dev/null || true
|
||||
$SQL_CLIENT "CREATE DATABASE jupyterhub${SUFFIX} ${EXTRA_CREATE_DATABASE_ARGS:-};"
|
||||
done
|
||||
|
@@ -15,9 +15,10 @@ This should only be used for demo or testing purposes!
It shouldn't be used as a base image to build on.

### Try it

1. `cd` to the root of your jupyterhub repo.

2. Build the demo image with `docker build -t jupyterhub-demo demo-image`.

3. Run the demo image with `docker run -d -p 8000:8000 jupyterhub-demo`.
@@ -10,9 +10,9 @@ html5lib # needed for beautifulsoup
mock
notebook
pre-commit
pytest>=3.3
pytest-asyncio
pytest-cov
pytest>=3.3
requests-mock
# blacklist urllib3 releases affected by https://github.com/urllib3/urllib3/issues/1683
# I *think* this should only affect testing, not production
@@ -1,9 +1,14 @@
FROM python:3.6.3-alpine3.6

ARG JUPYTERHUB_VERSION=0.8.1

RUN pip3 install --no-cache jupyterhub==${JUPYTERHUB_VERSION}
FROM alpine:3.13
ENV LANG=en_US.UTF-8
RUN apk add --no-cache \
    python3 \
    py3-pip \
    py3-ruamel.yaml \
    py3-cryptography \
    py3-sqlalchemy

ARG JUPYTERHUB_VERSION=1.3.0
RUN pip3 install --no-cache jupyterhub==${JUPYTERHUB_VERSION}

USER nobody
CMD ["jupyterhub"]
@@ -1,20 +1,20 @@
## What is Dockerfile.alpine

Dockerfile.alpine contains base image for jupyterhub. It does not work independently, but only as part of a full jupyterhub cluster

## How to use it?

1. A running configurable-http-proxy, whose API is accessible.
2. A jupyterhub_config file.
3. Authentication and other libraries required by the specific jupyterhub_config file.

## Steps to test it outside a cluster

- start configurable-http-proxy in another container
- specify CONFIGPROXY_AUTH_TOKEN env in both containers
- put both containers on the same network (e.g. docker network create jupyterhub; docker run ... --net jupyterhub)
- tell jupyterhub where CHP is (e.g. c.ConfigurableHTTPProxy.api_url = 'http://chp:8001')
- tell jupyterhub not to start the proxy itself (c.ConfigurableHTTPProxy.should_start = False)
- Use dummy authenticator for ease of testing. Update following in jupyterhub_config file
  - c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'
  - c.DummyAuthenticator.password = "your strong password"
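A minimal sketch of those steps, assuming the image was built as `jupyterhub/jupyterhub:alpine` and that the container and network names (`chp`, `hub`, `jupyterhub`) are placeholders of our choosing:

```bash
# Shared secret for the proxy API, passed to both containers.
export CONFIGPROXY_AUTH_TOKEN=$(openssl rand -hex 32)

docker network create jupyterhub

# Proxy container; its REST API listens on port 8001 inside the network.
docker run -d --name chp --net jupyterhub \
  -e CONFIGPROXY_AUTH_TOKEN \
  jupyterhub/configurable-http-proxy --api-ip 0.0.0.0

# Hub container, using a jupyterhub_config.py that points at the proxy
# (c.ConfigurableHTTPProxy.api_url = 'http://chp:8001',
#  c.ConfigurableHTTPProxy.should_start = False).
# -w /tmp gives the unprivileged user a writable working directory for the
# default sqlite database.
docker run -d --name hub --net jupyterhub -p 8000:8000 -w /tmp \
  -e CONFIGPROXY_AUTH_TOKEN \
  -v "$PWD/jupyterhub_config.py":/srv/jupyterhub/jupyterhub_config.py \
  jupyterhub/jupyterhub:alpine jupyterhub -f /srv/jupyterhub/jupyterhub_config.py
```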
@@ -1,12 +1,11 @@
-r ../requirements.txt

alabaster_jupyterhub
# Temporary fix of #3021. Revert back to released autodoc-traits when
# 0.1.0 released.
https://github.com/jupyterhub/autodoc-traits/archive/75885ee24636efbfebfceed1043459715049cd84.zip
autodoc-traits
docutils<0.18
pydata-sphinx-theme
pytablewriter>=0.56
recommonmark>=0.6
sphinx>=1.7
sphinx-copybutton
sphinx-jsonschema
sphinx>=1.7
@@ -1,13 +1,12 @@
|
||||
# see me at: http://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyterhub/jupyterhub/master/docs/rest-api.yml#/default
|
||||
swagger: '2.0'
|
||||
# see me at: https://petstore3.swagger.io/?url=https://raw.githubusercontent.com/jupyterhub/jupyterhub/HEAD/docs/rest-api.yml#/default
|
||||
swagger: "2.0"
|
||||
info:
|
||||
title: JupyterHub
|
||||
description: The REST API for JupyterHub
|
||||
version: 1.2.0dev
|
||||
version: 1.5.0
|
||||
license:
|
||||
name: BSD-3-Clause
|
||||
schemes:
|
||||
[http, https]
|
||||
schemes: [http, https]
|
||||
securityDefinitions:
|
||||
token:
|
||||
type: apiKey
|
||||
@@ -28,7 +27,7 @@ paths:
|
||||
This endpoint is not authenticated for the purpose of clients and user
|
||||
to identify the JupyterHub version before setting up authentication.
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The JupyterHub version
|
||||
schema:
|
||||
type: object
|
||||
@@ -44,7 +43,7 @@ paths:
|
||||
JupyterHub's version and executable path,
|
||||
and which Authenticator and Spawner are active.
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: Detailed JupyterHub info
|
||||
schema:
|
||||
type: object
|
||||
@@ -79,13 +78,28 @@ paths:
|
||||
/users:
|
||||
get:
|
||||
summary: List users
|
||||
parameters:
|
||||
- name: state
|
||||
in: query
|
||||
required: false
|
||||
type: string
|
||||
enum: ["inactive", "active", "ready"]
|
||||
description: |
|
||||
Return only users who have servers in the given state.
|
||||
If unspecified, return all users.
|
||||
|
||||
active: all users with any active servers (ready OR pending)
|
||||
ready: all users who have any ready servers (running, not pending)
|
||||
inactive: all users who have *no* active servers (complement of active)
|
||||
|
||||
Added in JupyterHub 1.3
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The Hub's user list
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/User'
|
||||
$ref: "#/definitions/User"
|
||||
post:
|
||||
summary: Create multiple users
|
||||
parameters:
|
||||
@@ -104,13 +118,13 @@ paths:
|
||||
description: whether the created users should be admins
|
||||
type: boolean
|
||||
responses:
|
||||
'201':
|
||||
"201":
|
||||
description: The users have been created
|
||||
schema:
|
||||
type: array
|
||||
description: The created users
|
||||
items:
|
||||
$ref: '#/definitions/User'
|
||||
$ref: "#/definitions/User"
|
||||
/users/{name}:
|
||||
get:
|
||||
summary: Get a user by name
|
||||
@@ -121,10 +135,10 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The User model
|
||||
schema:
|
||||
$ref: '#/definitions/User'
|
||||
$ref: "#/definitions/User"
|
||||
post:
|
||||
summary: Create a single user
|
||||
parameters:
|
||||
@@ -134,10 +148,10 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'201':
|
||||
"201":
|
||||
description: The user has been created
|
||||
schema:
|
||||
$ref: '#/definitions/User'
|
||||
$ref: "#/definitions/User"
|
||||
patch:
|
||||
summary: Modify a user
|
||||
description: Change a user's name or admin status
|
||||
@@ -161,10 +175,10 @@ paths:
|
||||
type: boolean
|
||||
description: update admin (optional, if another key is updated i.e. name)
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The updated user info
|
||||
schema:
|
||||
$ref: '#/definitions/User'
|
||||
$ref: "#/definitions/User"
|
||||
delete:
|
||||
summary: Delete a user
|
||||
parameters:
|
||||
@@ -174,14 +188,12 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'204':
|
||||
"204":
|
||||
description: The user has been deleted
|
||||
/users/{name}/activity:
|
||||
post:
|
||||
summary:
|
||||
Notify Hub of activity for a given user.
|
||||
description:
|
||||
Notify the Hub of activity by the user,
|
||||
summary: Notify Hub of activity for a given user.
|
||||
description: Notify the Hub of activity by the user,
|
||||
e.g. accessing a service or (more likely)
|
||||
actively using a server.
|
||||
parameters:
|
||||
@@ -209,7 +221,7 @@ paths:
|
||||
The default server has an empty name ('').
|
||||
type: object
|
||||
properties:
|
||||
'<server name>':
|
||||
"<server name>":
|
||||
description: |
|
||||
Activity for a single server.
|
||||
type: object
|
||||
@@ -222,16 +234,16 @@ paths:
|
||||
description: |
|
||||
Timestamp of last-seen activity on this server.
|
||||
example:
|
||||
last_activity: '2019-02-06T12:54:14Z'
|
||||
last_activity: "2019-02-06T12:54:14Z"
|
||||
servers:
|
||||
'':
|
||||
last_activity: '2019-02-06T12:54:14Z'
|
||||
"":
|
||||
last_activity: "2019-02-06T12:54:14Z"
|
||||
gpu:
|
||||
last_activity: '2019-02-06T12:54:14Z'
|
||||
last_activity: "2019-02-06T12:54:14Z"
|
||||
responses:
|
||||
'401':
|
||||
$ref: '#/responses/Unauthorized'
|
||||
'404':
|
||||
"401":
|
||||
$ref: "#/responses/Unauthorized"
|
||||
"404":
|
||||
description: No such user
|
||||
/users/{name}/server:
|
||||
post:
|
||||
@@ -256,9 +268,9 @@ paths:
|
||||
type: object
|
||||
|
||||
responses:
|
||||
'201':
|
||||
"201":
|
||||
description: The user's notebook server has started
|
||||
'202':
|
||||
"202":
|
||||
description: The user's notebook server has not yet started, but has been requested
|
||||
delete:
|
||||
summary: Stop a user's server
|
||||
@@ -269,9 +281,9 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'204':
|
||||
"204":
|
||||
description: The user's notebook server has stopped
|
||||
'202':
|
||||
"202":
|
||||
description: The user's notebook server has not yet stopped as it is taking a while to stop
|
||||
/users/{name}/servers/{server_name}:
|
||||
post:
|
||||
@@ -284,8 +296,8 @@ paths:
|
||||
type: string
|
||||
- name: server_name
|
||||
description: |
|
||||
name given to a named-server.
|
||||
|
||||
name given to a named-server.
|
||||
|
||||
Note that depending on your JupyterHub infrastructure there are character size limitations on `server_name`. The default spawner with a K8s pod will not allow Jupyter Notebooks to be spawned with a name that contains more than 253 characters (keep in mind that the pod will be spawned with extra characters to identify the user and hub).
|
||||
in: path
|
||||
required: true
|
||||
@@ -301,9 +313,9 @@ paths:
|
||||
schema:
|
||||
type: object
|
||||
responses:
|
||||
'201':
|
||||
"201":
|
||||
description: The user's notebook named-server has started
|
||||
'202':
|
||||
"202":
|
||||
description: The user's notebook named-server has not yet started, but has been requested
|
||||
delete:
|
||||
summary: Stop a user's named-server
|
||||
@@ -331,9 +343,9 @@ paths:
|
||||
Removing a server deletes things like the state of the stopped server.
|
||||
Default: false.
|
||||
responses:
|
||||
'204':
|
||||
"204":
|
||||
description: The user's notebook named-server has stopped
|
||||
'202':
|
||||
"202":
|
||||
description: The user's notebook named-server has not yet stopped as it is taking a while to stop
|
||||
/users/{name}/tokens:
|
||||
parameters:
|
||||
@@ -345,15 +357,15 @@ paths:
|
||||
get:
|
||||
summary: List tokens for the user
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The list of tokens
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/Token'
|
||||
'401':
|
||||
$ref: '#/responses/Unauthorized'
|
||||
'404':
|
||||
$ref: "#/definitions/Token"
|
||||
"401":
|
||||
$ref: "#/responses/Unauthorized"
|
||||
"404":
|
||||
description: No such user
|
||||
post:
|
||||
summary: Create a new token for the user
|
||||
@@ -371,11 +383,11 @@ paths:
|
||||
type: string
|
||||
description: A note attached to the token for future bookkeeping
|
||||
responses:
|
||||
'201':
|
||||
"201":
|
||||
description: The newly created token
|
||||
schema:
|
||||
$ref: '#/definitions/Token'
|
||||
'400':
|
||||
$ref: "#/definitions/Token"
|
||||
"400":
|
||||
description: Body must be a JSON dict or empty
|
||||
/users/{name}/tokens/{token_id}:
|
||||
parameters:
|
||||
@@ -391,33 +403,33 @@ paths:
|
||||
get:
|
||||
summary: Get the model for a token by id
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The info for the new token
|
||||
schema:
|
||||
$ref: '#/definitions/Token'
|
||||
$ref: "#/definitions/Token"
|
||||
delete:
|
||||
summary: Delete (revoke) a token by id
|
||||
responses:
|
||||
'204':
|
||||
"204":
|
||||
description: The token has been deleted
|
||||
/user:
|
||||
get:
|
||||
summary: Return authenticated user's model
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The authenticated user's model is returned.
|
||||
schema:
|
||||
$ref: '#/definitions/User'
|
||||
$ref: "#/definitions/User"
|
||||
/groups:
|
||||
get:
|
||||
summary: List groups
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The list of groups
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/Group'
|
||||
$ref: "#/definitions/Group"
|
||||
/groups/{name}:
|
||||
get:
|
||||
summary: Get a group by name
|
||||
@@ -428,10 +440,10 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The group model
|
||||
schema:
|
||||
$ref: '#/definitions/Group'
|
||||
$ref: "#/definitions/Group"
|
||||
post:
|
||||
summary: Create a group
|
||||
parameters:
|
||||
@@ -441,10 +453,10 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'201':
|
||||
"201":
|
||||
description: The group has been created
|
||||
schema:
|
||||
$ref: '#/definitions/Group'
|
||||
$ref: "#/definitions/Group"
|
||||
delete:
|
||||
summary: Delete a group
|
||||
parameters:
|
||||
@@ -454,7 +466,7 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'204':
|
||||
"204":
|
||||
description: The group has been deleted
|
||||
/groups/{name}/users:
|
||||
post:
|
||||
@@ -478,10 +490,10 @@ paths:
|
||||
items:
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The users have been added to the group
|
||||
schema:
|
||||
$ref: '#/definitions/Group'
|
||||
$ref: "#/definitions/Group"
|
||||
delete:
|
||||
summary: Remove users from a group
|
||||
parameters:
|
||||
@@ -503,18 +515,18 @@ paths:
|
||||
items:
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The users have been removed from the group
|
||||
/services:
|
||||
get:
|
||||
summary: List services
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The service list
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/Service'
|
||||
$ref: "#/definitions/Service"
|
||||
/services/{name}:
|
||||
get:
|
||||
summary: Get a service by name
|
||||
@@ -525,16 +537,16 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The Service model
|
||||
schema:
|
||||
$ref: '#/definitions/Service'
|
||||
$ref: "#/definitions/Service"
|
||||
/proxy:
|
||||
get:
|
||||
summary: Get the proxy's routing table
|
||||
description: A convenience alias for getting the routing table directly from the proxy
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: Routing table
|
||||
schema:
|
||||
type: object
|
||||
@@ -542,7 +554,7 @@ paths:
|
||||
post:
|
||||
summary: Force the Hub to sync with the proxy
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: Success
|
||||
patch:
|
||||
summary: Notify the Hub about a new proxy
|
||||
@@ -568,7 +580,7 @@ paths:
|
||||
type: string
|
||||
description: CONFIGPROXY_AUTH_TOKEN for the new proxy
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: Success
|
||||
/authorizations/token:
|
||||
post:
|
||||
@@ -590,7 +602,7 @@ paths:
|
||||
password:
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The new API token
|
||||
schema:
|
||||
type: object
|
||||
@@ -598,7 +610,7 @@ paths:
|
||||
token:
|
||||
type: string
|
||||
description: The new API token.
|
||||
'403':
|
||||
"403":
|
||||
description: The user can not be authenticated.
|
||||
/authorizations/token/{token}:
|
||||
get:
|
||||
@@ -609,9 +621,9 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The user or service identified by the API token
|
||||
'404':
|
||||
"404":
|
||||
description: A user or service is not found.
|
||||
/authorizations/cookie/{cookie_name}/{cookie_value}:
|
||||
get:
|
||||
@@ -627,15 +639,15 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: The user identified by the cookie
|
||||
schema:
|
||||
$ref: '#/definitions/User'
|
||||
'404':
|
||||
$ref: "#/definitions/User"
|
||||
"404":
|
||||
description: A user is not found.
|
||||
/oauth2/authorize:
|
||||
get:
|
||||
summary: 'OAuth 2.0 authorize endpoint'
|
||||
summary: "OAuth 2.0 authorize endpoint"
|
||||
description: |
|
||||
Redirect users to this URL to begin the OAuth process.
|
||||
It is not an API endpoint.
|
||||
@@ -661,9 +673,9 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: Success
|
||||
'400':
|
||||
"400":
|
||||
description: OAuth2Error
|
||||
/oauth2/token:
|
||||
post:
|
||||
@@ -700,7 +712,7 @@ paths:
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
'200':
|
||||
"200":
|
||||
description: JSON response including the token
|
||||
schema:
|
||||
type: object
|
||||
@@ -727,9 +739,9 @@ paths:
|
||||
type: boolean
|
||||
description: Whether users' notebook servers should be shut down as well (default from Hub config)
|
||||
responses:
|
||||
'202':
|
||||
"202":
|
||||
description: Shutdown successful
|
||||
'400':
|
||||
"400":
|
||||
description: Unexpected value for proxy or servers
|
||||
# Descriptions of common responses
|
||||
responses:
|
||||
@@ -767,7 +779,7 @@ definitions:
|
||||
type: array
|
||||
description: The active servers for this user.
|
||||
items:
|
||||
$ref: '#/definitions/Server'
|
||||
$ref: "#/definitions/Server"
|
||||
Server:
|
||||
type: object
|
||||
properties:
|
||||
@@ -861,7 +873,7 @@ definitions:
|
||||
description: The user that owns a token (undefined if owned by a service)
|
||||
service:
|
||||
type: string
|
||||
description: The service that owns the token (undefined of owned by a user)
|
||||
description: The service that owns the token (undefined if owned by a user)
|
||||
note:
|
||||
type: string
|
||||
description: A note about the token, typically describing what it was created for.
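For orientation alongside the spec above, here is a hedged sketch of a client that lists users whose servers are ready, using the `state` query parameter described for `GET /users` (added in JupyterHub 1.3). The Hub URL, the environment variable, and the token value are assumptions for the example, not requirements of the spec:

```python
# Hypothetical client sketch -- not part of the spec above.
# Assumptions: the Hub API is reachable at this URL and an API token
# is provided via the JUPYTERHUB_API_TOKEN environment variable.
import os

import requests

api_url = "http://127.0.0.1:8081/hub/api"
token = os.environ["JUPYTERHUB_API_TOKEN"]

# GET /users with the `state` filter described above
r = requests.get(
    f"{api_url}/users",
    params={"state": "ready"},
    headers={"Authorization": f"token {token}"},
)
r.raise_for_status()
for user in r.json():
    print(user["name"])
```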
|
||||
|
@@ -1,4 +1,4 @@
|
||||
/* Added to avoid logo being too squeezed */
|
||||
.navbar-brand {
|
||||
height: 4rem !important;
|
||||
}
|
||||
/* Added to avoid logo being too squeezed */
|
||||
.navbar-brand {
|
||||
height: 4rem !important;
|
||||
}
|
||||
|
@@ -18,7 +18,7 @@ information on:
|
||||
- learning more about JupyterHub's API
|
||||
|
||||
The same JupyterHub API spec, as found here, is available in an interactive form
|
||||
`here (on swagger's petstore) <http://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyterhub/jupyterhub/master/docs/rest-api.yml#!/default>`__.
|
||||
`here (on swagger's petstore) <https://petstore3.swagger.io/?url=https://raw.githubusercontent.com/jupyterhub/jupyterhub/HEAD/docs/rest-api.yml#!/default>`__.
|
||||
The `OpenAPI Initiative`_ (fka Swagger™) is a project used to describe
|
||||
and document RESTful APIs.
|
||||
|
||||
|
File diff suppressed because one or more lines are too long
@@ -13,7 +13,7 @@ Building documentation locally
|
||||
We use `sphinx <http://sphinx-doc.org>`_ to build our documentation. It takes
|
||||
our documentation source files (written in `markdown
|
||||
<https://daringfireball.net/projects/markdown/>`_ or `reStructuredText
|
||||
<http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_ &
|
||||
<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_ &
|
||||
stored under the ``docs/source`` directory) and converts it into various
|
||||
formats for people to read. To make sure the documentation you write or
|
||||
change renders correctly, it is good practice to test it locally.
|
||||
@@ -39,8 +39,8 @@ change renders correctly, it is good practice to test it locally.
|
||||
along with the filename / line number in which they occurred. Fix them,
|
||||
and re-run the ``make html`` command to re-render the documentation.
|
||||
|
||||
#. View the rendered documentation by opening ``build/html/index.html`` in
|
||||
a web browser.
|
||||
#. View the rendered documentation by opening ``build/html/index.html`` in
|
||||
a web browser.
|
||||
|
||||
.. tip::
|
||||
|
||||
|
@@ -6,8 +6,8 @@ We want you to contribute to JupyterHub in ways that are most exciting
|
||||
& useful to you. We value documentation, testing, bug reporting & code equally,
|
||||
and are glad to have your contributions in whatever form you wish :)
|
||||
|
||||
Our `Code of Conduct <https://github.com/jupyter/governance/blob/master/conduct/code_of_conduct.md>`_
|
||||
(`reporting guidelines <https://github.com/jupyter/governance/blob/master/conduct/reporting_online.md>`_)
|
||||
Our `Code of Conduct <https://github.com/jupyter/governance/blob/HEAD/conduct/code_of_conduct.md>`_
|
||||
(`reporting guidelines <https://github.com/jupyter/governance/blob/HEAD/conduct/reporting_online.md>`_)
|
||||
helps keep our community welcoming to as many people as possible.
|
||||
|
||||
.. toctree::
|
||||
|
@@ -6,8 +6,8 @@ the community of users, contributors, and maintainers.
|
||||
The goal is to communicate priorities and upcoming release plans.
|
||||
It is not aimed at limiting contributions to what is listed here.
|
||||
|
||||
|
||||
## Using the roadmap
|
||||
|
||||
### Sharing Feedback on the Roadmap
|
||||
|
||||
All of the community is encouraged to provide feedback as well as share new
|
||||
@@ -22,17 +22,17 @@ maintainers will help identify what a good next step is for the issue.
|
||||
When submitting an issue, think about what "next step" category best describes
|
||||
your issue:
|
||||
|
||||
* **now**, concrete/actionable step that is ready for someone to start work on.
|
||||
These might be items that have a link to an issue or more abstract like
|
||||
"decrease typos and dead links in the documentation"
|
||||
* **soon**, less concrete/actionable step that is going to happen soon,
|
||||
discussions around the topic are coming close to an end at which point it can
|
||||
move into the "now" category
|
||||
* **later**, abstract ideas or tasks, need a lot of discussion or
|
||||
experimentation to shape the idea so that it can be executed. Can also
|
||||
contain concrete/actionable steps that have been postponed on purpose
|
||||
(these are steps that could be in "now" but the decision was taken to work on
|
||||
them later)
|
||||
- **now**, concrete/actionable step that is ready for someone to start work on.
|
||||
These might be items that have a link to an issue or more abstract like
|
||||
"decrease typos and dead links in the documentation"
|
||||
- **soon**, less concrete/actionable step that is going to happen soon,
|
||||
discussions around the topic are coming close to an end at which point it can
|
||||
move into the "now" category
|
||||
- **later**, abstract ideas or tasks, need a lot of discussion or
|
||||
experimentation to shape the idea so that it can be executed. Can also
|
||||
contain concrete/actionable steps that have been postponed on purpose
|
||||
(these are steps that could be in "now" but the decision was taken to work on
|
||||
them later)
|
||||
|
||||
### Reviewing and Updating the Roadmap
|
||||
|
||||
@@ -47,8 +47,8 @@ For those please create a
|
||||
The roadmap should give the reader an idea of what is happening next, what needs
|
||||
input and discussion before it can happen and what has been postponed.
|
||||
|
||||
|
||||
## The roadmap proper
|
||||
|
||||
### Project vision
|
||||
|
||||
JupyterHub is a dependable tool used by humans that reduces the complexity of
|
||||
@@ -58,20 +58,19 @@ creating the environment in which a piece of software can be executed.
|
||||
|
||||
These "Now" items are considered active areas of focus for the project:
|
||||
|
||||
* HubShare - a sharing service for use with JupyterHub.
|
||||
* Users should be able to:
|
||||
- Push a project to other users.
|
||||
- Get a checkout of a project from other users.
|
||||
- Push updates to a published project.
|
||||
- Pull updates from a published project.
|
||||
- Manage conflicts/merges by simply picking a version (our/theirs)
|
||||
- Get a checkout of a project from the internet. These steps are completely different from saving notebooks/files.
|
||||
- Have directories that are managed by git completely separately from our stuff.
|
||||
- Look at pushed content that they have access to without an explicit pull.
|
||||
- Define and manage teams of users.
|
||||
- Adding/removing a user to/from a team gives/removes them access to all projects that team has access to.
|
||||
- Build other services, such as static HTML publishing and dashboarding on top of these things.
|
||||
|
||||
- HubShare - a sharing service for use with JupyterHub.
|
||||
- Users should be able to:
|
||||
- Push a project to other users.
|
||||
- Get a checkout of a project from other users.
|
||||
- Push updates to a published project.
|
||||
- Pull updates from a published project.
|
||||
- Manage conflicts/merges by simply picking a version (our/theirs)
|
||||
- Get a checkout of a project from the internet. These steps are completely different from saving notebooks/files.
|
||||
- Have directories that are managed by git completely separately from our stuff.
|
||||
- Look at pushed content that they have access to without an explicit pull.
|
||||
- Define and manage teams of users.
|
||||
- Adding/removing a user to/from a team gives/removes them access to all projects that team has access to.
|
||||
- Build other services, such as static HTML publishing and dashboarding on top of these things.
|
||||
|
||||
### Soon
|
||||
|
||||
@@ -79,11 +78,10 @@ These "Soon" items are under discussion. Once an item reaches the point of an
|
||||
actionable plan, the item will be moved to the "Now" section. Typically,
|
||||
these will be moved at a future review of the roadmap.
|
||||
|
||||
* resource monitoring and management:
|
||||
- (prometheus?) API for resource monitoring
|
||||
- tracking activity on single-user servers instead of the proxy
|
||||
- notes and activity tracking per API token
|
||||
|
||||
- resource monitoring and management:
|
||||
- (prometheus?) API for resource monitoring
|
||||
- tracking activity on single-user servers instead of the proxy
|
||||
- notes and activity tracking per API token
|
||||
|
||||
### Later
|
||||
|
||||
@@ -92,6 +90,6 @@ time there is no active plan for an item. The project would like to find the
|
||||
resources and time to discuss these ideas.
|
||||
|
||||
- real-time collaboration
|
||||
- Enter into real-time collaboration mode for a project that starts a shared execution context.
|
||||
- Once the single-user notebook package supports realtime collaboration,
|
||||
implement sharing mechanism integrated into the Hub.
|
||||
- Enter into real-time collaboration mode for a project that starts a shared execution context.
|
||||
- Once the single-user notebook package supports realtime collaboration,
|
||||
implement sharing mechanism integrated into the Hub.
|
||||
|
@@ -1,10 +1,7 @@
|
||||
Eventlogging and Telemetry
|
||||
==========================
|
||||
|
||||
JupyterHub can be configured to record structured events from a running server using Jupyter's `Telemetry System`_. The types of events that JupyterHub emits are defined by `JSON schemas`_ listed below_
|
||||
|
||||
emitted as JSON data, defined and validated by the JSON schemas listed below.
|
||||
|
||||
JupyterHub can be configured to record structured events from a running server using Jupyter's `Telemetry System`_. The types of events that JupyterHub emits are defined by `JSON schemas`_ listed at the bottom of this page_.
|
||||
|
||||
.. _logging: https://docs.python.org/3/library/logging.html
|
||||
.. _`Telemetry System`: https://github.com/jupyter/telemetry
|
||||
@@ -38,13 +35,12 @@ Here's a basic example:
|
||||
The output is a file, ``"event.log"``, with events recorded as JSON data.
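As a rough sketch (the handler and the schema name below are assumptions meant only to illustrate the shape of such a configuration; enable the schemas you actually need), a configuration that produces this file could look like::

    import logging

    # write events as JSON lines to event.log (illustrative sketch)
    c.EventLog.handlers = [
        logging.FileHandler('event.log'),
    ]

    # only events matching allowed schemas are recorded (schema name assumed)
    c.EventLog.allowed_schemas = [
        'hub.jupyter.org/server-action',
    ]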
|
||||
|
||||
|
||||
|
||||
.. _below:
|
||||
.. _page:
|
||||
|
||||
Event schemas
|
||||
-------------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:maxdepth: 2
|
||||
|
||||
server-actions.rst
|
||||
server-actions.rst
|
||||
|
@@ -8,27 +8,29 @@ high performance computing.
|
||||
|
||||
Please submit pull requests to update information or to add new institutions or uses.
|
||||
|
||||
|
||||
## Academic Institutions, Research Labs, and Supercomputer Centers
|
||||
|
||||
### University of California Berkeley
|
||||
|
||||
- [BIDS - Berkeley Institute for Data Science](https://bids.berkeley.edu/)
|
||||
- [Teaching with Jupyter notebooks and JupyterHub](https://bids.berkeley.edu/resources/videos/teaching-ipythonjupyter-notebooks-and-jupyterhub)
|
||||
|
||||
- [Teaching with Jupyter notebooks and JupyterHub](https://bids.berkeley.edu/resources/videos/teaching-ipythonjupyter-notebooks-and-jupyterhub)
|
||||
|
||||
- [Data 8](http://data8.org/)
|
||||
- [GitHub organization](https://github.com/data-8)
|
||||
|
||||
- [GitHub organization](https://github.com/data-8)
|
||||
|
||||
- [NERSC](http://www.nersc.gov/)
|
||||
- [Press release on Jupyter and Cori](http://www.nersc.gov/news-publications/nersc-news/nersc-center-news/2016/jupyter-notebooks-will-open-up-new-possibilities-on-nerscs-cori-supercomputer/)
|
||||
- [Moving and sharing data](https://www.nersc.gov/assets/Uploads/03-MovingAndSharingData-Cholia.pdf)
|
||||
|
||||
- [Press release on Jupyter and Cori](http://www.nersc.gov/news-publications/nersc-news/nersc-center-news/2016/jupyter-notebooks-will-open-up-new-possibilities-on-nerscs-cori-supercomputer/)
|
||||
- [Moving and sharing data](https://www.nersc.gov/assets/Uploads/03-MovingAndSharingData-Cholia.pdf)
|
||||
|
||||
- [Research IT](http://research-it.berkeley.edu)
|
||||
- [JupyterHub server supports campus research computation](http://research-it.berkeley.edu/blog/17/01/24/free-fully-loaded-jupyterhub-server-supports-campus-research-computation)
|
||||
- [JupyterHub server supports campus research computation](http://research-it.berkeley.edu/blog/17/01/24/free-fully-loaded-jupyterhub-server-supports-campus-research-computation)
|
||||
|
||||
### University of California Davis
|
||||
|
||||
- [Spinning up multiple Jupyter Notebooks on AWS for a tutorial](https://github.com/mblmicdiv/course2017/blob/master/exercises/sourmash-setup.md)
|
||||
- [Spinning up multiple Jupyter Notebooks on AWS for a tutorial](https://github.com/mblmicdiv/course2017/blob/HEAD/exercises/sourmash-setup.md)
|
||||
|
||||
Although not technically a JupyterHub deployment, this tutorial setup
|
||||
may be helpful to others in the Jupyter community.
|
||||
@@ -62,20 +64,21 @@ easy to do with RStudio too.
|
||||
### Clemson University
|
||||
|
||||
- Advanced Computing
|
||||
- [Palmetto cluster and JupyterHub](http://citi.sites.clemson.edu/2016/08/18/JupyterHub-for-Palmetto-Cluster.html)
|
||||
- [Palmetto cluster and JupyterHub](http://citi.sites.clemson.edu/2016/08/18/JupyterHub-for-Palmetto-Cluster.html)
|
||||
|
||||
### University of Colorado Boulder
|
||||
|
||||
- (CU Research Computing) CURC
|
||||
- [JupyterHub User Guide](https://www.rc.colorado.edu/support/user-guide/jupyterhub.html)
|
||||
- Slurm job dispatched on Crestone compute cluster
|
||||
- log troubleshooting
|
||||
- Profiles in IPython Clusters tab
|
||||
- [Parallel Processing with JupyterHub tutorial](https://www.rc.colorado.edu/support/examples-and-tutorials/parallel-processing-with-jupyterhub.html)
|
||||
- [Parallel Programming with JupyterHub document](https://www.rc.colorado.edu/book/export/html/833)
|
||||
|
||||
- [JupyterHub User Guide](https://www.rc.colorado.edu/support/user-guide/jupyterhub.html)
|
||||
- Slurm job dispatched on Crestone compute cluster
|
||||
- log troubleshooting
|
||||
- Profiles in IPython Clusters tab
|
||||
- [Parallel Processing with JupyterHub tutorial](https://www.rc.colorado.edu/support/examples-and-tutorials/parallel-processing-with-jupyterhub.html)
|
||||
- [Parallel Programming with JupyterHub document](https://www.rc.colorado.edu/book/export/html/833)
|
||||
|
||||
- Earth Lab at CU
|
||||
- [Tutorial on Parallel R on JupyterHub](https://earthdatascience.org/tutorials/parallel-r-on-jupyterhub/)
|
||||
- [Tutorial on Parallel R on JupyterHub](https://earthdatascience.org/tutorials/parallel-r-on-jupyterhub/)
|
||||
|
||||
### George Washington University
|
||||
|
||||
@@ -112,7 +115,7 @@ easy to do with RStudio too.
|
||||
### Paderborn University
|
||||
|
||||
- [Data Science (DICE) group](https://dice.cs.uni-paderborn.de/)
|
||||
- [nbgraderutils](https://github.com/dice-group/nbgraderutils): Use JupyterHub + nbgrader + iJava kernel for online Java exercises. Used in lecture Statistical Natural Language Processing.
|
||||
- [nbgraderutils](https://github.com/dice-group/nbgraderutils): Use JupyterHub + nbgrader + iJava kernel for online Java exercises. Used in lecture Statistical Natural Language Processing.
|
||||
|
||||
### Penn State University
|
||||
|
||||
@@ -125,27 +128,28 @@ easy to do with RStudio too.
|
||||
### University of California San Diego
|
||||
|
||||
- San Diego Supercomputer Center - Andrea Zonca
|
||||
- [Deploy JupyterHub on a Supercomputer with SSH](https://zonca.github.io/2017/05/jupyterhub-hpc-batchspawner-ssh.html)
|
||||
- [Run Jupyterhub on a Supercomputer](https://zonca.github.io/2015/04/jupyterhub-hpc.html)
|
||||
- [Deploy JupyterHub on a VM for a Workshop](https://zonca.github.io/2016/04/jupyterhub-sdsc-cloud.html)
|
||||
- [Customize your Python environment in Jupyterhub](https://zonca.github.io/2017/02/customize-python-environment-jupyterhub.html)
|
||||
- [Jupyterhub deployment on multiple nodes with Docker Swarm](https://zonca.github.io/2016/05/jupyterhub-docker-swarm.html)
|
||||
- [Sample deployment of Jupyterhub in HPC on SDSC Comet](https://zonca.github.io/2017/02/sample-deployment-jupyterhub-hpc.html)
|
||||
|
||||
- [Deploy JupyterHub on a Supercomputer with SSH](https://zonca.github.io/2017/05/jupyterhub-hpc-batchspawner-ssh.html)
|
||||
- [Run Jupyterhub on a Supercomputer](https://zonca.github.io/2015/04/jupyterhub-hpc.html)
|
||||
- [Deploy JupyterHub on a VM for a Workshop](https://zonca.github.io/2016/04/jupyterhub-sdsc-cloud.html)
|
||||
- [Customize your Python environment in Jupyterhub](https://zonca.github.io/2017/02/customize-python-environment-jupyterhub.html)
|
||||
- [Jupyterhub deployment on multiple nodes with Docker Swarm](https://zonca.github.io/2016/05/jupyterhub-docker-swarm.html)
|
||||
- [Sample deployment of Jupyterhub in HPC on SDSC Comet](https://zonca.github.io/2017/02/sample-deployment-jupyterhub-hpc.html)
|
||||
|
||||
- Educational Technology Services - Paul Jamason
|
||||
- [jupyterhub.ucsd.edu](https://jupyterhub.ucsd.edu)
|
||||
- [jupyterhub.ucsd.edu](https://jupyterhub.ucsd.edu)
|
||||
|
||||
### TACC University of Texas
|
||||
|
||||
### Texas A&M
|
||||
|
||||
- Kristen Thyng - Oceanography
|
||||
- [Teaching with JupyterHub and nbgrader](http://kristenthyng.com/blog/2016/09/07/jupyterhub+nbgrader/)
|
||||
- [Teaching with JupyterHub and nbgrader](http://kristenthyng.com/blog/2016/09/07/jupyterhub+nbgrader/)
|
||||
|
||||
### Elucidata
|
||||
- What's new in Jupyter Notebooks @[Elucidata](https://elucidata.io/):
|
||||
- Using Jupyter Notebooks with Jupyterhub on GCP, managed by GKE
|
||||
- https://medium.com/elucidata/why-you-should-be-using-a-jupyter-notebook-8385a4ccd93d
|
||||
|
||||
- What's new in Jupyter Notebooks @[Elucidata](https://elucidata.io/):
|
||||
- Using Jupyter Notebooks with Jupyterhub on GCP, managed by GKE - https://medium.com/elucidata/why-you-should-be-using-a-jupyter-notebook-8385a4ccd93d
|
||||
|
||||
## Service Providers
|
||||
|
||||
@@ -175,7 +179,6 @@ easy to do with RStudio too.
|
||||
|
||||
- [Deploying JupyterHub on Hadoop](https://jupyterhub-on-hadoop.readthedocs.io)
|
||||
|
||||
|
||||
## Miscellaneous
|
||||
|
||||
- https://medium.com/@ybarraud/setting-up-jupyterhub-with-sudospawner-and-anaconda-844628c0dbee#.rm3yt87e1
|
||||
|
@@ -9,7 +9,6 @@ with an account and password on the system will be allowed to login.
|
||||
You can restrict which users are allowed to log in with a set,
|
||||
`Authenticator.allowed_users`:
|
||||
|
||||
|
||||
```python
|
||||
c.Authenticator.allowed_users = {'mal', 'zoe', 'inara', 'kaylee'}
|
||||
```
|
||||
@@ -23,18 +22,19 @@ Admin users of JupyterHub, `admin_users`, can add and remove users from
|
||||
the user `allowed_users` set. `admin_users` can take actions on other users'
|
||||
behalf, such as stopping and restarting their servers.
|
||||
|
||||
A set of initial admin users, `admin_users` can configured be as follows:
|
||||
A set of initial admin users, `admin_users` can be configured as follows:
|
||||
|
||||
```python
|
||||
c.Authenticator.admin_users = {'mal', 'zoe'}
|
||||
```
|
||||
|
||||
Users in the admin set are automatically added to the user `allowed_users` set,
|
||||
if they are not already present.
|
||||
|
||||
Each authenticator may have different ways of determining whether a user is an
|
||||
administrator. By default JupyterHub use the PAMAuthenticator which provide the
|
||||
`admin_groups` option and can determine administrator status base on a user
|
||||
groups. For example we can let any users in the `wheel` group be admin:
|
||||
administrator. By default JupyterHub uses the PAMAuthenticator which provides the
|
||||
`admin_groups` option and can set administrator status based on a user
|
||||
group. For example we can let any user in the `wheel` group be admin:
|
||||
|
||||
```python
|
||||
c.PAMAuthenticator.admin_groups = {'wheel'}
|
||||
@@ -42,10 +42,10 @@ c.PAMAuthenticator.admin_groups = {'wheel'}
|
||||
|
||||
## Give admin access to other users' notebook servers (`admin_access`)
|
||||
|
||||
Since the default `JupyterHub.admin_access` setting is False, the admins
|
||||
Since the default `JupyterHub.admin_access` setting is `False`, the admins
|
||||
do not have permission to log in to the single user notebook servers
|
||||
owned by *other users*. If `JupyterHub.admin_access` is set to True,
|
||||
then admins have permission to log in *as other users* on their
|
||||
owned by _other users_. If `JupyterHub.admin_access` is set to `True`,
|
||||
then admins have permission to log in _as other users_ on their
|
||||
respective machines, for debugging. **As a courtesy, you should make
|
||||
sure your users know if admin_access is enabled.**
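If you do choose to enable it, a minimal sketch of that setting in `jupyterhub_config.py` looks like:

```python
# allow admins to log in to other users' single-user servers (use with care)
c.JupyterHub.admin_access = True
```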
|
||||
|
||||
@@ -53,8 +53,8 @@ sure your users know if admin_access is enabled.**
|
||||
|
||||
Users can be added to and removed from the Hub via either the admin
|
||||
panel or the REST API. When a user is **added**, the user will be
|
||||
automatically added to the allowed users set and database. Restarting the Hub
|
||||
will not require manually updating the allowed users set in your config file,
|
||||
automatically added to the `allowed_users` set and database. Restarting the Hub
|
||||
will not require manually updating the `allowed_users` set in your config file,
|
||||
as the users will be loaded from the database.
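As a sketch of the REST API route (the Hub URL, username, and token source below are illustrative assumptions), adding a user programmatically might look like:

```python
import os

import requests

# POST /users/{name} creates a single user; requires an admin API token
r = requests.post(
    "http://127.0.0.1:8081/hub/api/users/mal",
    headers={"Authorization": f"token {os.environ['JUPYTERHUB_API_TOKEN']}"},
)
r.raise_for_status()  # expect 201 Created on success
```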
|
||||
|
||||
After starting the Hub once, it is not sufficient to **remove** a user
|
||||
@@ -91,6 +91,7 @@ JupyterHub's [OAuthenticator][] currently supports the following
|
||||
popular services:
|
||||
|
||||
- Auth0
|
||||
- Azure AD
|
||||
- Bitbucket
|
||||
- CILogon
|
||||
- GitHub
|
||||
@@ -106,8 +107,8 @@ with any provider, is also available.
|
||||
|
||||
## Use DummyAuthenticator for testing
|
||||
|
||||
The :class:`~jupyterhub.auth.DummyAuthenticator` is a simple authenticator that
|
||||
allows for any username/password unless if a global password has been set. If
|
||||
The `DummyAuthenticator` is a simple authenticator that
|
||||
allows for any username/password unless a global password has been set. If
|
||||
set, it will allow for any username as long as the correct password is provided.
|
||||
To set a global password, add this to the config file:
|
||||
|
||||
@@ -115,5 +116,5 @@ To set a global password, add this to the config file:
|
||||
c.DummyAuthenticator.password = "some_password"
|
||||
```
|
||||
|
||||
[PAM]: https://en.wikipedia.org/wiki/Pluggable_authentication_module
|
||||
[OAuthenticator]: https://github.com/jupyterhub/oauthenticator
|
||||
[pam]: https://en.wikipedia.org/wiki/Pluggable_authentication_module
|
||||
[oauthenticator]: https://github.com/jupyterhub/oauthenticator
|
||||
|
@@ -44,7 +44,7 @@ jupyterhub -f /etc/jupyterhub/jupyterhub_config.py
|
||||
```
|
||||
|
||||
The IPython documentation provides additional information on the
|
||||
[config system](http://ipython.readthedocs.io/en/stable/development/config)
|
||||
[config system](http://ipython.readthedocs.io/en/stable/development/config.html)
|
||||
that Jupyter uses.
|
||||
|
||||
## Configure using command line options
|
||||
@@ -56,18 +56,18 @@ To display all command line options that are available for configuration:
|
||||
```
|
||||
|
||||
Configuration using the command line options is done when launching JupyterHub.
|
||||
For example, to start JupyterHub on ``10.0.1.2:443`` with https, you
|
||||
For example, to start JupyterHub on `10.0.1.2:443` with https, you
|
||||
would enter:
|
||||
|
||||
```bash
|
||||
jupyterhub --ip 10.0.1.2 --port 443 --ssl-key my_ssl.key --ssl-cert my_ssl.cert
|
||||
```
|
||||
|
||||
All configurable options may technically be set on the command-line,
|
||||
All configurable options may technically be set on the command line,
|
||||
though some are inconvenient to type. To set a particular configuration
|
||||
parameter, `c.Class.trait`, you would use the command line option,
|
||||
`--Class.trait`, when starting JupyterHub. For example, to configure the
|
||||
`c.Spawner.notebook_dir` trait from the command-line, use the
|
||||
`c.Spawner.notebook_dir` trait from the command line, use the
|
||||
`--Spawner.notebook_dir` option:
|
||||
|
||||
```bash
|
||||
@@ -88,13 +88,13 @@ meant as illustration, are:
|
||||
|
||||
## Run the proxy separately
|
||||
|
||||
This is *not* strictly necessary, but useful in many cases. If you
|
||||
use a custom proxy (e.g. Traefik), this also not needed.
|
||||
This is _not_ strictly necessary, but useful in many cases. If you
|
||||
use a custom proxy (e.g. Traefik), this is also not needed.
|
||||
|
||||
Connections to user servers go through the proxy, and *not* the hub
|
||||
itself. If the proxy stays running when the hub restarts (for
|
||||
maintenance, re-configuration, etc.), then use connections are not
|
||||
interrupted. For simplicity, by default the hub starts the proxy
|
||||
Connections to user servers go through the proxy, and _not_ the hub
|
||||
itself. If the proxy stays running when the hub restarts (for
|
||||
maintenance, re-configuration, etc.), then user connections are not
|
||||
interrupted. For simplicity, by default the hub starts the proxy
|
||||
automatically, so if the hub restarts, the proxy restarts, and user
|
||||
connections are interrupted. It is easy to run the proxy separately,
|
||||
connections are interrupted. It is easy to run the proxy separately,
|
||||
for information see [the separate proxy page](../reference/separate-proxy).
|
||||
|
@@ -1,6 +1,5 @@
|
||||
# Frequently asked questions
|
||||
|
||||
|
||||
### How do I share links to notebooks?
|
||||
|
||||
In short, where you see `/user/name/notebooks/foo.ipynb` use `/hub/user-redirect/notebooks/foo.ipynb` (replace `/user/name` with `/hub/user-redirect`).
|
||||
@@ -11,9 +10,9 @@ Your first instinct might be to copy the URL you see in the browser,
|
||||
e.g. `hub.jupyter.org/user/yourname/notebooks/coolthing.ipynb`.
|
||||
However, let's break down what this URL means:
|
||||
|
||||
`hub.jupyter.org/user/yourname/` is the URL prefix handled by *your server*,
|
||||
`hub.jupyter.org/user/yourname/` is the URL prefix handled by _your server_,
|
||||
which means that sharing this URL is asking the person you share the link with
|
||||
to come to *your server* and look at the exact same file.
|
||||
to come to _your server_ and look at the exact same file.
|
||||
In most circumstances, this is forbidden by permissions because the person you share with does not have access to your server.
|
||||
What actually happens when someone visits this URL will depend on whether your server is running and other factors.
|
||||
|
||||
@@ -22,12 +21,12 @@ A typical situation is that you have some shared or common filesystem,
|
||||
such that the same path corresponds to the same document
|
||||
(either the exact same document or another copy of it).
|
||||
Typically, what folks want when they do sharing like this
|
||||
is for each visitor to open the same file *on their own server*,
|
||||
is for each visitor to open the same file _on their own server_,
|
||||
so Breq would open `/user/breq/notebooks/foo.ipynb` and
|
||||
Seivarden would open `/user/seivarden/notebooks/foo.ipynb`, etc.
|
||||
|
||||
JupyterHub has a special URL that does exactly this!
|
||||
It's called `/hub/user-redirect/...` and after the visitor logs in,
|
||||
It's called `/hub/user-redirect/...`.
|
||||
So if you replace `/user/yourname` in your URL bar
|
||||
with `/hub/user-redirect` any visitor should get the same
|
||||
URL on their own server, rather than visiting yours.
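For example, using the URL from above:

```
hub.jupyter.org/user/yourname/notebooks/coolthing.ipynb       <- only works for you
hub.jupyter.org/hub/user-redirect/notebooks/coolthing.ipynb   <- works for any visitor
```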
|
||||
|
@@ -11,30 +11,30 @@ Yes! JupyterHub has been used at-scale for large pools of users, as well
|
||||
as complex and high-performance computing. For example, UC Berkeley uses
|
||||
JupyterHub for its Data Science Education Program courses (serving over
|
||||
3,000 students). The Pangeo project uses JupyterHub to provide access
|
||||
to scalable cloud computing with Dask. JupyterHub is stable customizable
|
||||
to scalable cloud computing with Dask. JupyterHub is stable and customizable
|
||||
to the use-cases of large organizations.
|
||||
|
||||
### I keep hearing about Jupyter Notebook, JupyterLab, and now JupyterHub. What’s the difference?
|
||||
|
||||
Here is a quick breakdown of these three tools:
|
||||
|
||||
* **The Jupyter Notebook** is a document specification (the `.ipynb`) file that interweaves
|
||||
- **The Jupyter Notebook** is a document specification (the `.ipynb` file format) that interweaves
|
||||
narrative text with code cells and their outputs. It is also a graphical interface
|
||||
that allows users to edit these documents. There are also several other graphical interfaces
|
||||
that allow users to edit the `.ipynb` format (nteract, Jupyter Lab, Google Colab, Kaggle, etc).
|
||||
* **JupyterLab** is a flexible and extendible user interface for interactive computing. It
|
||||
- **JupyterLab** is a flexible and extendible user interface for interactive computing. It
|
||||
has several extensions that are tailored for using Jupyter Notebooks, as well as extensions
|
||||
for other parts of the data science stack.
|
||||
* **JupyterHub** is an application that manages interactive computing sessions for **multiple users**.
|
||||
- **JupyterHub** is an application that manages interactive computing sessions for **multiple users**.
|
||||
It also connects them with infrastructure those users wish to access. It can provide
|
||||
remote access to Jupyter Notebooks and Jupyter Lab for many people.
|
||||
remote access to Jupyter Notebooks and JupyterLab for many people.
|
||||
|
||||
## For management
|
||||
|
||||
### Briefly, what problem does JupyterHub solve for us?
|
||||
|
||||
JupyterHub provides a shared platform for data science and collaboration.
|
||||
It allows users to utilize familiar data science workflows (such as the scientific python stack,
|
||||
It allows users to utilize familiar data science workflows (such as the scientific Python stack,
|
||||
the R tidyverse, and Jupyter Notebooks) on institutional infrastructure. It also allows administrators
|
||||
some control over access to resources, security, environments, and authentication.
|
||||
|
||||
@@ -50,20 +50,20 @@ scalable infrastructure, large datasets, and high-performance computing.
|
||||
JupyterHub is used at a variety of institutions in academia,
|
||||
industry, and government research labs. It is most-commonly used by two kinds of groups:
|
||||
|
||||
* Small teams (e.g., data science teams, research labs, or collaborative projects) to provide a
|
||||
- Small teams (e.g., data science teams, research labs, or collaborative projects) to provide a
|
||||
shared resource for interactive computing, collaboration, and analytics.
|
||||
* Large teams (e.g., a department, a large class, or a large group of remote users) to provide
|
||||
- Large teams (e.g., a department, a large class, or a large group of remote users) to provide
|
||||
access to organizational hardware, data, and analytics environments at scale.
|
||||
|
||||
Here are a sample of organizations that use JupyterHub:
|
||||
Here is a sample of organizations that use JupyterHub:
|
||||
|
||||
* **Universities and colleges**: UC Berkeley, UC San Diego, Cal Poly SLO, Harvard University, University of Chicago,
|
||||
- **Universities and colleges**: UC Berkeley, UC San Diego, Cal Poly SLO, Harvard University, University of Chicago,
|
||||
University of Oslo, University of Sheffield, Université Paris Sud, University of Versailles
|
||||
* **Research laboratories**: NASA, NCAR, NOAA, the Large Synoptic Survey Telescope, Brookhaven National Lab,
|
||||
- **Research laboratories**: NASA, NCAR, NOAA, the Large Synoptic Survey Telescope, Brookhaven National Lab,
|
||||
Minnesota Supercomputing Institute, ALCF, CERN, Lawrence Livermore National Laboratory
|
||||
* **Online communities**: Pangeo, Quantopian, mybinder.org, MathHub, Open Humans
|
||||
* **Computing infrastructure providers**: NERSC, San Diego Supercomputing Center, Compute Canada
|
||||
* **Companies**: Capital One, SANDVIK code, Globus
|
||||
- **Online communities**: Pangeo, Quantopian, mybinder.org, MathHub, Open Humans
|
||||
- **Computing infrastructure providers**: NERSC, San Diego Supercomputing Center, Compute Canada
|
||||
- **Companies**: Capital One, SANDVIK code, Globus
|
||||
|
||||
See the [Gallery of JupyterHub deployments](../gallery-jhub-deployments.md) for
|
||||
a more complete list of JupyterHub deployments at institutions.
|
||||
@@ -95,14 +95,13 @@ The most common way to set up a JupyterHub is to use a JupyterHub distribution,
|
||||
and opinionated ways to set up a JupyterHub on particular kinds of infrastructure. The two distributions
|
||||
that we currently suggest are:
|
||||
|
||||
* [Zero to JupyterHub for Kubernetes](https://z2jh.jupyter.org) is a scalable JupyterHub deployment and
|
||||
- [Zero to JupyterHub for Kubernetes](https://z2jh.jupyter.org) is a scalable JupyterHub deployment and
|
||||
guide that runs on Kubernetes. Better for larger or dynamic user groups (50-10,000) or more complex
|
||||
compute/data needs.
|
||||
* [The Littlest JupyterHub](https://tljh.jupyter.org) is a lightweight JupyterHub that runs on a single
|
||||
single machine (in the cloud or under your desk). Better for smaller usergroups (4-80) or more
|
||||
- [The Littlest JupyterHub](https://tljh.jupyter.org) is a lightweight JupyterHub that runs on a single
|
||||
machine (in the cloud or under your desk). Better for smaller user groups (4-80) or more
|
||||
lightweight computational resources.
|
||||
|
||||
|
||||
### Does JupyterHub run well in the cloud?
|
||||
|
||||
Yes - most deployments of JupyterHub are run via cloud infrastructure and on a variety of cloud providers.
|
||||
@@ -123,9 +122,9 @@ The short answer: yes. JupyterHub as a standalone application has been battle-te
|
||||
level for several years, and makes a number of "default" security decisions that are reasonable for most
|
||||
users.
|
||||
|
||||
* For security considerations in the base JupyterHub application,
|
||||
[see the JupyterHub security page](https://jupyterhub.readthedocs.io/en/stable/reference/websecurity.html)
|
||||
* For security considerations when deploying JupyterHub on Kubernetes, see the
|
||||
- For security considerations in the base JupyterHub application,
|
||||
[see the JupyterHub security page](https://jupyterhub.readthedocs.io/en/stable/reference/websecurity.html).
|
||||
- For security considerations when deploying JupyterHub on Kubernetes, see the
|
||||
[JupyterHub on Kubernetes security page](https://zero-to-jupyterhub.readthedocs.io/en/latest/security.html).
|
||||
|
||||
The longer answer: it depends on your deployment. Because JupyterHub is very flexible, it can be used
|
||||
@@ -137,15 +136,13 @@ If you are worried about security, don't hesitate to reach out to the JupyterHub
|
||||
[Jupyter Community Forum](https://discourse.jupyter.org/c/jupyterhub). This community of practice has many
|
||||
individuals with experience running secure JupyterHub deployments.
|
||||
|
||||
|
||||
### Does JupyterHub provide computing or data infrastructure?
|
||||
|
||||
No - JupyterHub manages user sessions and can *control* computing infrastructure, but it does not provide these
|
||||
No - JupyterHub manages user sessions and can _control_ computing infrastructure, but it does not provide these
|
||||
things itself. You are expected to run JupyterHub on your own infrastructure (local or in the cloud). Moreover,
|
||||
JupyterHub has no internal concept of "data", but is designed to be able to communicate with data repositories
|
||||
(again, either locally or remotely) for use within interactive computing sessions.
|
||||
|
||||
|
||||
### How do I manage users?
|
||||
|
||||
JupyterHub offers a few options for managing your users. Upon setting up a JupyterHub, you can choose what
|
||||
@@ -154,7 +151,7 @@ email address, or choose a username / password when they first log-in, or offloa
|
||||
another service such as an organization's OAuth.
|
||||
|
||||
The users of a JupyterHub are stored locally, and can be modified manually by an administrator of the JupyterHub.
|
||||
Moreover, the *active* users on a JupyterHub can be found on the administrator's page. This page
|
||||
Moreover, the _active_ users on a JupyterHub can be found on the administrator's page. This page
|
||||
gives you the ability to stop or restart kernels, inspect user filesystems, and even take over user
|
||||
sessions to assist them with debugging.
|
||||
|
||||
@@ -182,12 +179,11 @@ connect with other infrastructure tools (like Dask or Spark). This allows users
|
||||
scalable or high-performance resources from within their JupyterHub sessions. The logic of
|
||||
how those resources are controlled is taken care of by the non-JupyterHub application.
|
||||
|
||||
|
||||
### Can JupyterHub be used with my high-performance computing resources?
|
||||
|
||||
Yes - JupyterHub can provide access to many kinds of computing infrastructure.
|
||||
Especially when combined with other open-source schedulers such as Dask, you can manage fairly
|
||||
complex computing infrastructure from the interactive sessions of a JupyterHub. For example
|
||||
complex computing infrastructures from the interactive sessions of a JupyterHub. For example
|
||||
[see the Dask HPC page](https://docs.dask.org/en/latest/setup/hpc.html).
|
||||
|
||||
### How many resources do user sessions take?
|
||||
@@ -196,7 +192,7 @@ This is highly configurable by the administrator. If you wish for your users to
|
||||
data analytics environments for prototyping and light data exploring, you can restrict their
|
||||
memory and CPU based on the resources that you have available. If you'd like your JupyterHub
|
||||
to serve as a gateway to high-performance compute or data resources, you may increase the
|
||||
resources available on user machines, or connect them with computing infrastructure elsewhere.
|
||||
resources available on user machines, or connect them with computing infrastructures elsewhere.
|
||||
|
||||
### Can I customize the look and feel of a JupyterHub?
|
||||
|
||||
@@ -218,16 +214,14 @@ the technologies your JupyterHub will use (e.g., dev-ops knowledge with cloud co
|
||||
In general, the base JupyterHub deployment is not the bottleneck for setup, it is connecting
|
||||
your JupyterHub with the various services and tools that you wish to provide to your users.
|
||||
|
||||
|
||||
### How well does JupyterHub scale? What are JupyterHub's limitations?
|
||||
|
||||
JupyterHub works well at both a small scale (e.g., a single VM or machine) as well as a
|
||||
high scale (e.g., a scalable Kubernetes cluster). It can be used for teams as small a 2, and
|
||||
high scale (e.g., a scalable Kubernetes cluster). It can be used for teams as small as 2, and
|
||||
for user bases as large as 10,000. The scalability of JupyterHub largely depends on the
|
||||
infrastructure on which it is deployed. JupyterHub has been designed to be lightweight and
|
||||
flexible, so you can tailor your JupyterHub deployment to your needs.
|
||||
|
||||
|
||||
### Is JupyterHub resilient? What happens when a machine goes down?
|
||||
|
||||
For JupyterHubs that are deployed in a containerized environment (e.g., Kubernetes), it is
|
||||
@@ -255,7 +249,7 @@ share their results with one another.
|
||||
|
||||
JupyterHub also provides a computational framework to share computational narratives between
|
||||
different levels of an organization. For example, data scientists can share Jupyter Notebooks
|
||||
rendered as [voila dashboards](https://voila.readthedocs.io/en/stable/) with those who are not
|
||||
rendered as [Voilà dashboards](https://voila.readthedocs.io/en/stable/) with those who are not
|
||||
familiar with programming, or create publicly-available interactive analyses to allow others to
|
||||
interact with your work.
|
||||
|
||||
|
@@ -11,7 +11,7 @@ This section will help you with basic proxy and network configuration to:
|
||||
|
||||
The Proxy's main IP address setting determines where JupyterHub is available to users.
|
||||
By default, JupyterHub is configured to be available on all network interfaces
|
||||
(`''`) on port 8000. *Note*: Use of `'*'` is discouraged for IP configuration;
|
||||
(`''`) on port 8000. _Note_: Use of `'*'` is discouraged for IP configuration;
|
||||
instead, use of `'0.0.0.0'` is preferred.
|
||||
|
||||
Changing the Proxy's main IP address and port can be done with the following
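As a sketch of that configuration (the address and port below are illustrative and mirror the command-line example used elsewhere in these docs):

```python
# bind JupyterHub's public-facing proxy to a specific address and port
c.JupyterHub.ip = '10.0.1.2'
c.JupyterHub.port = 443
```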
|
||||
@@ -43,7 +43,7 @@ port.
|
||||
|
||||
By default, this REST API listens on port 8001 of `localhost` only.
|
||||
The Hub service talks to the proxy via a REST API on a secondary port. The
|
||||
API URL can be configured separately and override the default settings.
|
||||
API URL can be configured separately to override the default settings.
|
||||
|
||||
### Set api_url
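As a sketch, assuming the default `configurable-http-proxy` is in use (the URL shown is its default API address; adjust to your deployment):

```python
c.ConfigurableHTTPProxy.api_url = 'http://127.0.0.1:8001'
```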
|
||||
|
||||
@@ -74,7 +74,7 @@ The Hub service listens only on `localhost` (port 8081) by default.
|
||||
The Hub needs to be accessible from both the proxy and all Spawners.
|
||||
When spawning local servers, an IP address setting of `localhost` is fine.
|
||||
|
||||
If *either* the Proxy *or* (more likely) the Spawners will be remote or
|
||||
If _either_ the Proxy _or_ (more likely) the Spawners will be remote or
|
||||
isolated in containers, the Hub must listen on an IP that is accessible.
|
||||
|
||||
```python
|
||||
@@ -82,20 +82,20 @@ c.JupyterHub.hub_ip = '10.0.1.4'
|
||||
c.JupyterHub.hub_port = 54321
|
||||
```
|
||||
|
||||
**Added in 0.8:** The `c.JupyterHub.hub_connect_ip` setting is the ip address or
|
||||
**Added in 0.8:** The `c.JupyterHub.hub_connect_ip` setting is the IP address or
|
||||
hostname that other services should use to connect to the Hub. A common
|
||||
configuration for, e.g. docker, is:
|
||||
|
||||
```python
|
||||
c.JupyterHub.hub_ip = '0.0.0.0' # listen on all interfaces
|
||||
c.JupyterHub.hub_connect_ip = '10.0.1.4' # ip as seen on the docker network. Can also be a hostname.
|
||||
c.JupyterHub.hub_connect_ip = '10.0.1.4' # IP as seen on the docker network. Can also be a hostname.
|
||||
```
|
||||
|
||||
## Adjusting the hub's URL
|
||||
|
||||
The hub will most commonly be running on a hostname of its own. If it
|
||||
The hub will most commonly be running on a hostname of its own. If it
|
||||
is not – for example, if the hub is being reverse-proxied and being
|
||||
exposed at a URL such as `https://proxy.example.org/jupyter/` – then
|
||||
you will need to tell JupyterHub the base URL of the service. In such
|
||||
you will need to tell JupyterHub the base URL of the service. In such
|
||||
a case, it is both necessary and sufficient to set
|
||||
`c.JupyterHub.base_url = '/jupyter/'` in the configuration.
|
||||
|
@@ -2,10 +2,10 @@
|
||||
|
||||
When working with JupyterHub, a **Service** is defined as a process
|
||||
that interacts with the Hub's REST API. A Service may perform a specific
|
||||
or action or task. For example, shutting down individuals' single user
|
||||
action or task. For example, shutting down individuals' single user
|
||||
notebook servers that have been idle for some time is a good example of
|
||||
a task that could be automated by a Service. Let's look at how the
|
||||
[cull_idle_servers][] script can be used as a Service.
|
||||
[jupyterhub_idle_culler][] script can be used as a Service.
|
||||
|
||||
## Real-world example to cull idle servers
|
||||
|
||||
@@ -15,11 +15,11 @@ document will:
|
||||
- explain some basic information about API tokens
|
||||
- clarify that API tokens can be used to authenticate to
|
||||
single-user servers as of [version 0.8.0](../changelog)
|
||||
- show how the [cull_idle_servers][] script can be:
|
||||
- used in a Hub-managed service
|
||||
- run as a standalone script
|
||||
- show how the [jupyterhub_idle_culler][] script can be:
|
||||
- used in a Hub-managed service
|
||||
- run as a standalone script
|
||||
|
||||
Both examples for `cull_idle_servers` will communicate tasks to the
|
||||
Both examples for `jupyterhub_idle_culler` will communicate tasks to the
|
||||
Hub via the REST API.
|
||||
|
||||
## API Token basics
|
||||
@@ -78,17 +78,23 @@ single-user servers, and only cookies can be used for authentication.
|
||||
0.8 supports using JupyterHub API tokens to authenticate to single-user
|
||||
servers.
|
||||
|
||||
## Configure `cull-idle` to run as a Hub-Managed Service
|
||||
## Configure the idle culler to run as a Hub-Managed Service
|
||||
|
||||
Install the idle culler:
|
||||
|
||||
```
|
||||
pip install jupyterhub-idle-culler
|
||||
```
|
||||
|
||||
In `jupyterhub_config.py`, add the following dictionary for the
|
||||
`cull-idle` Service to the `c.JupyterHub.services` list:
|
||||
`idle-culler` Service to the `c.JupyterHub.services` list:
|
||||
|
||||
```python
|
||||
c.JupyterHub.services = [
|
||||
{
|
||||
'name': 'cull-idle',
|
||||
'name': 'idle-culler',
|
||||
'admin': True,
|
||||
'command': [sys.executable, 'cull_idle_servers.py', '--timeout=3600'],
|
||||
'command': [sys.executable, '-m', 'jupyterhub_idle_culler', '--timeout=3600'],
|
||||
}
|
||||
]
|
||||
```
|
||||
@@ -101,21 +107,21 @@ where:
|
||||
|
||||
## Run `cull-idle` manually as a standalone script
|
||||
|
||||
Now you can run your script, i.e. `cull_idle_servers`, by providing it
|
||||
Now you can run your script by providing it
|
||||
the API token and it will authenticate through the REST API to
|
||||
interact with it.
|
||||
|
||||
This will run `cull-idle` manually. `cull-idle` can be run as a standalone
|
||||
This will run the idle culler service manually. It can be run as a standalone
|
||||
script anywhere with access to the Hub, and will periodically check for idle
|
||||
servers and shut them down via the Hub's REST API. In order to shut down the
|
||||
servers, the token given to cull-idle must have admin privileges.
|
||||
servers, the token given to `cull-idle` must have admin privileges.
|
||||
|
||||
Generate an API token and store it in the `JUPYTERHUB_API_TOKEN` environment
|
||||
variable. Run `cull_idle_servers.py` manually.
|
||||
variable. Run `jupyterhub_idle_culler` manually.
|
||||
|
||||
```bash
|
||||
export JUPYTERHUB_API_TOKEN='token'
|
||||
python3 cull_idle_servers.py [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]
|
||||
python -m jupyterhub_idle_culler [--timeout=900] [--url=http://127.0.0.1:8081/hub/api]
|
||||
```
|
||||
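
Before wiring the script into cron or a service manager, it can help to confirm that the token actually reaches the Hub. A quick sanity check (not from the original page; adjust the URL to your deployment, and note that `/hub/api/users` requires an admin token):

```bash
# should return a JSON list of users if the token has admin access to the Hub API
curl -s -H "Authorization: token $JUPYTERHUB_API_TOKEN" http://127.0.0.1:8081/hub/api/users
```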

[jupyterhub_idle_culler]: https://github.com/jupyterhub/jupyterhub-idle-culler

@@ -1,8 +1,8 @@

# Spawners and single-user notebook servers

Since the single-user server is an instance of `jupyter notebook`, an entire separate
multi-process application, there are many aspects of that server that can be configured, and a lot
of ways to express that configuration.

At the JupyterHub level, you can set some values on the Spawner. The simplest of these is
`Spawner.notebook_dir`, which lets you set the root directory for a user's server. This root

@@ -14,7 +14,7 @@ expanded to the user's home directory.

c.Spawner.notebook_dir = '~/notebooks'
```

You can also specify extra command line arguments to the notebook server with:

```python
c.Spawner.args = ['--debug', '--profile=PHYS131']
```

@@ -115,8 +115,8 @@ We want you to contribute to JupyterHub in ways that are most exciting

& useful to you. We value documentation, testing, bug reporting & code equally,
and are glad to have your contributions in whatever form you wish :)

Our `Code of Conduct <https://github.com/jupyter/governance/blob/HEAD/conduct/code_of_conduct.md>`_
(`reporting guidelines <https://github.com/jupyter/governance/blob/HEAD/conduct/reporting_online.md>`_)
helps keep our community welcoming to as many people as possible.

.. toctree::

@@ -147,4 +147,4 @@ Questions? Suggestions?

.. _JupyterHub: https://github.com/jupyterhub/jupyterhub
.. _Jupyter notebook: https://jupyter-notebook.readthedocs.io/en/latest/
.. _REST API: https://petstore3.swagger.io/?url=https://raw.githubusercontent.com/jupyterhub/jupyterhub/HEAD/docs/rest-api.yml#!/default

@@ -1,347 +0,0 @@

# Install JupyterHub and JupyterLab from the ground up

The combination of [JupyterHub](https://jupyterhub.readthedocs.io) and [JupyterLab](https://jupyterlab.readthedocs.io)
is a great way to make shared computing resources available to a group.

These instructions are a guide for a manual, 'bare metal' install of [JupyterHub](https://jupyterhub.readthedocs.io)
and [JupyterLab](https://jupyterlab.readthedocs.io). This is ideal for running on a single server: build a beast
of a machine and share it within your lab, or use a virtual machine from any VPS or cloud provider.

This guide has similar goals to [The Littlest JupyterHub](https://the-littlest-jupyterhub.readthedocs.io) setup
script. However, instead of bundling all these steps into one installer, we will perform every step manually.
This makes it easy to customize any part (e.g. if you want to run other services on the same system and need to make them
work together), as well as giving you full control and understanding of your setup.

## Prerequisites

Your own server with administrator (root) access. This could be a local machine, a remotely hosted one, or a cloud instance
or VPS. Each user who will access JupyterHub should have a standard user account on the machine. The install will be done
through the command line - useful if you log into your machine remotely using SSH.

This tutorial was tested on **Ubuntu 18.04**. No other Linux distributions have been tested, but the instructions
should be reasonably straightforward to adapt.

## Goals

JupyterLab enables access to multiple 'kernels', each one being a given environment for a given language. The most
common is a Python environment, for scientific computing usually one managed by the `conda` package manager.

This guide will set up JupyterHub and JupyterLab separately from the Python environment. In other words, we treat
JupyterHub+JupyterLab as an 'app' or web service, which will connect to the kernels available on the system. Specifically:

- We will create an installation of JupyterHub and JupyterLab using a virtualenv under `/opt` using the system Python.

- We will install conda globally.

- We will create a shared conda environment which can be used (but not modified) by all users.

- We will show how users can create their own private conda environments, where they can install whatever they like.

The default JupyterHub Authenticator uses PAM to authenticate system users with their username and password. One can
[choose the authenticator](https://jupyterhub.readthedocs.io/en/stable/reference/authenticators.html#authenticators)
that best suits their needs. In this guide we will use the default Authenticator because it makes it easy for everyone to manage data
in their home folder and to mix and match different services and access methods (e.g. SSH) which all work using the
Linux system user accounts. Therefore, each user of JupyterHub will need a standard system user account.

Another goal of this guide is to use system provided packages wherever possible. This has the advantage that these packages
get automatic patches and security updates (be sure to turn on automatic updates in Ubuntu). This means less maintenance
work and a more reliable system.

## Part 1: JupyterHub and JupyterLab

### Setup the JupyterHub and JupyterLab in a virtual environment

First we create a virtual environment under '/opt/jupyterhub'. The '/opt' folder is where apps not belonging to the operating
system are [commonly installed](https://unix.stackexchange.com/questions/11544/what-is-the-difference-between-opt-and-usr-local).
Both jupyterlab and jupyterhub will be installed into this virtualenv. Create it with the command:

```sh
sudo python3 -m venv /opt/jupyterhub/
```

Now we use pip to install the required Python packages into the new virtual environment. Be sure to install
`wheel` first. Since we are separating the user interface from the computing kernels, we don't install
any Python scientific packages here. The only exception is `ipywidgets` because this is needed to allow connection
between interactive tools running in the kernel and the user interface.

Note that we use `/opt/jupyterhub/bin/python3 -m pip install` each time - this [makes sure](https://snarky.ca/why-you-should-use-python-m-pip/)
that the packages are installed to the correct virtual environment.

Perform the install using the following commands:

```sh
sudo /opt/jupyterhub/bin/python3 -m pip install wheel
sudo /opt/jupyterhub/bin/python3 -m pip install jupyterhub jupyterlab
sudo /opt/jupyterhub/bin/python3 -m pip install ipywidgets
```

JupyterHub also currently defaults to requiring `configurable-http-proxy`, which needs `nodejs` and `npm`. The versions
of these available in Ubuntu therefore need to be installed first (they are a bit old but this is ok for our needs):

```sh
sudo apt install nodejs npm
```

Then install `configurable-http-proxy`:

```sh
sudo npm install -g configurable-http-proxy
```

### Create the configuration for JupyterHub

Now we start creating configuration files. To keep everything together, we put all the configuration into the folder
created for the virtualenv, under `/opt/jupyterhub/etc/`. For each thing needing configuration, we will create a further
subfolder and the necessary files.

First create the folder for the JupyterHub configuration and navigate to it:

```sh
sudo mkdir -p /opt/jupyterhub/etc/jupyterhub/
cd /opt/jupyterhub/etc/jupyterhub/
```

Then generate the default configuration file:

```sh
sudo /opt/jupyterhub/bin/jupyterhub --generate-config
```

This will produce the default configuration file `/opt/jupyterhub/etc/jupyterhub/jupyterhub_config.py`.

You will need to edit the configuration file to make the JupyterLab interface the default.
Set the following configuration option in your `jupyterhub_config.py` file:

```python
c.Spawner.default_url = '/lab'
```

Further configuration options may be found in the documentation.
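
As an illustration only (not part of the original guide), a couple of commonly tweaked options might look like this; the values shown are assumptions you would adapt to your own deployment:

```python
# jupyterhub_config.py - illustrative extras, adjust to your deployment
c.Authenticator.admin_users = {'someadmin'}  # system user(s) who may administer the Hub
c.JupyterHub.admin_access = True             # allow admins to access user servers for debugging
```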

### Setup Systemd service

We will setup JupyterHub to run as a system service using Systemd (which is responsible for managing all services and
servers that run on startup in Ubuntu). We will create a service file in a suitable location in the virtualenv folder
and then link it to the system services. First create the folder for the service file:

```sh
sudo mkdir -p /opt/jupyterhub/etc/systemd
```

Then create the following text file using your [favourite editor](https://micro-editor.github.io/) at

```sh
/opt/jupyterhub/etc/systemd/jupyterhub.service
```

Paste the following service unit definition into the file:

```
[Unit]
Description=JupyterHub
After=syslog.target network.target

[Service]
User=root
Environment="PATH=/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/opt/jupyterhub/bin"
ExecStart=/opt/jupyterhub/bin/jupyterhub -f /opt/jupyterhub/etc/jupyterhub/jupyterhub_config.py

[Install]
WantedBy=multi-user.target
```

This sets up the environment to use the virtual environment we created, tells Systemd how to start jupyterhub using
the configuration file we created, specifies that jupyterhub will be started as the `root` user (needed so that it can
start jupyter on behalf of other logged in users), and specifies that jupyterhub should start on boot after the network
is enabled.

Finally, we need to make systemd aware of our service file. First we symlink our file into systemd's directory:

```sh
sudo ln -s /opt/jupyterhub/etc/systemd/jupyterhub.service /etc/systemd/system/jupyterhub.service
```

Then tell systemd to reload its configuration files

```sh
sudo systemctl daemon-reload
```

And finally enable the service

```sh
sudo systemctl enable jupyterhub.service
```

The service will start on reboot, but we can start it straight away using:

```sh
sudo systemctl start jupyterhub.service
```

...and check that it's running using:

```sh
sudo systemctl status jupyterhub.service
```
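
If the service fails to start or misbehaves, the Hub's output goes to the systemd journal. A typical way to follow it (assuming the unit name used above) is:

```sh
sudo journalctl -u jupyterhub -f
```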

You should now already be able to access jupyterhub using `<your server's ip>:8000` (assuming you haven't already set
up a firewall or something). However, when you log in, the jupyter notebooks will be trying to use the Python virtualenv
that was created to install JupyterHub, which is not what we want. So on to part 2.

## Part 2: Conda environments

### Install conda for the whole system

We will use `conda` to manage Python environments. We will install the officially maintained `conda` packages for Ubuntu,
which means they will get automatic updates with the rest of the system. Set up the repo for the official Conda debian packages;
instructions are copied from [here](https://docs.conda.io/projects/conda/en/latest/user-guide/install/rpm-debian.html):

Install the Anaconda public gpg key to the trusted store:

```sh
curl https://repo.anaconda.com/pkgs/misc/gpgkeys/anaconda.asc | gpg --dearmor > conda.gpg
sudo install -o root -g root -m 644 conda.gpg /etc/apt/trusted.gpg.d/
```

Add the Debian repo:

```sh
echo "deb [arch=amd64] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" | sudo tee /etc/apt/sources.list.d/conda.list
```

Install conda:

```sh
sudo apt update
sudo apt install conda
```

This will install conda into the folder `/opt/conda/`, with the conda command available at `/opt/conda/bin/conda`.

Finally, we can make conda more easily available to users by symlinking the conda shell setup script to the profile
'drop in' folder so that it gets run on login:

```sh
sudo ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh
```

### Install a default conda environment for all users

First create a folder for conda envs (it might exist already):

```sh
sudo mkdir /opt/conda/envs/
```

Then create a conda environment to your liking within that folder. Here we have called it 'python' because it will
be the obvious default - call it whatever you like. You can install whatever you like into this environment, but you MUST at least install `ipykernel`.

```sh
sudo /opt/conda/bin/conda create --prefix /opt/conda/envs/python python=3.7 ipykernel
```

Once your env is set up as desired, make it visible to Jupyter by installing the kernel spec. There are two options here:

1) Install into the JupyterHub virtualenv - this ensures it overrides the default python version. It will only be visible
to the JupyterHub installation we have just created. This is useful to avoid conda environments appearing where they are not expected.

```sh
sudo /opt/conda/envs/python/bin/python -m ipykernel install --prefix=/opt/jupyterhub/ --name 'python' --display-name "Python (default)"
```

2) Install it system-wide by putting it into `/usr/local`. It will be visible to any parallel install of JupyterHub or
JupyterLab, and will persist even if you later delete or modify the JupyterHub installation. This is useful if the kernels
might be used by other services, or if you want to modify the JupyterHub installation independently from the conda environments.

```sh
sudo /opt/conda/envs/python/bin/python -m ipykernel install --prefix /usr/local/ --name 'python' --display-name "Python (default)"
```

### Setting up users' own conda environments

There is relatively little for the administrator to do here, as users will have to set up their own environments using the shell.
On login they should run `conda init` or `/opt/conda/bin/conda init`. They can then use conda to set up their environment,
although they must also install `ipykernel`. Once done, they can enable their kernel using:

```sh
/path/to/kernel/env/bin/python -m ipykernel install --name 'python-my-env' --display-name "Python My Env"
```

This will place the kernel spec into their home folder, where Jupyter will look for it on startup.
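
For example, a user's own session might look roughly like this (a sketch only; the environment name and Python version are arbitrary):

```sh
# create a private environment in the user's home folder, including ipykernel
conda create --prefix ~/my-env python=3.7 ipykernel
# register it with Jupyter; --user places the kernelspec in the home folder
~/my-env/bin/python -m ipykernel install --user --name 'python-my-env' --display-name "Python (my-env)"
```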

## Setting up a reverse proxy

The guide so far results in JupyterHub running on port 8000. It is not generally advisable to run open web services in
this way - instead, use a reverse proxy running on standard HTTP/HTTPS ports.

> **Important**: Be aware of the security implications, especially if you are running a server that is accessible from the open internet,
> i.e. not protected within an institutional intranet or private home/office network. You should set up a firewall and
> HTTPS encryption, which is outside of the scope of this guide. For HTTPS consider using [LetsEncrypt](https://letsencrypt.org/)
> or setting up a [self-signed certificate](https://www.digitalocean.com/community/tutorials/how-to-create-a-self-signed-ssl-certificate-for-nginx-in-ubuntu-18-04).
> Firewalls may be set up using `ufw` or `firewalld` and combined with `fail2ban`.

### Using Nginx

Nginx is a mature and established web server and reverse proxy and is easy to install using `sudo apt install nginx`.
Details on using Nginx as a reverse proxy can be found elsewhere. Here, we will only outline the additional steps needed
to set up JupyterHub with Nginx and host it at a given URL, e.g. `<your-server-ip-or-url>/jupyter`.
This could be useful for example if you are running several services or web pages on the same server.

Achieving this needs a few tweaks to both the JupyterHub configuration and the Nginx config. First, edit the
configuration file `/opt/jupyterhub/etc/jupyterhub/jupyterhub_config.py` and add the line:

```python
c.JupyterHub.bind_url = 'http://:8000/jupyter'
```

where `/jupyter` will be the relative URL of the JupyterHub.

Now Nginx must be configured to pass all traffic from `/jupyter` to the local address `127.0.0.1:8000`.
Add the following snippet to your nginx configuration file (e.g. `/etc/nginx/sites-available/default`).

```
location /jupyter/ {
    # NOTE important to also set base url of jupyterhub to /jupyter in its config
    proxy_pass http://127.0.0.1:8000;

    proxy_redirect off;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;

    # websocket headers
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection $connection_upgrade;
}
```

Also add this snippet before the *server* block:

```
map $http_upgrade $connection_upgrade {
        default upgrade;
        '' close;
}
```

Nginx will not run if there are errors in the configuration; check your configuration using:

```sh
nginx -t
```

If there are no errors, you can restart the Nginx service for the new configuration to take effect.

```sh
sudo systemctl restart nginx.service
```

## Getting started using your new JupyterHub

Once you have set up JupyterHub and the Nginx proxy as described, you can browse to your JupyterHub IP or URL
(e.g. if your server IP address is `123.456.789.1` and you decided to host JupyterHub at the `/jupyter` URL, browse
to `123.456.789.1/jupyter`). You will find a login page where you enter your Linux username and password. On login
you will be presented with the JupyterLab interface, with the file browser pane showing the contents of your user's
home directory on the server.
docs/source/installation-guide-hard.rst (new file)
@@ -0,0 +1,6 @@

:orphan:

JupyterHub the hard way
=======================

This guide has moved to https://github.com/jupyterhub/jupyterhub-the-hard-way/blob/HEAD/docs/installation-guide-hard.md

@@ -11,4 +11,3 @@ running on your own infrastructure.

   quickstart
   quickstart-docker
   installation-basics

@@ -12,23 +12,23 @@ Before installing JupyterHub, you will need:

- [nodejs/npm](https://www.npmjs.com/). [Install nodejs/npm](https://docs.npmjs.com/getting-started/installing-node),
  using your operating system's package manager.

  - If you are using **`conda`**, the nodejs and npm dependencies will be installed for
    you by conda.

  - If you are using **`pip`**, install a recent version of
    [nodejs/npm](https://docs.npmjs.com/getting-started/installing-node).
    For example, install it on Linux (Debian/Ubuntu) using:

    ```
    sudo apt-get install npm nodejs-legacy
    ```

    The `nodejs-legacy` package installs the `node` executable and is currently
    required for npm to work on Debian/Ubuntu.

- A [pluggable authentication module (PAM)](https://en.wikipedia.org/wiki/Pluggable_authentication_module)
  to use the [default Authenticator](./getting-started/authenticators-users-basics.md).
  PAM is often available by default on most distributions; if this is not the case, it can be installed
  using the operating system's package manager.
- TLS certificate and key for HTTPS communication
- Domain name

@@ -78,12 +78,12 @@ Visit `https://localhost:8000` in your browser, and sign in with your unix

credentials.

To **allow multiple users to sign in** to the Hub server, you must start
`jupyterhub` as a _privileged user_, such as root:

```bash
sudo jupyterhub
```

The [wiki](https://github.com/jupyterhub/jupyterhub/wiki/Using-sudo-to-run-JupyterHub-without-root-privileges)
describes how to run the server as a _less privileged user_. This requires
additional configuration of the system.

@@ -89,7 +89,6 @@ class DictionaryAuthenticator(Authenticator):

        return data['username']
```

#### Normalize usernames

Since the Authenticator and Spawner both use the same username,

@@ -111,11 +110,10 @@ When using `PAMAuthenticator`, you can set

normalize usernames using PAM (basically round-tripping them: username
to uid to username), which is useful in case you use some external
service that allows multiple usernames mapping to the same user (such
as ActiveDirectory, yes, this really happens). When
`pam_normalize_username` is on, usernames are _not_ normalized to
lowercase.

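Related to normalization, the `Authenticator.username_map` option can rewrite specific incoming usernames. A small illustrative example (the mapping values are placeholders, not from the original page):

```python
# map names returned by the external authenticator onto local usernames
c.Authenticator.username_map = {
    'service-admin': 'admin',
    'Meg.Ryan@EXAMPLE.COM': 'mryan',
}
```
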
#### Validate usernames

In most cases, there is a very limited set of acceptable usernames.

@@ -132,7 +130,6 @@ To only allow usernames that start with 'w':

c.Authenticator.username_pattern = r'w.*'
```

### How to write a custom authenticator

You can use custom Authenticator subclasses to enable authentication

@@ -145,7 +142,6 @@ and [post_spawn_stop(user, spawner)][], are hooks that can be used to do

auth-related startup (e.g. opening PAM sessions) and cleanup
(e.g. closing PAM sessions).

See a list of custom Authenticators [on the wiki](https://github.com/jupyterhub/jupyterhub/wiki/Authenticators).

If you are interested in writing a custom authenticator, you can read

@@ -186,7 +182,6 @@ Additionally, configurable attributes for your authenticator will

appear in jupyterhub help output and auto-generated configuration files
via `jupyterhub --generate-config`.

### Authentication state

JupyterHub 0.8 adds the ability to persist state related to authentication,

@@ -220,12 +215,10 @@ To store auth_state, two conditions must be met:

export JUPYTERHUB_CRYPT_KEY=$(openssl rand -hex 32)
```

JupyterHub uses [Fernet](https://cryptography.io/en/latest/fernet/) to encrypt auth_state.
To facilitate key-rotation, `JUPYTERHUB_CRYPT_KEY` may be a semicolon-separated list of encryption keys.
If there are multiple keys present, the **first** key is always used to persist any new auth_state.
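
For example, when rotating keys you might prepend a fresh key and keep the old one, so existing auth_state can still be decrypted (a sketch; `$OLD_KEY` is assumed to hold the previous value):

```bash
# new key first (used for writes), old key second (still usable for reads)
export JUPYTERHUB_CRYPT_KEY="$(openssl rand -hex 32);$OLD_KEY"
```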

#### Using auth_state

Typically, if `auth_state` is persisted it is desirable to affect the Spawner environment in some way.

@@ -235,10 +228,9 @@ to Spawner environment:

```python
class MyAuthenticator(Authenticator):
    async def authenticate(self, handler, data=None):
        username = await identify_user(handler, data)
        upstream_token = await token_for_user(username)
        return {
            'name': username,
            'auth_state': {

@@ -246,10 +238,9 @@ class MyAuthenticator(Authenticator):

            },
        }

    async def pre_spawn_start(self, user, spawner):
        """Pass upstream_token to spawner via environment variable"""
        auth_state = await user.get_auth_state()
        if not auth_state:
            # auth_state not enabled
            return
```

@@ -268,11 +259,10 @@ PAM session.

Beginning with version 0.8, JupyterHub is an OAuth provider.

[authenticator]: https://github.com/jupyterhub/jupyterhub/blob/HEAD/jupyterhub/auth.py
[pam]: https://en.wikipedia.org/wiki/Pluggable_authentication_module
[oauth]: https://en.wikipedia.org/wiki/OAuth
[github oauth]: https://developer.github.com/v3/oauth/
[oauthenticator]: https://github.com/jupyterhub/oauthenticator
[pre_spawn_start(user, spawner)]: https://jupyterhub.readthedocs.io/en/latest/api/auth.html#jupyterhub.auth.Authenticator.pre_spawn_start
[post_spawn_stop(user, spawner)]: https://jupyterhub.readthedocs.io/en/latest/api/auth.html#jupyterhub.auth.Authenticator.post_spawn_stop

@@ -3,18 +3,17 @@

In this example, we show a configuration file for a fairly standard JupyterHub
deployment with the following assumptions:

- Running JupyterHub on a single cloud server
- Using SSL on the standard HTTPS port 443
- Using GitHub OAuth (using oauthenticator) for login
- Using the default spawner (to configure other spawners, uncomment and edit
  `spawner_class` as well as follow the instructions for your desired spawner)
- Users exist locally on the server
- Users' notebooks to be served from `~/assignments` to allow users to browse
  for notebooks within other users' home directories
- You want the landing page for each user to be a `Welcome.ipynb` notebook in
  their assignments directory.
- All runtime files are put into `/srv/jupyterhub` and log files in `/var/log`.

The `jupyterhub_config.py` file would have these settings:
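
The full settings are not reproduced in this excerpt; a condensed sketch consistent with the assumptions above might look like the following (paths, domain, and OAuth credentials are placeholders):

```python
c.JupyterHub.port = 443
c.JupyterHub.ssl_key = '/path/to/ssl.key'
c.JupyterHub.ssl_cert = '/path/to/ssl.cert'
c.JupyterHub.cookie_secret_file = '/srv/jupyterhub/cookie_secret'
c.JupyterHub.db_url = 'sqlite:////srv/jupyterhub/jupyterhub.sqlite'
c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
c.GitHubOAuthenticator.oauth_callback_url = 'https://hub.example.com/hub/oauth_callback'
c.Spawner.notebook_dir = '~/assignments'
c.Spawner.default_url = '/notebooks/Welcome.ipynb'
```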

@@ -6,12 +6,12 @@ SSL port `443`. This could be useful if the JupyterHub server machine is also

hosting other domains or content on `443`. The goal in this example is to
satisfy the following:

- JupyterHub is running on a server, accessed _only_ via `HUB.DOMAIN.TLD:443`
- On the same machine, `NO_HUB.DOMAIN.TLD` strictly serves different content,
  also on port `443`
- `nginx` or `apache` is used as the public access point (which means that
  only nginx/apache will bind to `443`)
- After testing, the server in question should be able to score at least an A on the
  Qualys SSL Labs [SSL Server Test](https://www.ssllabs.com/ssltest/)

Let's start out with needed JupyterHub configuration in `jupyterhub_config.py`:

@@ -86,6 +86,7 @@ server {

    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection $connection_upgrade;
    proxy_set_header X-Scheme $scheme;

    proxy_buffering off;
}

@@ -143,6 +144,7 @@ Now restart `nginx`, restart the JupyterHub, and enjoy accessing

`https://NO_HUB.DOMAIN.TLD`.

### SELinux permissions for nginx

On distributions with SELinux enabled (e.g. Fedora), one may encounter permission errors
when the nginx service is started.

@@ -154,8 +156,8 @@ semanage port -a -t http_port_t -p tcp 8000

setsebool -P httpd_can_network_relay 1
setsebool -P httpd_can_network_connect 1
```

Replace 8000 with the port the jupyterhub server is running from.

## Apache

@@ -210,22 +212,24 @@ Listen 443

</VirtualHost>
```

If you need to run JupyterHub under `/jhub/` or another location, use the configurations below:

- JupyterHub running locally at http://127.0.0.1:8000/jhub/ or other location

httpd.conf amendments:

```bash
RewriteRule /jhub/(.*) ws://127.0.0.1:8000/jhub/$1 [NE,P,L]
RewriteRule /jhub/(.*) http://127.0.0.1:8000/jhub/$1 [NE,P,L]

ProxyPass /jhub/ http://127.0.0.1:8000/jhub/
ProxyPassReverse /jhub/ http://127.0.0.1:8000/jhub/
```

jupyterhub_config.py amendments:

```python
# The public facing URL of the whole JupyterHub application.
# This is the address on which the proxy will bind. Sets protocol, ip, base_url.
c.JupyterHub.bind_url = 'http://127.0.0.1:8000/jhub/'
```

@@ -9,7 +9,7 @@ Only do this if you are very sure you must.

There are many Authenticators and Spawners available for JupyterHub. Some, such
as DockerSpawner or OAuthenticator, do not need any elevated permissions. This
document describes how to get the full default behavior of JupyterHub while
running notebook servers as real system users on a shared system without
running the Hub itself as root.

Since JupyterHub needs to spawn processes as other users, the simplest way

@@ -50,10 +50,9 @@ To do this we add to `/etc/sudoers` (use `visudo` for safe editing of sudoers):

- specify the list of users `JUPYTER_USERS` for whom `rhea` can spawn servers
- set the command `JUPYTER_CMD` that `rhea` can execute on behalf of users
- give `rhea` permission to run `JUPYTER_CMD` on behalf of `JUPYTER_USERS`
  without entering a password

For example:

```bash

@@ -91,16 +90,16 @@ $ adduser -G jupyterhub newuser

Test that the new user doesn't need to enter a password to run the sudospawner
command.

This should prompt for your password to switch to rhea, but _not_ prompt for
any password for the second switch. It should show some help output about
logging options:

```bash
$ sudo -u rhea sudo -n -u $USER /usr/local/bin/sudospawner --help
Usage: /usr/local/bin/sudospawner [OPTIONS]

Options:

  --help show this help information
...
```

@@ -151,12 +150,13 @@ We want our new user to be able to read the shadow passwords, so add it to the s

$ sudo usermod -a -G shadow rhea
```

If you want jupyterhub to serve pages on a restricted port (such as port 80 for http),
then you will need to give `node` permission to do so:

```bash
sudo setcap 'cap_net_bind_service=+ep' /usr/bin/node
```

However, you may want to further understand the consequences of this.

You may also be interested in limiting the amount of CPU any process can use

@@ -165,7 +165,6 @@ distributions' packaging system. This can be used to keep any user's process

from using too many CPU cycles. You can configure it according to [these
instructions](http://ubuntuforums.org/showthread.php?t=992706).

### Shadow group (FreeBSD)

**NOTE:** This has not been tested and may not work as expected.

@@ -186,7 +185,7 @@ $ sudo chgrp shadow /etc/master.passwd

$ sudo chmod g+r /etc/master.passwd
```

We want our new user to be able to read the shadow passwords, so add it to the
shadow group:

```bash

@@ -220,7 +219,7 @@ Finally, start the server as our newly configured user, `rhea`:

```bash
$ cd /etc/jupyterhub
$ sudo -u rhea jupyterhub --JupyterHub.spawner_class=sudospawner.SudoSpawner
```

And try logging in.

@@ -228,7 +227,7 @@ And try logging in.

If you still get a generic `Permission denied` `PermissionError`, it's possible SELinux is blocking you.
Here's how you can make a module to allow this.
First, put this in a file named `sudo_exec_selinux.te`:

```bash
module sudo_exec_selinux 1.1;

@@ -22,20 +22,18 @@ This section will focus on user environments, including:

- Installing kernelspecs
- Using containers vs. multi-user hosts

## Installing packages

To make packages available to users, you generally will install packages
system-wide or in a shared environment.

This installation location should always be in the same environment that
`jupyterhub-singleuser` itself is installed in, and must be _readable and
executable_ by your users. If you want users to be able to install additional
packages, it must also be _writable_ by your users.

If you are using a standard system Python install, you would use:

```bash
sudo python3 -m pip install numpy
```

@@ -47,7 +45,6 @@ You may also use conda to install packages. If you do, you should make sure

that the conda environment has appropriate permissions for users to be able to
run Python code in the env.

## Configuring Jupyter and IPython

[Jupyter](https://jupyter-notebook.readthedocs.io/en/stable/config_overview.html)

@@ -64,6 +61,7 @@ users. It's generally more efficient to configure user environments "system-wide

and it's a good idea to avoid creating files in users' home directories.

The typical locations for these config files are:

- **system-wide** in `/etc/{jupyter|ipython}`
- **env-wide** (environment wide) in `{sys.prefix}/etc/{jupyter|ipython}`.

@@ -91,7 +89,6 @@ c.MappingKernelManager.cull_idle_timeout = 20 * 60

c.MappingKernelManager.cull_interval = 2 * 60
```

## Installing kernelspecs

You may have multiple Jupyter kernels installed and want to make sure that

@@ -119,7 +116,6 @@ sure are available, I can install their specs system-wide (in /usr/local) with:

/path/to/python2 -m IPython kernel install --prefix=/usr/local
```

## Multi-user hosts vs. Containers

There are two broad categories of user environments that depend on what

@@ -141,8 +137,8 @@ When JupyterHub uses **container-based** Spawners (e.g. KubeSpawner or

DockerSpawner), the 'system-wide' environment is really the container image
which you are using for users.

In both cases, you want to _avoid putting configuration in user home
directories_ because users can change those configuration settings. Also,
home directories typically persist once they are created, so they are
difficult for admins to update later.

@@ -179,3 +175,13 @@ The number of named servers per user can be limited by setting

```python
c.JupyterHub.named_server_limit_per_user = 5
```

## Switching to Jupyter Server

[Jupyter Server](https://jupyter-server.readthedocs.io/en/latest/) is a new Tornado Server backend for Jupyter web applications (e.g. JupyterLab 3.0 uses this package as its default backend).

By default, the single-user notebook server uses the (old) `NotebookApp` from the [notebook](https://github.com/jupyter/notebook) package. You can switch to using Jupyter Server's `ServerApp` backend (this will likely become the default in future releases) by setting the `JUPYTERHUB_SINGLEUSER_APP` environment variable:

```bash
export JUPYTERHUB_SINGLEUSER_APP='jupyter_server.serverapp.ServerApp'
```

@@ -46,8 +46,8 @@ additional configuration required for MySQL that is not needed for PostgreSQL.

- You should use the `pymysql` sqlalchemy provider (the other one, MySQLdb,
  isn't available for py3).
- You also need to set `pool_recycle` to some value (typically 60 - 300)
  which depends on your MySQL setup. This is necessary since MySQL kills
  connections serverside if they've been idle for a while, and the connection
  from the hub will be idle for longer than most connections. This behavior
  will lead to frustrating 'the connection has gone away' errors from
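
For reference, the two points above might translate into configuration like this (a sketch; the connection string is a placeholder):

```python
c.JupyterHub.db_url = 'mysql+pymysql://jupyterhub:PASSWORD@db.example.com:3306/jupyterhub'
c.JupyterHub.db_kwargs = {'pool_recycle': 300}  # seconds; tune to your MySQL idle timeout
```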

@@ -54,7 +54,7 @@ class MyProxy(Proxy):

    """Stop the proxy"""
```

These methods **may** be coroutines.

`c.Proxy.should_start` is a configurable flag that determines whether the
Hub should call these methods when the Hub itself starts and stops.

@@ -103,7 +103,7 @@ route to be proxied, such as `/user/name/`. A routespec will:

When adding a route, JupyterHub may pass a JSON-serializable dict as a `data`
argument that should be attached to the proxy route. When that route is
retrieved, the `data` argument should be returned as well. If your proxy
implementation doesn't support storing data attached to routes, then your
Python wrapper may have to handle storing the `data` piece itself, e.g. in a
simple file or database.

@@ -136,7 +136,7 @@ async def delete_route(self, routespec):

### Retrieving routes

For retrieval, you only _need_ to implement a single method that retrieves all
routes. The return value for this function should be a dictionary, keyed by
`routespec`, of dicts whose keys are the same three arguments passed to
`add_route` (`routespec`, `target`, `data`).
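
A minimal sketch of such a method on your Proxy subclass, assuming a hypothetical `self._backend_routes()` helper that your wrapper provides:

```python
async def get_all_routes(self):
    """Return all routes known to the proxy, keyed by routespec."""
    all_routes = {}
    for routespec, entry in (await self._backend_routes()).items():  # hypothetical helper
        all_routes[routespec] = {
            'routespec': routespec,
            'target': entry['target'],
            'data': entry.get('data', {}),
        }
    return all_routes
```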

@@ -169,7 +169,7 @@ curl -X POST -H "Authorization: token <token>" "http://127.0.0.1:8081/hub/api/us

```

With the named-server functionality, it's now possible to launch more than one
specifically named server for a given user. This could be used, for instance,
to launch each server based on a different image.

First you must enable named-servers by including the following setting in the `jupyterhub_config.py` file.
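
The setting itself is cut off in this excerpt; in current JupyterHub versions it is:

```python
c.JupyterHub.allow_named_servers = True
```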

@@ -187,6 +187,7 @@ hub:

```

With that setting in place, a new named-server is activated like this:

```bash
curl -X POST -H "Authorization: token <token>" "http://127.0.0.1:8081/hub/api/users/<user>/servers/<serverA>"
curl -X POST -H "Authorization: token <token>" "http://127.0.0.1:8081/hub/api/users/<user>/servers/<serverB>"

@@ -201,7 +202,6 @@ will need to be able to handle the case of multiple servers per user and ensure

uniqueness of names, particularly if servers are spawned via docker containers
or kubernetes pods.

## Learn more about the API

You can see the full [JupyterHub REST API][] for details. This REST API Spec can

@@ -209,7 +209,7 @@ be viewed in a more [interactive style on swagger's petstore][].

Both resources contain the same information and differ only in their display.
Note: The Swagger specification is being renamed the [OpenAPI Initiative][].

[interactive style on swagger's petstore]: https://petstore3.swagger.io/?url=https://raw.githubusercontent.com/jupyterhub/jupyterhub/HEAD/docs/rest-api.yml#!/default
[openapi initiative]: https://www.openapis.org/
[jupyterhub rest api]: ./rest-api
[jupyter notebook rest api]: https://petstore3.swagger.io/?url=https://raw.githubusercontent.com/jupyter/notebook/HEAD/notebook/services/api/api.yaml

@@ -1,28 +1,26 @@

# Running proxy separately from the hub

## Background

The thing which users directly connect to is the proxy, by default
`configurable-http-proxy`. The proxy either redirects users to the
hub (for login and managing servers), or to their own single-user
servers. Thus, as long as the proxy stays running, access to existing
servers continues, even if the hub itself restarts or goes down.

When you first configure the hub, you may not even realize this
because the proxy is automatically managed by the hub. This is great
for getting started and even most use, but every time you restart the
hub, all user connections also get restarted. But it's also simple to
run the proxy as a service separate from the hub, so that you are free
to reconfigure the hub while only interrupting users who are currently
actively starting their servers.

The default JupyterHub proxy is
[configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy),
and that page has some docs. If you are using a different proxy, such
as Traefik, these instructions are probably not relevant to you.

## Configuration options

`c.JupyterHub.cleanup_servers = False` should be set, which tells the

@@ -37,24 +35,20 @@ it yourself).

token for authenticating communication with the proxy.

`c.ConfigurableHTTPProxy.api_url = 'http://localhost:8001'` should be
set to the URL which the hub uses to connect _to the proxy's API_.
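
Put together, the hub-side configuration described in this section might look like this (the token is a placeholder; generate your own, e.g. with `openssl rand -hex 32`):

```python
c.JupyterHub.cleanup_servers = False
c.ConfigurableHTTPProxy.should_start = False   # the hub should not start/stop the proxy itself
c.ConfigurableHTTPProxy.auth_token = 'REPLACE-WITH-A-LONG-RANDOM-TOKEN'
c.ConfigurableHTTPProxy.api_url = 'http://localhost:8001'
```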
## Proxy configuration

You need to configure a service to start the proxy. An example
command line for this is `configurable-http-proxy --ip=127.0.0.1 --port=8000 --api-ip=127.0.0.1 --api-port=8001 --default-target=http://localhost:8081 --error-target=http://localhost:8081/hub/error`. (Details of how to
do this are out of scope for this tutorial - for example it might be a
systemd service or run within another docker container.) The proxy has no
configuration files; all configuration is via the command line and
environment variables.

`--api-ip` and `--api-port` (which tell the proxy where to listen) should match the hub's `ConfigurableHTTPProxy.api_url`.

`--ip`, `--port`, and other options configure the _user_ connections to the proxy.

`--default-target` and `--error-target` should point to the hub, and are used when users navigate to the proxy originally.
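
One way to run it (a sketch only, not from the original page) is a small systemd unit that passes the shared token through the `CONFIGPROXY_AUTH_TOKEN` environment variable, which configurable-http-proxy reads; the binary path may differ on your system:

```
[Unit]
Description=configurable-http-proxy for JupyterHub
After=network.target

[Service]
Environment=CONFIGPROXY_AUTH_TOKEN=REPLACE-WITH-THE-SAME-TOKEN-AS-THE-HUB
ExecStart=/usr/local/bin/configurable-http-proxy --ip=127.0.0.1 --port=8000 \
  --api-ip=127.0.0.1 --api-port=8001 \
  --default-target=http://localhost:8081 \
  --error-target=http://localhost:8081/hub/error

[Install]
WantedBy=multi-user.target
```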

@@ -63,18 +57,16 @@ match the token given to `c.ConfigurableHTTPProxy.auth_token`.

You should check the [configurable-http-proxy
options](https://github.com/jupyterhub/configurable-http-proxy) to see
what other options are needed, for example SSL options. Note that
these are configured in the hub if the hub is starting the proxy - you
need to move the options here.

## Docker image

You can use the [jupyterhub configurable-http-proxy docker
image](https://hub.docker.com/r/jupyterhub/configurable-http-proxy/)
to run the proxy.

## See also

- [jupyterhub configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy)

@@ -45,17 +45,14 @@ A Service may have the following properties:

- `url: str (default - None)` - The URL where the service is/should be. If a
  url is specified for where the Service runs its own web server,
  the service will be added to the proxy at `/services/:name`
- `api_token: str (default - None)` - For Externally-Managed Services you need to specify
  an API token to perform API requests to the Hub

If a service is also to be managed by the Hub, it has a few extra options:

- `command: (str/Popen list)` - Command for JupyterHub to spawn the service.
  - Only use this if the service should be a subprocess.
  - If command is not specified, the Service is assumed to be managed
    externally.
  - If a command is specified for launching the Service, the Service will
    be started and managed by the Hub.
- `environment: dict` - additional environment variables for the Service.
- `user: str` - the name of a system user to manage the Service. If
  unspecified, run as the same user as the Hub.

@@ -91,9 +88,9 @@ This example would be configured as follows in `jupyterhub_config.py`:

```python
c.JupyterHub.services = [
    {
        'name': 'idle-culler',
        'admin': True,
        'command': [sys.executable, '-m', 'jupyterhub_idle_culler', '--timeout=3600']
    }
]
```

@@ -103,9 +100,9 @@ parameters, which describe the environment needed to start the Service process:

- `environment: dict` - additional environment variables for the Service.
- `user: str` - name of the user to run the server if different from the Hub.
  Requires Hub to be root.
- `cwd: path` - directory in which to run the Service, if different from the
  Hub directory.

The Hub will pass the following environment variables to launch the Service:

@@ -123,15 +120,14 @@ For the previous 'cull idle' Service example, these environment variables

would be passed to the Service when the Hub starts the 'cull idle' Service:

```bash
JUPYTERHUB_SERVICE_NAME: 'idle-culler'
JUPYTERHUB_API_TOKEN: API token assigned to the service
JUPYTERHUB_API_URL: http://127.0.0.1:8080/hub/api
JUPYTERHUB_BASE_URL: https://mydomain[:port]
JUPYTERHUB_SERVICE_PREFIX: /services/idle-culler/
```

See the GitHub repo for additional information about the [jupyterhub_idle_culler][].

## Externally-Managed Services

@@ -200,16 +196,16 @@ can be used by services. You may go beyond this reference implementation and

create custom hub-authenticating clients and services. We describe the process
below.

The reference, or base, implementation is the [`HubAuth`][hubauth] class,
which implements the requests to the Hub.

To use HubAuth, you must set the `.api_token`, either programmatically when constructing the class,
or via the `JUPYTERHUB_API_TOKEN` environment variable.
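
For instance, a service process launched by the Hub could construct it like this (a small sketch; the cache age value is arbitrary):

```python
import os
from jupyterhub.services.auth import HubAuth

auth = HubAuth(
    api_token=os.environ['JUPYTERHUB_API_TOKEN'],
    cache_max_age=60,  # seconds to cache Hub responses
)
```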

Most of the logic for authentication implementation is found in the
[`HubAuth.user_for_cookie`][hubauth.user_for_cookie]
and in the
[`HubAuth.user_for_token`][hubauth.user_for_token]
methods, which make a request of the Hub, and return:

- None, if no user could be identified, or

@@ -234,7 +230,7 @@ configurable by the `cookie_cache_max_age` setting (default: five minutes).

For example, you have a Flask service that returns information about a user.
JupyterHub's HubAuth class can be used to authenticate requests to the Flask
service. See the `service-whoami-flask` example in the
[JupyterHub GitHub repo](https://github.com/jupyterhub/jupyterhub/tree/HEAD/examples/service-whoami-flask)
for more details.

```python

@@ -286,11 +282,10 @@ def whoami(user):

    )
```

### Authenticating tornado services with JupyterHub

Since most Jupyter services are written with tornado,
we include a mixin class, [`HubAuthenticated`][hubauthenticated],
for quickly authenticating your own tornado services with JupyterHub.

Tornado's `@web.authenticated` method calls a Handler's `.get_current_user`

@@ -311,7 +306,6 @@ class MyHandler(HubAuthenticated, web.RequestHandler):

    ...
```

The HubAuth will automatically load the desired configuration from the Service
environment variables.

@@ -321,44 +315,42 @@ username and user group list, respectively. If a user matches neither the user

list nor the group list, they will not be allowed access. If both are left
undefined, then any user will be allowed.

### Implementing your own Authentication with JupyterHub

If you don't want to use the reference implementation
(e.g. you find the implementation a poor fit for your Flask app),
you can implement authentication via the Hub yourself.
We recommend looking at the [`HubAuth`][hubauth] class implementation for reference,
and taking note of the following process:

1. Retrieve the cookie `jupyterhub-services` from the request.
2. Make an API request `GET /hub/api/authorizations/cookie/jupyterhub-services/cookie-value`,
   where cookie-value is the url-encoded value of the `jupyterhub-services` cookie.
   This request must be authenticated with a Hub API token in the `Authorization` header,
   for example using the `api_token` from your [external service's configuration](#externally-managed-services).

   For example, with [requests][]:

   ```python
   r = requests.get(
       '/'.join(["http://127.0.0.1:8081/hub/api",
                 "authorizations/cookie/jupyterhub-services",
                 quote(encrypted_cookie, safe=''),
                 ]),
       headers = {
           'Authorization' : 'token %s' % api_token,
       },
   )
   r.raise_for_status()
   user = r.json()
   ```

3. On success, the reply will be a JSON model describing the user:

   ```json
   {
     "name": "inara",
     "groups": ["serenity", "guild"]
   }
   ```

@@ -368,11 +360,11 @@ and an example of its configuration is found [here](https://github.com/jupyter/n
|
||||
nbviewer can also be run as a Hub-Managed Service as described [nbviewer README][nbviewer example]
|
||||
section on securing the notebook viewer.
|
||||
|
||||
|
||||
[requests]: http://docs.python-requests.org/en/master/
|
||||
[services_auth]: ../api/services.auth.html
|
||||
[HubAuth]: ../api/services.auth.html#jupyterhub.services.auth.HubAuth
|
||||
[HubAuth.user_for_cookie]: ../api/services.auth.html#jupyterhub.services.auth.HubAuth.user_for_cookie
|
||||
[HubAuth.user_for_token]: ../api/services.auth.html#jupyterhub.services.auth.HubAuth.user_for_token
|
||||
[HubAuthenticated]: ../api/services.auth.html#jupyterhub.services.auth.HubAuthenticated
|
||||
[hubauth]: ../api/services.auth.html#jupyterhub.services.auth.HubAuth
|
||||
[hubauth.user_for_cookie]: ../api/services.auth.html#jupyterhub.services.auth.HubAuth.user_for_cookie
|
||||
[hubauth.user_for_token]: ../api/services.auth.html#jupyterhub.services.auth.HubAuth.user_for_token
|
||||
[hubauthenticated]: ../api/services.auth.html#jupyterhub.services.auth.HubAuthenticated
|
||||
[nbviewer example]: https://github.com/jupyter/nbviewer#securing-the-notebook-viewer
|
||||
[jupyterhub_idle_culler]: https://github.com/jupyterhub/jupyterhub-idle-culler
|
||||
|
@@ -8,18 +8,17 @@ and a custom Spawner needs to be able to take three actions:

- poll whether the process is still running
- stop the process

## Examples

Custom Spawners for JupyterHub can be found on the [JupyterHub wiki](https://github.com/jupyterhub/jupyterhub/wiki/Spawners).
Some examples include:

- [DockerSpawner](https://github.com/jupyterhub/dockerspawner) for spawning user servers in Docker containers
  - `dockerspawner.DockerSpawner` for spawning identical Docker containers for
    each user
  - `dockerspawner.SystemUserSpawner` for spawning Docker containers with an
    environment and home directory for each user
  - both `DockerSpawner` and `SystemUserSpawner` also work with Docker Swarm for
    launching containers on remote machines
- [SudoSpawner](https://github.com/jupyterhub/sudospawner) enables JupyterHub to
  run without being root, by spawning an intermediate process via `sudo`
@@ -30,7 +29,6 @@ Some examples include:

- [SSHSpawner](https://github.com/NERSC/sshspawner) to spawn notebooks
  on a remote server using SSH

## Spawner control methods

### Spawner.start

@@ -41,7 +39,7 @@ an object encapsulating the user's name, authentication, and server info.

The return value of `Spawner.start` should be the (ip, port) of the running server.

**NOTE:** When writing coroutines, _never_ `yield` in between a database change and a commit.

Most `Spawner.start` functions will look similar to this example:
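The example itself is elided from this diff; the sketch below shows the general shape for a spawner that launches a local subprocess (an illustration, not the documented example):

```python
# a rough sketch of Spawner.start for a local subprocess
from subprocess import Popen

from jupyterhub.spawner import Spawner
from jupyterhub.utils import random_port


class MySpawner(Spawner):
    async def start(self):
        self.port = random_port()          # pick a port for the single-user server
        cmd = self.cmd + self.get_args()   # command line for jupyterhub-singleuser
        env = self.get_env()               # includes JUPYTERHUB_API_TOKEN and friends
        self.proc = Popen(cmd, env=env)    # launch the server process
        self.pid = self.proc.pid           # remembered for poll() and stop()
        return ('127.0.0.1', self.port)    # (ip, port) of the running server
```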
@@ -80,7 +78,6 @@ to check if the local process is still running. On Windows, it uses `psutil.pid_

`Spawner.stop` should stop the process. It must be a tornado coroutine, which should return when the process has finished exiting.

## Spawner state

JupyterHub should be able to stop and restart without tearing down

@@ -112,7 +109,6 @@ def clear_state(self):
    self.pid = 0

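The full example is elided here; a minimal sketch of the three state methods a process-based spawner typically overrides (an illustration in the spirit of the documented example):

```python
def get_state(self):
    """Return a JSONable dict to persist across Hub restarts."""
    state = super().get_state()
    if self.pid:
        state['pid'] = self.pid
    return state

def load_state(self, state):
    """Restore state from the database when the Hub starts back up."""
    super().load_state(state)
    if 'pid' in state:
        self.pid = state['pid']

def clear_state(self):
    """Reset state after the single-user server has stopped."""
    super().clear_state()
    self.pid = 0
```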
## Spawner options form

(new in 0.4)

@@ -129,7 +125,7 @@ If the `Spawner.options_form` is defined, when a user tries to start their serve

If `Spawner.options_form` is undefined, the user's server is spawned directly, and no spawn page is rendered.

See [this example](https://github.com/jupyterhub/jupyterhub/blob/HEAD/examples/spawn-form/jupyterhub_config.py) for a form that allows custom CLI args for the local spawner.

### `Spawner.options_from_form`

@@ -170,8 +166,7 @@ which would return:

When `Spawner.start` is called, this dictionary is accessible as `self.user_options`.
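The form and parsing code referenced above are elided from this diff; a small sketch of such a pair (the field name is a placeholder):

```python
# a sketch: a one-field options form and the function that parses its submission
c.Spawner.options_form = """
<label for="args">Extra notebook CLI arguments</label>
<input name="args" placeholder="e.g. --debug"></input>
"""

def options_from_form(formdata):
    """formdata maps each form field name to a list of submitted values."""
    options = {}
    options['args'] = formdata.get('args', [''])[0].strip()
    return options

c.Spawner.options_from_form = options_from_form
```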

[spawner]: https://github.com/jupyterhub/jupyterhub/blob/HEAD/jupyterhub/spawner.py

## Writing a custom spawner

@@ -212,7 +207,6 @@ Additionally, configurable attributes for your spawner will
appear in jupyterhub help output and auto-generated configuration files
via `jupyterhub --generate-config`.

## Spawners, resource limits, and guarantees (Optional)

Some spawners of the single-user notebook servers allow setting limits or

@@ -224,10 +218,9 @@ support for them**. For example, LocalProcessSpawner, the default
spawner, does not support limits and guarantees. One of the spawners
that supports limits and guarantees is the `systemdspawner`.

### Memory Limits & Guarantees

`c.Spawner.mem_limit`: A **limit** specifies the _maximum amount of memory_
that may be allocated, though there is no promise that the maximum amount will
be available. In supported spawners, you can set `c.Spawner.mem_limit` to
limit the total amount of memory that a single-user notebook server can

@@ -235,8 +228,8 @@ allocate. Attempting to use more memory than this limit will cause errors. The
single-user notebook server can discover its own memory limit by looking at
the environment variable `MEM_LIMIT`, which is specified in absolute bytes.

`c.Spawner.mem_guarantee`: Sometimes, a **guarantee** of a _minimum amount of
memory_ is desirable. In this case, you can set `c.Spawner.mem_guarantee`
to provide a guarantee that at minimum this much memory will always be
available for the single-user notebook server to use. The environment variable
`MEM_GUARANTEE` will also be set in the single-user notebook server.
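For example (a sketch; the values are placeholders and only take effect with a spawner that implements limits, such as `systemdspawner`):

```python
# in jupyterhub_config.py
c.Spawner.mem_limit = '2G'        # exposed to the server as MEM_LIMIT (in bytes)
c.Spawner.mem_guarantee = '512M'  # exposed to the server as MEM_GUARANTEE (in bytes)
```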
@@ -271,7 +264,7 @@ utilize these certs, there are two methods of interest on the base `Spawner`

class: `.create_certs` and `.move_certs`.

The first method, `.create_certs`, will sign a key-cert pair using an internally
trusted authority for notebooks. During this process, `.create_certs` can
apply `ip` and `dns` name information to the cert via an `alt_names` `kwarg`.
This is used for certificate authentication (verification). Without proper
verification, the `Notebook` will be unable to communicate with the `Hub` and

@@ -1,8 +1,8 @@

# Working with templates and UI

The pages of the JupyterHub application are generated from
[Jinja](http://jinja.pocoo.org/) templates. These allow the header, for
example, to be defined once and incorporated into all pages. By providing
your own templates, you can have complete control over JupyterHub's
appearance.

@@ -10,7 +10,7 @@ appearance.

JupyterHub will look for custom templates in all of the paths in the
`JupyterHub.template_paths` configuration option, falling back on the
[default templates](https://github.com/jupyterhub/jupyterhub/tree/HEAD/share/jupyterhub/templates)
if no custom template with that name is found. This fallback
behavior is new in version 0.9; previous versions searched only those paths
explicitly included in `template_paths`. You may override as many
@@ -20,8 +20,8 @@ or as few templates as you desire.

Jinja provides a mechanism to [extend templates](http://jinja.pocoo.org/docs/2.10/templates/#template-inheritance).
A base template can define a `block`, and child templates can replace or
supplement the material in the block. The
[JupyterHub templates](https://github.com/jupyterhub/jupyterhub/tree/HEAD/share/jupyterhub/templates)
make extensive use of blocks, which allows you to customize parts of the
interface easily.
@@ -32,8 +32,8 @@ In general, a child template can extend a base template, `page.html`, by beginni

This works, unless you are trying to extend the default template for the same
file name. Starting in version 0.9, you may refer to the base file with a
`templates/` prefix. Thus, if you are writing a custom `page.html`, start the
file with this block:

@@ -41,7 +41,7 @@ file with this block:

By defining `block`s with the same name as in the base template, child templates
can replace those sections with custom content. The content from the base
template can be included with the `{{ super() }}` directive.

### Example

@@ -52,10 +52,7 @@ text about the server starting up, place this content in a file named
`JupyterHub.template_paths` configuration option.

```html
{% extends "templates/spawn_pending.html" %} {% block message %} {{ super() }}
<p>Patience is a virtue.</p>
{% endblock %}
```
@@ -69,9 +66,8 @@ To add announcements to be displayed on a page, you have two options:

### Announcement Configuration Variables

If you set the configuration variable `JupyterHub.template_vars = {'announcement': 'some_text'}`, the given `some_text` will be placed on
the top of all pages. The more specific variables
`announcement_login`, `announcement_spawn`, `announcement_home`, and
`announcement_logout` only show on their
respective pages (overriding the global `announcement` variable).
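A sketch of that configuration (the announcement strings are placeholders):

```python
# in jupyterhub_config.py
c.JupyterHub.template_vars = {
    'announcement': 'Scheduled maintenance on Saturday',    # shown on every page
    'announcement_login': 'Sign in with your lab account',  # login page only
}
```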
@@ -79,13 +75,12 @@ Note that changing these variables require a restart, unlike direct

template extension.

You can get the same effect by extending templates, which allows you
to update the messages without restarting. Set
`c.JupyterHub.template_paths` as mentioned above, and then create a
template (for example, `login.html`) with:

```html
{% extends "templates/login.html" %} {% set announcement = 'some message' %}
```

Extending `page.html` puts the message on all pages, but note that

@@ -11,8 +11,6 @@ All authenticated handlers redirect to `/hub/login` to login users

prior to being redirected back to the originating page.
The returned request should preserve all query parameters.

## `/`

The top-level request is always a simple redirect to `/hub/`,

@@ -61,7 +59,7 @@ for starting and stopping the user's server.

If named servers are enabled, there will be some additional
tools for management of named servers.

_Version added: 1.0_ named server UI is new in 1.0.

## `/hub/login`

@@ -111,7 +109,7 @@ not the Hub.

The username is the first part and, if using named servers,
the server name is the second part.

If the user's server is _not_ running, this will be redirected to `/hub/user/:username/...`

## `/hub/user/:username[/:servername]`

@@ -123,8 +121,8 @@ Handling this URL is the most complicated condition in JupyterHub,
because there can be many states:

1. server is not active
   a. user matches
   b. user doesn't match
2. server is ready
3. server is pending, but not ready

@@ -146,7 +144,7 @@ without additional user action (i.e. clicking the link on the page)



_Version changed: 1.0_

Prior to 1.0, this URL itself was responsible for spawning servers,
and served the progress page if it was pending,

@@ -165,7 +163,7 @@ indicating how to spawn the server.
This is meant to help applications such as JupyterLab
that are connected to a server that has stopped.

_Version changed: 1.0_

JupyterHub 0.9 failed these API requests with status 404,
but 1.0 uses 503.

@@ -207,12 +205,12 @@ and a POST request will trigger the actual spawn and redirect.



_Version added: 1.0_

1.0 adds the ability to specify username and servername.
Prior to 1.0, only `/hub/spawn` was recognized for the default server.

_Version changed: 1.0_

Prior to 1.0, this page redirected back to `/hub/user/:username`,
which was responsible for triggering spawn and rendering progress, etc.

@@ -221,7 +219,7 @@ which was responsible for triggering spawn and rendering progress, etc.



_Version added: 1.0_ this URL is new in JupyterHub 1.0.

This page renders the progress view for the given spawn request.
Once the server is ready,

@@ -12,17 +12,17 @@ works.

## Semi-trusted and untrusted users

JupyterHub is designed to be a _simple multi-user server for modestly sized
groups_ of **semi-trusted** users. While the design reflects serving semi-trusted
users, JupyterHub is not necessarily unsuitable for serving **untrusted** users.

Using JupyterHub with **untrusted** users does mean more work by the
administrator. Much care is required to secure a Hub, with extra caution on
protecting users from each other as the Hub is serving untrusted users.

One aspect of JupyterHub's _design simplicity_ for **semi-trusted** users is that
the Hub and single-user servers are placed in a _single domain_, behind a
[_proxy_][configurable-http-proxy]. If the Hub is serving untrusted
users, many of the web's cross-site protections are not applied between
single-user servers and the Hub, or between single-user servers and each
other, since browsers see the whole thing (proxy, Hub, and single user

@@ -40,7 +40,7 @@ server.
To protect all users from each other, JupyterHub administrators must
ensure that:

- A user **does not have permission** to modify their single-user notebook server,
  including:
  - A user **may not** install new packages in the Python environment that runs
    their single-user server.

@@ -49,11 +49,11 @@ ensure that:
    directory that precedes the directory containing `jupyterhub-singleuser`.
  - A user may not modify environment variables (e.g. PATH, PYTHONPATH) for
    their single-user server.
- A user **may not** modify the configuration of the notebook server
  (the `~/.jupyter` or `JUPYTER_CONFIG_DIR` directory).

If any additional services are run on the same domain as the Hub, the services
**must never** display user-authored HTML that is neither _sanitized_ nor _sandboxed_
(e.g. IFramed) to any user that lacks authentication as the author of a file.

## Mitigate security issues

@@ -85,7 +85,7 @@ admin must enforce.

### Prevent spawners from evaluating shell configuration files

For most Spawners, `PATH` is not something users can influence, but care should
be taken to ensure that the Spawner does _not_ evaluate shell configuration
files prior to launching the server.

### Isolate packages using virtualenv

@@ -125,7 +125,6 @@ versions up to date.
A handy website for testing your deployment is
[Qualsys' SSL analyzer tool](https://www.ssllabs.com/ssltest/analyze.html).

[configurable-http-proxy]: https://github.com/jupyterhub/configurable-http-proxy

## Vulnerability reporting

@@ -4,17 +4,20 @@ When troubleshooting, you may see unexpected behaviors or receive an error

message. This section provides links for identifying the cause of the
problem and how to resolve it.

[_Behavior_](#behavior)

- JupyterHub proxy fails to start
- sudospawner fails to run
- What is the default behavior when none of the lists (admin, allowed,
  allowed groups) are set?
- JupyterHub Docker container not accessible at localhost

[_Errors_](#errors)

- 500 error after spawning my single-user server

[_How do I...?_](#how-do-i)

- Use a chained SSL certificate
- Install JupyterHub without a network connection
- I want access to the whole filesystem, but still default users to their home directory

@@ -25,7 +28,7 @@ problem and how to resolve it.
- Toree integration with HDFS rack awareness script
- Where do I find Docker images and Dockerfiles related to JupyterHub?

[_Troubleshooting commands_](#troubleshooting-commands)

## Behavior
@@ -34,8 +37,8 @@ problem and how to resolve it.

If you have tried to start the JupyterHub proxy and it fails to start:

- check if the JupyterHub IP configuration setting is
  `c.JupyterHub.ip = '*'`; if it is, try `c.JupyterHub.ip = ''`
- Try starting with `jupyterhub --ip=0.0.0.0`

**Note**: If this occurs on Ubuntu/Debian, check that you are using a
recent version of node. Some versions of Ubuntu/Debian come with a version
@@ -66,13 +69,13 @@ things like inspect other users' servers, or modify the user list at runtime).

### JupyterHub Docker container not accessible at localhost

Even though the command to start your Docker container exposes port 8000
(`docker run -p 8000:8000 -d --name jupyterhub jupyterhub/jupyterhub jupyterhub`),
it is possible that the IP address itself is not accessible/visible. As a result
when you try http://localhost:8000 in your browser, you are unable to connect
even though the container is running properly. One workaround is to explicitly
tell Jupyterhub to start at `0.0.0.0` which is visible to everyone. Try this
command:

`docker run -p 8000:8000 -d --name jupyterhub jupyterhub/jupyterhub jupyterhub --ip 0.0.0.0 --port 8000`

### How can I kill ports from JupyterHub managed services that have been orphaned?
@@ -108,7 +111,7 @@ sudo MY_ENV=abc123 \

### How can I view the logs for JupyterHub or the user's Notebook servers when using the DockerSpawner?

Use `docker logs <container>` where `<container>` is the container name defined within `docker-compose.yml`. For example, to view the logs of the JupyterHub container use:

    docker logs hub

@@ -132,11 +135,11 @@ There are two likely reasons for this:

1. The single-user server cannot connect to the Hub's API (networking
   configuration problems)
2. The single-user server cannot _authenticate_ its requests (invalid token)

#### Symptoms

The main symptom is a failure to load _any_ page served by the single-user
server, met with a 500 error. This is typically the first page at `/user/<your_name>`
after logging in or clicking "Start my server". When a single-user notebook server
receives a request, the notebook server makes an API request to the Hub to
@@ -198,15 +201,15 @@ your server again.

##### Proxy settings (403 GET)

When your whole JupyterHub sits behind an organization proxy (_not_ a reverse proxy like NGINX as part of your setup and _not_ the configurable-http-proxy) the environment variables `HTTP_PROXY`, `HTTPS_PROXY`, `http_proxy` and `https_proxy` might be set. This confuses the jupyterhub-singleuser servers: When connecting to the Hub for authorization they connect via the proxy instead of directly connecting to the Hub on localhost. The proxy might deny the request (403 GET). This results in the singleuser server thinking it has a wrong auth token. To circumvent this you should add `<hub_url>,<hub_ip>,localhost,127.0.0.1` to the environment variables `NO_PROXY` and `no_proxy`.
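One way to do that is through the spawner's environment configuration (a sketch; the hub hostname/IP are placeholders):

```python
# in jupyterhub_config.py -- a sketch; replace the hub host/IP with your own
no_proxy = 'localhost,127.0.0.1,hub.internal.example.org'
c.Spawner.environment = {'NO_PROXY': no_proxy, 'no_proxy': no_proxy}
```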
### Launching Jupyter Notebooks to run as an externally managed JupyterHub service with the `jupyterhub-singleuser` command returns a `JUPYTERHUB_API_TOKEN` error

[JupyterHub services](https://jupyterhub.readthedocs.io/en/stable/reference/services.html) allow processes to interact with JupyterHub's REST API. Example use-cases include:

- **Secure Testing**: provide a canonical Jupyter Notebook for testing production data to reduce the number of entry points into production systems.
- **Grading Assignments**: provide access to shared Jupyter Notebooks that may be used for management tasks such as grading assignments.
- **Private Dashboards**: share dashboards with certain group members.

If possible, try to run the Jupyter Notebook as an externally managed service with one of the provided [jupyter/docker-stacks](https://github.com/jupyter/docker-stacks).

@@ -231,7 +234,7 @@ With a docker container, pass in the environment variable with the run command:
        -e JUPYTERHUB_API_TOKEN=my_secret_token \
        jupyter/datascience-notebook:latest

[This example](https://github.com/jupyterhub/jupyterhub/tree/HEAD/examples/service-notebook/external) demonstrates how to combine the use of the `jupyterhub-singleuser` environment variables when launching a Notebook as an externally managed service.

## How do I...?

@@ -250,7 +253,6 @@ You would then set in your `jupyterhub_config.py` file the `ssl_key` and

    c.JupyterHub.ssl_cert = your_host-chained.crt
    c.JupyterHub.ssl_key = your_host.key

#### Example

Your certificate provider gives you the following files: `example_host.crt`,

@@ -402,8 +404,8 @@ SyntaxError: Missing parentheses in call to 'print'

In order to resolve this issue, there are two potential options.

1. Update HDFS core-site.xml, so the parameter "net.topology.script.file.name" points to a custom
   script (e.g. /etc/hadoop/conf/custom_topology_script.py). Copy the original script and change the first line to point
   to a Python 2 installation (e.g. /usr/bin/python).
2. In spark-env.sh add a Python 2 installation to your path (e.g. export PATH=/opt/anaconda2/bin:$PATH).

### Where do I find Docker images and Dockerfiles related to JupyterHub?

@@ -1,34 +1,34 @@

# Bootstrapping your users

Before spawning a notebook to the user, it could be useful to
do some preparation work in a bootstrapping process.

Common use cases are:

_Providing writeable storage for LDAP users_

Your JupyterHub is configured to use the LDAPAuthenticator and DockerSpawner.

- The user has no file directory on the host since you are using LDAP.
- When a user has no directory and DockerSpawner wants to mount a volume,
  the spawner will use docker to create a directory.
  Since the docker daemon is running as root, the generated directory for the volume
  mount will not be writeable by the `jovyan` user inside of the container.
  For the directory to be useful to the user, the permissions on the directory
  need to be modified for the user to have write access.

_Prepopulating Content_

Another use would be to copy initial content, such as tutorial files or reference
material, into the user's space when a notebook server is newly spawned.

You can define your own bootstrap process by implementing a `pre_spawn_hook` on any spawner.
The Spawner itself is passed as a parameter to your hook and you can easily get the contextual information out of the spawning process.

Similarly, there may be cases where you would like to clean up after a spawner stops.
You may implement a `post_stop_hook` that is always executed after the spawner stops.
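For example, a cleanup hook could look like this sketch (the path is a placeholder mirroring the directory example below):

```python
# in jupyterhub_config.py -- a sketch of a post-stop cleanup hook
import os
import shutil

def clean_dir_hook(spawner):
    username = spawner.user.name
    temp_path = os.path.join('/volumes/jupyterhub', username, 'temp')
    if os.path.exists(temp_path):
        shutil.rmtree(temp_path)

c.Spawner.post_stop_hook = clean_dir_hook
```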

If you implement a hook, make sure that it is _idempotent_. It will be executed every time
a notebook server is spawned to the user. That means you should somehow
ensure that things which should run only once are not running again and again.
For example, before you create a directory, check if it exists.
@@ -41,13 +41,13 @@ Create a directory for the user, if none exists

```python
# in jupyterhub_config.py
import os
def create_dir_hook(spawner):
    username = spawner.user.name # get the username
    volume_path = os.path.join('/volumes/jupyterhub', username)
    if not os.path.exists(volume_path):
        # create a directory with umask 0755
        # hub and container user must have the same UID to be writeable
        # still readable by other users on the system
        os.mkdir(volume_path, 0o755)
```

@@ -83,17 +83,17 @@ in a new file in `/etc/sudoers.d`, or simply in `/etc/sudoers`.

All new home directories will be created from `/etc/skel`, so make sure to place any custom homedir-contents in there.

### Example #3 - Run a shell script

You can specify a plain ole' shell script (or any other executable) to be run
by the bootstrap process.

For example, you can execute a shell script and as first parameter pass the name
of the user:

```python
# in jupyterhub_config.py
from subprocess import check_call
import os
def my_script_hook(spawner):

@@ -106,7 +106,7 @@ c.Spawner.pre_spawn_hook = my_script_hook

```

Here's an example of what you could do in your shell script. See also
`/examples/bootstrap-script/`

```bash
@@ -126,7 +126,7 @@ fi

# This example script will do the following:
# - create one directory for the user $USER in a BASE_DIRECTORY (see below)
# - create a "tutorials" directory within and download and unzip
#   the PythonDataScienceHandbook from GitHub

# Start the Bootstrap Process

@@ -148,9 +148,9 @@ else
    echo "...initial content loading for user ..."
    mkdir $USER_DIRECTORY/tutorials
    cd $USER_DIRECTORY/tutorials
    wget https://github.com/jakevdp/PythonDataScienceHandbook/archive/HEAD.zip
    unzip -o HEAD.zip
    rm HEAD.zip
fi

exit 0
```

@@ -40,9 +40,9 @@ else

    echo "...initial content loading for user ..."
    mkdir $USER_DIRECTORY/tutorials
    cd $USER_DIRECTORY/tutorials
    wget https://github.com/jakevdp/PythonDataScienceHandbook/archive/HEAD.zip
    unzip -o HEAD.zip
    rm HEAD.zip
fi

exit 0

@@ -16,63 +16,62 @@ implementations in other web servers or languages.

## Run the example

1. Generate an API token:

       export JUPYTERHUB_API_TOKEN=$(openssl rand -hex 32)

2. Launch a version of the whoami service.
   For `whoami-oauth`:

       bash launch-service.sh &

   or for `whoami-oauth-basic`:

       bash launch-service-basic.sh &

3. Launch JupyterHub:

       jupyterhub

4. Visit http://127.0.0.1:5555/

After logging in with your local-system credentials, you should see a JSON dump of your user info:

```json
{
  "admin": false,
  "last_activity": "2016-05-27T14:05:18.016372",
  "name": "queequeg",
  "pending": null,
  "server": "/user/queequeg"
}
```

The essential pieces for using JupyterHub as an OAuth provider are:

1. registering your service with jupyterhub:

   ```python
   c.JupyterHub.services = [
       {
           # the name of your service
           # should be simple and unique.
           # mostly used to identify your service in logging
           "name": "my-service",
           # the oauth client id of your service
           # must be unique but isn't private
           # can be randomly generated or hand-written
           "oauth_client_id": "abc123",
           # the API token and client secret of the service
           # should be generated securely,
           # e.g. via `openssl rand -hex 32`
           "api_token": "abc123...",
           # the redirect target for jupyterhub to send users
           # after successful authentication
           "oauth_redirect_uri": "https://service-host/oauth_callback"
       }
   ]
   ```

2. Telling your service how to authenticate with JupyterHub.

@@ -4,14 +4,14 @@ This example shows how you can connect Jupyterhub to a Postgres database

instead of the default SQLite backend.

### Running Postgres with Jupyterhub on the host.

0. Uncomment and replace `ENV JPY_PSQL_PASSWORD arglebargle` with your own
   password in the Dockerfile for `examples/postgres/db`. (Alternatively, pass
   -e `JPY_PSQL_PASSWORD=<password>` when you start the db container.)

1. `cd` to the root of your jupyterhub repo.

2. Build the postgres image with `docker build -t jupyterhub-postgres-db examples/postgres/db`. This may take a minute or two the first time it's
   run.

3. Run the db image with `docker run -d -p 5433:5432 jupyterhub-postgres-db`.

@@ -24,24 +24,22 @@ instead of the default SQLite backend.

5. Log in as the user running jupyterhub on your host machine.
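Under the hood, JupyterHub reaches Postgres through its `db_url` setting; a sketch of the equivalent configuration (user, password, port, and database name are all placeholders):

```python
# in jupyterhub_config.py -- a sketch; every value here is a placeholder
c.JupyterHub.db_url = 'postgresql://jupyterhub:<password>@localhost:5433/jupyterhub'
```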
### Running Postgres with Containerized Jupyterhub.

0. Do steps 0-2 from the above section, ensuring that the values set/passed
   for `JPY_PSQL_PASSWORD` match for the hub and db containers.

1. Build the hub image with `docker build -t jupyterhub-postgres-hub examples/postgres/hub`. This may take a minute or two the first time it's
   run.

2. Run the db image with `docker run -d --name=jpy-db jupyterhub-postgres`. Note that, unlike when connecting to a host machine
   jupyterhub, we don't specify a port-forwarding scheme here, but we do need
   to specify a name for the container.

3. Run the containerized hub with `docker run -it --link jpy-db:postgres jupyterhub-postgres-hub`. This instructs docker to run the hub container
   with a link to the already-running db container, which will forward
   environment and connection information from the DB to the hub.

4. Log in as one of the users defined in the `examples/postgres/hub/`
   Dockerfile. By default `rhea` is the server's admin user, `io` and
   `ganymede` are non-admin users, and all users' passwords are their
   usernames.

@@ -1,4 +1,3 @@

# Simple Announcement Service Example

This is a simple service that allows administrators to manage announcements

@@ -16,10 +15,10 @@ configuration file something like:
    ]

This starts the announcements service up at `/services/announcement` when
JupyterHub launches. By default the announcement text is empty.

The `announcement` module has a configurable port (default 8888) and an API
prefix setting. By default the API prefix is `JUPYTERHUB_SERVICE_PREFIX` if
that environment variable is set or `/` if it is not.

## Managing the Announcement

@@ -27,7 +26,7 @@ that environment variable is set or `/` if it is not.

Admin users can set the announcement text with an API token:

    $ curl -X POST -H "Authorization: token <token>" \
        -d '{"announcement":"JupyterHub will be upgraded on August 14!"}' \
        https://.../services/announcement

Anyone can read the announcement:

@@ -42,7 +41,7 @@ Anyone can read the announcement:

The time the announcement was posted is recorded in the `timestamp` field and
the user who posted the announcement is recorded in the `user` field.

To clear the announcement text, just DELETE. Only admin users can do this.

    $ curl -X DELETE -H "Authorization: token <token>" \
        https://.../services/announcement

## Seeing the Announcement in JupyterHub

To be able to render the announcement, include the provided `page.html` template
that extends the base `page.html` template. Set `c.JupyterHub.template_paths`
in JupyterHub's configuration to include the path to the extending template.
The template changes the `announcement` element and does a JQuery `$.get()` call
to retrieve the announcement text.

JupyterHub's configurable announcement template variables can be set for various
pages like login, logout, spawn, and home. Including the template provided in
this example overrides all of those.

@@ -1,14 +1,9 @@

{% extends "templates/page.html" %} {% block announcement %}
<div class="container text-center announcement"></div>
{% endblock %} {% block script %} {{ super() }}
<script>
  $.get("/services/announcement/", function (data) {
    $(".announcement").html(data["announcement"]);
  });
</script>
{% endblock %}

examples/service-fastapi/Dockerfile (new file)

@@ -0,0 +1,13 @@

FROM jupyterhub/jupyterhub

# Create test user (PAM auth) and install single-user Jupyter
RUN useradd testuser --create-home --shell /bin/bash
RUN echo 'testuser:passwd' | chpasswd
RUN pip install jupyter

COPY app ./app
COPY jupyterhub_config.py .
COPY requirements.txt /tmp/requirements.txt
RUN pip install -r /tmp/requirements.txt

CMD ["jupyterhub", "--ip", "0.0.0.0"]

examples/service-fastapi/README.md (new file)

@@ -0,0 +1,107 @@

# FastAPI

[FastAPI](https://fastapi.tiangolo.com/) is a popular new web framework attractive for its type hinting, async support, automatic doc generation (Swagger), and more. Their [Feature highlights](https://fastapi.tiangolo.com/features/) sum it up nicely.

# Swagger UI with OAuth demo



# Try it out locally

1. Install `fastapi` and other dependencies, then launch JupyterHub

   ```
   pip install -r requirements.txt
   jupyterhub --ip=127.0.0.1
   ```

2. Visit http://127.0.0.1:8000/services/fastapi or http://127.0.0.1:8000/services/fastapi/docs

3. Try interacting programmatically. If you create a new token in your control panel or pull out the `JUPYTERHUB_API_TOKEN` in the single user environment, you can skip the third step here.

   ```
   $ curl -X GET http://127.0.0.1:8000/services/fastapi/
   {"Hello":"World"}

   $ curl -X GET http://127.0.0.1:8000/services/fastapi/me
   {"detail":"Must login with token parameter, cookie, or header"}

   $ curl -X POST http://127.0.0.1:8000/hub/api/authorizations/token \
          -d '{"username": "myname", "password": "mypasswd!"}' \
          | jq '.token'
   "3fee13ce6d2845da9bd5f2c2170d3428"

   $ curl -X GET http://127.0.0.1:8000/services/fastapi/me \
          -H "Authorization: Bearer 3fee13ce6d2845da9bd5f2c2170d3428" \
          | jq .
   {
     "name": "myname",
     "admin": false,
     "groups": [],
     "server": null,
     "pending": null,
     "last_activity": "2021-04-07T18:05:11.587638+00:00",
     "servers": null
   }
   ```

# Try it out in Docker

1. Build and run the Docker image locally

   ```bash
   sudo docker build . -t service-fastapi
   sudo docker run -it -p 8000:8000 service-fastapi
   ```

2. Visit http://127.0.0.1:8000/services/fastapi/docs. When going through the OAuth flow or getting a token from the control panel, you can log in with `testuser` / `passwd`.

# PUBLIC_HOST

If you are running your service behind a proxy, or on a Docker / Kubernetes infrastructure, you might run into an error during OAuth that says `Mismatching redirect URI`. In the JupyterHub logs, there will be a warning along the lines of: `[W 2021-04-06 23:40:06.707 JupyterHub provider:498] Redirect uri https://jupyterhub.my.cloud/services/fastapi/oauth_callback != /services/fastapi/oauth_callback`. This happens because Swagger UI adds the request host, as seen in the browser, to the Authorization URL.

To solve that problem, the `oauth_redirect_uri` value in the service initialization needs to match what Swagger will auto-generate and what the service will use when POST'ing to `/oauth2/token`. In this example, setting the `PUBLIC_HOST` environment variable to your public-facing Hub domain (e.g. `https://jupyterhub.my.cloud`) should make it work.

# Notes on security.py

FastAPI has a concept of [dependency injection](https://fastapi.tiangolo.com/tutorial/dependencies) using a `Depends` object (and a subclass `Security`) that is automatically instantiated/executed when it is a parameter for your endpoint routes. You can utilize a `Depends` object for reusable common parameters or authentication mechanisms like the [`get_user`](https://fastapi.tiangolo.com/tutorial/security/get-current-user) pattern.

JupyterHub OAuth has three ways to authenticate: a `token` url parameter; an `Authorization: Bearer <token>` header; and a (deprecated) `jupyterhub-services` cookie. FastAPI has helper functions that let us create `Security` (dependency injection) objects for each of those. When you need to allow multiple / optional authentication dependencies (`Security` objects), then you can use the argument `auto_error=False` and it will return `None` instead of raising an `HTTPException`.

Endpoints that need authentication (`/me` and `/debug` in this example) can leverage the `get_user` pattern and effectively pull the user model from the Hub API when a request has authenticated with cookie / token / header, all using the simple syntax:

```python
from .security import get_current_user
from .models import User

@router.get("/new_endpoint")
async def new_endpoint(user: User = Depends(get_current_user)):
    "Function that needs to work with an authenticated user"
    return {"Hello": user.name}
```

# Notes on client.py

FastAPI is designed to be an asynchronous web server, so the interactions with the Hub API should be made asynchronously as well. Instead of using `requests` to get user information from a token/cookie, this example uses [`httpx`](https://www.python-httpx.org/). `client.py` defines a small function that creates a `Client` (equivalent of `requests.Session`) with the Hub API url as its `base_url` and adds the `JUPYTERHUB_API_TOKEN` to every header.

Consider this a very minimal alternative to using `jupyterhub.services.auth.HubOAuth`.

```python
# client.py
import os

import httpx


def get_client():
    base_url = os.environ["JUPYTERHUB_API_URL"]
    token = os.environ["JUPYTERHUB_API_TOKEN"]
    headers = {"Authorization": "Bearer %s" % token}
    return httpx.AsyncClient(base_url=base_url, headers=headers)
```

```python
# other modules
from .client import get_client

async with get_client() as client:
    resp = await client.get('/endpoint')
    ...
```

examples/service-fastapi/app/__init__.py (new file)

@@ -0,0 +1 @@
from .app import app

examples/service-fastapi/app/app.py (new file)

@@ -0,0 +1,25 @@

import os

from fastapi import FastAPI

from .service import router

### When managed by Jupyterhub, the actual endpoints
### will be served out prefixed by /services/:name.
### One way to handle this with FastAPI is to use an APIRouter.
### All routes are defined in service.py

app = FastAPI(
    title="Example FastAPI Service",
    version="0.1",
    ### Serve out Swagger from the service prefix (<hub>/services/:name/docs)
    openapi_url=router.prefix + "/openapi.json",
    docs_url=router.prefix + "/docs",
    redoc_url=router.prefix + "/redoc",
    ### Add our service client id to the /docs Authorize form automatically
    swagger_ui_init_oauth={"clientId": os.environ["JUPYTERHUB_CLIENT_ID"]},
    ### Default /docs/oauth2 redirect will cause Hub
    ### to raise oauth2 redirect uri mismatch errors
    swagger_ui_oauth2_redirect_url=os.environ["JUPYTERHUB_OAUTH_CALLBACK_URL"],
)
app.include_router(router)

examples/service-fastapi/app/client.py (new file)

@@ -0,0 +1,11 @@

import os

import httpx


# a minimal alternative to using HubOAuth class
def get_client():
    base_url = os.environ["JUPYTERHUB_API_URL"]
    token = os.environ["JUPYTERHUB_API_TOKEN"]
    headers = {"Authorization": "Bearer %s" % token}
    return httpx.AsyncClient(base_url=base_url, headers=headers)

examples/service-fastapi/app/models.py (new file)

@@ -0,0 +1,46 @@

from datetime import datetime
from typing import Any
from typing import List
from typing import Optional

from pydantic import BaseModel


# https://jupyterhub.readthedocs.io/en/stable/_static/rest-api/index.html
class Server(BaseModel):
    name: str
    ready: bool
    pending: Optional[str]
    url: str
    progress_url: str
    started: datetime
    last_activity: datetime
    state: Optional[Any]
    user_options: Optional[Any]


class User(BaseModel):
    name: str
    admin: bool
    groups: List[str]
    server: Optional[str]
    pending: Optional[str]
    last_activity: datetime
    servers: Optional[List[Server]]


# https://stackoverflow.com/questions/64501193/fastapi-how-to-use-httpexception-in-responses
class AuthorizationError(BaseModel):
    detail: str


class HubResponse(BaseModel):
    msg: str
    request_url: str
    token: str
    response_code: int
    hub_response: dict


class HubApiError(BaseModel):
    detail: HubResponse

examples/service-fastapi/app/security.py (new file)

@@ -0,0 +1,61 @@

import os

from fastapi import HTTPException
from fastapi import Security
from fastapi import status
from fastapi.security import OAuth2AuthorizationCodeBearer
from fastapi.security.api_key import APIKeyQuery

from .client import get_client
from .models import User

### Endpoints can require authentication using Depends(get_current_user)
### get_current_user will look for a token in url params or
### Authorization: bearer token (header).
### Hub technically supports cookie auth too, but it is deprecated so
### not being included here.
auth_by_param = APIKeyQuery(name="token", auto_error=False)

auth_url = os.environ["PUBLIC_HOST"] + "/hub/api/oauth2/authorize"
auth_by_header = OAuth2AuthorizationCodeBearer(
    authorizationUrl=auth_url, tokenUrl="get_token", auto_error=False
)
### ^^ The flow for OAuth2 in Swagger is that the "authorize" button
### will redirect user (browser) to "auth_url", which is the Hub login page.
### After logging in, the browser will POST to our internal /get_token endpoint
### with the auth code. That endpoint POST's to Hub /oauth2/token with
### our client_secret (JUPYTERHUB_API_TOKEN) and that code to get an
### access_token, which it returns to browser, which places in Authorization header.

### For consideration: optimize performance with a cache instead of
### always hitting the Hub api?
async def get_current_user(
    auth_by_param: str = Security(auth_by_param),
    auth_by_header: str = Security(auth_by_header),
):
    token = auth_by_param or auth_by_header
    if token is None:
        raise HTTPException(
            status.HTTP_401_UNAUTHORIZED,
            detail="Must login with token parameter or Authorization bearer header",
        )

    async with get_client() as client:
        endpoint = "/user"
        # normally we auth to Hub API with service api token,
        # but this time auth as the user token to get user model
        headers = {"Authorization": f"Bearer {token}"}
        resp = await client.get(endpoint, headers=headers)
        if resp.is_error:
            raise HTTPException(
                status.HTTP_400_BAD_REQUEST,
                detail={
                    "msg": "Error getting user info from token",
                    "request_url": str(resp.request.url),
                    "token": token,
                    "response_code": resp.status_code,
                    "hub_response": resp.json(),
                },
            )
        user = User(**resp.json())
        return user

examples/service-fastapi/app/service.py (new file, 70 lines)
@@ -0,0 +1,70 @@
import os

from fastapi import APIRouter
from fastapi import Depends
from fastapi import Form
from fastapi import Request

from .client import get_client
from .models import AuthorizationError
from .models import HubApiError
from .models import User
from .security import get_current_user

# APIRouter prefix cannot end in /
service_prefix = os.getenv("JUPYTERHUB_SERVICE_PREFIX", "").rstrip("/")
router = APIRouter(prefix=service_prefix)


@router.post("/get_token", include_in_schema=False)
async def get_token(code: str = Form(...)):
    "Callback function for OAuth2AuthorizationCodeBearer scheme"
    # The only thing we need in this form post is the code
    # Everything else we can hardcode / pull from env
    async with get_client() as client:
        redirect_uri = (
            os.environ["PUBLIC_HOST"] + os.environ["JUPYTERHUB_OAUTH_CALLBACK_URL"],
        )
        data = {
            "client_id": os.environ["JUPYTERHUB_CLIENT_ID"],
            "client_secret": os.environ["JUPYTERHUB_API_TOKEN"],
            "grant_type": "authorization_code",
            "code": code,
            "redirect_uri": redirect_uri,
        }
        resp = await client.post("/oauth2/token", data=data)
    ### resp.json() is {'access_token': <token>, 'token_type': 'Bearer'}
    return resp.json()


@router.get("/")
async def index():
    "Non-authenticated function that returns {'Hello': 'World'}"
    return {"Hello": "World"}


# response_model and responses dict translate to OpenAPI (Swagger) hints
# compare and contrast what the /me endpoint looks like in Swagger vs /debug
@router.get(
    "/me",
    response_model=User,
    responses={401: {'model': AuthorizationError}, 400: {'model': HubApiError}},
)
async def me(user: User = Depends(get_current_user)):
    "Authenticated function that returns the User model"
    return user


@router.get("/debug")
async def index(request: Request, user: User = Depends(get_current_user)):
    """
    Authenticated function that returns a few pieces of debug
     * Environ of the service process
     * Request headers
     * User model
    """
    return {
        "env": dict(os.environ),
        "headers": dict(request.headers),
        "user": user,
    }
examples/service-fastapi/fastapi_example.gif (new binary file, 2.7 MiB; not shown)
examples/service-fastapi/jupyterhub_config.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import os
import warnings

# When Swagger performs OAuth2 in the browser, it will set
# the request host + relative path as the redirect uri, causing a
# uri mismatch if the oauth_redirect_uri is just the relative path
# that is set in the c.JupyterHub.services entry (as per default).
# Therefore we need to know the request host ahead of time.
if "PUBLIC_HOST" not in os.environ:
    msg = (
        "env PUBLIC_HOST is not set, defaulting to http://127.0.0.1:8000. "
        "This can cause problems with OAuth. "
        "Set PUBLIC_HOST to your public (browser accessible) host."
    )
    warnings.warn(msg)
    public_host = "http://127.0.0.1:8000"
else:
    public_host = os.environ["PUBLIC_HOST"].rstrip('/')
service_name = "fastapi"
oauth_redirect_uri = f"{public_host}/services/{service_name}/oauth_callback"

c.JupyterHub.services = [
    {
        "name": service_name,
        "url": "http://127.0.0.1:10202",
        "command": ["uvicorn", "app:app", "--port", "10202"],
        "admin": True,
        "oauth_redirect_uri": oauth_redirect_uri,
        "environment": {"PUBLIC_HOST": public_host},
    }
]
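The service entry above launches `uvicorn app:app`, so an `app/__init__.py` must expose a FastAPI instance that mounts the router from `app/service.py`. That file is not shown in this diff; a minimal sketch of how it is presumably wired:

```python
# sketch of app/__init__.py -- assumed wiring, not part of this diff
from fastapi import FastAPI

from .service import router

app = FastAPI()             # the "app" object referenced by `uvicorn app:app`
app.include_router(router)  # all routes live under JUPYTERHUB_SERVICE_PREFIX
```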
examples/service-fastapi/requirements.txt (new file, 4 lines)
@@ -0,0 +1,4 @@
fastapi
httpx
python-multipart
uvicorn
@@ -17,8 +17,8 @@ and the name of the shared-notebook service.

In the external example, some extra steps are required to set up supervisor:

1. select a system user to run the service. This is a user on the system, and does not need to be a Hub user. Add this to the user field in `shared-notebook.conf`, replacing `someuser`.
2. generate a secret token for authentication, and replace the `super-secret` fields in `shared-notebook-service` and `jupyterhub_config.py`
3. install `shared-notebook-service` somewhere on your system, and update `/path/to/shared-notebook-service` to the absolute path of this destination
4. copy `shared-notebook.conf` to `/etc/supervisor/conf.d/`
5. `supervisorctl reload`
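Step 2 above calls for a secret token to replace the `super-secret` placeholders. Any sufficiently long random value works; a minimal sketch using only the Python standard library:

```python
# generate a random API token for the shared-notebook service
import secrets

token = secrets.token_hex(32)  # 64 hex characters, 256 bits of entropy
print(token)  # paste this into shared-notebook-service and jupyterhub_config.py
```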
@@ -4,21 +4,21 @@ Uses `jupyterhub.services.HubAuth` to authenticate requests with the Hub in a [f

## Run

1. Launch JupyterHub and the `whoami service` with

       jupyterhub --ip=127.0.0.1

2. Visit http://127.0.0.1:8000/services/whoami/ or http://127.0.0.1:8000/services/whoami-oauth/

After logging in with your local-system credentials, you should see a JSON dump of your user info:

```json
{
  "admin": false,
  "last_activity": "2016-05-27T14:05:18.016372",
  "name": "queequeg",
  "pending": null,
  "server": "/user/queequeg"
}
```
@@ -29,5 +29,4 @@ A similar service could be run externally, by setting the JupyterHub service env

    JUPYTERHUB_API_TOKEN
    JUPYTERHUB_SERVICE_PREFIX

[flask]: http://flask.pocoo.org
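For the external variant, the two environment variables above are essentially all `HubAuth` needs. A minimal sketch of picking them up in an externally-run process (variable names are the standard service ones; the surrounding Flask app is omitted):

```python
# sketch: authenticating requests in an externally-managed service
import os

from jupyterhub.services.auth import HubAuth

auth = HubAuth(
    api_token=os.environ["JUPYTERHUB_API_TOKEN"],  # set when registering the service
    cache_max_age=60,  # cache Hub responses briefly to limit API traffic
)

# given a token presented by the client, ask the Hub who it belongs to:
# user_model = auth.user_for_token(token)
```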
@@ -6,21 +6,21 @@ There is an implementation each of cookie-based `HubAuthenticated` and OAuth-bas

## Run

1. Launch JupyterHub and the `whoami service` with

       jupyterhub --ip=127.0.0.1

2. Visit http://127.0.0.1:8000/services/whoami or http://127.0.0.1:8000/services/whoami-oauth

After logging in with your local-system credentials, you should see a JSON dump of your user info:

```json
{
  "admin": false,
  "last_activity": "2016-05-27T14:05:18.016372",
  "name": "queequeg",
  "pending": null,
  "server": "/user/queequeg"
}
```
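The cookie-based variant mentioned above boils down to mixing `HubAuthenticated` into a Tornado handler; a condensed sketch (handler and route names are illustrative, not the exact example source):

```python
# condensed sketch of a HubAuthenticated Tornado handler
import json

from jupyterhub.services.auth import HubAuthenticated
from tornado import web


class WhoAmIHandler(HubAuthenticated, web.RequestHandler):
    @web.authenticated  # redirects to the Hub login page if not authenticated
    def get(self):
        user_model = self.get_current_user()
        self.set_header("content-type", "application/json")
        self.write(json.dumps(user_model, indent=1, sort_keys=True))
```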
@@ -1,3 +0,0 @@
|
||||
# Docker Cloud build hooks
|
||||
|
||||
These are the hooks
|
@@ -1,7 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -exuo pipefail
|
||||
|
||||
# build jupyterhub-onbuild image
|
||||
docker build --build-arg BASE_IMAGE=$DOCKER_REPO:$DOCKER_TAG -t ${DOCKER_REPO}-onbuild:$DOCKER_TAG onbuild
|
||||
# build jupyterhub-demo image
|
||||
docker build --build-arg BASE_IMAGE=${DOCKER_REPO}-onbuild:$DOCKER_TAG -t ${DOCKER_REPO}-demo:$DOCKER_TAG demo-image
|
@@ -1,42 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -exuo pipefail
|
||||
|
||||
export ONBUILD=${DOCKER_REPO}-onbuild
|
||||
export DEMO=${DOCKER_REPO}-demo
|
||||
export REPOS="${DOCKER_REPO} ${ONBUILD} ${DEMO}"
|
||||
# push ONBUILD image
|
||||
docker push $ONBUILD:$DOCKER_TAG
|
||||
docker push $DEMO:$DOCKER_TAG
|
||||
|
||||
function get_hub_version() {
|
||||
rm -f hub_version
|
||||
docker run --rm -v $PWD:/version -u $(id -u) -i $DOCKER_REPO:$DOCKER_TAG sh -c 'jupyterhub --version > /version/hub_version'
|
||||
hub_xyz=$(cat hub_version)
|
||||
split=( ${hub_xyz//./ } )
|
||||
hub_xy="${split[0]}.${split[1]}"
|
||||
# add .dev on hub_xy so it's 1.0.dev
|
||||
if [[ ! -z "${split[3]:-}" ]]; then
|
||||
hub_xy="${hub_xy}.${split[3]}"
|
||||
latest=0
|
||||
else
|
||||
latest=1
|
||||
fi
|
||||
}
|
||||
|
||||
get_hub_version
|
||||
|
||||
for repo in ${REPOS}; do
|
||||
# when building master, push 0.9.0.dev as well
|
||||
docker tag $repo:$DOCKER_TAG $repo:$hub_xyz
|
||||
docker push $repo:$hub_xyz
|
||||
|
||||
# when building 0.9.x, push 0.9 as well
|
||||
docker tag $repo:$DOCKER_TAG $repo:$hub_xy
|
||||
docker push $repo:$hub_xy
|
||||
|
||||
# if building a stable release, tag latest as well
|
||||
if [[ "$latest" == "1" ]]; then
|
||||
docker tag $repo:$DOCKER_TAG $repo:latest
|
||||
docker push $repo:latest
|
||||
fi
|
||||
done
|
@@ -4,9 +4,9 @@

 version_info = (
     1,
-    2,
-    1,
-    # "", # release (b1, rc1, or "" for final or dev)
+    5,
+    0,
+    "", # release (b1, rc1, or "" for final or dev)
     # "dev", # dev or nothing for beta/rc/stable releases
 )

@@ -23,7 +23,7 @@ tables = ('oauth_access_tokens', 'oauth_codes')
 
 def add_column_if_table_exists(table, column):
     engine = op.get_bind().engine
-    if table not in engine.table_names():
+    if table not in sa.inspect(engine).get_table_names():
         # table doesn't exist, no need to upgrade
         # because jupyterhub will create it on launch
         logger.warning("Skipping upgrade of absent table: %s", table)
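The replacement API used here is plain SQLAlchemy inspection, which also works on SQLAlchemy versions where `Engine.table_names()` is deprecated; a standalone sketch (in-memory SQLite assumed for illustration):

```python
# checking for a table with the inspector API instead of Engine.table_names()
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")  # throwaway in-memory database
if "oauth_access_tokens" not in sa.inspect(engine).get_table_names():
    print("table absent, nothing to upgrade")
```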
@@ -17,7 +17,8 @@ from jupyterhub.orm import JSONDict
|
||||
|
||||
|
||||
def upgrade():
|
||||
tables = op.get_bind().engine.table_names()
|
||||
engine = op.get_bind().engine
|
||||
tables = sa.inspect(engine).get_table_names()
|
||||
if 'spawners' in tables:
|
||||
op.add_column('spawners', sa.Column('user_options', JSONDict()))
|
||||
|
||||
|
@@ -20,7 +20,8 @@ logger = logging.getLogger('alembic')
|
||||
|
||||
|
||||
def upgrade():
|
||||
tables = op.get_bind().engine.table_names()
|
||||
engine = op.get_bind().engine
|
||||
tables = sa.inspect(engine).get_table_names()
|
||||
op.add_column('api_tokens', sa.Column('created', sa.DateTime(), nullable=True))
|
||||
op.add_column(
|
||||
'api_tokens', sa.Column('last_activity', sa.DateTime(), nullable=True)
|
||||
|
@@ -31,7 +31,7 @@ def upgrade():
|
||||
% (now,)
|
||||
)
|
||||
|
||||
tables = c.engine.table_names()
|
||||
tables = sa.inspect(c.engine).get_table_names()
|
||||
|
||||
if 'spawners' in tables:
|
||||
op.add_column('spawners', sa.Column('started', sa.DateTime, nullable=True))
|
||||
|
@@ -16,7 +16,8 @@ import sqlalchemy as sa
|
||||
|
||||
|
||||
def upgrade():
|
||||
tables = op.get_bind().engine.table_names()
|
||||
engine = op.get_bind().engine
|
||||
tables = sa.inspect(engine).get_table_names()
|
||||
if 'oauth_clients' in tables:
|
||||
op.add_column(
|
||||
'oauth_clients', sa.Column('description', sa.Unicode(length=1023))
|
||||
|
@@ -222,6 +222,14 @@ class OAuthAuthorizeHandler(OAuthHandler, BaseHandler):
|
||||
# default: require confirmation
|
||||
return True
|
||||
|
||||
def get_login_url(self):
|
||||
"""
|
||||
Support automatically logging in when JupyterHub is used as auth provider
|
||||
"""
|
||||
if self.authenticator.auto_login_oauth2_authorize:
|
||||
return self.authenticator.login_url(self.hub.base_url)
|
||||
return super().get_login_url()
|
||||
|
||||
@web.authenticated
|
||||
async def get(self):
|
||||
"""GET /oauth/authorization
|
||||
@@ -253,7 +261,7 @@ class OAuthAuthorizeHandler(OAuthHandler, BaseHandler):
|
||||
# Render oauth 'Authorize application...' page
|
||||
auth_state = await self.current_user.get_auth_state()
|
||||
self.write(
|
||||
self.render_template(
|
||||
await self.render_template(
|
||||
"oauth.html",
|
||||
auth_state=auth_state,
|
||||
scopes=scopes,
|
||||
|
@@ -16,9 +16,9 @@ class ShutdownAPIHandler(APIHandler):
|
||||
@admin_only
|
||||
def post(self):
|
||||
"""POST /api/shutdown triggers a clean shutdown
|
||||
|
||||
|
||||
POST (JSON) parameters:
|
||||
|
||||
|
||||
- servers: specify whether single-user servers should be terminated
|
||||
- proxy: specify whether the proxy should be terminated
|
||||
"""
|
||||
@@ -57,7 +57,7 @@ class RootAPIHandler(APIHandler):
|
||||
"""GET /api/ returns info about the Hub and its API.
|
||||
|
||||
It is not an authenticated endpoint.
|
||||
|
||||
|
||||
For now, it just returns the version of JupyterHub itself.
|
||||
"""
|
||||
data = {'version': __version__}
|
||||
@@ -70,7 +70,7 @@ class InfoAPIHandler(APIHandler):
|
||||
"""GET /api/info returns detailed info about the Hub and its API.
|
||||
|
||||
It is not an authenticated endpoint.
|
||||
|
||||
|
||||
For now, it just returns the version of JupyterHub itself.
|
||||
"""
|
||||
|
||||
|
@@ -9,6 +9,7 @@ from datetime import timezone
|
||||
|
||||
from async_generator import aclosing
|
||||
from dateutil.parser import parse as parse_date
|
||||
from sqlalchemy import func
|
||||
from tornado import web
|
||||
from tornado.iostream import StreamClosedError
|
||||
|
||||
@@ -35,15 +36,69 @@ class SelfAPIHandler(APIHandler):
|
||||
user = self.get_current_user_oauth_token()
|
||||
if user is None:
|
||||
raise web.HTTPError(403)
|
||||
self.write(json.dumps(self.user_model(user)))
|
||||
if isinstance(user, orm.Service):
|
||||
model = self.service_model(user)
|
||||
else:
|
||||
model = self.user_model(user)
|
||||
self.write(json.dumps(model))
|
||||
|
||||
|
||||
class UserListAPIHandler(APIHandler):
|
||||
def _user_has_ready_spawner(self, orm_user):
|
||||
"""Return True if a user has *any* ready spawners
|
||||
|
||||
Used for filtering from active -> ready
|
||||
"""
|
||||
user = self.users[orm_user]
|
||||
return any(spawner.ready for spawner in user.spawners.values())
|
||||
|
||||
@admin_only
|
||||
def get(self):
|
||||
state_filter = self.get_argument("state", None)
|
||||
|
||||
# post_filter
|
||||
post_filter = None
|
||||
|
||||
if state_filter in {"active", "ready"}:
|
||||
# only get users with active servers
|
||||
# an 'active' Spawner has a server record in the database
|
||||
# which means Spawner.server != None
|
||||
# it may still be in a pending start/stop state.
|
||||
# join filters out users with no Spawners
|
||||
query = (
|
||||
self.db.query(orm.User)
|
||||
# join filters out any Users with no Spawners
|
||||
.join(orm.Spawner)
|
||||
# this implicitly gets Users with *any* active server
|
||||
.filter(orm.Spawner.server != None)
|
||||
)
|
||||
if state_filter == "ready":
|
||||
# have to post-process query results because active vs ready
|
||||
# can only be distinguished with in-memory Spawner properties
|
||||
post_filter = self._user_has_ready_spawner
|
||||
|
||||
elif state_filter == "inactive":
|
||||
# only get users with *no* active servers
|
||||
# as opposed to users with *any inactive servers*
|
||||
# this is the complement to the above query.
|
||||
# how expensive is this with lots of servers?
|
||||
query = (
|
||||
self.db.query(orm.User)
|
||||
.outerjoin(orm.Spawner)
|
||||
.outerjoin(orm.Server)
|
||||
.group_by(orm.User.id)
|
||||
.having(func.count(orm.Server.id) == 0)
|
||||
)
|
||||
elif state_filter:
|
||||
raise web.HTTPError(400, "Unrecognized state filter: %r" % state_filter)
|
||||
else:
|
||||
# no filter, return all users
|
||||
query = self.db.query(orm.User)
|
||||
|
||||
data = [
|
||||
self.user_model(u, include_servers=True, include_state=True)
|
||||
for u in self.db.query(orm.User)
|
||||
for u in query
|
||||
if (post_filter is None or post_filter(u))
|
||||
]
|
||||
self.write(json.dumps(data))
|
||||
|
||||
@@ -182,6 +237,9 @@ class UserAPIHandler(APIHandler):
|
||||
)
|
||||
|
||||
await maybe_future(self.authenticator.delete_user(user))
|
||||
|
||||
await user.delete_spawners()
|
||||
|
||||
# remove from registry
|
||||
self.users.delete(user)
|
||||
|
||||
@@ -422,10 +480,18 @@ class UserServerAPIHandler(APIHandler):
|
||||
options = self.get_json_body()
|
||||
remove = (options or {}).get('remove', False)
|
||||
|
||||
def _remove_spawner(f=None):
|
||||
if f and f.exception():
|
||||
return
|
||||
async def _remove_spawner(f=None):
|
||||
"""Remove the spawner object
|
||||
|
||||
only called after it stops successfully
|
||||
"""
|
||||
if f:
|
||||
# await f, stop on error,
|
||||
# leaving resources in the db in case of failure to stop
|
||||
await f
|
||||
self.log.info("Deleting spawner %s", spawner._log_name)
|
||||
await maybe_future(user._delete_spawner(spawner))
|
||||
|
||||
self.db.delete(spawner.orm_spawner)
|
||||
user.spawners.pop(server_name, None)
|
||||
self.db.commit()
|
||||
@@ -446,7 +512,8 @@ class UserServerAPIHandler(APIHandler):
|
||||
self.set_header('Content-Type', 'text/plain')
|
||||
self.set_status(202)
|
||||
if remove:
|
||||
spawner._stop_future.add_done_callback(_remove_spawner)
|
||||
# schedule remove when stop completes
|
||||
asyncio.ensure_future(_remove_spawner(spawner._stop_future))
|
||||
return
|
||||
|
||||
if spawner.pending:
|
||||
@@ -464,9 +531,10 @@ class UserServerAPIHandler(APIHandler):
|
||||
|
||||
if remove:
|
||||
if stop_future:
|
||||
stop_future.add_done_callback(_remove_spawner)
|
||||
# schedule remove when stop completes
|
||||
asyncio.ensure_future(_remove_spawner(spawner._stop_future))
|
||||
else:
|
||||
_remove_spawner()
|
||||
await _remove_spawner()
|
||||
|
||||
status = 202 if spawner._stop_pending else 204
|
||||
self.set_header('Content-Type', 'text/plain')
|
||||
|
@@ -9,6 +9,7 @@ import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import secrets
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
@@ -29,6 +30,14 @@ from urllib.parse import urlunparse
|
||||
if sys.version_info[:2] < (3, 3):
|
||||
raise ValueError("Python < 3.3 not supported: %s" % sys.version)
|
||||
|
||||
# For compatibility with python versions 3.6 or earlier.
|
||||
# asyncio.Task.all_tasks() is fully moved to asyncio.all_tasks() starting with 3.9. Also applies to current_task.
|
||||
try:
|
||||
asyncio_all_tasks = asyncio.all_tasks
|
||||
asyncio_current_task = asyncio.current_task
|
||||
except AttributeError as e:
|
||||
asyncio_all_tasks = asyncio.Task.all_tasks
|
||||
asyncio_current_task = asyncio.Task.current_task
|
||||
|
||||
from dateutil.parser import parse as parse_date
|
||||
from jinja2 import Environment, FileSystemLoader, PrefixLoader, ChoiceLoader
|
||||
@@ -347,6 +356,42 @@ class JupyterHub(Application):
|
||||
Default is two weeks.
|
||||
""",
|
||||
).tag(config=True)
|
||||
|
||||
oauth_token_expires_in = Integer(
|
||||
help="""Expiry (in seconds) of OAuth access tokens.
|
||||
|
||||
The default is to expire when the cookie storing them expires,
|
||||
according to `cookie_max_age_days` config.
|
||||
|
||||
These are the tokens stored in cookies when you visit
|
||||
a single-user server or service.
|
||||
When they expire, you must re-authenticate with the Hub,
|
||||
even if your Hub authentication is still valid.
|
||||
If your Hub authentication is valid,
|
||||
logging in may be a transparent redirect as you refresh the page.
|
||||
|
||||
This does not affect JupyterHub API tokens in general,
|
||||
which do not expire by default.
|
||||
Only tokens issued during the oauth flow
|
||||
accessing services and single-user servers are affected.
|
||||
|
||||
.. versionadded:: 1.4
|
||||
OAuth token expires_in was not previously configurable.
|
||||
.. versionchanged:: 1.4
|
||||
Default now uses cookie_max_age_days so that oauth tokens
|
||||
which are generally stored in cookies,
|
||||
expire when the cookies storing them expire.
|
||||
Previously, it was one hour.
|
||||
""",
|
||||
config=True,
|
||||
)
|
||||
|
||||
@default("oauth_token_expires_in")
|
||||
def _cookie_max_age_seconds(self):
|
||||
"""default to cookie max age, where these tokens are stored"""
|
||||
# convert cookie max age days to seconds
|
||||
return int(self.cookie_max_age_days * 24 * 3600)
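As a configuration example, tying the two settings described above together might look like this in `jupyterhub_config.py` (values are illustrative):

```python
# jupyterhub_config.py (illustrative values)
c.JupyterHub.cookie_max_age_days = 7        # default expiry for oauth tokens too
c.JupyterHub.oauth_token_expires_in = 3600  # or override explicitly, in seconds
```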
|
||||
|
||||
redirect_to_server = Bool(
|
||||
True, help="Redirect user to server (if running), instead of control panel."
|
||||
).tag(config=True)
|
||||
@@ -366,7 +411,8 @@ class JupyterHub(Application):
|
||||
300, help="Interval (in seconds) at which to update last-activity timestamps."
|
||||
).tag(config=True)
|
||||
proxy_check_interval = Integer(
|
||||
30, help="Interval (in seconds) at which to check if the proxy is running."
|
||||
5,
|
||||
help="DEPRECATED since version 0.8: Use ConfigurableHTTPProxy.check_running_interval",
|
||||
).tag(config=True)
|
||||
service_check_interval = Integer(
|
||||
60,
|
||||
@@ -680,6 +726,7 @@ class JupyterHub(Application):
|
||||
).tag(config=True)
|
||||
|
||||
_proxy_config_map = {
|
||||
'proxy_check_interval': 'check_running_interval',
|
||||
'proxy_cmd': 'command',
|
||||
'debug_proxy': 'debug',
|
||||
'proxy_auth_token': 'auth_token',
|
||||
@@ -811,6 +858,66 @@ class JupyterHub(Application):
|
||||
def _hub_prefix_default(self):
|
||||
return url_path_join(self.base_url, '/hub/')
|
||||
|
||||
hub_routespec = Unicode(
|
||||
"/",
|
||||
help="""
|
||||
The routing prefix for the Hub itself.
|
||||
|
||||
Override to send only a subset of traffic to the Hub.
|
||||
Default is to use the Hub as the default route for all requests.
|
||||
|
||||
This is necessary for normal jupyterhub operation,
|
||||
as the Hub must receive requests for e.g. `/user/:name`
|
||||
when the user's server is not running.
|
||||
|
||||
However, some deployments using only the JupyterHub API
|
||||
may want to handle these events themselves,
|
||||
in which case they can register their own default target with the proxy
|
||||
and set e.g. `hub_routespec = /hub/` to serve only the hub's own pages, or even `/hub/api/` for api-only operation.
|
||||
|
||||
Note: hub_routespec must include the base_url, if any.
|
||||
|
||||
.. versionadded:: 1.4
|
||||
""",
|
||||
).tag(config=True)
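For an API-only deployment as described in the help text, the override might look like this (illustrative; such a deployment must register its own default target with the proxy separately):

```python
# jupyterhub_config.py -- API-only Hub behind a custom default route (illustrative)
c.JupyterHub.hub_routespec = "/hub/api/"
```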
|
||||
|
||||
@default("hub_routespec")
|
||||
def _default_hub_routespec(self):
|
||||
# Default routespec for the Hub is the *app* base url
|
||||
# not the hub URL, so the Hub receives requests for non-running servers
|
||||
# use `/` with host-based routing so the Hub
|
||||
# gets requests for all hosts
|
||||
if self.subdomain_host:
|
||||
routespec = '/'
|
||||
else:
|
||||
routespec = self.base_url
|
||||
return routespec
|
||||
|
||||
@validate("hub_routespec")
|
||||
def _validate_hub_routespec(self, proposal):
|
||||
"""ensure leading/trailing / on custom routespec prefix
|
||||
|
||||
- trailing '/' always required
|
||||
- leading '/' required unless using subdomains
|
||||
"""
|
||||
routespec = proposal.value
|
||||
if not routespec.endswith("/"):
|
||||
routespec = routespec + "/"
|
||||
if not self.subdomain_host and not routespec.startswith("/"):
|
||||
routespec = "/" + routespec
|
||||
return routespec
|
||||
|
||||
@observe("hub_routespec")
|
||||
def _hub_routespec_changed(self, change):
|
||||
if change.new == change.old:
|
||||
return
|
||||
routespec = change.new
|
||||
if routespec not in {'/', self.base_url}:
|
||||
self.log.warning(
|
||||
f"Using custom route for Hub: {routespec}."
|
||||
" Requests for not-running servers may not be handled."
|
||||
)
|
||||
|
||||
@observe('base_url')
|
||||
def _update_hub_prefix(self, change):
|
||||
"""add base URL to hub prefix"""
|
||||
@@ -838,15 +945,30 @@ class JupyterHub(Application):
|
||||
to reduce the cost of checking authentication tokens.
|
||||
""",
|
||||
).tag(config=True)
|
||||
cookie_secret = Bytes(
|
||||
cookie_secret = Union(
|
||||
[Bytes(), Unicode()],
|
||||
help="""The cookie secret to use to encrypt cookies.
|
||||
|
||||
Loaded from the JPY_COOKIE_SECRET env variable by default.
|
||||
|
||||
Should be exactly 256 bits (32 bytes).
|
||||
"""
|
||||
""",
|
||||
).tag(config=True, env='JPY_COOKIE_SECRET')
|
||||
|
||||
@validate('cookie_secret')
|
||||
def _validate_secret_key(self, proposal):
|
||||
"""Coerces strings with even number of hexadecimal characters to bytes."""
|
||||
r = proposal['value']
|
||||
if isinstance(r, str):
|
||||
try:
|
||||
return bytes.fromhex(r)
|
||||
except ValueError:
|
||||
raise ValueError(
|
||||
"cookie_secret set as a string must contain an even amount of hexadecimal characters."
|
||||
)
|
||||
else:
|
||||
return r
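With this validator, the secret can be configured either as raw bytes or as a hex string; a minimal sketch (this generates a fresh value on each start, so a real deployment would persist it):

```python
# jupyterhub_config.py -- hex string form, coerced to 32 bytes by the validator
import secrets

c.JupyterHub.cookie_secret = secrets.token_hex(32)  # 64 hex chars == 256 bits
```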
|
||||
|
||||
@observe('cookie_secret')
|
||||
def _cookie_secret_check(self, change):
|
||||
secret = change.new
|
||||
@@ -1326,7 +1448,7 @@ class JupyterHub(Application):
|
||||
Can be a Unicode string (e.g. '/hub/home') or a callable based on the handler object:
|
||||
|
||||
::
|
||||
|
||||
|
||||
def default_url_fn(handler):
|
||||
user = handler.current_user
|
||||
if user and user.admin:
|
||||
@@ -1354,6 +1476,26 @@ class JupyterHub(Application):
|
||||
""",
|
||||
).tag(config=True)
|
||||
|
||||
use_legacy_stopped_server_status_code = Bool(
|
||||
True,
|
||||
help="""
|
||||
Return 503 rather than 424 when request comes in for a non-running server.
|
||||
|
||||
Prior to JupyterHub 2.0, this returns a 503 when any request comes in for
|
||||
a user server that is currently not running. By default, JupyterHub 2.0
|
||||
will return a 424 - this makes operational metric dashboards more useful.
|
||||
|
||||
JupyterLab < 3.2 expected the 503 to know if the user server is no longer
|
||||
running, and prompted the user to start their server. Set this config to
|
||||
true to retain the old behavior, so JupyterLab < 3.2 can continue to show
|
||||
the appropriate UI when the user server is stopped.
|
||||
|
||||
This option will default to False in JupyterHub 2.0, and be removed in a
|
||||
future release.
|
||||
""",
|
||||
config=True,
|
||||
)
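Deployments that still serve JupyterLab < 3.2 can keep the old status code with a one-line override (illustrative):

```python
# jupyterhub_config.py -- keep returning 503 for stopped servers
c.JupyterHub.use_legacy_stopped_server_status_code = True
```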
|
||||
|
||||
def init_handlers(self):
|
||||
h = []
|
||||
# load handlers from the authenticator
|
||||
@@ -1450,7 +1592,7 @@ class JupyterHub(Application):
|
||||
if not secret:
|
||||
secret_from = 'new'
|
||||
self.log.debug("Generating new %s", trait_name)
|
||||
secret = os.urandom(COOKIE_SECRET_BYTES)
|
||||
secret = secrets.token_bytes(COOKIE_SECRET_BYTES)
|
||||
|
||||
if secret_file and secret_from == 'new':
|
||||
# if we generated a new secret, store it in the secret_file
|
||||
@@ -1607,6 +1749,7 @@ class JupyterHub(Application):
|
||||
"""Load the Hub URL config"""
|
||||
hub_args = dict(
|
||||
base_url=self.hub_prefix,
|
||||
routespec=self.hub_routespec,
|
||||
public_host=self.subdomain_host,
|
||||
certfile=self.internal_ssl_cert,
|
||||
keyfile=self.internal_ssl_key,
|
||||
@@ -1622,17 +1765,15 @@ class JupyterHub(Application):
|
||||
hub_args['ip'] = self.hub_ip
|
||||
hub_args['port'] = self.hub_port
|
||||
|
||||
# routespec for the Hub is the *app* base url
|
||||
# not the hub URL, so it receives requests for non-running servers
|
||||
# use `/` with host-based routing so the Hub
|
||||
# gets requests for all hosts
|
||||
host = ''
|
||||
if self.subdomain_host:
|
||||
routespec = '/'
|
||||
else:
|
||||
routespec = self.base_url
|
||||
self.hub = Hub(**hub_args)
|
||||
|
||||
self.hub = Hub(routespec=routespec, **hub_args)
|
||||
if not self.subdomain_host:
|
||||
api_prefix = url_path_join(self.hub.base_url, "api/")
|
||||
if not api_prefix.startswith(self.hub.routespec):
|
||||
self.log.warning(
|
||||
f"Hub API prefix {api_prefix} not on prefix {self.hub.routespec}. "
|
||||
"The Hub may not receive any API requests from outside."
|
||||
)
|
||||
|
||||
if self.hub_connect_ip:
|
||||
self.hub.connect_ip = self.hub_connect_ip
|
||||
@@ -1851,7 +1992,7 @@ class JupyterHub(Application):
|
||||
# don't allow bad tokens to create users
|
||||
db.delete(obj)
|
||||
db.commit()
|
||||
raise
|
||||
raise
|
||||
else:
|
||||
self.log.debug("Not duplicating token %s", orm_token)
|
||||
db.commit()
|
||||
@@ -1921,18 +2062,14 @@ class JupyterHub(Application):
|
||||
raise AttributeError("No such service field: %s" % key)
|
||||
setattr(service, key, value)
|
||||
|
||||
if service.managed:
|
||||
if not service.api_token:
|
||||
# generate new token
|
||||
# TODO: revoke old tokens?
|
||||
service.api_token = service.orm.new_api_token(
|
||||
note="generated at startup"
|
||||
)
|
||||
else:
|
||||
# ensure provided token is registered
|
||||
self.service_tokens[service.api_token] = service.name
|
||||
else:
|
||||
if service.api_token:
|
||||
self.service_tokens[service.api_token] = service.name
|
||||
elif service.managed:
|
||||
# generate new token
|
||||
# TODO: revoke old tokens?
|
||||
service.api_token = service.orm.new_api_token(
|
||||
note="generated at startup"
|
||||
)
|
||||
|
||||
if service.url:
|
||||
parsed = urlparse(service.url)
|
||||
@@ -2120,7 +2257,7 @@ class JupyterHub(Application):
|
||||
self.log.debug(
|
||||
"Awaiting checks for %i possibly-running spawners", len(check_futures)
|
||||
)
|
||||
await gen.multi(check_futures)
|
||||
await asyncio.gather(*check_futures)
|
||||
db.commit()
|
||||
|
||||
# only perform this query if we are going to log it
|
||||
@@ -2138,6 +2275,7 @@ class JupyterHub(Application):
|
||||
lambda: self.db,
|
||||
url_prefix=url_path_join(base_url, 'api/oauth2'),
|
||||
login_url=url_path_join(base_url, 'login'),
|
||||
token_expires_in=self.oauth_token_expires_in,
|
||||
)
|
||||
|
||||
def cleanup_oauth_clients(self):
|
||||
@@ -2187,7 +2325,7 @@ class JupyterHub(Application):
|
||||
def init_tornado_settings(self):
|
||||
"""Set up the tornado settings dict."""
|
||||
base_url = self.hub.base_url
|
||||
jinja_options = dict(autoescape=True)
|
||||
jinja_options = dict(autoescape=True, enable_async=True)
|
||||
jinja_options.update(self.jinja_environment_options)
|
||||
base_path = self._template_paths_default()[0]
|
||||
if base_path not in self.template_paths:
|
||||
@@ -2199,6 +2337,14 @@ class JupyterHub(Application):
|
||||
]
|
||||
)
|
||||
jinja_env = Environment(loader=loader, **jinja_options)
|
||||
# We need a sync jinja environment too, for the times we *must* use sync
|
||||
# code - particularly in RequestHandler.write_error. Since *that*
|
||||
# is called from inside the asyncio event loop, we can't actulaly just
|
||||
# schedule it on the loop - without starting another thread with its
|
||||
# own loop, which seems not worth the trouble. Instead, we create another
|
||||
# environment, exactly like this one, but sync
|
||||
del jinja_options['enable_async']
|
||||
jinja_env_sync = Environment(loader=loader, **jinja_options)
|
||||
|
||||
login_url = url_path_join(base_url, 'login')
|
||||
logout_url = self.authenticator.logout_url(base_url)
|
||||
@@ -2245,6 +2391,7 @@ class JupyterHub(Application):
|
||||
template_path=self.template_paths,
|
||||
template_vars=self.template_vars,
|
||||
jinja2_env=jinja_env,
|
||||
jinja2_env_sync=jinja_env_sync,
|
||||
version_hash=version_hash,
|
||||
subdomain_host=self.subdomain_host,
|
||||
domain=self.domain,
|
||||
@@ -2808,9 +2955,7 @@ class JupyterHub(Application):
|
||||
async def shutdown_cancel_tasks(self, sig):
|
||||
"""Cancel all other tasks of the event loop and initiate cleanup"""
|
||||
self.log.critical("Received signal %s, initiating shutdown...", sig.name)
|
||||
tasks = [
|
||||
t for t in asyncio.Task.all_tasks() if t is not asyncio.Task.current_task()
|
||||
]
|
||||
tasks = [t for t in asyncio_all_tasks() if t is not asyncio_current_task()]
|
||||
|
||||
if tasks:
|
||||
self.log.debug("Cancelling pending tasks")
|
||||
@@ -2823,7 +2968,7 @@ class JupyterHub(Application):
|
||||
except StopAsyncIteration as e:
|
||||
self.log.error("Caught StopAsyncIteration Exception", exc_info=True)
|
||||
|
||||
tasks = [t for t in asyncio.Task.all_tasks()]
|
||||
tasks = [t for t in asyncio_all_tasks()]
|
||||
for t in tasks:
|
||||
self.log.debug("Task status: %s", t)
|
||||
await self.cleanup()
|
||||
|
@@ -101,7 +101,10 @@ class Authenticator(LoggingConfigurable):
|
||||
"""
|
||||
).tag(config=True)
|
||||
|
||||
whitelist = Set(help="Deprecated, use `Authenticator.allowed_users`", config=True,)
|
||||
whitelist = Set(
|
||||
help="Deprecated, use `Authenticator.allowed_users`",
|
||||
config=True,
|
||||
)
|
||||
|
||||
allowed_users = Set(
|
||||
help="""
|
||||
@@ -182,6 +185,13 @@ class Authenticator(LoggingConfigurable):
|
||||
"""
|
||||
)
|
||||
|
||||
def get_custom_html(self, base_url):
|
||||
"""Get custom HTML for the authenticator.
|
||||
|
||||
.. versionadded: 1.4
|
||||
"""
|
||||
return self.custom_html
|
||||
|
||||
login_service = Unicode(
|
||||
help="""
|
||||
Name of the login service that this authenticator is providing using to authenticate users.
|
||||
@@ -636,6 +646,26 @@ class Authenticator(LoggingConfigurable):
|
||||
""",
|
||||
)
|
||||
|
||||
auto_login_oauth2_authorize = Bool(
|
||||
False,
|
||||
config=True,
|
||||
help="""
|
||||
Automatically begin login process for OAuth2 authorization requests
|
||||
|
||||
When another application is using JupyterHub as OAuth2 provider, it
|
||||
sends users to `/hub/api/oauth2/authorize`. If the user isn't logged
|
||||
in already, and auto_login is not set, the user will be dumped on the
|
||||
hub's home page, without any context on what to do next.
|
||||
|
||||
Setting this to true will automatically redirect users to login if
|
||||
they aren't logged in *only* on the `/hub/api/oauth2/authorize`
|
||||
endpoint.
|
||||
|
||||
.. versionadded:: 1.5
|
||||
|
||||
""",
|
||||
)
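Enabling the behavior described above is a single Authenticator option (illustrative):

```python
# jupyterhub_config.py -- skip the dead-end landing page for OAuth2 authorize requests
c.Authenticator.auto_login_oauth2_authorize = True
```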
|
||||
|
||||
def login_url(self, base_url):
|
||||
"""Override this when registering a custom login handler
|
||||
|
||||
@@ -715,7 +745,9 @@ for _old_name, _new_name, _version in [
|
||||
("check_blacklist", "check_blocked_users", "1.2"),
|
||||
]:
|
||||
setattr(
|
||||
Authenticator, _old_name, _deprecated_method(_old_name, _new_name, _version),
|
||||
Authenticator,
|
||||
_old_name,
|
||||
_deprecated_method(_old_name, _new_name, _version),
|
||||
)
|
||||
|
||||
|
||||
@@ -778,7 +810,9 @@ class LocalAuthenticator(Authenticator):
|
||||
"""
|
||||
).tag(config=True)
|
||||
|
||||
group_whitelist = Set(help="""DEPRECATED: use allowed_groups""",).tag(config=True)
|
||||
group_whitelist = Set(
|
||||
help="""DEPRECATED: use allowed_groups""",
|
||||
).tag(config=True)
|
||||
|
||||
allowed_groups = Set(
|
||||
help="""
|
||||
@@ -938,8 +972,8 @@ class PAMAuthenticator(LocalAuthenticator):
|
||||
help="""
|
||||
Whether to check the user's account status via PAM during authentication.
|
||||
|
||||
The PAM account stack performs non-authentication based account
|
||||
management. It is typically used to restrict/permit access to a
|
||||
service and this step is needed to access the host's user access control.
|
||||
|
||||
Disabling this can be dangerous as authenticated but unauthorized users may
|
||||
|
@@ -26,10 +26,9 @@ def write_alembic_ini(alembic_ini='alembic.ini', db_url='sqlite:///jupyterhub.sq
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
||||
alembic_ini: str
|
||||
alembic_ini : str
|
||||
path to the alembic.ini file that should be written.
|
||||
db_url: str
|
||||
db_url : str
|
||||
The SQLAlchemy database url, e.g. `sqlite:///jupyterhub.sqlite`.
|
||||
"""
|
||||
with open(ALEMBIC_INI_TEMPLATE_PATH) as f:
|
||||
@@ -58,13 +57,11 @@ def _temp_alembic_ini(db_url):
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
||||
db_url: str
|
||||
db_url : str
|
||||
The SQLAlchemy database url, e.g. `sqlite:///jupyterhub.sqlite`.
|
||||
|
||||
Returns
|
||||
-------
|
||||
|
||||
alembic_ini: str
|
||||
The path to the temporary alembic.ini that we have created.
|
||||
This file will be cleaned up on exit from the context manager.
|
||||
|
@@ -19,14 +19,14 @@ description: |
|
||||
2. Events are only recorded when an action succeeds.
|
||||
type: object
|
||||
required:
|
||||
- action
|
||||
- username
|
||||
- servername
|
||||
properties:
|
||||
action:
|
||||
enum:
|
||||
- start
|
||||
- stop
|
||||
description: |
|
||||
Action performed by JupyterHub.
|
||||
|
||||
@@ -36,7 +36,7 @@ properties:
|
||||
|
||||
1. start
|
||||
A user's server was successfully started
|
||||
|
||||
|
||||
2. stop
|
||||
A user's server was successfully stopped
|
||||
username:
|
||||
|
@@ -40,6 +40,7 @@ from ..metrics import SERVER_STOP_DURATION_SECONDS
|
||||
from ..metrics import ServerPollStatus
|
||||
from ..metrics import ServerSpawnStatus
|
||||
from ..metrics import ServerStopStatus
|
||||
from ..metrics import TOTAL_USERS
|
||||
from ..objects import Server
|
||||
from ..spawner import LocalProcessSpawner
|
||||
from ..user import User
|
||||
@@ -453,6 +454,7 @@ class BaseHandler(RequestHandler):
|
||||
# not found, create and register user
|
||||
u = orm.User(name=username)
|
||||
self.db.add(u)
|
||||
TOTAL_USERS.inc()
|
||||
self.db.commit()
|
||||
user = self._user_from_orm(u)
|
||||
return user
|
||||
@@ -489,7 +491,12 @@ class BaseHandler(RequestHandler):
|
||||
self.clear_cookie(
|
||||
'jupyterhub-services',
|
||||
path=url_path_join(self.base_url, 'services'),
|
||||
**kwargs
|
||||
**kwargs,
|
||||
)
|
||||
# clear tornado cookie
|
||||
self.clear_cookie(
|
||||
'_xsrf',
|
||||
**self.settings.get('xsrf_cookie_kwargs', {}),
|
||||
)
|
||||
# Reset _jupyterhub_user
|
||||
self._jupyterhub_user = None
|
||||
@@ -634,6 +641,12 @@ class BaseHandler(RequestHandler):
|
||||
next_url,
|
||||
)
|
||||
|
||||
# this is where we know if next_url is coming from ?next= param or we are using a default url
|
||||
if next_url:
|
||||
next_url_from_param = True
|
||||
else:
|
||||
next_url_from_param = False
|
||||
|
||||
if not next_url:
|
||||
# custom default URL, usually passed because user landed on that page but was not logged in
|
||||
if default:
|
||||
@@ -659,7 +672,10 @@ class BaseHandler(RequestHandler):
|
||||
else:
|
||||
next_url = url_path_join(self.hub.base_url, 'home')
|
||||
|
||||
next_url = self.append_query_parameters(next_url, exclude=['next'])
|
||||
if not next_url_from_param:
|
||||
# when a request made with ?next=... assume all the params have already been encoded
|
||||
# otherwise, preserve params from the current request across the redirect
|
||||
next_url = self.append_query_parameters(next_url, exclude=['next'])
|
||||
return next_url
|
||||
|
||||
def append_query_parameters(self, url, exclude=None):
|
||||
@@ -1156,16 +1172,36 @@ class BaseHandler(RequestHandler):
|
||||
"<a href='{home}'>home page</a>.".format(home=home)
|
||||
)
|
||||
|
||||
def get_template(self, name):
|
||||
"""Return the jinja template object for a given name"""
|
||||
return self.settings['jinja2_env'].get_template(name)
|
||||
def get_template(self, name, sync=False):
|
||||
"""
|
||||
Return the jinja template object for a given name
|
||||
|
||||
def render_template(self, name, **ns):
|
||||
If sync is True, we return a Template that is compiled without async support.
|
||||
Only those can be used in synchronous code.
|
||||
|
||||
If sync is False, we return a Template that is compiled with async support
|
||||
"""
|
||||
if sync:
|
||||
key = 'jinja2_env_sync'
|
||||
else:
|
||||
key = 'jinja2_env'
|
||||
return self.settings[key].get_template(name)
|
||||
|
||||
def render_template(self, name, sync=False, **ns):
|
||||
"""
|
||||
Render jinja2 template
|
||||
|
||||
If sync is set to True, we render the template & return a string
|
||||
If sync is set to False, we return an awaitable
|
||||
"""
|
||||
template_ns = {}
|
||||
template_ns.update(self.template_namespace)
|
||||
template_ns.update(ns)
|
||||
template = self.get_template(name)
|
||||
return template.render(**template_ns)
|
||||
template = self.get_template(name, sync)
|
||||
if sync:
|
||||
return template.render(**template_ns)
|
||||
else:
|
||||
return template.render_async(**template_ns)
|
||||
|
||||
@property
|
||||
def template_namespace(self):
|
||||
@@ -1240,17 +1276,19 @@ class BaseHandler(RequestHandler):
|
||||
# Content-Length must be recalculated.
|
||||
self.clear_header('Content-Length')
|
||||
|
||||
# render the template
|
||||
# render_template is async, but write_error can't be!
|
||||
# so we run it sync here, instead of making a sync version of render_template
|
||||
|
||||
try:
|
||||
html = self.render_template('%s.html' % status_code, **ns)
|
||||
html = self.render_template('%s.html' % status_code, sync=True, **ns)
|
||||
except TemplateNotFound:
|
||||
self.log.debug("No template for %d", status_code)
|
||||
try:
|
||||
html = self.render_template('error.html', **ns)
|
||||
html = self.render_template('error.html', sync=True, **ns)
|
||||
except:
|
||||
# In this case, any side effect must be avoided.
|
||||
ns['no_spawner_check'] = True
|
||||
html = self.render_template('error.html', **ns)
|
||||
html = self.render_template('error.html', sync=True, **ns)
|
||||
|
||||
self.write(html)
|
||||
|
||||
@@ -1292,7 +1330,7 @@ class UserUrlHandler(BaseHandler):
|
||||
|
||||
**Changed Behavior as of 1.0** This handler no longer triggers a spawn. Instead, it checks if:
|
||||
|
||||
1. server is not active, serve page prompting for spawn (status: 503)
|
||||
1. server is not active, serve page prompting for spawn (status: 424)
|
||||
2. server is ready (This shouldn't happen! Proxy isn't updated yet. Wait a bit and redirect.)
|
||||
3. server is active, redirect to /hub/spawn-pending to monitor launch progress
|
||||
(will redirect back when finished)
|
||||
@@ -1311,7 +1349,14 @@ class UserUrlHandler(BaseHandler):
|
||||
self.log.warning(
|
||||
"Failing suspected API request to not-running server: %s", self.request.path
|
||||
)
|
||||
self.set_status(503)
|
||||
|
||||
# If we got here, the server is not running. To differentiate
|
||||
# that the *server* itself is not running, rather than just the particular
|
||||
# resource *in* the server is not found, we return a 424 instead of a 404.
|
||||
# We allow retaining the old behavior to support older JupyterLab versions
|
||||
self.set_status(
|
||||
424 if not self.app.use_legacy_stopped_server_status_code else 503
|
||||
)
|
||||
self.set_header("Content-Type", "application/json")
|
||||
|
||||
spawn_url = urlparse(self.request.full_url())._replace(query="")
|
||||
@@ -1454,10 +1499,14 @@ class UserUrlHandler(BaseHandler):
|
||||
|
||||
# if request is expecting JSON, assume it's an API request and fail with 503
|
||||
# because it won't like the redirect to the pending page
|
||||
if get_accepted_mimetype(
|
||||
self.request.headers.get('Accept', ''),
|
||||
choices=['application/json', 'text/html'],
|
||||
) == 'application/json' or 'api' in user_path.split('/'):
|
||||
if (
|
||||
get_accepted_mimetype(
|
||||
self.request.headers.get('Accept', ''),
|
||||
choices=['application/json', 'text/html'],
|
||||
)
|
||||
== 'application/json'
|
||||
or 'api' in user_path.split('/')
|
||||
):
|
||||
self._fail_api_request(user_name, server_name)
|
||||
return
|
||||
|
||||
@@ -1472,18 +1521,20 @@ class UserUrlHandler(BaseHandler):
|
||||
self.redirect(pending_url, status=303)
|
||||
return
|
||||
|
||||
# if we got here, the server is not running
|
||||
# serve a page prompting for spawn and 503 error
|
||||
# visiting /user/:name no longer triggers implicit spawn
|
||||
# without explicit user action
|
||||
# If we got here, the server is not running. To differentiate
|
||||
# that the *server* itself is not running, rather than just the particular
|
||||
# page *in* the server is not found, we return a 424 instead of a 404.
|
||||
# We allow retaining the old behavior to support older JupyterLab versions
|
||||
spawn_url = url_concat(
|
||||
url_path_join(self.hub.base_url, "spawn", user.escaped_name, server_name),
|
||||
{"next": self.request.uri},
|
||||
)
|
||||
self.set_status(503)
|
||||
self.set_status(
|
||||
424 if not self.app.use_legacy_stopped_server_status_code else 503
|
||||
)
|
||||
|
||||
auth_state = await user.get_auth_state()
|
||||
html = self.render_template(
|
||||
html = await self.render_template(
|
||||
"not_running.html",
|
||||
user=user,
|
||||
server_name=server_name,
|
||||
@@ -1533,20 +1584,23 @@ class UserUrlHandler(BaseHandler):
|
||||
if self.subdomain_host:
|
||||
target = user.host + target
|
||||
|
||||
referer = self.request.headers.get('Referer', '')
|
||||
# record redirect count in query parameter
|
||||
if redirects:
|
||||
self.log.warning("Redirect loop detected on %s", self.request.uri)
|
||||
# add capped exponential backoff where cap is 10s
|
||||
await gen.sleep(min(1 * (2 ** redirects), 10))
|
||||
await asyncio.sleep(min(1 * (2 ** redirects), 10))
|
||||
# rewrite target url with new `redirects` query value
|
||||
url_parts = urlparse(target)
|
||||
query_parts = parse_qs(url_parts.query)
|
||||
query_parts['redirects'] = redirects + 1
|
||||
url_parts = url_parts._replace(query=urlencode(query_parts, doseq=True))
|
||||
target = urlunparse(url_parts)
|
||||
elif '/user/{}'.format(user.name) in referer or not referer:
|
||||
# add first counter only if it's a redirect from /user/:name -> /hub/user/:name
|
||||
else:
|
||||
# Start redirect counter.
|
||||
# This should only occur for redirects from /user/:name -> /hub/user/:name
|
||||
# when the corresponding server is already ready.
|
||||
# We don't check this explicitly (direct visits to /hub/user are technically possible),
|
||||
# but that's now the only normal way to get here.
|
||||
target = url_concat(target, {'redirects': 1})
|
||||
|
||||
self.redirect(target)
|
||||
|
@@ -3,6 +3,7 @@
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
import asyncio
|
||||
|
||||
from jinja2 import Template
|
||||
from tornado import web
|
||||
from tornado.escape import url_escape
|
||||
from tornado.httputil import url_concat
|
||||
@@ -72,14 +73,14 @@ class LogoutHandler(BaseHandler):
|
||||
Override this function to set a custom logout page.
|
||||
"""
|
||||
if self.authenticator.auto_login:
|
||||
html = self.render_template('logout.html')
|
||||
html = await self.render_template('logout.html')
|
||||
self.finish(html)
|
||||
else:
|
||||
self.redirect(self.settings['login_url'], permanent=False)
|
||||
|
||||
async def get(self):
|
||||
"""Log the user out, call the custom action, forward the user
|
||||
to the logout page
|
||||
"""
|
||||
await self.default_handle_logout()
|
||||
await self.handle_logout()
|
||||
@@ -90,17 +91,23 @@ class LoginHandler(BaseHandler):
|
||||
"""Render the login page."""
|
||||
|
||||
def _render(self, login_error=None, username=None):
|
||||
return self.render_template(
|
||||
'login.html',
|
||||
next=url_escape(self.get_argument('next', default='')),
|
||||
username=username,
|
||||
login_error=login_error,
|
||||
custom_html=self.authenticator.custom_html,
|
||||
login_url=self.settings['login_url'],
|
||||
authenticator_login_url=url_concat(
|
||||
context = {
|
||||
"next": url_escape(self.get_argument('next', default='')),
|
||||
"username": username,
|
||||
"login_error": login_error,
|
||||
"login_url": self.settings['login_url'],
|
||||
"authenticator_login_url": url_concat(
|
||||
self.authenticator.login_url(self.hub.base_url),
|
||||
{'next': self.get_argument('next', '')},
|
||||
),
|
||||
}
|
||||
custom_html = Template(
|
||||
self.authenticator.get_custom_html(self.hub.base_url)
|
||||
).render(**context)
|
||||
return self.render_template(
|
||||
'login.html',
|
||||
**context,
|
||||
custom_html=custom_html,
|
||||
)
|
||||
|
||||
async def get(self):
|
||||
@@ -132,7 +139,7 @@ class LoginHandler(BaseHandler):
|
||||
self.redirect(auto_login_url)
|
||||
return
|
||||
username = self.get_argument('username', default='')
|
||||
self.finish(self._render(username=username))
|
||||
self.finish(await self._render(username=username))
|
||||
|
||||
async def post(self):
|
||||
# parse the arguments dict
|
||||
@@ -149,7 +156,7 @@ class LoginHandler(BaseHandler):
|
||||
self._jupyterhub_user = user
|
||||
self.redirect(self.get_next_url(user))
|
||||
else:
|
||||
html = self._render(
|
||||
html = await self._render(
|
||||
login_error='Invalid username or password', username=data['username']
|
||||
)
|
||||
self.finish(html)
|
||||
|
@@ -1,3 +1,4 @@
|
||||
"""Handlers for serving prometheus metrics"""
|
||||
from prometheus_client import CONTENT_TYPE_LATEST
|
||||
from prometheus_client import generate_latest
|
||||
from prometheus_client import REGISTRY
|
||||
@@ -17,4 +18,7 @@ class MetricsHandler(BaseHandler):
|
||||
self.write(generate_latest(REGISTRY))
|
||||
|
||||
|
||||
default_handlers = [(r'/metrics$', MetricsHandler)]
|
||||
default_handlers = [
|
||||
(r'/metrics$', MetricsHandler),
|
||||
(r'/api/metrics$', MetricsHandler),
|
||||
]
|
||||
|
@@ -40,11 +40,15 @@ class RootHandler(BaseHandler):
|
||||
def get(self):
|
||||
user = self.current_user
|
||||
if self.default_url:
|
||||
url = self.default_url
|
||||
# As set in jupyterhub_config.py
|
||||
if callable(self.default_url):
|
||||
url = self.default_url(self)
|
||||
else:
|
||||
url = self.default_url
|
||||
elif user:
|
||||
url = self.get_next_url(user)
|
||||
else:
|
||||
url = self.settings['login_url']
|
||||
url = url_concat(self.settings["login_url"], dict(next=self.request.uri))
|
||||
self.redirect(url)
|
||||
|
||||
|
||||
@@ -67,7 +71,7 @@ class HomeHandler(BaseHandler):
|
||||
url = url_path_join(self.hub.base_url, 'spawn', user.escaped_name)
|
||||
|
||||
auth_state = await user.get_auth_state()
|
||||
html = self.render_template(
|
||||
html = await self.render_template(
|
||||
'home.html',
|
||||
auth_state=auth_state,
|
||||
user=user,
|
||||
@@ -94,7 +98,7 @@ class SpawnHandler(BaseHandler):
|
||||
|
||||
async def _render_form(self, for_user, spawner_options_form, message=''):
|
||||
auth_state = await for_user.get_auth_state()
|
||||
return self.render_template(
|
||||
return await self.render_template(
|
||||
'spawn.html',
|
||||
for_user=for_user,
|
||||
auth_state=auth_state,
|
||||
@@ -378,7 +382,7 @@ class SpawnPendingHandler(BaseHandler):
|
||||
self.hub.base_url, "spawn", user.escaped_name, server_name
|
||||
)
|
||||
self.set_status(500)
|
||||
html = self.render_template(
|
||||
html = await self.render_template(
|
||||
"not_running.html",
|
||||
user=user,
|
||||
auth_state=auth_state,
|
||||
@@ -402,7 +406,7 @@ class SpawnPendingHandler(BaseHandler):
|
||||
page = "stop_pending.html"
|
||||
else:
|
||||
page = "spawn_pending.html"
|
||||
html = self.render_template(
|
||||
html = await self.render_template(
|
||||
page,
|
||||
user=user,
|
||||
spawner=spawner,
|
||||
@@ -429,7 +433,7 @@ class SpawnPendingHandler(BaseHandler):
|
||||
spawn_url = url_path_join(
|
||||
self.hub.base_url, "spawn", user.escaped_name, server_name
|
||||
)
|
||||
html = self.render_template(
|
||||
html = await self.render_template(
|
||||
"not_running.html",
|
||||
user=user,
|
||||
auth_state=auth_state,
|
||||
@@ -453,7 +457,8 @@ class AdminHandler(BaseHandler):
|
||||
@web.authenticated
|
||||
@admin_only
|
||||
async def get(self):
|
||||
page, per_page, offset = Pagination(config=self.config).get_page_args(self)
|
||||
pagination = Pagination(url=self.request.uri, config=self.config)
|
||||
page, per_page, offset = pagination.get_page_args(self)
|
||||
|
||||
available = {'name', 'admin', 'running', 'last_activity'}
|
||||
default_sort = ['admin', 'name']
|
||||
@@ -496,30 +501,27 @@ class AdminHandler(BaseHandler):
|
||||
# get User.col.desc() order objects
|
||||
ordered = [getattr(c, o)() for c, o in zip(cols, orders)]
|
||||
|
||||
query = self.db.query(orm.User).outerjoin(orm.Spawner).distinct(orm.User.id)
|
||||
subquery = query.subquery("users")
|
||||
users = (
|
||||
self.db.query(orm.User)
|
||||
.select_entity_from(subquery)
|
||||
.outerjoin(orm.Spawner)
|
||||
.order_by(*ordered)
|
||||
.limit(per_page)
|
||||
.offset(offset)
|
||||
)
|
||||
|
||||
users = [self._user_from_orm(u) for u in users]
|
||||
|
||||
running = []
|
||||
for u in users:
|
||||
running.extend(s for s in u.spawners.values() if s.active)
|
||||
|
||||
total = self.db.query(orm.User.id).count()
|
||||
pagination = Pagination(
|
||||
url=self.request.uri,
|
||||
total=total,
|
||||
page=page,
|
||||
per_page=per_page,
|
||||
config=self.config,
|
||||
)
|
||||
pagination.total = query.count()
|
||||
|
||||
auth_state = await self.current_user.get_auth_state()
|
||||
html = self.render_template(
|
||||
html = await self.render_template(
|
||||
'admin.html',
|
||||
current_user=self.current_user,
|
||||
auth_state=auth_state,
|
||||
@@ -609,7 +611,7 @@ class TokenPageHandler(BaseHandler):
|
||||
oauth_clients = sorted(oauth_clients, key=sort_key, reverse=True)
|
||||
|
||||
auth_state = await self.current_user.get_auth_state()
|
||||
html = self.render_template(
|
||||
html = await self.render_template(
|
||||
'token.html',
|
||||
api_tokens=api_tokens,
|
||||
oauth_clients=oauth_clients,
|
||||
@@ -621,7 +623,7 @@ class TokenPageHandler(BaseHandler):
|
||||
class ProxyErrorHandler(BaseHandler):
|
||||
"""Handler for rendering proxy error pages"""
|
||||
|
||||
def get(self, status_code_s):
|
||||
async def get(self, status_code_s):
|
||||
status_code = int(status_code_s)
|
||||
status_message = responses.get(status_code, 'Unknown HTTP Error')
|
||||
# build template namespace
|
||||
@@ -645,10 +647,10 @@ class ProxyErrorHandler(BaseHandler):
|
||||
self.set_header('Content-Type', 'text/html')
|
||||
# render the template
|
||||
try:
|
||||
html = self.render_template('%s.html' % status_code, **ns)
|
||||
html = await self.render_template('%s.html' % status_code, **ns)
|
||||
except TemplateNotFound:
|
||||
self.log.debug("No template for %d", status_code)
|
||||
html = self.render_template('error.html', **ns)
|
||||
html = await self.render_template('error.html', **ns)
|
||||
|
||||
self.write(html)
|
||||
|
||||
@@ -676,4 +678,5 @@ default_handlers = [
|
||||
(r'/token', TokenPageHandler),
|
||||
(r'/error/(\d+)', ProxyErrorHandler),
|
||||
(r'/health$', HealthCheckHandler),
|
||||
(r'/api/health$', HealthCheckHandler),
|
||||
]
|
||||
|
@@ -7,7 +7,7 @@ from tornado.web import StaticFileHandler
|
||||
|
||||
class CacheControlStaticFilesHandler(StaticFileHandler):
|
||||
"""StaticFileHandler subclass that sets Cache-Control: no-cache without `?v=`
|
||||
|
||||
|
||||
rather than relying on default browser cache behavior.
|
||||
"""
|
||||
|
||||
|
@@ -2,7 +2,9 @@
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import logging
import traceback
from functools import partial
from http.cookies import SimpleCookie
from urllib.parse import urlparse
from urllib.parse import urlunparse
@@ -132,19 +134,25 @@ def log_request(handler):
status < 300 and isinstance(handler, (StaticFileHandler, HealthCheckHandler))
):
# static-file success and 304 Found are debug-level
log_method = access_log.debug
log_level = logging.DEBUG
elif status < 400:
log_method = access_log.info
log_level = logging.INFO
elif status < 500:
log_method = access_log.warning
log_level = logging.WARNING
else:
log_method = access_log.error
log_level = logging.ERROR

uri = _scrub_uri(request.uri)
headers = _scrub_headers(request.headers)

request_time = 1000.0 * handler.request.request_time()

# always log slow responses (longer than 1s) at least info-level
if request_time >= 1000 and log_level < logging.INFO:
log_level = logging.INFO

log_method = partial(access_log.log, log_level)

try:
user = handler.current_user
except (HTTPError, RuntimeError):
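The log_request hunk above stops calling access_log.debug/info/warning/error directly: it first picks a logging level from the response status, bumps slow responses to at least INFO, then binds the chosen level once with functools.partial so there is a single log call site. A self-contained sketch of that pattern (illustrative values, not the full JupyterHub function) is:

import logging
from functools import partial

access_log = logging.getLogger("access")
logging.basicConfig(level=logging.DEBUG)


def log_request(status: int, request_ms: float, message: str) -> None:
    # choose a level from the response status
    if status < 300:
        log_level = logging.DEBUG
    elif status < 400:
        log_level = logging.INFO
    elif status < 500:
        log_level = logging.WARNING
    else:
        log_level = logging.ERROR

    # always log slow responses (longer than 1s) at least at INFO
    if request_ms >= 1000 and log_level < logging.INFO:
        log_level = logging.INFO

    # bind the level once so the call site stays uniform
    log_method = partial(access_log.log, log_level)
    log_method("%s (%.0fms)", message, request_ms)


log_request(200, 1500.0, "GET /hub/home")   # slow: bumped to INFO
log_request(404, 12.0, "GET /hub/missing")  # logged at WARNING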
@@ -3,9 +3,9 @@ Prometheus metrics exported by JupyterHub

Read https://prometheus.io/docs/practices/naming/ for naming
conventions for metrics & labels. We generally prefer naming them
`<noun>_<verb>_<type_suffix>`. So a histogram that's tracking
`jupyterhub_<noun>_<verb>_<type_suffix>`. So a histogram that's tracking
the duration (in seconds) of servers spawning would be called
SERVER_SPAWN_DURATION_SECONDS.
jupyterhub_server_spawn_duration_seconds.

We also create an Enum for each 'status' type label in every metric
we collect. This is to make sure that the metrics exist regardless
@@ -14,6 +14,10 @@ create them, the metric spawn_duration_seconds{status="failure"}
will not actually exist until the first failure. This makes dashboarding
and alerting difficult, so we explicitly list statuses and create
them manually here.

.. versionchanged:: 1.3

added ``jupyterhub_`` prefix to metric names.
"""
from enum import Enum
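The docstring above describes two conventions: metric names gain a jupyterhub_ prefix, and every 'status' label value is created up front so the series exist before the first matching event. A short sketch of what that looks like with prometheus_client (an illustrative subset of the real statuses, not the full module) is:

from enum import Enum

from prometheus_client import Histogram


class ServerSpawnStatus(Enum):
    """Possible values for the 'status' label (illustrative subset)."""

    success = 'success'
    failure = 'failure'

    def __str__(self):
        return self.value


SERVER_SPAWN_DURATION_SECONDS = Histogram(
    'jupyterhub_server_spawn_duration_seconds',
    'time taken for server spawning operation',
    ['status'],
)

# touch each label combination once so Prometheus reports a zero-count
# series for every status, instead of the series only appearing after
# the first matching event
for s in ServerSpawnStatus:
    SERVER_SPAWN_DURATION_SECONDS.labels(status=str(s))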
@@ -21,13 +25,13 @@ from prometheus_client import Gauge
from prometheus_client import Histogram

REQUEST_DURATION_SECONDS = Histogram(
'request_duration_seconds',
'jupyterhub_request_duration_seconds',
'request duration for all HTTP requests',
['method', 'handler', 'code'],
)

SERVER_SPAWN_DURATION_SECONDS = Histogram(
'server_spawn_duration_seconds',
'jupyterhub_server_spawn_duration_seconds',
'time taken for server spawning operation',
['status'],
# Use custom bucket sizes, since the default bucket ranges
@@ -36,25 +40,27 @@ SERVER_SPAWN_DURATION_SECONDS = Histogram(
)

RUNNING_SERVERS = Gauge(
'running_servers', 'the number of user servers currently running'
'jupyterhub_running_servers', 'the number of user servers currently running'
)

TOTAL_USERS = Gauge('total_users', 'total number of users')
TOTAL_USERS = Gauge('jupyterhub_total_users', 'total number of users')

CHECK_ROUTES_DURATION_SECONDS = Histogram(
'check_routes_duration_seconds', 'Time taken to validate all routes in proxy'
'jupyterhub_check_routes_duration_seconds',
'Time taken to validate all routes in proxy',
)

HUB_STARTUP_DURATION_SECONDS = Histogram(
'hub_startup_duration_seconds', 'Time taken for Hub to start'
'jupyterhub_hub_startup_duration_seconds', 'Time taken for Hub to start'
)

INIT_SPAWNERS_DURATION_SECONDS = Histogram(
'init_spawners_duration_seconds', 'Time taken for spawners to initialize'
'jupyterhub_init_spawners_duration_seconds', 'Time taken for spawners to initialize'
)

PROXY_POLL_DURATION_SECONDS = Histogram(
'proxy_poll_duration_seconds', 'duration for polling all routes from proxy'
'jupyterhub_proxy_poll_duration_seconds',
'duration for polling all routes from proxy',
)


@@ -79,7 +85,9 @@ for s in ServerSpawnStatus:


PROXY_ADD_DURATION_SECONDS = Histogram(
'proxy_add_duration_seconds', 'duration for adding user routes to proxy', ['status']
'jupyterhub_proxy_add_duration_seconds',
'duration for adding user routes to proxy',
['status'],
)


@@ -100,7 +108,7 @@ for s in ProxyAddStatus:


SERVER_POLL_DURATION_SECONDS = Histogram(
'server_poll_duration_seconds',
'jupyterhub_server_poll_duration_seconds',
'time taken to poll if server is running',
['status'],
)
@@ -127,7 +135,9 @@ for s in ServerPollStatus:


SERVER_STOP_DURATION_SECONDS = Histogram(
'server_stop_seconds', 'time taken for server stopping operation', ['status']
'jupyterhub_server_stop_seconds',
'time taken for server stopping operation',
['status'],
)


@@ -148,7 +158,7 @@ for s in ServerStopStatus:


PROXY_DELETE_DURATION_SECONDS = Histogram(
'proxy_delete_duration_seconds',
'jupyterhub_proxy_delete_duration_seconds',
'duration for deleting user routes from proxy',
['status'],
)
@@ -256,7 +256,7 @@ class JupyterHubRequestValidator(RequestValidator):
self.db.commit()

def get_authorization_code_scopes(self, client_id, code, redirect_uri, request):
""" Extracts scopes from saved authorization code.
"""Extracts scopes from saved authorization code.
The scopes returned by this method is used to route token requests
based on scopes passed to Authorization Code requests.
With that the token endpoint knows when to include OpenIDConnect
@@ -342,7 +342,7 @@ class JupyterHubRequestValidator(RequestValidator):
orm_access_token = orm.OAuthAccessToken(
client=client,
grant_type=orm.GrantType.authorization_code,
expires_at=orm.OAuthAccessToken.now() + token['expires_in'],
expires_at=int(orm.OAuthAccessToken.now() + token['expires_in']),
refresh_token=token['refresh_token'],
# TODO: save scopes,
# scopes=scopes,
@@ -558,20 +558,25 @@ class JupyterHubOAuthServer(WebApplicationServer):

hash its client_secret before putting it in the database.
"""
# clear existing clients with same ID
for orm_client in self.db.query(orm.OAuthClient).filter_by(
identifier=client_id
):
self.db.delete(orm_client)
self.db.commit()

orm_client = orm.OAuthClient(
identifier=client_id,
secret=hash_token(client_secret),
redirect_uri=redirect_uri,
description=description,
# Update client if it already exists, else create it
# Sqlalchemy doesn't have a good db agnostic UPSERT,
# so we do this manually. It's protected inside a
# transaction, so should fail if there are multiple
# rows with the same identifier.
orm_client = (
self.db.query(orm.OAuthClient).filter_by(identifier=client_id).one_or_none()
)
self.db.add(orm_client)
if orm_client is None:
orm_client = orm.OAuthClient(
identifier=client_id,
)
self.db.add(orm_client)
app_log.info(f'Creating oauth client {client_id}')
else:
app_log.info(f'Updating oauth client {client_id}')
orm_client.secret = hash_token(client_secret)
orm_client.redirect_uri = redirect_uri
orm_client.description = description
self.db.commit()

def fetch_by_client_id(self, client_id):
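The comments in the hunk above explain the change: rather than deleting and re-inserting the OAuth client, the code now looks the row up and either creates or updates it, because SQLAlchemy has no database-agnostic UPSERT. A generic, self-contained sketch of that query-then-create-or-update pattern (illustrative model and names, assuming SQLAlchemy 1.4+, not JupyterHub's orm module) is:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Client(Base):
    __tablename__ = 'clients'
    id = Column(Integer, primary_key=True)
    identifier = Column(String, unique=True)
    description = Column(String)


def upsert_client(db: Session, identifier: str, description: str) -> Client:
    # look up an existing row and update it in place, or create a new one
    client = db.query(Client).filter_by(identifier=identifier).one_or_none()
    if client is None:
        client = Client(identifier=identifier)
        db.add(client)
    client.description = description
    db.commit()
    return client


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    upsert_client(session, 'client-1', 'first version')
    upsert_client(session, 'client-1', 'updated description')

Because identifier is unique, two concurrent inserts of the same client still conflict inside the transaction, which is the property the original comment relies on.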
@@ -579,9 +584,9 @@ class JupyterHubOAuthServer(WebApplicationServer):
return self.db.query(orm.OAuthClient).filter_by(identifier=client_id).first()


def make_provider(session_factory, url_prefix, login_url):
def make_provider(session_factory, url_prefix, login_url, **oauth_server_kwargs):
"""Make an OAuth provider"""
db = session_factory()
validator = JupyterHubRequestValidator(db)
server = JupyterHubOAuthServer(db, validator)
server = JupyterHubOAuthServer(db, validator, **oauth_server_kwargs)
return server