mirror of
https://github.com/jupyterhub/jupyterhub.git
synced 2025-10-08 02:24:08 +00:00
Compare commits
95 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
cdc2151f75 | ||
![]() |
b4a06ea53f | ||
![]() |
5fcaaac331 | ||
![]() |
4ea8fcb031 | ||
![]() |
ca7df636cb | ||
![]() |
759a4f0624 | ||
![]() |
2a89495323 | ||
![]() |
671c8ab78d | ||
![]() |
49aaf5050f | ||
![]() |
0c20f3e867 | ||
![]() |
db7d0920cd | ||
![]() |
ff2db557a8 | ||
![]() |
0cd5e51dd4 | ||
![]() |
b0fbf6a61e | ||
![]() |
9c810b1436 | ||
![]() |
3d1f936a46 | ||
![]() |
2c609d0936 | ||
![]() |
8c3025dc4f | ||
![]() |
d51f9f8998 | ||
![]() |
41583c1322 | ||
![]() |
c65e48b2b6 | ||
![]() |
01aeb84a13 | ||
![]() |
4c2e3f176a | ||
![]() |
554248b083 | ||
![]() |
4a859664da | ||
![]() |
00b37c9415 | ||
![]() |
3a9c631526 | ||
![]() |
4c868cdfb6 | ||
![]() |
96e75bb4ac | ||
![]() |
f09fdf4761 | ||
![]() |
7ef70eb74f | ||
![]() |
5c4eab0c15 | ||
![]() |
8ca8750b04 | ||
![]() |
eb1bf1dc58 | ||
![]() |
7852dbc1dc | ||
![]() |
3caea2a463 | ||
![]() |
6679c389b5 | ||
![]() |
954bbbe7d9 | ||
![]() |
3338de2619 | ||
![]() |
33c09daf5b | ||
![]() |
f3cc79e453 | ||
![]() |
cc0bc531d3 | ||
![]() |
fd2919b36f | ||
![]() |
b6e4225482 | ||
![]() |
18d7003580 | ||
![]() |
873f60781c | ||
![]() |
d1d8c02cb9 | ||
![]() |
67dd7742ef | ||
![]() |
3ee808e35c | ||
![]() |
78369901b2 | ||
![]() |
d7a7589821 | ||
![]() |
8437e66db9 | ||
![]() |
6ea07a7dd0 | ||
![]() |
fc184c4ec7 | ||
![]() |
df4f96eaf9 | ||
![]() |
d8bb3f4402 | ||
![]() |
4082c2ddbc | ||
![]() |
300f49d1ab | ||
![]() |
6abc096cbc | ||
![]() |
a6aba9a7e1 | ||
![]() |
8c3ff64511 | ||
![]() |
104593b9ec | ||
![]() |
495ebe406c | ||
![]() |
5100c60831 | ||
![]() |
bec737bf27 | ||
![]() |
2bb27653e2 | ||
![]() |
e8fbe84ac8 | ||
![]() |
8564ff015c | ||
![]() |
fb85cfb118 | ||
![]() |
25384051aa | ||
![]() |
2623aa5e46 | ||
![]() |
30ebf84bd4 | ||
![]() |
50466843ee | ||
![]() |
c616ab284d | ||
![]() |
41090ceb55 | ||
![]() |
d7939c1721 | ||
![]() |
d93ca55b11 | ||
![]() |
9ff11e6fa4 | ||
![]() |
5f3833bc95 | ||
![]() |
66ddaebf26 | ||
![]() |
2598ac2c1a | ||
![]() |
4ab36e3da6 | ||
![]() |
282cc020b6 | ||
![]() |
6912a5a752 | ||
![]() |
cedf237852 | ||
![]() |
9ff8f3e6ec | ||
![]() |
abc9581a75 | ||
![]() |
02df033227 | ||
![]() |
f82097bf2e | ||
![]() |
2af252c4c3 | ||
![]() |
06c8d22087 | ||
![]() |
95d479af88 | ||
![]() |
aee92985ac | ||
![]() |
ea73931ad0 | ||
![]() |
b0494c203f |
9
.github/workflows/release.yml
vendored
9
.github/workflows/release.yml
vendored
@@ -36,6 +36,7 @@ jobs:
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
cache: pip
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
@@ -148,7 +149,7 @@ jobs:
|
||||
branchRegex: ^\w[\w-.]*$
|
||||
|
||||
- name: Build and push jupyterhub
|
||||
uses: docker/build-push-action@v5
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
platforms: linux/amd64,linux/arm64
|
||||
@@ -171,7 +172,7 @@ jobs:
|
||||
branchRegex: ^\w[\w-.]*$
|
||||
|
||||
- name: Build and push jupyterhub-onbuild
|
||||
uses: docker/build-push-action@v5
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
build-args: |
|
||||
BASE_IMAGE=${{ fromJson(steps.jupyterhubtags.outputs.tags)[0] }}
|
||||
@@ -194,7 +195,7 @@ jobs:
|
||||
branchRegex: ^\w[\w-.]*$
|
||||
|
||||
- name: Build and push jupyterhub-demo
|
||||
uses: docker/build-push-action@v5
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
build-args: |
|
||||
BASE_IMAGE=${{ fromJson(steps.onbuildtags.outputs.tags)[0] }}
|
||||
@@ -220,7 +221,7 @@ jobs:
|
||||
branchRegex: ^\w[\w-.]*$
|
||||
|
||||
- name: Build and push jupyterhub/singleuser
|
||||
uses: docker/build-push-action@v5
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
build-args: |
|
||||
JUPYTERHUB_VERSION=${{ github.ref_type == 'tag' && github.ref_name || format('git:{0}', github.sha) }}
|
||||
|
4
.github/workflows/test-docs.yml
vendored
4
.github/workflows/test-docs.yml
vendored
@@ -61,6 +61,10 @@ jobs:
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
cache: pip
|
||||
cache-dependency-path: |
|
||||
requirements.txt
|
||||
docs/requirements.txt
|
||||
|
||||
- name: Install requirements
|
||||
run: |
|
||||
|
5
.github/workflows/test.yml
vendored
5
.github/workflows/test.yml
vendored
@@ -158,6 +158,11 @@ jobs:
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "${{ matrix.python }}"
|
||||
cache: pip
|
||||
cache-dependency-path: |
|
||||
pyproject.toml
|
||||
requirements.txt
|
||||
ci/oldest-dependencies/requirements.old
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: |
|
||||
|
@@ -16,7 +16,7 @@ ci:
|
||||
repos:
|
||||
# autoformat and lint Python code
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
rev: v0.4.3
|
||||
rev: v0.5.0
|
||||
hooks:
|
||||
- id: ruff
|
||||
types_or:
|
||||
|
@@ -7,7 +7,7 @@ info:
|
||||
license:
|
||||
name: BSD-3-Clause
|
||||
identifier: BSD-3-Clause
|
||||
version: 5.0.0b2
|
||||
version: 5.1.0
|
||||
servers:
|
||||
- url: /hub/api
|
||||
security:
|
||||
@@ -1176,8 +1176,16 @@ paths:
|
||||
example: abc123
|
||||
accept_url:
|
||||
type: string
|
||||
description: The URL for accepting the code
|
||||
description: The URL path for accepting the code
|
||||
example: /hub/accept-share?code=abc123
|
||||
full_accept_url:
|
||||
type:
|
||||
- string
|
||||
- "null"
|
||||
description: |
|
||||
The full URL for accepting the code,
|
||||
if JupyterHub.public_url configuration is defined.
|
||||
example: https://hub.example.org/hub/accept-share?code=abc123
|
||||
security:
|
||||
- oauth2:
|
||||
- shares
|
||||
@@ -1877,7 +1885,14 @@ components:
|
||||
description: the server name. '' for the default server.
|
||||
url:
|
||||
type: string
|
||||
description: the server's URL
|
||||
description: the server's URL (path only when not using subdomains)
|
||||
full_url:
|
||||
type:
|
||||
- string
|
||||
- "null"
|
||||
description: |
|
||||
The full URL of the server (`https://hub.example.org/user/:name/:servername`).
|
||||
`null` unless JupyterHub.public_url or subdomains are configured.
|
||||
ready:
|
||||
type: boolean
|
||||
description: whether the server is ready
|
||||
@@ -2101,8 +2116,9 @@ components:
|
||||
Access the admin page. Permission to take actions via the admin
|
||||
page granted separately.
|
||||
admin:users:
|
||||
Read, write, create and delete users and their authentication
|
||||
state, not including their servers or tokens.
|
||||
Read, modify, create, and delete users and their authentication
|
||||
state, not including their servers or tokens. This is an extremely privileged
|
||||
scope and should be considered tantamount to superuser.
|
||||
admin:auth_state: Read a user’s authentication state.
|
||||
users:
|
||||
Read and write permissions to user models (excluding servers, tokens
|
||||
@@ -2110,8 +2126,8 @@ components:
|
||||
delete:users: Delete users.
|
||||
list:users: List users, including at least their names.
|
||||
read:users:
|
||||
Read user models (excluding including servers, tokens and
|
||||
authentication state).
|
||||
Read user models (including servers, tokens and authentication
|
||||
state).
|
||||
read:users:name: Read names of users.
|
||||
read:users:groups: Read users’ group membership.
|
||||
read:users:activity: Read time of last user activity.
|
||||
@@ -2133,8 +2149,8 @@ components:
|
||||
read:tokens: Read user tokens.
|
||||
admin:groups: Read and write group information, create and delete groups.
|
||||
groups:
|
||||
Read and write group information, including adding/removing users
|
||||
to/from groups.
|
||||
"Read and write group information, including adding/removing any
|
||||
users to/from groups. Note: adding users to groups may affect permissions."
|
||||
list:groups: List groups, including at least their names.
|
||||
read:groups: Read group models.
|
||||
read:groups:name: Read group names.
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(contributing:community)=
|
||||
|
||||
# Community communication channels
|
||||
|
||||
We use different channels of communication for different purposes. Whichever one you use will depend on what kind of communication you want to engage in.
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(contributing:contributors)=
|
||||
|
||||
# Contributors
|
||||
|
||||
Project Jupyter thanks the following people for their help and
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(contributing-docs)=
|
||||
(contributing:docs)=
|
||||
|
||||
# Contributing Documentation
|
||||
|
||||
@@ -13,7 +13,7 @@ stored under the `docs/source` directory) and converts it into various
|
||||
formats for people to read. To make sure the documentation you write or
|
||||
change renders correctly, it is good practice to test it locally.
|
||||
|
||||
1. Make sure you have successfully completed {ref}`contributing/setup`.
|
||||
1. Make sure you have successfully completed {ref}`contributing:setup`.
|
||||
|
||||
2. Install the packages required to build the docs.
|
||||
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(contributing)=
|
||||
|
||||
# Contributing
|
||||
|
||||
We want you to contribute to JupyterHub in ways that are most exciting
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(contributing:roadmap)=
|
||||
|
||||
# The JupyterHub roadmap
|
||||
|
||||
This roadmap collects "next steps" for JupyterHub. It is about creating a
|
||||
|
@@ -1,7 +1,9 @@
|
||||
(contributing:security)=
|
||||
|
||||
# Reporting security issues in Jupyter or JupyterHub
|
||||
|
||||
If you find a security vulnerability in Jupyter or JupyterHub,
|
||||
whether it is a failure of the security model described in [Security Overview](web-security)
|
||||
whether it is a failure of the security model described in [Security Overview](explanation:security)
|
||||
or a failure in implementation,
|
||||
please report it to <mailto:security@ipython.org>.
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(contributing/setup)=
|
||||
(contributing:setup)=
|
||||
|
||||
# Setting up a development install
|
||||
|
||||
|
@@ -11,7 +11,7 @@ can find them under the [jupyterhub/tests](https://github.com/jupyterhub/jupyter
|
||||
|
||||
## Running the tests
|
||||
|
||||
1. Make sure you have completed {ref}`contributing/setup`.
|
||||
1. Make sure you have completed {ref}`contributing:setup`.
|
||||
Once you are done, you would be able to run `jupyterhub` from the command line and access it from your web browser.
|
||||
This ensures that the dev environment is properly set up for tests to run.
|
||||
|
||||
@@ -126,7 +126,7 @@ For more information on asyncio and event-loops, here are some resources:
|
||||
|
||||
### All the tests are failing
|
||||
|
||||
Make sure you have completed all the steps in {ref}`contributing/setup` successfully, and are able to access JupyterHub from your browser at http://localhost:8000 after starting `jupyterhub` in your command line.
|
||||
Make sure you have completed all the steps in {ref}`contributing:setup` successfully, and are able to access JupyterHub from your browser at http://localhost:8000 after starting `jupyterhub` in your command line.
|
||||
|
||||
## Code formatting and linting
|
||||
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(explanation:capacity-planning)=
|
||||
|
||||
# Capacity planning
|
||||
|
||||
General capacity planning advice for JupyterHub is hard to give,
|
||||
|
430
docs/source/explanation/concepts.md
Normal file
430
docs/source/explanation/concepts.md
Normal file
@@ -0,0 +1,430 @@
|
||||
(explanation:concepts)=
|
||||
|
||||
# JupyterHub: A conceptual overview
|
||||
|
||||
```{warning}
|
||||
This page is missing cross-links to other parts of
|
||||
the documentation. You can help by adding them!
|
||||
```
|
||||
|
||||
JupyterHub is not what you think it is. Most things you think are
|
||||
part of JupyterHub are actually handled by some other component, for
|
||||
example the spawner or notebook server itself, and it's not always
|
||||
obvious how the parts relate. The knowledge contained here hasn't
|
||||
been assembled in one place before, and is essential to understand
|
||||
when setting up a sufficiently complex Jupyter(Hub) setup.
|
||||
|
||||
This document was originally written to assist in debugging: very
|
||||
often, the actual problem is not where one thinks it is and thus
|
||||
people can't easily debug. In order to tell this story, we start at
|
||||
JupyterHub and go all the way down to the fundamental components of
|
||||
Jupyter.
|
||||
|
||||
In this document, we occasionally leave things out or bend the truth
|
||||
where it helps in explanation, and give our explanations in terms of
|
||||
Python even though Jupyter itself is language-neutral. The "(&)"
|
||||
symbol highlights important points where this page leaves out or bends
|
||||
the truth for simplification of explanation, but there is more if you
|
||||
dig deeper.
|
||||
|
||||
This guide is long, but after reading it you will know all major
|
||||
components in the Jupyter ecosystem and everything else you read
|
||||
should make sense.
|
||||
|
||||
## What is Jupyter?
|
||||
|
||||
Before we get too far, let's remember what our end goal is. A
|
||||
**Jupyter Notebook** is nothing more than a Python(&) process
|
||||
which is getting commands from a web browser and displaying the output
|
||||
via that browser. What the process actually sees is roughly like
|
||||
getting commands on standard input(&) and writing to standard
|
||||
output(&). There is nothing intrinsically special about this process
|
||||
|
||||
- it can do anything a normal Python process can do, and nothing more.
|
||||
The **Jupyter kernel** handles capturing output and converting things
|
||||
such as graphics to a form usable by the browser.
|
||||
|
||||
Everything we explain below is building up to this, going through many
|
||||
different layers which give you many ways of customizing how this
|
||||
process runs.
|
||||
|
||||
## JupyterHub
|
||||
|
||||
**JupyterHub** is the central piece that provides multi-user
|
||||
login capabilities. Despite this, the end user only briefly interacts with
|
||||
JupyterHub and most of the actual Jupyter session does not relate to
|
||||
the hub at all: the hub mainly handles authentication and creating (JupyterHub calls it "spawning") the
|
||||
single-user server. In short, anything which is related to _starting_
|
||||
the user's workspace/environment is about JupyterHub, anything about
|
||||
_running_ usually isn't.
|
||||
|
||||
If you have problems connecting the authentication, spawning, and the
|
||||
proxy (explained below), the issue is usually with JupyterHub. To
|
||||
debug, JupyterHub has extensive logs which get printed to its console
|
||||
and can be used to discover most problems.
|
||||
|
||||
The main pieces of JupyterHub are:
|
||||
|
||||
### Authenticator
|
||||
|
||||
JupyterHub itself doesn't actually manage your users. It has a
|
||||
database of users, but it is usually connected with some other system
|
||||
that manages the usernames and passwords. When someone tries to log
|
||||
in to JupyterHub, it asks the
|
||||
**authenticator**([basics](authenticators),
|
||||
[reference](../reference/authenticators)) if the
|
||||
username/password is valid(&). The authenticator returns a username(&),
|
||||
which is passed on to the spawner, which has to use it to start that
|
||||
user's environment. The authenticator can also return user
|
||||
groups and admin status of users, so that JupyterHub can do some
|
||||
higher-level management.
|
||||
|
||||
The following authenticators are included with JupyterHub:
|
||||
|
||||
- **PAMAuthenticator** uses the standard Unix/Linux operating system
|
||||
functions to check users. Roughly, if someone already has access to
|
||||
the machine (they can log in by ssh), they will be able to log in to
|
||||
JupyterHub without any other setup. Thus, JupyterHub fills the role
|
||||
of a ssh server, but providing a web-browser based way to access the
|
||||
machine.
|
||||
|
||||
There are [plenty of others to choose from](https://github.com/jupyterhub/jupyterhub/wiki/Authenticators).
|
||||
You can connect to almost any other existing service to manage your
|
||||
users. You either use all users from this other service (e.g. your
|
||||
company), or enable only the allowed users (e.g. your group's
|
||||
Github usernames). Some other popular authenticators include:
|
||||
|
||||
- **OAuthenticator** uses the standard OAuth protocol to verify users.
|
||||
For example, you can easily use Github to authenticate your users -
|
||||
people have a "click to login with Github" button. This is often
|
||||
done with an allowlist to only allow certain users.
|
||||
|
||||
- **NativeAuthenticator** actually stores and validates its own
|
||||
usernames and passwords, unlike most other authenticators. Thus,
|
||||
you can manage all your users within JupyterHub only.
|
||||
|
||||
- There are authenticators for LTI (learning management systems),
|
||||
Shibboleth, Kerberos - and so on.
|
||||
|
||||
The authenticator is configured with the
|
||||
`c.JupyterHub.authenticator_class` configuration option in the
|
||||
`jupyterhub_config.py` file.
|
||||
|
||||
The authenticator runs internally to the Hub process but communicates
|
||||
with outside services.
|
||||
|
||||
If you have trouble logging in, this is usually a problem of the
|
||||
authenticator. The authenticator logs are part of the JupyterHub
|
||||
logs, but there may also be relevant information in whatever external
|
||||
services you are using.
|
||||
|
||||
### Spawner
|
||||
|
||||
The **spawner** ([basics](spawners),
|
||||
[reference](../reference/spawners)) is the real core of
|
||||
JupyterHub: when someone wants a notebook server, the spawner allocates
|
||||
resources and starts the server. The notebook server could run on the
|
||||
same machine as JupyterHub, on another machine, on some cloud service,
|
||||
or more. Administrators can limit resources (CPU, memory) or isolate users
|
||||
from each other - if the spawner supports it. They can also do no
|
||||
limiting and allow any user to access any other user's files if they
|
||||
are not configured properly.
|
||||
|
||||
Some basic spawners included in JupyterHub are:
|
||||
|
||||
- **LocalProcessSpawner** is built into JupyterHub. Upon launch it tries
|
||||
to switch users to the given username (`su` (&)) and start the
|
||||
notebook server. It requires that the hub be run as root (because
|
||||
only root has permission to start processes as other user IDs).
|
||||
LocalProcessSpawner is no different than a user logging in with
|
||||
something like `ssh` and running `jupyter notebook`. PAMAuthenticator and
|
||||
LocalProcessSpawner is the most basic way of using JupyterHub (and
|
||||
what it does out of the box) and makes the hub not too dissimilar to
|
||||
an advanced ssh server.
|
||||
|
||||
There are [many more advanced spawners](/reference/spawners), and to
|
||||
show the diversity of spawning strategies, some are listed below:
|
||||
|
||||
- **SudoSpawner** is like LocalProcessSpawner but lets you run
|
||||
JupyterHub without root. `sudo` has to be configured to allow the
|
||||
hub's user to run processes under other user IDs.
|
||||
|
||||
- **SystemdSpawner** uses Systemd to start other processes. It can
|
||||
isolate users from each other and provide resource limiting.
|
||||
|
||||
- **DockerSpawner** runs stuff in Docker, a containerization system.
|
||||
This lets you fully isolate users, limit CPU, memory, and provide
|
||||
other container images to fully customize the environment.
|
||||
|
||||
- **KubeSpawner** runs on Kubernetes, a cloud orchestration
|
||||
system. The spawner can easily limit users and provide cloud
|
||||
scaling - but the spawner doesn't actually do that, Kubernetes
|
||||
does. The spawner just tells Kubernetes what to do. If you want to
|
||||
get KubeSpawner to do something, first you would figure out how to
|
||||
do it in Kubernetes, then figure out how to tell KubeSpawner to tell
|
||||
Kubernetes that. Actually... this is true for most spawners.
|
||||
|
||||
- **BatchSpawner** runs on computer clusters with batch job scheduling
|
||||
systems (e.g Slurm, HTCondor, PBS, etc). The user processes are run
|
||||
as batch jobs, having access to all the data and software that the
|
||||
users normally will.
|
||||
|
||||
In short, spawners are the interface to the rest of the operating
|
||||
system, and to configure them right you need to know a bit about how
|
||||
the corresponding operating system service works.
|
||||
|
||||
The spawner is responsible for the environment of the single-user
|
||||
notebook servers (described in the next section). In the end, it just
|
||||
makes a choice about how to start these processes: for example, the
|
||||
Docker spawner starts a normal Docker container and runs the right
|
||||
command inside of it. Thus, the spawner is responsible for setting
|
||||
what kind of software and data is available to the user.
|
||||
|
||||
The spawner runs internally to the Hub process but communicates with
|
||||
outside services. It is configured by `c.JupyterHub.spawner_class` in
|
||||
`jupyterhub_config.py`.
|
||||
|
||||
If a user tries to launch a notebook server and it doesn't work, the
|
||||
error is usually with the spawner or the notebook server (as described
|
||||
in the next section). Each spawner outputs some logs to the main
|
||||
JupyterHub logs, but may also have logs in other places depending on
|
||||
what services it interacts with (for example, the Docker spawner
|
||||
somehow puts logs in the Docker system services, Kubernetes through
|
||||
the `kubectl` API).
|
||||
|
||||
### Proxy
|
||||
|
||||
The JupyterHub **proxy** relays connections between the users
|
||||
and their single-user notebook servers. What this basically means is
|
||||
that the hub itself can shut down and the proxy can continue to
|
||||
allow users to communicate with their notebook servers. (This
|
||||
further emphasizes that the hub is responsible for starting, not
|
||||
running, the notebooks). By default, the hub starts the proxy
|
||||
automatically
|
||||
and stops the proxy when the hub stops (so that connections get
|
||||
interrupted). But when you [configure the proxy to run
|
||||
separately](howto:separate-proxy),
|
||||
user's connections will continue to work even without the hub.
|
||||
|
||||
The default proxy is **ConfigurableHttpProxy** which is simple but
|
||||
effective. A more advanced option is the [**Traefik Proxy**](https://blog.jupyter.org/introducing-traefikproxy-a-new-jupyterhub-proxy-based-on-traefik-4839e972faf6),
|
||||
which gives you redundancy and high-availability.
|
||||
|
||||
When users "connect to JupyterHub", they _always_ first connect to the
|
||||
proxy and the proxy relays the connection to the hub. Thus, the proxy
|
||||
is responsible for SSL and accepting connections from the rest of the
|
||||
internet. The user uses the hub to authenticate and start the server,
|
||||
and then the hub connects back to the proxy to adjust the proxy routes
|
||||
for the user's server (e.g. the web path `/user/someone` redirects to
|
||||
the server of someone at a certain internal address). The proxy has
|
||||
to be able to internally connect to both the hub and all the
|
||||
single-user servers.
|
||||
|
||||
The proxy always runs as a separate process to JupyterHub (even though
|
||||
JupyterHub can start it for you). JupyterHub has one set of
|
||||
configuration options for the proxy addresses (`bind_url`) and one for
|
||||
the hub (`hub_bind_url`). If `bind_url` is given, it is just passed to
|
||||
the automatic proxy to tell it what to do.
|
||||
|
||||
If you have problems after users are redirected to their single-user
|
||||
notebook servers, or making the first connection to the hub, it is
|
||||
usually caused by the proxy. The ConfigurableHttpProxy's logs are
|
||||
mixed with JupyterHub's logs if it's started through the hub (the
|
||||
default case), otherwise from whatever system runs the proxy (if you
|
||||
do configure it, you'll know).
|
||||
|
||||
### Services
|
||||
|
||||
JupyterHub has the concept of **services** ([basics](tutorial:services),
|
||||
[reference](services-reference)), which are other web services
|
||||
started by the hub, but otherwise are not necessarily related to the
|
||||
hub itself. They are often used to do things related to Jupyter
|
||||
(things that user interacts with, usually not the hub), but could
|
||||
always be run some other way. Running from the hub provides an easy
|
||||
way to get Hub API tokens and authenticate users against the hub. It
|
||||
can also automatically add a proxy route to forward web requests to
|
||||
that service.
|
||||
|
||||
A common example of a service is the [cull idle
|
||||
servers](https://github.com/jupyterhub/jupyterhub-idle-culler)
|
||||
service. When started by the hub, it automatically gets admin API
|
||||
tokens. It uses the API to list all running servers, compare against
|
||||
activity timeouts, and shut down servers exceeding the limits. Even
|
||||
though this is an intrinsic part of JupyterHub, it is only loosely
|
||||
coupled and running as a service provides convenience of
|
||||
authentication - it could be just as well run some other way, with a
|
||||
manually provided API token.
|
||||
|
||||
The configuration option `c.JupyterHub.services` is used to start
|
||||
services from the hub.
|
||||
|
||||
When a service is started from JupyterHub automatically, its logs are
|
||||
included in the JupyterHub logs.
|
||||
|
||||
## Single-user notebook server
|
||||
|
||||
The **single-user notebook server** is the same thing you get by
|
||||
running `jupyter notebook` or `jupyter lab` from the command line -
|
||||
the actual Jupyter user interface for a single person.
|
||||
|
||||
The role of the spawner is to start this server - basically, running
|
||||
the command `jupyter notebook`. Actually it doesn't run that, it runs
|
||||
`jupyterhub-singleuser` which first communicates with the hub to say
|
||||
"I'm alive" before running a completely normal Jupyter server. The
|
||||
single-user server can be JupyterLab or classic notebooks. By this
|
||||
point, the hub is almost completely out of the picture (the web
|
||||
traffic is going through proxy unchanged). Also by this time, the
|
||||
spawner has already decided the environment which this single-user
|
||||
server will have and the single-user server has to deal with that.
|
||||
|
||||
The spawner starts the server using `jupyterhub-singleuser` with some
|
||||
environment variables like `JUPYTERHUB_API_TOKEN` and
|
||||
`JUPYTERHUB_BASE_URL` which tell the single-user server how to connect
|
||||
back to the hub in order to say that it's ready.
|
||||
|
||||
The single-user server options are **JupyterLab** and **classic
|
||||
Jupyter Notebook**. They both run through the same backend server process--the web
|
||||
frontend is an option when it is starting. The spawner can choose the
|
||||
command line when it starts the single-user server. Extensions are a
|
||||
property of the single-user server (in two parts: there can be a part
|
||||
that runs in the Python server process, and parts that run in
|
||||
javascript in lab or notebook).
|
||||
|
||||
If one wants to install software for users, it is not a matter of
|
||||
"installing it for JupyterHub" - it's a matter of installing it for the
|
||||
single-user server, which might be the same environment as the hub,
|
||||
but not necessarily. (see below - it's a matter of the kernels!)
|
||||
|
||||
After the single-user notebook server is started, any errors are only
|
||||
an issue of the single-user notebook server. Sometimes, it seems like
|
||||
the spawner is failing, but really the spawner is working but the
|
||||
single-user notebook server dies right away (in this case, you need to
|
||||
find the problem with the single-user server and adjust the spawner to
|
||||
start it correctly or fix the environment). This can happen, for
|
||||
example, if the spawner doesn't set an environment variable or doesn't
|
||||
provide storage.
|
||||
|
||||
The single-user server's logs are printed to stdout/stderr, and the
|
||||
spawner decides where those streams are directed, so if you
|
||||
notice problems at this phase you need to check your spawner for
|
||||
instructions for accessing the single-user logs. For example, the
|
||||
LocalProcessSpawner logs are just outputted to the same JupyterHub
|
||||
output logs, the SystemdSpawner logs are
|
||||
written to the Systemd journal, Docker and Kubernetes logs are written
|
||||
to Docker and Kubernetes respectively, and batchspawner output goes to
|
||||
the normal output places of batch jobs and is an explicit
|
||||
configuration option of the spawner.
|
||||
|
||||
**(Jupyter) Notebook** is the classic interface, where each notebook
|
||||
opens in a separate tab. It is traditionally started by `jupyter
|
||||
notebook`. Does anything need to be said here?
|
||||
|
||||
**JupyterLab** is the new interface, where multiple notebooks are
|
||||
openable in the same tab in an IDE-like environment. It is
|
||||
traditionally started with `jupyter lab`. Both Notebook and Lab use
|
||||
the same `.ipynb` file format.
|
||||
|
||||
JupyterLab is run through the same server file, but at a path `/lab`
|
||||
instead of `/tree`. Thus, they can be active at the same time in the
|
||||
backend and you can switch between them at runtime by changing your
|
||||
URL path.
|
||||
|
||||
Extensions need to be re-written for JupyterLab (if moving from
|
||||
classic notebooks). But, the server-side of the extensions can be
|
||||
shared by both.
|
||||
|
||||
## Kernel
|
||||
|
||||
The commands you run in the notebook session are not executed in the same process as
|
||||
the notebook itself, but in a separate **Jupyter kernel**. There are [many
|
||||
kernels
|
||||
available](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels).
|
||||
|
||||
As a basic approximation, a **Jupyter kernel** is a process which
|
||||
accepts commands (cells that are run) and returns the output to
|
||||
Jupyter to display. One example is the **IPython Jupyter kernel**,
|
||||
which runs Python. There is nothing special about it, it can be
|
||||
considered a normal Python process. The kernel process can be
|
||||
approximated in UNIX terms as a process that takes commands on stdin
|
||||
and returns stuff on stdout(&). Obviously, it's more because it has
|
||||
to be able to disentangle all the possible outputs, such as figures,
|
||||
and present it to the user in a web browser.
|
||||
|
||||
Kernel communication is via the ZeroMQ protocol on the local
|
||||
computer. Kernels are separate processes from the main single-user
|
||||
notebook server (and thus obviously, different from the JupyterHub
|
||||
process and everything else). By default (and unless you do something
|
||||
special), kernels share the same environment as the notebook server
|
||||
(data, resource limits, permissions, user id, etc.). But they _can_
|
||||
run in a separate Python environment from the single-user server
|
||||
(search `--prefix` in the [ipykernel installation
|
||||
instructions](https://ipython.readthedocs.io/en/stable/install/kernel_install.html))
|
||||
There are also more fancy techniques such as the [Jupyter Kernel
|
||||
Gateway](https://jupyter-kernel-gateway.readthedocs.io/) and [Enterprise
|
||||
Gateway](https://jupyter-enterprise-gateway.readthedocs.io/), which
|
||||
allow you to run the kernels on a different machine and possibly with
|
||||
a different environment.
|
||||
|
||||
A kernel doesn't just execute its language - cell magics such as `%`,
|
||||
`%%`, and `!` are a property of the kernel - in particular, these are
|
||||
IPython kernel commands and don't necessarily work in any other
|
||||
kernel unless they specifically support them.
|
||||
|
||||
Kernels are yet _another_ layer of configurability.
|
||||
Each kernel can run a different programming language, with different
|
||||
software, and so on. By default, they would run in the same
|
||||
environment as the single-user notebook server, and the most common
|
||||
other way they are configured is by
|
||||
running in different Python virtual environments or conda
|
||||
environments. They can be started and killed independently (there is
|
||||
normally one per notebook you have open). The kernel uses
|
||||
most of your memory and CPU when running Jupyter - the rest of the web
|
||||
interface has a small footprint.
|
||||
|
||||
You can list your installed kernels with `jupyter kernelspec list`.
|
||||
If you look at one of `kernel.json` files in those directories, you
|
||||
will see exactly what command is run. These are normally
|
||||
automatically made by the kernels, but can be edited as needed. [The
|
||||
spec](https://jupyter-client.readthedocs.io/en/stable/kernels.html)
|
||||
tells you even more.
|
||||
|
||||
The kernel normally has to be reachable by the single-user notebook server
|
||||
but the gateways mentioned above can get around that limitation.
|
||||
|
||||
If you get problems with "Kernel died" or some other error in a single
|
||||
notebook but the single-user notebook server stays working, it is
|
||||
usually a problem with the kernel. It could be that you are trying to
|
||||
use more resources than you are allowed and the symptom is the kernel
|
||||
getting killed. It could be that it crashes for some other reason.
|
||||
In these cases, you need to find the kernel logs and investigate.
|
||||
|
||||
The debug logs for the kernel are normally mixed in with the
|
||||
single-user notebook server logs.
|
||||
|
||||
## JupyterHub distributions
|
||||
|
||||
There are several "distributions" which automatically install all of
|
||||
the things above and configure them for a certain purpose. They are
|
||||
good ways to get started, but if you have custom needs, eventually it
|
||||
may become hard to adapt them to your requirements.
|
||||
|
||||
- [**Zero to JupyterHub with
|
||||
Kubernetes**](https://zero-to-jupyterhub.readthedocs.io/) installs
|
||||
an entire scaleable system using Kubernetes. Uses KubeSpawner,
|
||||
....Authenticator, ....
|
||||
|
||||
- [**The Littlest JupyterHub**](https://tljh.jupyter.org/) installs JupyterHub on a single system
|
||||
using SystemdSpawner and NativeAuthenticator (which manages users
|
||||
itself).
|
||||
|
||||
- [**JupyterHub the hard way**](https://github.com/jupyterhub/jupyterhub-the-hard-way/blob/master/docs/installation-guide-hard.md)
|
||||
takes you through everything yourself. It is a natural companion to
|
||||
this guide, since you get to experience every little bit.
|
||||
|
||||
## What's next?
|
||||
|
||||
Now you know everything. Well, you know how everything relates, but
|
||||
there are still plenty of details, implementations, and exceptions.
|
||||
When setting up JupyterHub, the first step is to consider the above
|
||||
layers, decide the right option for each of them, then begin putting
|
||||
everything together.
|
@@ -1,4 +1,4 @@
|
||||
(hub-database)=
|
||||
(explanation:hub-database)=
|
||||
|
||||
# The Hub's Database
|
||||
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(explanation)=
|
||||
|
||||
# Explanation
|
||||
|
||||
_Explanation_ documentation provide big-picture descriptions of how JupyterHub works. This section is meant to build your understanding of particular topics.
|
||||
@@ -5,6 +7,7 @@ _Explanation_ documentation provide big-picture descriptions of how JupyterHub w
|
||||
```{toctree}
|
||||
:maxdepth: 1
|
||||
|
||||
concepts
|
||||
capacity-planning
|
||||
database
|
||||
websecurity
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(jupyterhub-oauth)=
|
||||
(explanation:hub-oauth)=
|
||||
|
||||
# JupyterHub and OAuth
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(singleuser)=
|
||||
(explanation:singleuser)=
|
||||
|
||||
# The JupyterHub single-user server
|
||||
|
||||
@@ -24,7 +24,7 @@ It's the same!
|
||||
|
||||
## Single-user server authentication
|
||||
|
||||
Implementation-wise, JupyterHub single-user servers are a special-case of {ref}`services`
|
||||
Implementation-wise, JupyterHub single-user servers are a special-case of {ref}`services-reference`
|
||||
and as such use the same (OAuth) authentication mechanism (more on OAuth in JupyterHub at [](oauth)).
|
||||
This is primarily implemented in the {class}`~.HubOAuth` class.
|
||||
|
||||
@@ -104,6 +104,6 @@ But technically, all JupyterHub cares about is that it is:
|
||||
1. an http server at the prescribed URL, accessible from the Hub and proxy, and
|
||||
2. authenticated via [OAuth](oauth) with the Hub (it doesn't even have to do this, if you want to do your own authentication, as is done in BinderHub)
|
||||
|
||||
which means that you can customize JupyterHub to launch _any_ web application that meets these criteria, by following the specifications in {ref}`services`.
|
||||
which means that you can customize JupyterHub to launch _any_ web application that meets these criteria, by following the specifications in {ref}`services-reference`.
|
||||
|
||||
Most of the time, though, it's easier to use [jupyter-server-proxy](https://jupyter-server-proxy.readthedocs.io) if you want to launch additional web applications in JupyterHub.
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(web-security)=
|
||||
(explanation:security)=
|
||||
|
||||
# Security Overview
|
||||
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(faq)=
|
||||
|
||||
# Frequently asked questions
|
||||
|
||||
## How do I share links to notebooks?
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(faq:institutional)=
|
||||
|
||||
# Institutional FAQ
|
||||
|
||||
This page contains common questions from users of JupyterHub,
|
||||
@@ -130,7 +132,7 @@ level for several years, and makes a number of "default" security decisions that
|
||||
users.
|
||||
|
||||
- For security considerations in the base JupyterHub application,
|
||||
[see the JupyterHub security page](web-security).
|
||||
[see the JupyterHub security page](explanation:security).
|
||||
- For security considerations when deploying JupyterHub on Kubernetes, see the
|
||||
[JupyterHub on Kubernetes security page](https://z2jh.jupyter.org/en/latest/security.html).
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(troubleshooting)=
|
||||
(faq:troubleshooting)=
|
||||
|
||||
# Troubleshooting
|
||||
|
||||
@@ -167,7 +167,7 @@ When your whole JupyterHub sits behind an organization proxy (_not_ a reverse pr
|
||||
|
||||
### Launching Jupyter Notebooks to run as an externally managed JupyterHub service with the `jupyterhub-singleuser` command returns a `JUPYTERHUB_API_TOKEN` error
|
||||
|
||||
{ref}`services` allow processes to interact with JupyterHub's REST API. Example use-cases include:
|
||||
{ref}`services-reference` allow processes to interact with JupyterHub's REST API. Example use-cases include:
|
||||
|
||||
- **Secure Testing**: provide a canonical Jupyter Notebook for testing production data to reduce the number of entry points into production systems.
|
||||
- **Grading Assignments**: provide access to shared Jupyter Notebooks that may be used for management tasks such as grading assignments.
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(api-only)=
|
||||
(howto:api-only)=
|
||||
|
||||
# Deploying JupyterHub in "API only mode"
|
||||
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(howto:config:gh-oauth)=
|
||||
|
||||
# Configure GitHub OAuth
|
||||
|
||||
In this example, we show a configuration file for a fairly standard JupyterHub
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(howto:config:reverse-proxy)=
|
||||
|
||||
# Using a reverse proxy
|
||||
|
||||
In the following example, we show configuration files for a JupyterHub server
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(howto:config:no-sudo)=
|
||||
|
||||
# Run JupyterHub without root privileges using `sudo`
|
||||
|
||||
**Note:** Setting up `sudo` permissions involves many pieces of system
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(howto:config:user-env)=
|
||||
|
||||
# Configuring user environments
|
||||
|
||||
To deploy JupyterHub means you are providing Jupyter notebook environments for
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(howto:log-messages)=
|
||||
|
||||
# Interpreting common log messages
|
||||
|
||||
When debugging errors and outages, looking at the logs emitted by
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(howto:custom-proxy)=
|
||||
|
||||
# Writing a custom Proxy implementation
|
||||
|
||||
JupyterHub 0.8 introduced the ability to write a custom implementation of the
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(using-jupyterhub-rest-api)=
|
||||
(howto:rest-api)=
|
||||
|
||||
# Using JupyterHub's REST API
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(separate-proxy)=
|
||||
(howto:separate-proxy)=
|
||||
|
||||
# Running proxy separately from the hub
|
||||
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(howto:templates)=
|
||||
|
||||
# Working with templates and UI
|
||||
|
||||
The pages of the JupyterHub application are generated from
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(upgrading-v5)=
|
||||
(howto:upgrading-v5)=
|
||||
|
||||
# Upgrading to JupyterHub 5
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(upgrading-jupyterhub)=
|
||||
(howto:upgrading-jupyterhub)=
|
||||
|
||||
# Upgrading JupyterHub
|
||||
|
||||
|
@@ -186,14 +186,14 @@ An **access scope** is used to govern _access_ to a JupyterHub service or a user
|
||||
This means making API requests, or visiting via a browser using OAuth.
|
||||
Without the appropriate access scope, a user or token should not be permitted to make requests of the service.
|
||||
|
||||
When you attempt to access a service or server authenticated with JupyterHub, it will begin the [oauth flow](jupyterhub-oauth) for issuing a token that can be used to access the service.
|
||||
When you attempt to access a service or server authenticated with JupyterHub, it will begin the [oauth flow](explanation:hub-oauth) for issuing a token that can be used to access the service.
|
||||
If the user does not have the access scope for the relevant service or server, JupyterHub will not permit the oauth process to complete.
|
||||
If oauth completes, the token will have at least the access scope for the service.
|
||||
For minimal permissions, this is the _only_ scope granted to tokens issued during oauth by default,
|
||||
but can be expanded via {attr}`.Spawner.oauth_client_allowed_scopes` or a service's [`oauth_client_allowed_scopes`](service-credentials) configuration.
|
||||
|
||||
:::{seealso}
|
||||
[Further explanation of OAuth in JupyterHub](jupyterhub-oauth)
|
||||
[Further explanation of OAuth in JupyterHub](explanation:hub-oauth)
|
||||
:::
|
||||
|
||||
If a given service or single-user server can be governed by a single boolean "yes, you can use this service" or "no, you can't," or limiting via other existing scopes, access scopes are enough to manage access to the service.
|
||||
@@ -229,6 +229,32 @@ access:servers!server
|
||||
access:servers!server=username/
|
||||
: access to only `username`'s _default_ server.
|
||||
|
||||
(granting-scopes)=
|
||||
|
||||
### Considerations when allowing users to grant permissions via the `groups` scope
|
||||
|
||||
In general, permissions are fixed by role assignments in configuration (or via [Authenticator-managed roles](#authenticator-roles) in JupyterHub 5) and can only be modified by administrators who can modify the Hub configuration.
|
||||
|
||||
There is only one scope that allows users to modify permissions of themselves or others at runtime instead of via configuration:
|
||||
the `groups` scope, which allows adding and removing users from one or more groups.
|
||||
With the `groups` scope, a user can add or remove any users to/from any group.
|
||||
With the `groups!group=name` filtered scope, a user can add or remove any users to/from a specific group.
|
||||
There are two ways in which adding a user to a group may affect their permissions:
|
||||
|
||||
- if the group is assigned one or more roles, adding a user to the group may increase their permissions (this is usually the point!)
|
||||
- if the group is the _target_ of a filter on this or another group, such as `access:servers!group=students`, adding a user to the group can grant _other_ users elevated access to that user's resources.
|
||||
|
||||
With these in mind, when designing your roles, do not grant users the `groups` scope for any groups which:
|
||||
|
||||
- have roles the user should not have authority over, or
|
||||
- would grant them access they shouldn't have for _any_ user (e.g. don't grant `teachers` both `access:servers!group=students` and `groups!group=students` which is tantamount to the unrestricted `access:servers` because they control which users the `group=students` filter applies to).
|
||||
|
||||
If a group does not have role assignments and the group is not present in any `!group=` filter, there should be no permissions-related consequences for adding users to groups.
|
||||
|
||||
:::{note}
|
||||
The legacy `admin` property of users, which grants extreme superuser permissions and is generally discouraged in favor of more specific roles and scopes, may be modified only by other users with the `admin` property (e.g. added via `admin_users`).
|
||||
:::
|
||||
|
||||
(custom-scopes)=
|
||||
|
||||
### Custom scopes
|
||||
|
@@ -11,7 +11,7 @@ No other database records are affected.
|
||||
## Upgrade steps
|
||||
|
||||
1. All running **servers must be stopped** before proceeding with the upgrade.
|
||||
2. To upgrade the Hub, follow the [Upgrading JupyterHub](upgrading-jupyterhub) instructions.
|
||||
2. To upgrade the Hub, follow the [Upgrading JupyterHub](howto:upgrading-jupyterhub) instructions.
|
||||
```{attention}
|
||||
We advise against defining any new roles in the `jupyterhub.config.py` file right after the upgrade is completed and JupyterHub restarted for the first time. This preserves the 'current' state of the Hub. You can define and assign new roles on any other following startup.
|
||||
```
|
||||
|
@@ -11,7 +11,7 @@
|
||||
:Release: {{ version }}
|
||||
|
||||
JupyterHub also provides a REST API for administration of the Hub and users.
|
||||
The documentation on [Using JupyterHub's REST API](using-jupyterhub-rest-api) provides
|
||||
The documentation on [Using JupyterHub's REST API](howto:rest-api) provides
|
||||
information on:
|
||||
|
||||
- what you can do with the API
|
||||
|
File diff suppressed because one or more lines are too long
@@ -32,3 +32,11 @@ export JUPYTERHUB_METRICS_PREFIX=jupyterhub_prod
|
||||
```
|
||||
|
||||
would result in the metric `jupyterhub_prod_active_users`, etc.
|
||||
|
||||
## Configuring metrics
|
||||
|
||||
```{eval-rst}
|
||||
.. currentmodule:: jupyterhub.metrics
|
||||
|
||||
.. autoconfigurable:: PeriodicMetricsCollector
|
||||
```
|
||||
|
@@ -1,4 +1,4 @@
|
||||
(services)=
|
||||
(services-reference)=
|
||||
|
||||
# Services
|
||||
|
||||
@@ -213,7 +213,7 @@ c.JupyterHub.load_roles = [
|
||||
]
|
||||
```
|
||||
|
||||
When a service has a configured URL or explicit `oauth_client_id` or `oauth_redirect_uri`, it can operate as an [OAuth client](jupyterhub-oauth).
|
||||
When a service has a configured URL or explicit `oauth_client_id` or `oauth_redirect_uri`, it can operate as an [OAuth client](explanation:hub-oauth).
|
||||
When a user visits an oauth-authenticated service,
|
||||
completion of authentication results in issuing an oauth token.
|
||||
|
||||
|
@@ -264,7 +264,7 @@ Share codes are much like shares, except:
|
||||
To create a share code:
|
||||
|
||||
```{parsed-literal}
|
||||
[POST /api/share-code/:username/:servername](rest-api-post-share-code)
|
||||
[POST /api/share-codes/:username/:servername](rest-api-post-share-code)
|
||||
```
|
||||
|
||||
where the body should include the scopes to be granted and expiration.
|
||||
@@ -286,6 +286,7 @@ The response contains the code itself:
|
||||
{
|
||||
"code": "abc1234....",
|
||||
"accept_url": "/hub/accept-share?code=abc1234",
|
||||
"full_accept_url": "https://hub.example.org/hub/accept-share?code=abc1234",
|
||||
"id": "sc_1234",
|
||||
"scopes": [...],
|
||||
...
|
||||
|
@@ -4,7 +4,7 @@
|
||||
|
||||
This document describes how JupyterHub routes requests.
|
||||
|
||||
This does not include the [REST API](using-jupyterhub-rest-api) URLs.
|
||||
This does not include the [REST API](howto:rest-api) URLs.
|
||||
|
||||
In general, all URLs can be prefixed with `c.JupyterHub.base_url` to
|
||||
run the whole JupyterHub application on a prefix.
|
||||
@@ -240,7 +240,7 @@ and the page will show a link back to `/hub/spawn/...`.
|
||||
|
||||
On this page, users can manage their JupyterHub API tokens.
|
||||
They can revoke access and request new tokens for writing scripts
|
||||
against the [JupyterHub REST API](using-jupyterhub-rest-api).
|
||||
against the [JupyterHub REST API](howto:rest-api).
|
||||
|
||||
## `/hub/admin`
|
||||
|
||||
|
@@ -93,6 +93,25 @@ A set of initial admin users, `admin_users` can be configured as follows:
|
||||
c.Authenticator.admin_users = {'mal', 'zoe'}
|
||||
```
|
||||
|
||||
:::{warning}
|
||||
`admin_users` config can only be used to _grant_ admin permissions.
|
||||
Removing users from this set **does not** remove their admin permissions,
|
||||
which must be done via the admin page or API.
|
||||
|
||||
Role assignments via `load_roles` are the only way to _revoke_ past permissions from configuration:
|
||||
|
||||
```python
|
||||
c.JupyterHub.load_roles = [
|
||||
{
|
||||
"name": "admin",
|
||||
"users": ["admin1", "..."],
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
or, better yet, [specify your own roles](define-role-target) with only the permissions your admins actually need.
|
||||
:::
|
||||
|
||||
Users in the admin set are automatically added to the user `allowed_users` set,
|
||||
if they are not already present.
|
||||
|
||||
|
@@ -99,4 +99,4 @@ maintenance, re-configuration, etc.), then user connections are not
|
||||
interrupted. For simplicity, by default the hub starts the proxy
|
||||
automatically, so if the hub restarts, the proxy restarts, and user
|
||||
connections are interrupted. It is easy to run the proxy separately,
|
||||
for information see [the separate proxy page](separate-proxy).
|
||||
for information see [the separate proxy page](howto:separate-proxy).
|
||||
|
@@ -43,7 +43,7 @@ is important that these files be put in a secure location on your server, where
|
||||
they are not readable by regular users.
|
||||
|
||||
If you are using a **chain certificate**, see also chained certificate for SSL
|
||||
in the JupyterHub [Troubleshooting FAQ](troubleshooting).
|
||||
in the JupyterHub [Troubleshooting FAQ](faq:troubleshooting).
|
||||
|
||||
### Using letsencrypt
|
||||
|
||||
@@ -68,7 +68,7 @@ c.JupyterHub.ssl_cert = '/etc/letsencrypt/live/example.com/fullchain.pem'
|
||||
### If SSL termination happens outside of the Hub
|
||||
|
||||
In certain cases, for example, if the hub is running behind a reverse proxy, and
|
||||
[SSL termination is being provided by NGINX](https://www.nginx.com/resources/admin-guide/nginx-ssl-termination/),
|
||||
[SSL termination is being provided by NGINX](https://docs.nginx.com/nginx/admin-guide/security-controls/terminating-ssl-http/),
|
||||
it is reasonable to run the hub without SSL.
|
||||
|
||||
To achieve this, remove `c.JupyterHub.ssl_key` and `c.JupyterHub.ssl_cert`
|
||||
|
@@ -1,3 +1,5 @@
|
||||
(tutorial:services)=
|
||||
|
||||
# External services
|
||||
|
||||
When working with JupyterHub, a **Service** is defined as a process
|
||||
|
@@ -1,7 +1,7 @@
|
||||
# Starting servers with the JupyterHub API
|
||||
|
||||
Sometimes, when working with applications such as [BinderHub](https://binderhub.readthedocs.io), it may be necessary to launch Jupyter-based services on behalf of your users.
|
||||
Doing so can be achieved through JupyterHub's [REST API](using-jupyterhub-rest-api), which allows one to launch and manage servers on behalf of users through API calls instead of the JupyterHub UI.
|
||||
Doing so can be achieved through JupyterHub's [REST API](howto:rest-api), which allows one to launch and manage servers on behalf of users through API calls instead of the JupyterHub UI.
|
||||
This way, you can take advantage of other user/launch/lifecycle patterns that are not natively supported by the JupyterHub UI, all without the need to develop the server management features of JupyterHub Spawners and/or Authenticators.
|
||||
|
||||
This tutorial goes through working with the JupyterHub API to manage servers for users.
|
||||
|
@@ -159,11 +159,14 @@ which will have a JSON response:
|
||||
'last_exchanged_at': None,
|
||||
'code': 'U-eYLFT1lGstEqfMHpAIvTZ1MRjZ1Y1a-loGQ0K86to',
|
||||
'accept_url': '/hub/accept-share?code=U-eYLFT1lGstEqfMHpAIvTZ1MRjZ1Y1a-loGQ0K86to',
|
||||
'full_accept_url': 'https://hub.example.org/accept-share?code=U-eYLFT1lGstEqfMHpAIvTZ1MRjZ1Y1a-loGQ0K86to',
|
||||
}
|
||||
```
|
||||
|
||||
The most relevant fields here are `code`, which contains the code itself, and `accept_url`, which is the URL path for the page another user.
|
||||
Note: it does not contain the _hostname_ of the hub, which JupyterHub often does not know.
|
||||
If `public_url` configuration is defined, `full_accept_url` will be the full URL including the host.
|
||||
Otherwise, it will be null.
|
||||
|
||||
Share codes are guaranteed to be url-safe, so no encoding is required.
|
||||
|
||||
|
23
jsx/package-lock.json
generated
23
jsx/package-lock.json
generated
@@ -3687,11 +3687,12 @@
|
||||
}
|
||||
},
|
||||
"node_modules/braces": {
|
||||
"version": "3.0.2",
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
|
||||
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"fill-range": "^7.0.1"
|
||||
"fill-range": "^7.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
@@ -5268,9 +5269,10 @@
|
||||
}
|
||||
},
|
||||
"node_modules/fill-range": {
|
||||
"version": "7.0.1",
|
||||
"version": "7.1.1",
|
||||
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
|
||||
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"to-regex-range": "^5.0.1"
|
||||
},
|
||||
@@ -6230,8 +6232,9 @@
|
||||
},
|
||||
"node_modules/is-number": {
|
||||
"version": "7.0.0",
|
||||
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
|
||||
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.12.0"
|
||||
}
|
||||
@@ -9402,8 +9405,9 @@
|
||||
},
|
||||
"node_modules/to-regex-range": {
|
||||
"version": "5.0.1",
|
||||
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
|
||||
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"is-number": "^7.0.0"
|
||||
},
|
||||
@@ -10139,9 +10143,10 @@
|
||||
}
|
||||
},
|
||||
"node_modules/ws": {
|
||||
"version": "8.13.0",
|
||||
"version": "8.17.1",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz",
|
||||
"integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=10.0.0"
|
||||
},
|
||||
|
@@ -14,8 +14,7 @@ const Groups = (props) => {
|
||||
const dispatch = useDispatch();
|
||||
const navigate = useNavigate();
|
||||
|
||||
const { setOffset, offset, handleLimit, limit, setPagination } =
|
||||
usePaginationParams();
|
||||
const { offset, handleLimit, limit, setPagination } = usePaginationParams();
|
||||
|
||||
const total = groups_page ? groups_page.total : undefined;
|
||||
|
||||
@@ -32,11 +31,22 @@ const Groups = (props) => {
|
||||
});
|
||||
};
|
||||
|
||||
// single callback to reload the page
|
||||
// uses current state, or params can be specified if state
|
||||
// should be updated _after_ load, e.g. offset
|
||||
const loadPageData = (params) => {
|
||||
params = params || {};
|
||||
return updateGroups(
|
||||
params.offset === undefined ? offset : params.offset,
|
||||
params.limit === undefined ? limit : params.limit,
|
||||
)
|
||||
.then((data) => dispatchPageUpdate(data.items, data._pagination))
|
||||
.catch((err) => setErrorAlert("Failed to update group list."));
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
updateGroups(offset, limit).then((data) =>
|
||||
dispatchPageUpdate(data.items, data._pagination),
|
||||
);
|
||||
}, [offset, limit]);
|
||||
loadPageData();
|
||||
}, [limit]);
|
||||
|
||||
if (!groups_data || !groups_page) {
|
||||
return <div data-testid="no-show"></div>;
|
||||
@@ -72,8 +82,10 @@ const Groups = (props) => {
|
||||
limit={limit}
|
||||
visible={groups_data.length}
|
||||
total={total}
|
||||
next={() => setOffset(offset + limit)}
|
||||
prev={() => setOffset(offset - limit)}
|
||||
next={() => loadPageData({ offset: offset + limit })}
|
||||
prev={() =>
|
||||
loadPageData({ offset: limit > offset ? 0 : offset - limit })
|
||||
}
|
||||
handleLimit={handleLimit}
|
||||
/>
|
||||
</Card.Body>
|
||||
|
@@ -112,8 +112,8 @@ test("Renders nothing if required data is not available", async () => {
|
||||
expect(noShow).toBeVisible();
|
||||
});
|
||||
|
||||
test("Interacting with PaginationFooter causes state update and refresh via useEffect call", async () => {
|
||||
let upgradeGroupsSpy = mockAsync();
|
||||
test("Interacting with PaginationFooter causes page refresh", async () => {
|
||||
let updateGroupsSpy = mockAsync();
|
||||
let setSearchParamsSpy = mockAsync();
|
||||
let searchParams = new URLSearchParams({ limit: "2" });
|
||||
useSearchParams.mockImplementation(() => [
|
||||
@@ -125,11 +125,11 @@ test("Interacting with PaginationFooter causes state update and refresh via useE
|
||||
]);
|
||||
let _, setSearchParams;
|
||||
await act(async () => {
|
||||
render(groupsJsx(upgradeGroupsSpy));
|
||||
render(groupsJsx(updateGroupsSpy));
|
||||
[_, setSearchParams] = useSearchParams();
|
||||
});
|
||||
|
||||
expect(upgradeGroupsSpy).toBeCalledWith(0, 2);
|
||||
expect(updateGroupsSpy).toBeCalledWith(0, 2);
|
||||
|
||||
var lastState =
|
||||
mockReducers.mock.results[mockReducers.mock.results.length - 1].value;
|
||||
@@ -140,9 +140,7 @@ test("Interacting with PaginationFooter causes state update and refresh via useE
|
||||
await act(async () => {
|
||||
fireEvent.click(next);
|
||||
});
|
||||
expect(setSearchParamsSpy).toBeCalledWith("limit=2&offset=2");
|
||||
|
||||
// FIXME: mocked useSelector, state seem to prevent updateGroups from being called
|
||||
// making the test environment not representative
|
||||
// expect(callbackSpy).toHaveBeenCalledWith(2, 2);
|
||||
expect(updateGroupsSpy).toBeCalledWith(2, 2);
|
||||
// mocked updateGroups means callback after load doesn't fire
|
||||
// expect(setSearchParamsSpy).toBeCalledWith("limit=2&offset=2");
|
||||
});
|
||||
|
@@ -41,7 +41,7 @@ const ServerDashboard = (props) => {
|
||||
let user_data = useSelector((state) => state.user_data);
|
||||
const user_page = useSelector((state) => state.user_page);
|
||||
|
||||
const { setOffset, offset, setLimit, handleLimit, limit, setPagination } =
|
||||
const { offset, setLimit, handleLimit, limit, setPagination } =
|
||||
usePaginationParams();
|
||||
|
||||
const name_filter = searchParams.get("name_filter") || "";
|
||||
@@ -123,26 +123,39 @@ const ServerDashboard = (props) => {
|
||||
} else {
|
||||
params.set("state", new_state_filter);
|
||||
}
|
||||
console.log("setting search params", params.toString());
|
||||
return params;
|
||||
});
|
||||
};
|
||||
|
||||
// the callback to update the displayed user list
|
||||
const updateUsersWithParams = () =>
|
||||
updateUsers({
|
||||
offset,
|
||||
const updateUsersWithParams = (params) => {
|
||||
if (params) {
|
||||
if (params.offset !== undefined && params.offset < 0) {
|
||||
params.offset = 0;
|
||||
}
|
||||
}
|
||||
return updateUsers({
|
||||
offset: offset,
|
||||
limit,
|
||||
name_filter,
|
||||
sort,
|
||||
state: state_filter,
|
||||
...params,
|
||||
});
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
updateUsersWithParams()
|
||||
// single callback to reload the page
|
||||
// uses current state, or params can be specified if state
|
||||
// should be updated _after_ load, e.g. offset
|
||||
const loadPageData = (params) => {
|
||||
return updateUsersWithParams(params)
|
||||
.then((data) => dispatchPageUpdate(data.items, data._pagination))
|
||||
.catch((err) => setErrorAlert("Failed to update user list."));
|
||||
}, [offset, limit, name_filter, sort, state_filter]);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
loadPageData();
|
||||
}, [limit, name_filter, sort, state_filter]);
|
||||
|
||||
if (!user_data || !user_page) {
|
||||
return <div data-testid="no-show"></div>;
|
||||
@@ -172,14 +185,7 @@ const ServerDashboard = (props) => {
|
||||
action(user.name, server.name)
|
||||
.then((res) => {
|
||||
if (res.status < 300) {
|
||||
updateUsersWithParams()
|
||||
.then((data) => {
|
||||
dispatchPageUpdate(data.items, data._pagination);
|
||||
})
|
||||
.catch(() => {
|
||||
setIsDisabled(false);
|
||||
setErrorAlert(`Failed to update users list.`);
|
||||
});
|
||||
loadPageData();
|
||||
} else {
|
||||
setErrorAlert(`Failed to ${name.toLowerCase()}.`);
|
||||
setIsDisabled(false);
|
||||
@@ -519,13 +525,7 @@ const ServerDashboard = (props) => {
|
||||
return res;
|
||||
})
|
||||
.then((res) => {
|
||||
updateUsersWithParams()
|
||||
.then((data) => {
|
||||
dispatchPageUpdate(data.items, data._pagination);
|
||||
})
|
||||
.catch(() =>
|
||||
setErrorAlert(`Failed to update users list.`),
|
||||
);
|
||||
loadPageData();
|
||||
return res;
|
||||
})
|
||||
.catch(() => setErrorAlert(`Failed to start servers.`));
|
||||
@@ -556,13 +556,7 @@ const ServerDashboard = (props) => {
|
||||
return res;
|
||||
})
|
||||
.then((res) => {
|
||||
updateUsersWithParams()
|
||||
.then((data) => {
|
||||
dispatchPageUpdate(data.items, data._pagination);
|
||||
})
|
||||
.catch(() =>
|
||||
setErrorAlert(`Failed to update users list.`),
|
||||
);
|
||||
loadPageData();
|
||||
return res;
|
||||
})
|
||||
.catch(() => setErrorAlert(`Failed to stop servers.`));
|
||||
@@ -590,8 +584,13 @@ const ServerDashboard = (props) => {
|
||||
limit={limit}
|
||||
visible={user_data.length}
|
||||
total={total}
|
||||
next={() => setOffset(offset + limit)}
|
||||
prev={() => setOffset(offset - limit)}
|
||||
// don't trigger via setOffset state change,
|
||||
// which can cause infinite cycles.
|
||||
// offset state will be set upon reply via setPagination
|
||||
next={() => loadPageData({ offset: offset + limit })}
|
||||
prev={() =>
|
||||
loadPageData({ offset: limit > offset ? 0 : offset - limit })
|
||||
}
|
||||
handleLimit={handleLimit}
|
||||
/>
|
||||
<br></br>
|
||||
|
@@ -608,7 +608,7 @@ test("Search for user calls updateUsers with name filter", async () => {
|
||||
// expect(mockUpdateUsers).toBeCalledWith(0, 100, "ab");
|
||||
});
|
||||
|
||||
test("Interacting with PaginationFooter causes state update and refresh via useEffect call", async () => {
|
||||
test("Interacting with PaginationFooter requests page update", async () => {
|
||||
await act(async () => {
|
||||
render(serverDashboardJsx());
|
||||
});
|
||||
@@ -625,14 +625,10 @@ test("Interacting with PaginationFooter causes state update and refresh via useE
|
||||
jest.runAllTimers();
|
||||
});
|
||||
|
||||
expect(searchParams.get("offset")).toEqual("2");
|
||||
expect(searchParams.get("limit")).toEqual("2");
|
||||
|
||||
// FIXME: should call updateUsers, does in reality.
|
||||
// tests don't reflect reality due to mocked state/useSelector
|
||||
// unclear how to fix this.
|
||||
// expect(callbackSpy.mock.calls).toHaveLength(2);
|
||||
// expect(callbackSpy).toHaveBeenCalledWith(2, 2, "");
|
||||
expect(mockUpdateUsers).toBeCalledWith({
|
||||
...defaultUpdateUsersParams,
|
||||
offset: 2,
|
||||
});
|
||||
});
|
||||
|
||||
test("Server delete button exists for named servers", async () => {
|
||||
|
@@ -3,7 +3,7 @@
|
||||
# Copyright (c) Jupyter Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
# version_info updated by running `tbump`
|
||||
version_info = (5, 0, 0, "b2", "")
|
||||
version_info = (5, 1, 0, "", "")
|
||||
|
||||
# pep 440 version: no dot before beta/rc, but before .dev
|
||||
# 0.1.0rc1
|
||||
|
@@ -5,6 +5,7 @@
|
||||
import json
|
||||
import re
|
||||
from typing import List, Optional
|
||||
from urllib.parse import urlunparse
|
||||
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
@@ -80,7 +81,7 @@ class _ShareAPIHandler(APIHandler):
|
||||
"""Truncated server model for use in shares
|
||||
|
||||
- Adds "user" field (just name for now)
|
||||
- Limits fields to "name", "url", "ready"
|
||||
- Limits fields to "name", "url", "full_url", "ready"
|
||||
from standard server model
|
||||
"""
|
||||
user = self.users[spawner.user.id]
|
||||
@@ -95,7 +96,7 @@ class _ShareAPIHandler(APIHandler):
|
||||
}
|
||||
}
|
||||
# subset keys for sharing
|
||||
for key in ["name", "url", "ready"]:
|
||||
for key in ["name", "url", "full_url", "ready"]:
|
||||
if key in full_model:
|
||||
server_model[key] = full_model[key]
|
||||
|
||||
@@ -128,6 +129,12 @@ class _ShareAPIHandler(APIHandler):
|
||||
model["accept_url"] = url_concat(
|
||||
self.hub.base_url + "accept-share", {"code": code}
|
||||
)
|
||||
model["full_accept_url"] = None
|
||||
public_url = self.settings.get("public_url")
|
||||
if public_url:
|
||||
model["full_accept_url"] = urlunparse(
|
||||
public_url._replace(path=model["accept_url"])
|
||||
)
|
||||
return model
|
||||
|
||||
def _init_share_query(self, kind="share"):
|
||||
|
@@ -222,7 +222,7 @@ class UserListAPIHandler(APIHandler):
|
||||
data = user_list
|
||||
|
||||
self.write(json.dumps(data))
|
||||
# if testing with raiselaod above, need expire_all to avoid affecting other operations
|
||||
# if testing with raiseload above, need expire_all to avoid affecting other operations
|
||||
# self.db.expire_all()
|
||||
|
||||
@needs_scope('admin:users')
|
||||
@@ -236,6 +236,8 @@ class UserListAPIHandler(APIHandler):
|
||||
# admin is set for all users
|
||||
# to create admin and non-admin users requires at least two API requests
|
||||
admin = data.get('admin', False)
|
||||
if admin and not self.current_user.admin:
|
||||
raise web.HTTPError(403, "Only admins can grant admin permissions")
|
||||
|
||||
to_create = []
|
||||
invalid_names = []
|
||||
@@ -309,12 +311,16 @@ class UserAPIHandler(APIHandler):
|
||||
if user is not None:
|
||||
raise web.HTTPError(409, f"User {user_name} already exists")
|
||||
|
||||
user = self.user_from_username(user_name)
|
||||
if data:
|
||||
self._check_user_model(data)
|
||||
if 'admin' in data:
|
||||
user.admin = data['admin']
|
||||
assign_default_roles(self.db, entity=user)
|
||||
if data.get('admin', False) and not self.current_user.admin:
|
||||
raise web.HTTPError(403, "Only admins can grant admin permissions")
|
||||
|
||||
# create the user
|
||||
user = self.user_from_username(user_name)
|
||||
if data and data.get('admin', False):
|
||||
user.admin = data['admin']
|
||||
assign_default_roles(self.db, entity=user)
|
||||
self.db.commit()
|
||||
|
||||
try:
|
||||
@@ -373,7 +379,17 @@ class UserAPIHandler(APIHandler):
|
||||
data['name']
|
||||
),
|
||||
)
|
||||
|
||||
if not self.current_user.admin:
|
||||
if user.admin:
|
||||
raise web.HTTPError(403, "Only admins can modify other admins")
|
||||
if 'admin' in data and data['admin']:
|
||||
raise web.HTTPError(403, "Only admins can grant admin permissions")
|
||||
for key, value in data.items():
|
||||
value_s = "..." if key == "auth_state" else repr(value)
|
||||
self.log.info(
|
||||
f"{self.current_user.name} setting {key}={value_s} for {user.name}"
|
||||
)
|
||||
if key == 'auth_state':
|
||||
await user.save_auth_state(value)
|
||||
else:
|
||||
@@ -489,10 +505,29 @@ class UserTokenListAPIHandler(APIHandler):
|
||||
400, f"token {key} must be null or a list of strings, not {value!r}"
|
||||
)
|
||||
|
||||
expires_in = body.get('expires_in', None)
|
||||
if not (expires_in is None or isinstance(expires_in, int)):
|
||||
raise web.HTTPError(
|
||||
400,
|
||||
f"token expires_in must be null or integer, not {expires_in!r}",
|
||||
)
|
||||
expires_in_max = self.settings["token_expires_in_max_seconds"]
|
||||
if expires_in_max:
|
||||
# validate expires_in against limit
|
||||
if expires_in is None:
|
||||
# expiration unspecified, use max value
|
||||
# (default before max limit was introduced was 'never', this is closest equivalent)
|
||||
expires_in = expires_in_max
|
||||
elif expires_in > expires_in_max:
|
||||
raise web.HTTPError(
|
||||
400,
|
||||
f"token expires_in: {expires_in} must not exceed {expires_in_max}",
|
||||
)
|
||||
|
||||
try:
|
||||
api_token = user.new_api_token(
|
||||
note=note,
|
||||
expires_in=body.get('expires_in', None),
|
||||
expires_in=expires_in,
|
||||
roles=token_roles,
|
||||
scopes=token_scopes,
|
||||
)
|
||||
@@ -675,14 +710,22 @@ class UserServerAPIHandler(APIHandler):
|
||||
asyncio.ensure_future(_remove_spawner(spawner._stop_future))
|
||||
return
|
||||
|
||||
if spawner.pending:
|
||||
raise web.HTTPError(
|
||||
400,
|
||||
f"{spawner._log_name} is pending {spawner.pending}, please wait",
|
||||
)
|
||||
|
||||
stop_future = None
|
||||
if spawner.ready:
|
||||
if spawner.pending:
|
||||
# we are interrupting a pending start
|
||||
# hopefully nothing gets leftover
|
||||
self.log.warning(
|
||||
f"Interrupting spawner {spawner._log_name}, pending {spawner.pending}"
|
||||
)
|
||||
spawn_future = spawner._spawn_future
|
||||
if spawn_future:
|
||||
spawn_future.cancel()
|
||||
# Give cancel a chance to resolve?
|
||||
# not sure what we would wait for here,
|
||||
await asyncio.sleep(1)
|
||||
stop_future = await self.stop_single_user(user, server_name)
|
||||
|
||||
elif spawner.ready:
|
||||
# include notify, so that a server that died is noticed immediately
|
||||
status = await spawner.poll_and_notify()
|
||||
if status is None:
|
||||
@@ -818,7 +861,9 @@ class SpawnProgressAPIHandler(APIHandler):
|
||||
# not pending, no progress to fetch
|
||||
# check if spawner has just failed
|
||||
f = spawn_future
|
||||
if f and f.done() and f.exception():
|
||||
if f and f.cancelled():
|
||||
failed_event['message'] = "Spawn cancelled"
|
||||
elif f and f.done() and f.exception():
|
||||
exc = f.exception()
|
||||
message = getattr(exc, "jupyterhub_message", str(exc))
|
||||
failed_event['message'] = f"Spawn failed: {message}"
|
||||
@@ -857,7 +902,9 @@ class SpawnProgressAPIHandler(APIHandler):
|
||||
else:
|
||||
# what happened? Maybe spawn failed?
|
||||
f = spawn_future
|
||||
if f and f.done() and f.exception():
|
||||
if f and f.cancelled():
|
||||
failed_event['message'] = "Spawn cancelled"
|
||||
elif f and f.done() and f.exception():
|
||||
exc = f.exception()
|
||||
message = getattr(exc, "jupyterhub_message", str(exc))
|
||||
failed_event['message'] = f"Spawn failed: {message}"
|
||||
|
@@ -464,6 +464,26 @@ class JupyterHub(Application):
|
||||
# convert cookie max age days to seconds
|
||||
return int(self.cookie_max_age_days * 24 * 3600)
|
||||
|
||||
token_expires_in_max_seconds = Integer(
|
||||
0,
|
||||
config=True,
|
||||
help="""
|
||||
Set the maximum expiration (in seconds) of tokens created via the API.
|
||||
|
||||
Set to any positive value to disallow creation of tokens with no expiration.
|
||||
|
||||
0 (default) = no limit.
|
||||
|
||||
Does not affect:
|
||||
|
||||
- Server API tokens ($JUPYTERHUB_API_TOKEN is tied to lifetime of the server)
|
||||
- Tokens issued during oauth (use `oauth_token_expires_in`)
|
||||
- Tokens created via the API before configuring this limit
|
||||
|
||||
.. versionadded:: 5.1
|
||||
""",
|
||||
)
|
||||
|
||||
redirect_to_server = Bool(
|
||||
True, help="Redirect user to server (if running), instead of control panel."
|
||||
).tag(config=True)
|
||||
@@ -3192,6 +3212,7 @@ class JupyterHub(Application):
|
||||
static_path=os.path.join(self.data_files_path, 'static'),
|
||||
static_url_prefix=url_path_join(self.hub.base_url, 'static/'),
|
||||
static_handler_class=CacheControlStaticFilesHandler,
|
||||
token_expires_in_max_seconds=self.token_expires_in_max_seconds,
|
||||
subdomain_hook=self.subdomain_hook,
|
||||
template_path=self.template_paths,
|
||||
template_vars=self.template_vars,
|
||||
|
@@ -102,18 +102,37 @@ class Authenticator(LoggingConfigurable):
|
||||
|
||||
admin_users = Set(
|
||||
help="""
|
||||
Set of users that will have admin rights on this JupyterHub.
|
||||
Set of users that will be granted admin rights on this JupyterHub.
|
||||
|
||||
Note: As of JupyterHub 2.0,
|
||||
full admin rights should not be required,
|
||||
and more precise permissions can be managed via roles.
|
||||
Note:
|
||||
|
||||
Admin users have extra privileges:
|
||||
- Use the admin panel to see list of users logged in
|
||||
- Add / remove users in some authenticators
|
||||
- Restart / halt the hub
|
||||
- Start / stop users' single-user servers
|
||||
- Can access each individual users' single-user server (if configured)
|
||||
As of JupyterHub 2.0,
|
||||
full admin rights should not be required,
|
||||
and more precise permissions can be managed via roles.
|
||||
|
||||
Caution:
|
||||
|
||||
Adding users to `admin_users` can only *grant* admin rights,
|
||||
removing a username from the admin_users set **DOES NOT** remove admin rights previously granted.
|
||||
|
||||
For an authoritative, restricted set of admins,
|
||||
assign explicit membership of the `admin` *role*::
|
||||
|
||||
c.JupyterHub.load_roles = [
|
||||
{
|
||||
"name": "admin",
|
||||
"users": ["admin1", "..."],
|
||||
}
|
||||
]
|
||||
|
||||
Admin users can take every possible action on behalf of all users,
|
||||
for example:
|
||||
|
||||
- Use the admin panel to see list of users logged in
|
||||
- Add / remove users in some authenticators
|
||||
- Restart / halt the hub
|
||||
- Start / stop users' single-user servers
|
||||
- Can access each individual users' single-user server
|
||||
|
||||
Admin access should be treated the same way root access is.
|
||||
|
||||
|
@@ -542,11 +542,50 @@ class TokenPageHandler(BaseHandler):
|
||||
oauth_clients = sorted(oauth_clients, key=sort_key, reverse=True)
|
||||
|
||||
auth_state = await self.current_user.get_auth_state()
|
||||
expires_in_max = self.settings["token_expires_in_max_seconds"]
|
||||
options = [
|
||||
(3600, "1 Hour"),
|
||||
(86400, "1 Day"),
|
||||
(7 * 86400, "1 Week"),
|
||||
(30 * 86400, "1 Month"),
|
||||
(365 * 86400, "1 Year"),
|
||||
]
|
||||
if expires_in_max:
|
||||
# omit items that exceed the limit
|
||||
options = [
|
||||
(seconds, label)
|
||||
for (seconds, label) in options
|
||||
if seconds <= expires_in_max
|
||||
]
|
||||
if expires_in_max not in (seconds for (seconds, label) in options):
|
||||
# max not exactly in list, add it
|
||||
# this also ensures options_list is never empty
|
||||
max_hours = expires_in_max / 3600
|
||||
max_days = max_hours / 24
|
||||
if max_days < 3:
|
||||
max_label = f"{max_hours:.0f} hours"
|
||||
else:
|
||||
# this could be a lot of days, but no need to get fancy
|
||||
max_label = f"{max_days:.0f} days"
|
||||
options.append(("", f"Max ({max_label})"))
|
||||
else:
|
||||
options.append(("", "Never"))
|
||||
|
||||
options_html_elements = [
|
||||
f'<option value="{value}">{label}</option>' for value, label in options
|
||||
]
|
||||
# make the last item selected
|
||||
options_html_elements[-1] = options_html_elements[-1].replace(
|
||||
"<option ", '<option selected="selected"'
|
||||
)
|
||||
expires_in_options_html = "\n".join(options_html_elements)
|
||||
html = await self.render_template(
|
||||
'token.html',
|
||||
api_tokens=api_tokens,
|
||||
oauth_clients=oauth_clients,
|
||||
auth_state=auth_state,
|
||||
token_expires_in_options_html=expires_in_options_html,
|
||||
token_expires_in_max_seconds=expires_in_max,
|
||||
)
|
||||
self.finish(html)
|
||||
|
||||
|
@@ -22,6 +22,7 @@ them manually here.
|
||||
added ``jupyterhub_`` prefix to metric names.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import time
|
||||
from datetime import timedelta
|
||||
@@ -236,17 +237,17 @@ EVENT_LOOP_INTERVAL_SECONDS = Histogram(
|
||||
'event_loop_interval_seconds',
|
||||
'Distribution of measured event loop intervals',
|
||||
namespace=metrics_prefix,
|
||||
# Increase resolution to 5ms below 50ms
|
||||
# don't measure below 50ms, our default
|
||||
# Increase resolution to 5ms below 75ms
|
||||
# because this is where we are most sensitive.
|
||||
# No need to have buckets below 25, since we only measure every 20ms.
|
||||
# No need to have buckets below 50, since we only measure every 50ms.
|
||||
buckets=[
|
||||
# 5ms from 25-50ms
|
||||
25e-3,
|
||||
30e-3,
|
||||
35e-3,
|
||||
40e-3,
|
||||
45e-3,
|
||||
# 5ms from 50-75ms
|
||||
50e-3,
|
||||
55e-3,
|
||||
60e-3,
|
||||
65e-3,
|
||||
70e-3,
|
||||
# from here, default prometheus buckets
|
||||
75e-3,
|
||||
0.1,
|
||||
@@ -323,19 +324,20 @@ class PeriodicMetricsCollector(LoggingConfigurable):
|
||||
""",
|
||||
)
|
||||
event_loop_interval_resolution = Float(
|
||||
0.02,
|
||||
0.05,
|
||||
config=True,
|
||||
help="""
|
||||
Interval (in seconds) on which to measure the event loop interval.
|
||||
|
||||
This is the _sensitivity_ of the event_loop_interval metric.
|
||||
This is the _sensitivity_ of the `event_loop_interval` metric.
|
||||
Setting it too low (e.g. below 20ms) can end up slowing down the whole event loop
|
||||
by measuring too often,
|
||||
while setting it too high (e.g. above a few seconds) may limit its resolution and usefulness.
|
||||
The Prometheus Histogram populated by this metric
|
||||
doesn't resolve differences below 25ms,
|
||||
so setting this below ~20ms won't result in increased resolution of the histogram metric,
|
||||
except for the average value, computed by:
|
||||
except for the average value, computed by::
|
||||
|
||||
event_loop_interval_seconds_sum / event_loop_interval_seconds_count
|
||||
""",
|
||||
)
|
||||
@@ -346,7 +348,7 @@ class PeriodicMetricsCollector(LoggingConfigurable):
|
||||
)
|
||||
|
||||
# internal state
|
||||
_last_tick = Float()
|
||||
_tasks = Dict()
|
||||
_periodic_callbacks = Dict()
|
||||
|
||||
db = Any(help="SQLAlchemy db session to use for performing queries")
|
||||
@@ -371,18 +373,39 @@ class PeriodicMetricsCollector(LoggingConfigurable):
|
||||
self.log.info(f'Found {value} active users in the last {period}')
|
||||
ACTIVE_USERS.labels(period=period.value).set(value)
|
||||
|
||||
def _event_loop_tick(self):
|
||||
"""Measure a single tick of the event loop
|
||||
async def _measure_event_loop_interval(self):
|
||||
"""Measure the event loop responsiveness
|
||||
|
||||
This measures the time since the last tick
|
||||
A single long-running coroutine because PeriodicCallback is too expensive
|
||||
to measure small intervals.
|
||||
"""
|
||||
now = time.perf_counter()
|
||||
tick_duration = now - self._last_tick
|
||||
self._last_tick = now
|
||||
EVENT_LOOP_INTERVAL_SECONDS.observe(tick_duration)
|
||||
if tick_duration >= self.event_loop_interval_log_threshold:
|
||||
# warn about slow ticks
|
||||
self.log.warning("Event loop was unresponsive for %.2fs!", tick_duration)
|
||||
tick = time.perf_counter
|
||||
|
||||
last_tick = tick()
|
||||
resolution = self.event_loop_interval_resolution
|
||||
lower_bound = 2 * resolution
|
||||
# This loop runs _very_ often, so try to keep it efficient.
|
||||
# Even excess comparisons and assignments have a measurable effect on overall cpu usage.
|
||||
while True:
|
||||
await asyncio.sleep(resolution)
|
||||
now = tick()
|
||||
# measure the _difference_ between the sleep time and the measured time
|
||||
# the event loop blocked for somewhere in the range [delay, delay + resolution]
|
||||
tick_duration = now - last_tick
|
||||
last_tick = now
|
||||
if tick_duration < lower_bound:
|
||||
# don't report numbers less than measurement resolution,
|
||||
# we don't really have that information
|
||||
delay = resolution
|
||||
else:
|
||||
delay = tick_duration - resolution
|
||||
if delay >= self.event_loop_interval_log_threshold:
|
||||
# warn about slow ticks
|
||||
self.log.warning(
|
||||
"Event loop was unresponsive for at least %.2fs!", delay
|
||||
)
|
||||
|
||||
EVENT_LOOP_INTERVAL_SECONDS.observe(delay)
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
@@ -400,12 +423,8 @@ class PeriodicMetricsCollector(LoggingConfigurable):
|
||||
self.update_active_users()
|
||||
|
||||
if self.event_loop_interval_enabled:
|
||||
now = time.perf_counter()
|
||||
self._last_tick = self._last_tick_collect = now
|
||||
self._tick_durations = []
|
||||
self._periodic_callbacks["event_loop_tick"] = PeriodicCallback(
|
||||
self._event_loop_tick,
|
||||
self.event_loop_interval_resolution * 1000,
|
||||
self._tasks["event_loop_tick"] = asyncio.create_task(
|
||||
self._measure_event_loop_interval()
|
||||
)
|
||||
|
||||
# start callbacks
|
||||
@@ -418,3 +437,5 @@ class PeriodicMetricsCollector(LoggingConfigurable):
|
||||
"""
|
||||
for pc in self._periodic_callbacks.values():
|
||||
pc.stop()
|
||||
for task in self._tasks.values():
|
||||
task.cancel()
|
||||
|
@@ -48,7 +48,7 @@ scope_definitions = {
|
||||
'doc_description': 'Access the admin page. Permission to take actions via the admin page granted separately.',
|
||||
},
|
||||
'admin:users': {
|
||||
'description': 'Read, write, create and delete users and their authentication state, not including their servers or tokens.',
|
||||
'description': 'Read, modify, create, and delete users and their authentication state, not including their servers or tokens. This is an extremely privileged scope and should be considered tantamount to superuser.',
|
||||
'subscopes': ['admin:auth_state', 'users', 'read:roles:users', 'delete:users'],
|
||||
},
|
||||
'admin:auth_state': {'description': 'Read a user’s authentication state.'},
|
||||
@@ -64,7 +64,7 @@ scope_definitions = {
|
||||
'subscopes': ['read:users:name'],
|
||||
},
|
||||
'read:users': {
|
||||
'description': 'Read user models (excluding including servers, tokens and authentication state).',
|
||||
'description': 'Read user models (including servers, tokens and authentication state).',
|
||||
'subscopes': [
|
||||
'read:users:name',
|
||||
'read:users:groups',
|
||||
@@ -109,7 +109,7 @@ scope_definitions = {
|
||||
'subscopes': ['groups', 'read:roles:groups', 'delete:groups'],
|
||||
},
|
||||
'groups': {
|
||||
'description': 'Read and write group information, including adding/removing users to/from groups.',
|
||||
'description': 'Read and write group information, including adding/removing any users to/from groups. Note: adding users to groups may affect permissions.',
|
||||
'subscopes': ['read:groups', 'list:groups'],
|
||||
},
|
||||
'list:groups': {
|
||||
|
@@ -412,9 +412,12 @@ class JupyterHubSingleUser(ExtensionApp):
|
||||
return
|
||||
|
||||
last_activity_timestamp = isoformat(last_activity)
|
||||
failure_count = 0
|
||||
|
||||
async def notify():
|
||||
nonlocal failure_count
|
||||
self.log.debug("Notifying Hub of activity %s", last_activity_timestamp)
|
||||
|
||||
req = HTTPRequest(
|
||||
url=self.hub_activity_url,
|
||||
method='POST',
|
||||
@@ -433,8 +436,12 @@ class JupyterHubSingleUser(ExtensionApp):
|
||||
)
|
||||
try:
|
||||
await client.fetch(req)
|
||||
except Exception:
|
||||
self.log.exception("Error notifying Hub of activity")
|
||||
except Exception as e:
|
||||
failure_count += 1
|
||||
# log traceback at debug-level
|
||||
self.log.debug("Error notifying Hub of activity", exc_info=True)
|
||||
# only one-line error visible by default
|
||||
self.log.error("Error notifying Hub of activity: %s", e)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
@@ -446,6 +453,8 @@ class JupyterHubSingleUser(ExtensionApp):
|
||||
max_wait=15,
|
||||
timeout=60,
|
||||
)
|
||||
if failure_count:
|
||||
self.log.info("Sent hub activity after %s retries", failure_count)
|
||||
self._last_activity_sent = last_activity
|
||||
|
||||
async def keep_activity_updated(self):
|
||||
|
@@ -588,7 +588,7 @@ class SingleUserNotebookAppMixin(Configurable):
|
||||
self.log.warning("Enabling jupyterhub test extension")
|
||||
self.jpserver_extensions["jupyterhub.tests.extension"] = True
|
||||
|
||||
def initialize(self, argv=None):
|
||||
def initialize(self, argv=None, **kwargs):
|
||||
if self.disable_user_config:
|
||||
_disable_user_config(self)
|
||||
# disable trash by default
|
||||
@@ -605,7 +605,7 @@ class SingleUserNotebookAppMixin(Configurable):
|
||||
# jupyter-server calls it too late, notebook doesn't define it yet
|
||||
# only called in jupyter-server >= 1.9
|
||||
self.init_ioloop()
|
||||
super().initialize(argv)
|
||||
super().initialize(argv, **kwargs)
|
||||
self.patch_templates()
|
||||
|
||||
def init_ioloop(self):
|
||||
|
@@ -50,6 +50,7 @@ from .utils import (
|
||||
exponential_backoff,
|
||||
maybe_future,
|
||||
random_port,
|
||||
recursive_update,
|
||||
url_escape_path,
|
||||
url_path_join,
|
||||
)
|
||||
@@ -306,6 +307,57 @@ class Spawner(LoggingConfigurable):
|
||||
f"access:servers!user={self.user.name}",
|
||||
]
|
||||
|
||||
group_overrides = Union(
|
||||
[Callable(), Dict()],
|
||||
help="""
|
||||
Override specific traitlets based on group membership of the user.
|
||||
|
||||
This can be a dict, or a callable that returns a dict. The keys of the dict
|
||||
are *only* used for lexicographical sorting, to guarantee consistent
|
||||
ordering of the overrides. If it is a callable, it may be async, and will
|
||||
be passed one parameter - the spawner instance. It should return a dictionary.
|
||||
|
||||
The values of the dict are dicts with the following keys:
|
||||
|
||||
- `"groups"` - If the user belongs to *any* of these groups, these overrides are
|
||||
applied to their server before spawning.
|
||||
- `"spawner_override"` - a dictionary with overrides to apply to the Spawner
|
||||
settings. Each value can be either the final value to change or a callable that
|
||||
take the `Spawner` instance as parameter and returns the final value.
|
||||
If the traitlet being overriden is a *dictionary*, the dictionary
|
||||
will be *recursively updated*, rather than overriden. If you want to
|
||||
remove a key, set its value to `None`.
|
||||
|
||||
Example:
|
||||
|
||||
The following example config will:
|
||||
|
||||
1. Add the environment variable "AM_I_GROUP_ALPHA" to everyone in the "group-alpha" group
|
||||
2. Add the environment variable "AM_I_GROUP_BETA" to everyone in the "group-beta" group.
|
||||
If a user is part of both "group-beta" and "group-alpha", they will get *both* these env
|
||||
vars, due to the dictionary merging functionality.
|
||||
3. Add a higher memory limit for everyone in the "group-beta" group.
|
||||
|
||||
::
|
||||
|
||||
c.Spawner.group_overrides = {
|
||||
"01-group-alpha-env-add": {
|
||||
"groups": ["group-alpha"],
|
||||
"spawner_override": {"environment": {"AM_I_GROUP_ALPHA": "yes"}},
|
||||
},
|
||||
"02-group-beta-env-add": {
|
||||
"groups": ["group-beta"],
|
||||
"spawner_override": {"environment": {"AM_I_GROUP_BETA": "yes"}},
|
||||
},
|
||||
"03-group-beta-mem-limit": {
|
||||
"groups": ["group-beta"],
|
||||
"spawner_override": {"mem_limit": "2G"}
|
||||
}
|
||||
}
|
||||
""",
|
||||
config=True,
|
||||
)
|
||||
|
||||
handler = Any()
|
||||
|
||||
oauth_roles = Union(
|
||||
@@ -504,7 +556,7 @@ class Spawner(LoggingConfigurable):
|
||||
max=1,
|
||||
help="""
|
||||
Jitter fraction for poll_interval.
|
||||
|
||||
|
||||
Avoids alignment of poll calls for many Spawners,
|
||||
e.g. when restarting JupyterHub, which restarts all polls for running Spawners.
|
||||
|
||||
@@ -1479,6 +1531,48 @@ class Spawner(LoggingConfigurable):
|
||||
except AnyTimeoutError:
|
||||
return False
|
||||
|
||||
def _apply_overrides(self, spawner_override: dict):
|
||||
"""
|
||||
Apply set of overrides onto the current spawner instance
|
||||
|
||||
spawner_override is a dict with key being the name of the traitlet
|
||||
to override, and value is either a callable or the value for the
|
||||
traitlet. If the value is a dictionary, it is *merged* with the
|
||||
existing value (rather than replaced). Callables are called with
|
||||
one parameter - the current spawner instance.
|
||||
"""
|
||||
for k, v in spawner_override.items():
|
||||
if callable(v):
|
||||
v = v(self)
|
||||
|
||||
# If v is a dict, *merge* it with existing values, rather than completely
|
||||
# resetting it. This allows *adding* things like environment variables rather
|
||||
# than completely replacing them. If value is set to None, the key
|
||||
# will be removed
|
||||
if isinstance(v, dict) and isinstance(getattr(self, k), dict):
|
||||
recursive_update(getattr(self, k), v)
|
||||
else:
|
||||
setattr(self, k, v)
|
||||
|
||||
async def apply_group_overrides(self):
|
||||
"""
|
||||
Apply group overrides before starting a server
|
||||
"""
|
||||
user_group_names = {g.name for g in self.user.groups}
|
||||
if callable(self.group_overrides):
|
||||
group_overrides = await maybe_future(self.group_overrides(self))
|
||||
else:
|
||||
group_overrides = self.group_overrides
|
||||
for key in sorted(group_overrides):
|
||||
go = group_overrides[key]
|
||||
if user_group_names & set(go['groups']):
|
||||
# If there is *any* overlap between the groups user is in
|
||||
# and the groups for this override, apply overrides
|
||||
self.log.info(
|
||||
f"Applying group_override {key} for {self.user.name}, modifying config keys: {' '.join(go['spawner_override'].keys())}"
|
||||
)
|
||||
self._apply_overrides(go['spawner_override'])
|
||||
|
||||
|
||||
def _try_setcwd(path):
|
||||
"""Try to set CWD to path, walking up until a valid directory is found.
|
||||
|
@@ -481,6 +481,70 @@ async def open_token_page(app, browser, user):
|
||||
await expect(browser).to_have_url(re.compile(".*/hub/token"))
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"expires_in_max, expected_options",
|
||||
[
|
||||
pytest.param(
|
||||
None,
|
||||
[
|
||||
('1 Hour', '3600'),
|
||||
('1 Day', '86400'),
|
||||
('1 Week', '604800'),
|
||||
('1 Month', '2592000'),
|
||||
('1 Year', '31536000'),
|
||||
('Never', ''),
|
||||
],
|
||||
id="default",
|
||||
),
|
||||
pytest.param(
|
||||
86400,
|
||||
[
|
||||
('1 Hour', '3600'),
|
||||
('1 Day', '86400'),
|
||||
],
|
||||
id="1day",
|
||||
),
|
||||
pytest.param(
|
||||
3600 * 36,
|
||||
[
|
||||
('1 Hour', '3600'),
|
||||
('1 Day', '86400'),
|
||||
('Max (36 hours)', ''),
|
||||
],
|
||||
id="36hours",
|
||||
),
|
||||
pytest.param(
|
||||
86400 * 10,
|
||||
[
|
||||
('1 Hour', '3600'),
|
||||
('1 Day', '86400'),
|
||||
('1 Week', '604800'),
|
||||
('Max (10 days)', ''),
|
||||
],
|
||||
id="10days",
|
||||
),
|
||||
],
|
||||
)
|
||||
async def test_token_form_expires_in(
|
||||
app, browser, user_special_chars, expires_in_max, expected_options
|
||||
):
|
||||
with mock.patch.dict(
|
||||
app.tornado_settings, {"token_expires_in_max_seconds": expires_in_max}
|
||||
):
|
||||
await open_token_page(app, browser, user_special_chars.user)
|
||||
# check the list of tokens duration
|
||||
dropdown = browser.locator('#token-expiration-seconds')
|
||||
options = await dropdown.locator('option').all()
|
||||
actual_values = [
|
||||
(await option.text_content(), await option.get_attribute('value'))
|
||||
for option in options
|
||||
]
|
||||
assert actual_values == expected_options
|
||||
# get the value of the 'selected' attribute of the currently selected option
|
||||
selected_value = dropdown.locator('option[selected]')
|
||||
await expect(selected_value).to_have_text(expected_options[-1][0])
|
||||
|
||||
|
||||
async def test_token_request_form_and_panel(app, browser, user_special_chars):
|
||||
"""verify elements of the request token form"""
|
||||
|
||||
@@ -497,24 +561,6 @@ async def test_token_request_form_and_panel(app, browser, user_special_chars):
|
||||
await expect(field_note).to_be_enabled()
|
||||
await expect(field_note).to_be_empty()
|
||||
|
||||
# check the list of tokens duration
|
||||
dropdown = browser.locator('#token-expiration-seconds')
|
||||
options = await dropdown.locator('option').all()
|
||||
expected_values_in_list = {
|
||||
'1 Hour': '3600',
|
||||
'1 Day': '86400',
|
||||
'1 Week': '604800',
|
||||
'Never': '',
|
||||
}
|
||||
actual_values = {
|
||||
await option.text_content(): await option.get_attribute('value')
|
||||
for option in options
|
||||
}
|
||||
assert actual_values == expected_values_in_list
|
||||
# get the value of the 'selected' attribute of the currently selected option
|
||||
selected_value = dropdown.locator('option[selected]')
|
||||
await expect(selected_value).to_have_text("Never")
|
||||
|
||||
# check scopes field
|
||||
scopes_input = browser.get_by_label("Permissions")
|
||||
await expect(scopes_input).to_be_editable()
|
||||
|
@@ -12,6 +12,7 @@ from unittest import mock
|
||||
from urllib.parse import parse_qs, quote, urlparse
|
||||
|
||||
import pytest
|
||||
from dateutil.parser import parse as parse_date
|
||||
from pytest import fixture, mark
|
||||
from tornado.httputil import url_concat
|
||||
|
||||
@@ -774,16 +775,25 @@ async def test_add_multi_user(app):
|
||||
|
||||
@mark.user
|
||||
@mark.role
|
||||
async def test_add_multi_user_admin(app):
|
||||
@mark.parametrize("is_admin", [True, False])
|
||||
async def test_add_multi_user_admin(app, create_user_with_scopes, is_admin):
|
||||
db = app.db
|
||||
requester = create_user_with_scopes("admin:users")
|
||||
requester.admin = is_admin
|
||||
db.commit()
|
||||
names = ['c', 'd']
|
||||
r = await api_request(
|
||||
app,
|
||||
'users',
|
||||
method='post',
|
||||
data=json.dumps({'usernames': names, 'admin': True}),
|
||||
name=requester.name,
|
||||
)
|
||||
assert r.status_code == 201
|
||||
if is_admin:
|
||||
assert r.status_code == 201
|
||||
else:
|
||||
assert r.status_code == 403
|
||||
return
|
||||
reply = r.json()
|
||||
r_names = [user['name'] for user in reply]
|
||||
assert names == r_names
|
||||
@@ -821,13 +831,26 @@ async def test_add_user_duplicate(app):
|
||||
|
||||
@mark.user
|
||||
@mark.role
|
||||
async def test_add_admin(app):
|
||||
@mark.parametrize("is_admin", [True, False])
|
||||
async def test_add_admin(app, create_user_with_scopes, is_admin):
|
||||
db = app.db
|
||||
name = 'newadmin'
|
||||
user = create_user_with_scopes("admin:users")
|
||||
user.admin = is_admin
|
||||
db.commit()
|
||||
r = await api_request(
|
||||
app, 'users', name, method='post', data=json.dumps({'admin': True})
|
||||
app,
|
||||
'users',
|
||||
name,
|
||||
method='post',
|
||||
data=json.dumps({'admin': True}),
|
||||
name=user.name,
|
||||
)
|
||||
assert r.status_code == 201
|
||||
if is_admin:
|
||||
assert r.status_code == 201
|
||||
else:
|
||||
assert r.status_code == 403
|
||||
return
|
||||
user = find_user(db, name)
|
||||
assert user is not None
|
||||
assert user.name == name
|
||||
@@ -847,9 +870,14 @@ async def test_delete_user(app):
|
||||
|
||||
@mark.user
|
||||
@mark.role
|
||||
async def test_make_admin(app):
|
||||
@mark.parametrize("is_admin", [True, False])
|
||||
async def test_user_make_admin(app, create_user_with_scopes, is_admin):
|
||||
db = app.db
|
||||
name = 'admin2'
|
||||
requester = create_user_with_scopes('admin:users')
|
||||
requester.admin = is_admin
|
||||
db.commit()
|
||||
|
||||
name = new_username("make_admin")
|
||||
r = await api_request(app, 'users', name, method='post')
|
||||
assert r.status_code == 201
|
||||
user = find_user(db, name)
|
||||
@@ -860,10 +888,18 @@ async def test_make_admin(app):
|
||||
assert orm.Role.find(db, 'admin') not in user.roles
|
||||
|
||||
r = await api_request(
|
||||
app, 'users', name, method='patch', data=json.dumps({'admin': True})
|
||||
app,
|
||||
'users',
|
||||
name,
|
||||
method='patch',
|
||||
data=json.dumps({'admin': True}),
|
||||
name=requester.name,
|
||||
)
|
||||
|
||||
assert r.status_code == 200
|
||||
if is_admin:
|
||||
assert r.status_code == 200
|
||||
else:
|
||||
assert r.status_code == 403
|
||||
return
|
||||
user = find_user(db, name)
|
||||
assert user is not None
|
||||
assert user.name == name
|
||||
@@ -872,6 +908,38 @@ async def test_make_admin(app):
|
||||
assert orm.Role.find(db, 'admin') in user.roles
|
||||
|
||||
|
||||
@mark.user
|
||||
@mark.parametrize("requester_is_admin", [True, False])
|
||||
@mark.parametrize("user_is_admin", [True, False])
|
||||
async def test_user_set_name(
|
||||
app, user, create_user_with_scopes, requester_is_admin, user_is_admin
|
||||
):
|
||||
db = app.db
|
||||
requester = create_user_with_scopes('admin:users')
|
||||
requester.admin = requester_is_admin
|
||||
user.admin = user_is_admin
|
||||
db.commit()
|
||||
new_name = new_username()
|
||||
|
||||
r = await api_request(
|
||||
app,
|
||||
'users',
|
||||
user.name,
|
||||
method='patch',
|
||||
data=json.dumps({'name': new_name}),
|
||||
name=requester.name,
|
||||
)
|
||||
if requester_is_admin or not user_is_admin:
|
||||
assert r.status_code == 200
|
||||
else:
|
||||
assert r.status_code == 403
|
||||
return
|
||||
renamed = find_user(db, new_name)
|
||||
assert renamed is not None
|
||||
assert renamed.name == new_name
|
||||
assert renamed.id == user.id
|
||||
|
||||
|
||||
@mark.user
|
||||
async def test_set_auth_state(app, auth_state_enabled):
|
||||
auth_state = {'secret': 'hello'}
|
||||
@@ -1557,23 +1625,20 @@ async def test_start_stop_race(app, no_patience, slow_spawn):
|
||||
r = await api_request(app, 'users', user.name, 'server', method='post')
|
||||
assert r.status_code == 202
|
||||
assert spawner.pending == 'spawn'
|
||||
spawn_future = spawner._spawn_future
|
||||
# additional spawns while spawning shouldn't trigger a new spawn
|
||||
with mock.patch.object(spawner, 'start') as m:
|
||||
r = await api_request(app, 'users', user.name, 'server', method='post')
|
||||
assert r.status_code == 202
|
||||
assert m.call_count == 0
|
||||
|
||||
# stop while spawning is not okay
|
||||
r = await api_request(app, 'users', user.name, 'server', method='delete')
|
||||
assert r.status_code == 400
|
||||
while not spawner.ready:
|
||||
await asyncio.sleep(0.1)
|
||||
|
||||
# stop while spawning is okay now
|
||||
spawner.delay = 3
|
||||
# stop the spawner
|
||||
r = await api_request(app, 'users', user.name, 'server', method='delete')
|
||||
assert r.status_code == 202
|
||||
assert spawner.pending == 'stop'
|
||||
assert spawn_future.cancelled()
|
||||
assert spawner._spawn_future is None
|
||||
# make sure we get past deleting from the proxy
|
||||
await asyncio.sleep(1)
|
||||
# additional stops while stopping shouldn't trigger a new stop
|
||||
@@ -1726,6 +1791,46 @@ async def test_get_new_token(app, headers, status, note, expires_in):
|
||||
assert r.status_code == 404
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"expires_in_max, expires_in, expected",
|
||||
[
|
||||
(86400, None, 86400),
|
||||
(86400, 86400, 86400),
|
||||
(86400, 86401, 'error'),
|
||||
(3600, 100, 100),
|
||||
(None, None, None),
|
||||
(None, 86400, 86400),
|
||||
],
|
||||
)
|
||||
async def test_token_expires_in_max(app, user, expires_in_max, expires_in, expected):
|
||||
options = {
|
||||
"expires_in": expires_in,
|
||||
}
|
||||
# request a new token
|
||||
with mock.patch.dict(
|
||||
app.tornado_settings, {"token_expires_in_max_seconds": expires_in_max}
|
||||
):
|
||||
r = await api_request(
|
||||
app,
|
||||
f'users/{user.name}/tokens',
|
||||
method='post',
|
||||
data=json.dumps(options),
|
||||
)
|
||||
if expected == 'error':
|
||||
assert r.status_code == 400
|
||||
assert f"must not exceed {expires_in_max}" in r.json()["message"]
|
||||
return
|
||||
else:
|
||||
assert r.status_code == 201
|
||||
token_model = r.json()
|
||||
if expected is None:
|
||||
assert token_model["expires_at"] is None
|
||||
else:
|
||||
expected_expires_at = utcnow() + timedelta(seconds=expected)
|
||||
expires_at = parse_date(token_model["expires_at"])
|
||||
assert abs((expires_at - expected_expires_at).total_seconds()) < 30
|
||||
|
||||
|
||||
@mark.parametrize(
|
||||
"as_user, for_user, status",
|
||||
[
|
||||
|
@@ -23,8 +23,7 @@ from ..spawner import SimpleLocalProcessSpawner, Spawner
|
||||
from ..user import User
|
||||
from ..utils import AnyTimeoutError, maybe_future, new_token, url_path_join
|
||||
from .mocking import public_url
|
||||
from .test_api import add_user
|
||||
from .utils import async_requests
|
||||
from .utils import add_user, async_requests, find_user
|
||||
|
||||
_echo_sleep = """
|
||||
import sys, time
|
||||
@@ -598,3 +597,123 @@ def test_spawner_server(db):
|
||||
spawner.server = Server.from_url("http://1.2.3.4")
|
||||
assert spawner.server is not None
|
||||
assert spawner.server.ip == "1.2.3.4"
|
||||
|
||||
|
||||
async def test_group_override(app):
|
||||
app.load_groups = {
|
||||
"admin": {"users": ["admin"]},
|
||||
"user": {"users": ["admin", "user"]},
|
||||
}
|
||||
await app.init_groups()
|
||||
|
||||
group_overrides = {
|
||||
"01-admin-mem-limit": {
|
||||
"groups": ["admin"],
|
||||
"spawner_override": {"start_timeout": 120},
|
||||
}
|
||||
}
|
||||
|
||||
admin_user = find_user(app.db, "admin")
|
||||
s = Spawner(user=admin_user)
|
||||
s.start_timeout = 60
|
||||
s.group_overrides = group_overrides
|
||||
await s.apply_group_overrides()
|
||||
assert s.start_timeout == 120
|
||||
|
||||
non_admin_user = find_user(app.db, "user")
|
||||
s = Spawner(user=non_admin_user)
|
||||
s.start_timeout = 60
|
||||
s.group_overrides = group_overrides
|
||||
await s.apply_group_overrides()
|
||||
assert s.start_timeout == 60
|
||||
|
||||
|
||||
async def test_group_override_lexical_ordering(app):
|
||||
app.load_groups = {
|
||||
"admin": {"users": ["admin"]},
|
||||
"user": {"users": ["admin", "user"]},
|
||||
}
|
||||
await app.init_groups()
|
||||
|
||||
group_overrides = {
|
||||
# this should be applied last, even though it is specified first,
|
||||
# due to lexical ordering based on key names
|
||||
"02-admin-mem-limit": {
|
||||
"groups": ["admin"],
|
||||
"spawner_override": {"start_timeout": 300},
|
||||
},
|
||||
"01-admin-mem-limit": {
|
||||
"groups": ["admin"],
|
||||
"spawner_override": {"start_timeout": 120},
|
||||
},
|
||||
}
|
||||
|
||||
admin_user = find_user(app.db, "admin")
|
||||
s = Spawner(user=admin_user)
|
||||
s.start_timeout = 60
|
||||
s.group_overrides = group_overrides
|
||||
await s.apply_group_overrides()
|
||||
assert s.start_timeout == 300
|
||||
|
||||
|
||||
async def test_group_override_dict_merging(app):
|
||||
app.load_groups = {
|
||||
"admin": {"users": ["admin"]},
|
||||
"user": {"users": ["admin", "user"]},
|
||||
}
|
||||
await app.init_groups()
|
||||
|
||||
group_overrides = {
|
||||
"01-admin-env-add": {
|
||||
"groups": ["admin"],
|
||||
"spawner_override": {"environment": {"AM_I_ADMIN": "yes"}},
|
||||
},
|
||||
"02-user-env-add": {
|
||||
"groups": ["user"],
|
||||
"spawner_override": {"environment": {"AM_I_USER": "yes"}},
|
||||
},
|
||||
}
|
||||
|
||||
admin_user = find_user(app.db, "admin")
|
||||
s = Spawner(user=admin_user)
|
||||
s.group_overrides = group_overrides
|
||||
await s.apply_group_overrides()
|
||||
assert s.environment["AM_I_ADMIN"] == "yes"
|
||||
assert s.environment["AM_I_USER"] == "yes"
|
||||
|
||||
admin_user = find_user(app.db, "user")
|
||||
s = Spawner(user=admin_user)
|
||||
s.group_overrides = group_overrides
|
||||
await s.apply_group_overrides()
|
||||
assert s.environment["AM_I_USER"] == "yes"
|
||||
assert "AM_I_ADMIN" not in s.environment
|
||||
|
||||
|
||||
async def test_group_override_callable(app):
|
||||
app.load_groups = {
|
||||
"admin": {"users": ["admin"]},
|
||||
"user": {"users": ["admin", "user"]},
|
||||
}
|
||||
await app.init_groups()
|
||||
|
||||
def group_overrides(spawner):
|
||||
return {
|
||||
"01-admin-mem-limit": {
|
||||
"groups": ["admin"],
|
||||
"spawner_override": {"start_timeout": 120},
|
||||
}
|
||||
}
|
||||
|
||||
admin_user = find_user(app.db, "admin")
|
||||
s = Spawner(user=admin_user)
|
||||
s.start_timeout = 60
|
||||
s.group_overrides = group_overrides
|
||||
await s.apply_group_overrides()
|
||||
assert s.start_timeout == 120
|
||||
|
||||
non_admin_user = find_user(app.db, "user")
|
||||
s = Spawner(user=non_admin_user)
|
||||
s.start_timeout = 60
|
||||
s.group_overrides = group_overrides
|
||||
await s.apply_group_overrides()
|
||||
assert s.start_timeout == 60
|
||||
|
@@ -1,13 +1,13 @@
|
||||
# Copyright (c) Jupyter Development Team.
|
||||
# Distributed under the terms of the Modified BSD License.
|
||||
import asyncio
|
||||
import json
|
||||
import warnings
|
||||
from collections import defaultdict
|
||||
from datetime import timedelta
|
||||
from urllib.parse import quote, urlparse, urlunparse
|
||||
|
||||
from sqlalchemy import inspect
|
||||
from tornado import gen, web
|
||||
from tornado import web
|
||||
from tornado.httputil import urlencode
|
||||
from tornado.log import app_log
|
||||
|
||||
@@ -904,6 +904,7 @@ class User:
|
||||
db.commit()
|
||||
# wait for spawner.start to return
|
||||
# run optional preparation work to bootstrap the notebook
|
||||
await spawner.apply_group_overrides()
|
||||
await maybe_future(spawner.run_pre_spawn_hook())
|
||||
if self.settings.get('internal_ssl'):
|
||||
self.log.debug("Creating internal SSL certs for %s", spawner._log_name)
|
||||
@@ -911,9 +912,13 @@ class User:
|
||||
spawner.cert_paths = await maybe_future(spawner.move_certs(hub_paths))
|
||||
self.log.debug("Calling Spawner.start for %s", spawner._log_name)
|
||||
f = maybe_future(spawner.start())
|
||||
# commit any changes in spawner.start (always commit db changes before yield)
|
||||
# commit any changes in spawner.start (always commit db changes before await)
|
||||
db.commit()
|
||||
url = await gen.with_timeout(timedelta(seconds=spawner.start_timeout), f)
|
||||
# gen.with_timeout protects waited-for tasks from cancellation,
|
||||
# whereas wait_for cancels tasks that don't finish within timeout.
|
||||
# we want this task to halt if it doesn't return in the time limit.
|
||||
await asyncio.wait_for(f, timeout=spawner.start_timeout)
|
||||
url = f.result()
|
||||
if url:
|
||||
# get ip, port info from return value of start()
|
||||
if isinstance(url, str):
|
||||
|
@@ -942,3 +942,23 @@ def subdomain_hook_idna(name, domain, kind):
|
||||
else:
|
||||
suffix = f"--{kind}"
|
||||
return f"{safe_name}{suffix}.{domain}"
|
||||
|
||||
|
||||
# From https://github.com/jupyter-server/jupyter_server/blob/fc0ac3236fdd92778ea765db6e8982212c8389ee/jupyter_server/config_manager.py#L14
|
||||
def recursive_update(target, new):
|
||||
"""
|
||||
Recursively update one dictionary in-place using another.
|
||||
|
||||
None values will delete their keys.
|
||||
"""
|
||||
for k, v in new.items():
|
||||
if isinstance(v, dict):
|
||||
if k not in target:
|
||||
target[k] = {}
|
||||
recursive_update(target[k], v)
|
||||
|
||||
elif v is None:
|
||||
target.pop(k, None)
|
||||
|
||||
else:
|
||||
target[k] = v
|
||||
|
14
package-lock.json
generated
14
package-lock.json
generated
@@ -83,12 +83,12 @@
|
||||
}
|
||||
},
|
||||
"node_modules/braces": {
|
||||
"version": "3.0.2",
|
||||
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
|
||||
"integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
|
||||
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"fill-range": "^7.0.1"
|
||||
"fill-range": "^7.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
@@ -119,9 +119,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/fill-range": {
|
||||
"version": "7.0.1",
|
||||
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
|
||||
"integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
|
||||
"version": "7.1.1",
|
||||
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
|
||||
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"to-regex-range": "^5.0.1"
|
||||
|
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"
|
||||
# ref: https://setuptools.pypa.io/en/latest/userguide/pyproject_config.html
|
||||
[project]
|
||||
name = "jupyterhub"
|
||||
version = "5.0.0b2"
|
||||
version = "5.1.0"
|
||||
dynamic = ["readme", "dependencies"]
|
||||
description = "JupyterHub: A multi-user server for Jupyter notebooks"
|
||||
authors = [
|
||||
@@ -147,7 +147,7 @@ indent_size = 2
|
||||
github_url = "https://github.com/jupyterhub/jupyterhub"
|
||||
|
||||
[tool.tbump.version]
|
||||
current = "5.0.0b2"
|
||||
current = "5.1.0"
|
||||
|
||||
# Example of a semver regexp.
|
||||
# Make sure this matches current_version before
|
||||
|
@@ -39,12 +39,14 @@
|
||||
<tbody>
|
||||
<tr class="home-server-row add-server-row">
|
||||
<td colspan="4">
|
||||
<input class="new-server-name"
|
||||
aria-label="server name"
|
||||
placeholder="name-your-server">
|
||||
<button role="button"
|
||||
type="button"
|
||||
class="new-server-btn btn btn-xs btn-primary">Add New Server</button>
|
||||
<div class="input-group">
|
||||
<input class="new-server-name form-control"
|
||||
aria-label="server name"
|
||||
placeholder="name-your-server">
|
||||
<button role="button"
|
||||
type="button"
|
||||
class="new-server-btn btn btn-xs btn-primary">Add New Server</button>
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{% for spawner in named_spawners %}
|
||||
|
@@ -91,7 +91,7 @@
|
||||
{% block script %}
|
||||
{{ super() }}
|
||||
<script>
|
||||
if (window.location.protocol === "http:") {
|
||||
if (!window.isSecureContext) {
|
||||
// unhide http warning
|
||||
var warning = document.getElementById('insecure-login-warning');
|
||||
warning.className = warning.className.replace(/\bhidden\b/, '');
|
||||
|
@@ -167,7 +167,7 @@
|
||||
{% block login_widget %}
|
||||
<span id="login_widget">
|
||||
{% if user %}
|
||||
<span class="navbar-text">{{ user.name }}</span>
|
||||
<span class="navbar-text me-1">{{ user.name }}</span>
|
||||
<a id="logout"
|
||||
role="button"
|
||||
class="btn btn-sm btn-outline-dark"
|
||||
|
@@ -13,13 +13,7 @@
|
||||
<br />
|
||||
<label for="token-expiration-seconds" class="form-label">Token expires in</label>
|
||||
{% block expiration_options %}
|
||||
<select id="token-expiration-seconds" class="form-select">
|
||||
<!-- unit used for each value is `seconds` -->
|
||||
<option value="3600">1 Hour</option>
|
||||
<option value="86400">1 Day</option>
|
||||
<option value="604800">1 Week</option>
|
||||
<option value="" selected="selected">Never</option>
|
||||
</select>
|
||||
<select id="token-expiration-seconds" class="form-select">{{ token_expires_in_options_html | safe }}</select>
|
||||
{% endblock expiration_options %}
|
||||
<small id="note-expires-at" class="form-text">You can configure when your token will expire.</small>
|
||||
<br />
|
||||
|
Reference in New Issue
Block a user