From 9fefbefb3863f998abd66087c940d0a156df445f Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Fri, 4 Oct 2019 10:49:53 -0700 Subject: [PATCH 01/21] Turn this into a node that just mainly mounts gpfs --- jupyter-nersc/app-notebooks/Dockerfile | 93 +++++++++----------------- 1 file changed, 31 insertions(+), 62 deletions(-) diff --git a/jupyter-nersc/app-notebooks/Dockerfile b/jupyter-nersc/app-notebooks/Dockerfile index d4d63aa..a18427d 100644 --- a/jupyter-nersc/app-notebooks/Dockerfile +++ b/jupyter-nersc/app-notebooks/Dockerfile @@ -1,46 +1,40 @@ -ARG branch=unknown - -FROM registry.spin.nersc.gov/das/jupyter-base-${branch}:latest +FROM ubuntu:16.04 LABEL maintainer="Rollin Thomas " -WORKDIR /srv -# Additional Ubuntu packages +# Base Ubuntu packages -RUN \ - apt-get --yes install \ - csh \ - dvipng \ - ksh \ - ldap-utils \ - libnss-ldapd \ - libpam-ldap \ - nscd \ - openssh-server \ - supervisor \ - tcsh \ - texlive-xetex \ - zsh +ENV DEBIAN_FRONTEND noninteractive +ENV LANG C.UTF-8 -ADD packages3.txt /tmp/packages3.txt RUN \ - /opt/anaconda3/bin/conda update --yes conda && \ - /opt/anaconda3/bin/conda install --yes anaconda && \ - /opt/anaconda3/bin/conda install --file /tmp/packages3.txt && \ - /opt/anaconda3/bin/ipython kernel install && \ - /opt/anaconda3/bin/conda clean --yes --all + apt-get update && \ + apt-get --yes upgrade && \ + apt-get --yes install \ + bzip2 \ + curl \ + git \ + libffi-dev \ + lsb-release \ + tzdata \ + vim \ + wget \ + csh \ + ksh \ + ldap-utils \ + libnss-ldapd \ + libpam-ldap \ + nscd \ + openssh-server \ + supervisor \ + tcsh \ + zsh -# Python 2 Anaconda and additional packages +# Timezone to Berkeley -ADD packages2.txt /tmp/packages2.txt +ENV TZ=America/Los_Angeles RUN \ - wget -q -O /tmp/miniconda2.sh https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh && \ - bash /tmp/miniconda2.sh -b -p /opt/anaconda2 && \ - rm /tmp/miniconda2.sh && \ - /opt/anaconda2/bin/conda update --yes conda && \ - /opt/anaconda2/bin/conda install --yes anaconda && \ - /opt/anaconda2/bin/conda install --file /tmp/packages2.txt && \ - /opt/anaconda2/bin/ipython kernel install && \ - /opt/anaconda2/bin/conda clean --yes --all + ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && \ + echo $TZ > /etc/timezone # For ssh auth API @@ -59,40 +53,15 @@ RUN \ COPY etc/ /etc/ -# GPFS +# NGF RUN \ mkdir /global && \ ln -sf /global/u1 /global/homes && \ ln -sf /global/project /project && \ - ln -s /global/common/datatran /usr/common && \ + ln -s /global/common/cori_cle7 /usr/common && \ echo "datatran" > /etc/clustername -# JupyterHub/lab features - -RUN \ - pip install --no-cache-dir ipympl && \ - jupyter nbextension enable --sys-prefix --py widgetsnbextension - -RUN \ - conda install --yes -c conda-forge nodejs - -RUN \ - jupyter labextension install --debug \ - @jupyter-widgets/jupyterlab-manager \ - jupyter-matplotlib \ - @jupyterlab/toc \ - jupyterlab_bokeh \ - jupyterlab-favorites \ - jupyterlab-recents - -RUN \ - /opt/anaconda2/bin/jupyter nbextension enable --sys-prefix --py widgetsnbextension - -# Get port script - -ADD get_port.py /opt/anaconda3/bin/ - # Supervisord to launch sshd and nslcd ADD supervisord.conf /etc/supervisor/conf.d/supervisord.conf From 1ba47b49fdb7777dc82f2ec24f7bb0da17546118 Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Fri, 4 Oct 2019 10:50:23 -0700 Subject: [PATCH 02/21] Change to use latest tag --- jupyter-nersc/web-announcement/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/jupyter-nersc/web-announcement/Dockerfile b/jupyter-nersc/web-announcement/Dockerfile index e96f1a7..1d05094 100644 --- a/jupyter-nersc/web-announcement/Dockerfile +++ b/jupyter-nersc/web-announcement/Dockerfile @@ -4,7 +4,7 @@ FROM registry.spin.nersc.gov/das/jupyter-base-${branch}:latest LABEL maintainer="Rollin Thomas " RUN \ - pip install git+https://github.com/rcthomas/jupyterhub-announcement.git@persist-announcements + pip install git+https://github.com/rcthomas/jupyterhub-announcement.git@0.3.1 WORKDIR /srv From 2e03f7b09e6ad981d0c97a4966fa0169775b26ab Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Fri, 4 Oct 2019 10:50:39 -0700 Subject: [PATCH 03/21] Add env for nbviewers service --- jupyter-nersc/web-jupyterhub/docker-entrypoint.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/jupyter-nersc/web-jupyterhub/docker-entrypoint.sh b/jupyter-nersc/web-jupyterhub/docker-entrypoint.sh index 096e023..68aa8c2 100644 --- a/jupyter-nersc/web-jupyterhub/docker-entrypoint.sh +++ b/jupyter-nersc/web-jupyterhub/docker-entrypoint.sh @@ -29,7 +29,6 @@ file_env 'MODS_JUPYTERHUB_API_TOKEN' file_env 'CONFIGPROXY_AUTH_TOKEN' file_env 'JUPYTERHUB_CRYPT_KEY' file_env 'ANNOUNCEMENT_JUPYTERHUB_API_TOKEN' - -# file_env 'NBVIEWER_JUPYTERHUB_API_TOKEN' +file_env 'NBVIEWER_JUPYTERHUB_API_TOKEN' exec "$@" From b8ed2aa9f1ee106078bf41baa1c6570e52e781df Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Fri, 4 Oct 2019 10:50:59 -0700 Subject: [PATCH 04/21] Add nbviewer but also change spin node --- .../web-jupyterhub/jupyterhub_config.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py index 92cf8fe..b62726d 100644 --- a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py +++ b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py @@ -405,11 +405,11 @@ def comma_split(string): 'admin': True, 'api_token': os.environ["MODS_JUPYTERHUB_API_TOKEN"] }, -# { -# 'name': 'nbviewer', -# 'url': 'http://web-nbviewer:5000', -# 'api_token': os.environ["NBVIEWER_JUPYTERHUB_API_TOKEN"] -# } + { + 'name': 'nbviewer', + 'url': 'http://web-nbviewer:5000', + 'api_token': os.environ["NBVIEWER_JUPYTERHUB_API_TOKEN"] + } ] ## The class to use for spawning single-user servers. 
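(Context for the `nbviewer` service enabled above: a JupyterHub service entry with a `url` but no `command` is externally managed; the Hub registers the URL with the proxy and accepts the service's `api_token` on REST calls. A minimal sketch of that call pattern, mirroring what monitor.py does in patch 08. It assumes the admin-flagged `mods` token, since listing users requires admin rights:

    import os
    import requests

    # The Hub API as addressed on the internal network in this deployment.
    api_url = "http://web-jupyterhub:8081/hub/api"
    token = os.environ["MODS_JUPYTERHUB_API_TOKEN"]  # admin service token

    # List users and their running servers.
    r = requests.get(api_url + "/users",
                     headers={"Authorization": "token " + token})
    r.raise_for_status()
    for user in r.json():
        print(user["name"], list(user["servers"]))
)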
@@ -1117,13 +1117,16 @@ def comma_split(string): "spin-shared-node-cpu": ( "sshspawner.sshspawner.SSHSpawner", { "cmd": ["/global/common/cori/das/jupyterhub/jupyter-launcher.sh", - "/opt/anaconda3/bin/jupyter-labhub"], + "/global/common/cori_cle7/software/jupyter/19-09/bin/jupyter-labhub"], +# "/opt/anaconda3/bin/jupyter-labhub"], "args": ["--transport=ipc"], "environment": {"OMP_NUM_THREADS" : "2"}, "remote_hosts": ["app-notebooks"], - "remote_port_command": "/opt/anaconda3/bin/python /global/common/cori/das/jupyterhub/new-get-port.py --ip", + "remote_port_command": "/usr/bin/python /global/common/cori/das/jupyterhub/new-get-port.py --ip", +# "remote_port_command": "/opt/anaconda3/bin/python /global/common/cori/das/jupyterhub/new-get-port.py --ip", "hub_api_url": "http://{}:8081/hub/api".format(ip), - "path": "/opt/anaconda3/bin:/usr/bin:/usr/local/bin:/bin", + "path": "/global/common/cori_cle7/software/jupyter/19-09/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", +# "path": "/opt/anaconda3/bin:/usr/bin:/usr/local/bin:/bin", "ssh_keyfile": '/certs/{username}.key' } ) From 70eb2785fa2942e654323b51bb8c349cccb0f27b Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Fri, 4 Oct 2019 10:51:22 -0700 Subject: [PATCH 05/21] Require notebook --- jupyter-nersc/web-nbviewer/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/jupyter-nersc/web-nbviewer/Dockerfile b/jupyter-nersc/web-nbviewer/Dockerfile index 9729b83..b3b5ca5 100644 --- a/jupyter-nersc/web-nbviewer/Dockerfile +++ b/jupyter-nersc/web-nbviewer/Dockerfile @@ -16,6 +16,7 @@ RUN \ markdown \ nbconvert \ nbformat \ + notebook \ pycurl && \ pip install --no-cache-dir \ statsd From f9458938a08161ee3d72fe4ea4200606659b5d6a Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Fri, 4 Oct 2019 10:51:33 -0700 Subject: [PATCH 06/21] Some additional configs --- jupyter-nersc/web-nbviewer/nbviewer_config.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/jupyter-nersc/web-nbviewer/nbviewer_config.py b/jupyter-nersc/web-nbviewer/nbviewer_config.py index 7e3ccc0..00b5f36 100644 --- a/jupyter-nersc/web-nbviewer/nbviewer_config.py +++ b/jupyter-nersc/web-nbviewer/nbviewer_config.py @@ -9,3 +9,7 @@ c.NBViewer.localfiles = "/repos/nbviewer/notebook-5.7.8/tools/tests" c.NBViewer.template_path = "/repos/clonenotebooks/templates" + +c.NBViewer.static_path = "/repos/clonenotebooks/static" +c.NBViewer.index_handler = "clonenotebooks.renderers.IndexRenderingHandler" + From 80ff32a22fe7c9788ee0214214d5d4d43280cdc4 Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Wed, 9 Oct 2019 09:17:58 -0700 Subject: [PATCH 07/21] Add Gerty --- .../web-jupyterhub/jupyterhub_config.py | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py index b62726d..685be60 100644 --- a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py +++ b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py @@ -1005,11 +1005,12 @@ def comma_split(string): #------------------------------------------------------------------------------ c.NERSCSpawner.profiles = [ - { "name": "gerty-shared-node-cpu" }, - { "name": "cori-shared-node-cpu" }, - { "name": "cori-exclusive-node-cpu" }, - { "name": "cori-exclusive-node-gpu" }, - { "name": "spin-shared-node-cpu" }, + { "name": "gerty-shared-node-cpu" }, + { "name": "gerty-exclusive-node-cpu" }, + { "name": "cori-shared-node-cpu" }, + { "name": "cori-exclusive-node-cpu" }, + { "name": 
"cori-exclusive-node-gpu" }, + { "name": "spin-shared-node-cpu" }, ] c.NERSCSpawner.setups = [ @@ -1073,6 +1074,21 @@ def comma_split(string): "ssh_keyfile": '/certs/{username}.key' } ), + "gerty-exclusive-node-cpu": ( + "nerscslurmspawner.NERSCExclusiveSlurmSpawner", { + "cmd": ["/global/common/cori/das/jupyterhub/jupyter-launcher.sh", + "/usr/common/software/jupyter/19-09/bin/jupyter-labhub"], + "exec_prefix": "/usr/bin/ssh -q -o StrictHostKeyChecking=no -o preferredauthentications=publickey -l {username} -i /certs/{username}.key {remote_host}", + "http_timeout": 300, + "startup_poll_interval": 30.0, + "req_remote_host": "gerty.nersc.gov", + "req_homedir": "/tmp", + "req_runtime": "240", + "req_qos": "regular", + "hub_api_url": "http://{}:8081/hub/api".format(ip), + "path": "/usr/common/software/jupyter/19-09/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", + } + ), "cori-shared-node-cpu": ( "sshspawner.sshspawner.SSHSpawner", { "cmd": ["/global/common/cori/das/jupyterhub/jupyter-launcher.sh", From 183e76f16f45756a006df846861cc65890bd567a Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Wed, 16 Oct 2019 21:51:08 -0700 Subject: [PATCH 08/21] New monitor package --- jupyter-nersc/app-monitoring/monitor.py | 535 ++++++++++++++++-------- 1 file changed, 353 insertions(+), 182 deletions(-) diff --git a/jupyter-nersc/app-monitoring/monitor.py b/jupyter-nersc/app-monitoring/monitor.py index e94b7a2..943b18e 100644 --- a/jupyter-nersc/app-monitoring/monitor.py +++ b/jupyter-nersc/app-monitoring/monitor.py @@ -1,9 +1,12 @@ +# https://raw.githubusercontent.com/pika/pika/master/examples/asynchronous_publisher_example.py -# Based on the asynchronous publisher example in the Pika documentation. +# -*- coding: utf-8 -*- +# pylint: disable=C0111,C0103,R0205 import argparse -import logging +import functools import json +import logging import os import pika @@ -14,258 +17,426 @@ LOGGER = logging.getLogger(__name__) -def main(): - args = parse_arguments() +class ExamplePublisher(object): + """This is an example publisher that will handle unexpected interactions + with RabbitMQ such as channel and connection closures. - if args.debug: - logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) - else: - logging.basicConfig(level=logging.INFO, format=LOG_FORMAT) + If RabbitMQ closes the connection, it will reopen it. You should + look at the output, as there are limited reasons why the connection may + be closed, which usually are tied to permission related issues or + socket timeouts. - credentials = pika.PlainCredentials(args.username, args.password) + It uses delivery confirmations and illustrates one way to keep track of + messages that have been sent and if they've been confirmed by RabbitMQ. - publisher = Publisher(args.rabbitmq_host, credentials, - args.publish_interval, args.routing_key, args.exchange, - args.exchange_type, args.queue, args.api_url, args.api_token, - args.subcategory) + """ - try: - publisher.run() - except KeyboardInterrupt: - publisher.stop() + def __init__(self, amqp_url, publish_interval, routing_key, exchange, exchange_type): + """Setup the example publisher object, passing in the URL we will use + to connect to RabbitMQ. 
+ :param str amqp_url: The URL for connecting to RabbitMQ -def parse_arguments(): - parser = argparse.ArgumentParser() - parser.add_argument("--debug", "-d", - action="store_true") - parser.add_argument("--username", "-u", - default=os.environ["MODS_USERNAME"]) - parser.add_argument("--password", "-p", - default=os.environ["MODS_PASSWORD"]) - parser.add_argument("--rabbitmq-host", "-r", - default=os.environ["MODS_RABBITMQ_HOST"]) - parser.add_argument("--publish-interval", "-i", - default=300, type=int) - parser.add_argument("--routing-key", "-k", - default="ou.das") - parser.add_argument("--exchange", "-e", - default="ha-metric") - parser.add_argument("--exchange-type", "-t", - default="topic") - parser.add_argument("--queue", "-q", - default="text") - parser.add_argument("--api-url", "-a", - default="http://web-jupyterhub:8081/hub/api") - parser.add_argument("--api-token", "-o", - default=os.environ["MODS_JUPYTERHUB_API_TOKEN"]) - parser.add_argument("--subcategory", "-s", - default="jupyterhub_v8") - return parser.parse_args() + """ + self._connection = None + self._channel = None + + self._deliveries = None + self._acked = None + self._nacked = None + self._message_number = None + self._stopping = False + self._url = amqp_url -class Publisher(object): - - def __init__(self, host, credentials, publish_interval, routing_key, - exchange, exchange_type, queue, api_url, api_token, subcategory): - - self.connection = None - self.channel = None - self.deliveries = [] - self.acked = 0 - self.nacked = 0 - self.message_number = 0 - self.stopping = False - self.host = host - self.credentials = credentials - self.publish_interval = publish_interval - self.routing_key = routing_key - self.exchange = exchange - self.exchange_type = exchange_type - self.queue = queue - self.closing = False - self.api_url = api_url - self.api_token = api_token - self.subcategory = subcategory + self._publish_interval = publish_interval + self._routing_key = routing_key + self._exchange = exchange + self._exchange_type = exchange_type def connect(self): - LOGGER.info('Connecting to %s', self.host) - parameters = pika.connection.ConnectionParameters(self.host, - credentials = self.credentials) + """This method connects to RabbitMQ, returning the connection handle. + When the connection is established, the on_connection_open method + will be invoked by pika. - # For reconnect to work, must ensure `stop_ioloop_on_close=False`. - return pika.SelectConnection(parameters, - self.on_connection_open, - stop_ioloop_on_close=False) + :rtype: pika.SelectConnection - def on_connection_open(self, unused_connection): + """ + LOGGER.info('Connecting to %s', self._url) + return pika.SelectConnection( + pika.URLParameters(self._url), + on_open_callback=self.on_connection_open, + on_open_error_callback=self.on_connection_open_error, + on_close_callback=self.on_connection_closed) + + def on_connection_open(self, _unused_connection): + """This method is called by pika once the connection to RabbitMQ has + been established. It passes the handle to the connection object in + case we need it, but in this case, we'll just mark it unused. 
+ + :param pika.SelectConnection _unused_connection: The connection + + """ LOGGER.info('Connection opened') - self.add_on_connection_close_callback() self.open_channel() - def add_on_connection_close_callback(self): - LOGGER.info('Adding connection close callback') - self.connection.add_on_close_callback(self.on_connection_closed) + def on_connection_open_error(self, _unused_connection, err): + """This method is called by pika if the connection to RabbitMQ + can't be established. - def on_connection_closed(self, connection, reply_code, reply_text): - self.channel = None - if self.closing: - self.connection.ioloop.stop() - else: - LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s', - reply_code, reply_text) - self.connection.add_timeout(5, self.reconnect) + :param pika.SelectConnection _unused_connection: The connection + :param Exception err: The error - def reconnect(self): - self.deliveries = [] - self.acked = 0 - self.nacked = 0 - self.message_number = 0 + """ + LOGGER.error('Connection open failed, reopening in 5 seconds: %s', err) + self._connection.ioloop.call_later(5, self._connection.ioloop.stop) - # This is the old connection IOLoop instance, stop its ioloop - self.connection.ioloop.stop() + def on_connection_closed(self, _unused_connection, reason): + """This method is invoked by pika when the connection to RabbitMQ is + closed unexpectedly. Since it is unexpected, we will reconnect to + RabbitMQ if it disconnects. - # Create a new connection - self.connection = self.connect() + :param pika.connection.Connection connection: The closed connection obj + :param Exception reason: exception representing reason for loss of + connection. - # There is now a new connection, needs a new ioloop to run - self.connection.ioloop.start() + """ + self._channel = None + if self._stopping: + self._connection.ioloop.stop() + else: + LOGGER.warning('Connection closed, reopening in 5 seconds: %s', + reason) + self._connection.ioloop.call_later(5, self._connection.ioloop.stop) def open_channel(self): + """This method will open a new channel with RabbitMQ by issuing the + Channel.Open RPC command. When RabbitMQ confirms the channel is open + by sending the Channel.OpenOK RPC reply, the on_channel_open method + will be invoked. + + """ LOGGER.info('Creating a new channel') - self.connection.channel(on_open_callback=self.on_channel_open) + self._connection.channel(on_open_callback=self.on_channel_open) def on_channel_open(self, channel): + """This method is invoked by pika when the channel has been opened. + The channel object is passed in so we can make use of it. + + Since the channel is now open, we'll declare the exchange to use. + + :param pika.channel.Channel channel: The channel object + + """ LOGGER.info('Channel opened') - self.channel = channel + self._channel = channel self.add_on_channel_close_callback() - self.setup_exchange(self.exchange) + self.setup_exchange(self._exchange) def add_on_channel_close_callback(self): + """This method tells pika to call the on_channel_closed method if + RabbitMQ unexpectedly closes the channel. + + """ LOGGER.info('Adding channel close callback') - self.channel.add_on_close_callback(self.on_channel_closed) + self._channel.add_on_close_callback(self.on_channel_closed) + + def on_channel_closed(self, channel, reason): + """Invoked by pika when RabbitMQ unexpectedly closes the channel. + Channels are usually closed if you attempt to do something that + violates the protocol, such as re-declare an exchange or queue with + different parameters. 
In this case, we'll close the connection + to shutdown the object. - def on_channel_closed(self, channel, reply_code, reply_text): - LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text) - if not self.closing: - self.connection.close() + :param pika.channel.Channel channel: The closed channel + :param Exception reason: why the channel was closed + + """ + LOGGER.warning('Channel %i was closed: %s', channel, reason) + self._channel = None + if not self._stopping: + self._connection.close() def setup_exchange(self, exchange_name): + """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC + command. When it is complete, the on_exchange_declareok method will + be invoked by pika. + + :param str|unicode exchange_name: The name of the exchange to declare + + """ LOGGER.info('Declaring exchange %s', exchange_name) - self.channel.exchange_declare(self.on_exchange_declareok, - exchange_name, - self.exchange_type, - durable=True) - - def on_exchange_declareok(self, unused_frame): - LOGGER.info('Exchange declared') - self.setup_queue(self.queue) - - def setup_queue(self, queue_name): - LOGGER.info('Declaring queue %s', queue_name) - self.channel.queue_declare(self.on_queue_declareok, queue_name) - - def on_queue_declareok(self, method_frame): - LOGGER.info('Binding %s to %s with %s', - self.exchange, self.queue, self.routing_key) - self.channel.queue_bind(self.on_bindok, self.queue, - self.exchange, self.routing_key) - - def on_bindok(self, unused_frame): - LOGGER.info('Queue bound') + # Note: using functools.partial is not required, it is demonstrating + # how arbitrary data can be passed to the callback when it is called + cb = functools.partial( + self.on_exchange_declareok, userdata=exchange_name) + self._channel.exchange_declare( + exchange=exchange_name, + exchange_type=self._exchange_type, + callback=cb, + durable=True) + + def on_exchange_declareok(self, _unused_frame, userdata): + """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC + command. + + :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame + :param str|unicode userdata: Extra user data (exchange name) + + """ + LOGGER.info('Exchange declared: %s', userdata) self.start_publishing() def start_publishing(self): + """This method will enable delivery confirmations and schedule the + first message to be sent to RabbitMQ + + """ LOGGER.info('Issuing consumer related RPC commands') self.enable_delivery_confirmations() self.schedule_next_message() def enable_delivery_confirmations(self): + """Send the Confirm.Select RPC method to RabbitMQ to enable delivery + confirmations on the channel. The only way to turn this off is to close + the channel and create a new one. + + When the message is confirmed from RabbitMQ, the + on_delivery_confirmation method will be invoked passing in a Basic.Ack + or Basic.Nack method from RabbitMQ that will indicate which messages it + is confirming or rejecting. + + """ LOGGER.info('Issuing Confirm.Select RPC command') - self.channel.confirm_delivery(self.on_delivery_confirmation) + self._channel.confirm_delivery(self.on_delivery_confirmation) def on_delivery_confirmation(self, method_frame): + """Invoked by pika when RabbitMQ responds to a Basic.Publish RPC + command, passing in either a Basic.Ack or Basic.Nack frame with + the delivery tag of the message that was published. The delivery tag + is an integer counter indicating the message number that was sent + on the channel via Basic.Publish. 
Here we're just doing house keeping + to keep track of stats and remove message numbers that we expect + a delivery confirmation of from the list used to keep track of messages + that are pending confirmation. + + :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame + + """ confirmation_type = method_frame.method.NAME.split('.')[1].lower() - LOGGER.info('Received %s for delivery tag: %i', - confirmation_type, + LOGGER.info('Received %s for delivery tag: %i', confirmation_type, method_frame.method.delivery_tag) if confirmation_type == 'ack': - self.acked += 1 + self._acked += 1 elif confirmation_type == 'nack': - self.nacked += 1 - self.deliveries.remove(method_frame.method.delivery_tag) - LOGGER.info('Published %i messages, %i have yet to be confirmed, ' - '%i were acked and %i were nacked', - self.message_number, len(self.deliveries), - self.acked, self.nacked) + self._nacked += 1 + self._deliveries.remove(method_frame.method.delivery_tag) + LOGGER.info( + 'Published %i messages, %i have yet to be confirmed, ' + '%i were acked and %i were nacked', self._message_number, + len(self._deliveries), self._acked, self._nacked) def schedule_next_message(self): - if self.stopping: - return + """If we are not closing our connection to RabbitMQ, schedule another + message to be delivered in _publish_interval seconds. + + """ LOGGER.info('Scheduling next message for %0.1f seconds', - self.publish_interval) - self.connection.add_timeout(self.publish_interval, - self.publish_message) + self._publish_interval) + self._connection.ioloop.call_later(self._publish_interval, + self.publish_message) def publish_message(self): - if self.stopping: + """If the class is not stopping, publish a message to RabbitMQ, + appending a list of deliveries with the message number that was sent. + This list will be used to check for delivery confirmations in the + on_delivery_confirmations method. + + Once the message has been sent, schedule another message to be sent. + The main reason I put scheduling in was just so you can get a good idea + of how the process is flowing by slowing down and speeding up the + delivery intervals by changing the _publish_interval constant in the + class. + + """ + if self._channel is None or not self._channel.is_open: return + message = self.create_message() + self._channel.basic_publish(self._exchange, self._routing_key, + json.dumps(message)) + self._message_number += 1 + self._deliveries.append(self._message_number) + LOGGER.info('Published message # %i', self._message_number) + self.schedule_next_message() - headers = dict(Authorization="token " + self.api_token) - r = requests.get(self.api_url + "/users", headers=headers) - r.raise_for_status() - users = r.json() + def create_message(self): + raise NotImplementedError + + def run(self): + """Run the example code by connecting and then starting the IOLoop. + + """ + while not self._stopping: + self._connection = None + self._deliveries = [] + self._acked = 0 + self._nacked = 0 + self._message_number = 0 + + try: + self._connection = self.connect() + self._connection.ioloop.start() + except KeyboardInterrupt: + self.stop() + if (self._connection is not None and + not self._connection.is_closed): + # Finish closing + self._connection.ioloop.start() - current_users = 0 - running_servers = dict(total=0) - for user in users: + LOGGER.info('Stopped') + + def stop(self): + """Stop the example by closing the channel and connection. We + set a flag here so that we stop scheduling new messages to be + published. 
The IOLoop is started because this method is + invoked by the Try/Catch below when KeyboardInterrupt is caught. + Starting the IOLoop again will allow the publisher to cleanly + disconnect from RabbitMQ. + + """ + LOGGER.info('Stopping') + self._stopping = True + self.close_channel() + self.close_connection() + + def close_channel(self): + """Invoke this command to close the channel with RabbitMQ by sending + the Channel.Close RPC command. + + """ + if self._channel is not None: + LOGGER.info('Closing the channel') + self._channel.close() + + def close_connection(self): + """This method closes the connection to RabbitMQ.""" + if self._connection is not None: + LOGGER.info('Closing connection') + self._connection.close() + + +class JupyterHubPublisher(ExamplePublisher): + + def __init__(self, api_token, api_url, subcategory, *args, **kwargs): + self._api_token = api_token + self._api_url = api_url + self._subcategory = subcategory + super().__init__(*args, **kwargs) + + def create_message(self): + import pprint + + hub_users = self.hub_users() + iris_staff = self.iris_staff() + + users = list() + user_servers = list() + + for user in hub_users: if not user["servers"]: continue servers = user["servers"] if "" in servers: servers["default"] = servers.pop("") - current_users += 1 + name = user["name"] + if name in iris_staff: + continue + users.append(name) for key in servers: - if key not in running_servers: - running_servers[key] = 0 - running_servers[key] += 1 - running_servers["total"] += 1 + user_servers.append(f"{name}:{key}") + + # Format and return message message = dict() message["category"] = "MODS" - message["subcategory"] = self.subcategory - message["current_users"] = current_users - message["running_servers"] = running_servers + message["subcategory"] = self._subcategory + message["users"] = users + message["user_servers"] = user_servers + return message + + def hub_users(self): + headers = dict(Authorization="token " + self._api_token) + r = requests.get(self._api_url + "/users", headers=headers) + r.raise_for_status() + return r.json() + + def iris_staff(self): + query = """{ + systemInfo { + projects(repoName: "nstaff"){ + users { + name + } + } + } + }""" + + try: + r = requests.post("https://iris.nersc.gov/graphql", data=dict(query=query)) + r.raise_for_status() + except Exception as err: + LOGGER.warning("Iris error, assuming there are no staff!") + return set() + else: + data = r.json() + users = data["data"]["systemInfo"]["projects"][0]["users"] + return set(u["name"] for u in users) - self.channel.basic_publish(self.exchange, self.routing_key, - json.dumps(message)) - self.message_number += 1 - self.deliveries.append(self.message_number) - LOGGER.info('Published message # %i', self.message_number) - self.schedule_next_message() +def main(): + args = parse_arguments() - def close_channel(self): - LOGGER.info('Closing the channel') - if self.channel: - self.channel.close() + logging.basicConfig(level=args.log_level, format=LOG_FORMAT) - def run(self): - self.connection = self.connect() - self.connection.ioloop.start() + amqp_url = f"amqp://{args.username}:{args.password}@{args.rabbitmq_host}" - def stop(self): - LOGGER.info('Stopping') - self.stopping = True - self.close_channel() - self.close_connection() - self.connection.ioloop.start() - LOGGER.info('Stopped') + publisher = JupyterHubPublisher( + args.api_token, + args.api_url, + args.subcategory, + amqp_url, + args.publish_interval, + args.routing_key, + args.exchange, + args.exchange_type, + ) + publisher.run() - def 
close_connection(self): - LOGGER.info('Closing connection') - self.closing = True - self.connection.close() +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument("--log-level", "-l", + default=logging.INFO, type=int) + parser.add_argument("--api-token", "-o", + default=os.environ["MODS_JUPYTERHUB_API_TOKEN"]) + parser.add_argument("--api-url", "-a", + default="http://web-jupyterhub:8081/hub/api") + parser.add_argument("--subcategory", "-s", + default="jupyterhub_v10") + parser.add_argument("--username", "-u", + default=os.environ["MODS_USERNAME"]) + parser.add_argument("--password", "-p", + default=os.environ["MODS_PASSWORD"]) + parser.add_argument("--rabbitmq-host", "-r", + default=os.environ["MODS_RABBITMQ_HOST"]) + parser.add_argument("--publish-interval", "-i", + default=300, type=int) + parser.add_argument("--routing-key", "-k", + default="ou.das") + parser.add_argument("--exchange", "-e", + default="ha-metric") + parser.add_argument("--exchange-type", "-t", + default="topic") + return parser.parse_args() if __name__ == '__main__': From 3a81d66f93a5080f6157357e5044cc48366a1b77 Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Wed, 16 Oct 2019 21:51:37 -0700 Subject: [PATCH 09/21] Update Dockerfile for monitoring --- jupyter-nersc/app-monitoring/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/jupyter-nersc/app-monitoring/Dockerfile b/jupyter-nersc/app-monitoring/Dockerfile index 67cc5d5..7d6e874 100644 --- a/jupyter-nersc/app-monitoring/Dockerfile +++ b/jupyter-nersc/app-monitoring/Dockerfile @@ -28,9 +28,9 @@ RUN \ curl -s -o /tmp/miniconda3.sh https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ bash /tmp/miniconda3.sh -f -b -p /opt/anaconda3 && \ rm -rf /tmp/miniconda3.sh && \ - /opt/anaconda3/bin/conda update --yes conda && \ +# /opt/anaconda3/bin/conda update --yes conda && \ /opt/anaconda3/bin/pip install --no-cache-dir \ - pika==0.13.1 + pika ENV PATH=/opt/anaconda3/bin:$PATH From 12e8d331e950275f233f6db3e1edb0321cf461aa Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Thu, 17 Oct 2019 20:42:14 -0700 Subject: [PATCH 10/21] Don't crash if Iris is down --- jupyter-nersc/web-jupyterhub/jupyterhub_config.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py index 685be60..cbd06c8 100644 --- a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py +++ b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py @@ -1181,7 +1181,10 @@ async def setup(spawner): async def post_auth_hook(authenticator, handler, authentication): iris = Iris() - userdata = await iris.query_user(authentication["name"]) + try: + userdata = await iris.query_user(authentication["name"]) + except: + userdata = {} if authentication["auth_state"] is None: authentication["auth_state"] = {} authentication["auth_state"]["userdata"] = userdata From 491fc7fda9fd83e6301159d31d652a0b650b88de Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Thu, 17 Oct 2019 20:42:34 -0700 Subject: [PATCH 11/21] Handle unpopulated userdata b/c Iris down --- jupyter-nersc/web-jupyterhub/nerscspawner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jupyter-nersc/web-jupyterhub/nerscspawner.py b/jupyter-nersc/web-jupyterhub/nerscspawner.py index 225b5bf..f083a27 100644 --- a/jupyter-nersc/web-jupyterhub/nerscspawner.py +++ b/jupyter-nersc/web-jupyterhub/nerscspawner.py @@ -72,7 +72,7 @@ def default_gpu_repo(self): return None def 
user_allocations(self, repos=[]): - for allocation in self.userdata["userAllocations"]: + for allocation in self.userdata.get("userAllocations", []): if repos and allocation["computeAllocation"]["repoName"] not in repos: continue yield allocation From 40ae03325c941c7c922209c395e5754eae657351 Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Thu, 17 Oct 2019 20:42:51 -0700 Subject: [PATCH 12/21] Make announcements more consistent w/regular hub --- jupyter-nersc/web-jupyterhub/templates/page.html | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/jupyter-nersc/web-jupyterhub/templates/page.html b/jupyter-nersc/web-jupyterhub/templates/page.html index 4a3f7c2..30c7727 100644 --- a/jupyter-nersc/web-jupyterhub/templates/page.html +++ b/jupyter-nersc/web-jupyterhub/templates/page.html @@ -10,14 +10,11 @@ $.get("/services/announcement/latest", function(data) { var announcement = data["announcement"]; if(announcement) { - $(".announcement").html(`
-          <!-- panel markup lost in extraction; heading text: "Announcement" -->
-          ${announcement}
-        `);
+        $(".announcement").html(
+          `${announcement}
` + ); } }); From a8acc1f6746bb45c7b1699eb3dfaf402a52128ce Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Tue, 22 Oct 2019 15:29:50 -0700 Subject: [PATCH 13/21] Changes for testing jupyter_server_mapper --- jupyter-compose/app-notebooks/Dockerfile | 15 +++++ .../jupyter_server_mapper/__init__.py | 13 ++++ .../jupyter_server_mapper/config.py | 15 +++++ .../jupyter_server_mapper/handlers.py | 59 +++++++++++++++++++ .../jupyter-server-mapper/setup.py | 11 ++++ .../app-notebooks/jupyter_notebook_config.py | 5 ++ jupyter-compose/app-service1/index.html | 9 +++ jupyter-compose/app-service2/index.html | 9 +++ jupyter-compose/docker-compose.yml | 14 +++++ jupyter-compose/web-announcement/Dockerfile | 2 +- 10 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/__init__.py create mode 100644 jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/config.py create mode 100644 jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/handlers.py create mode 100644 jupyter-compose/app-notebooks/jupyter-server-mapper/setup.py create mode 100644 jupyter-compose/app-notebooks/jupyter_notebook_config.py create mode 100644 jupyter-compose/app-service1/index.html create mode 100644 jupyter-compose/app-service2/index.html diff --git a/jupyter-compose/app-notebooks/Dockerfile b/jupyter-compose/app-notebooks/Dockerfile index 9a6bbdc..93338f5 100644 --- a/jupyter-compose/app-notebooks/Dockerfile +++ b/jupyter-compose/app-notebooks/Dockerfile @@ -37,6 +37,21 @@ RUN \ RUN \ jupyter labextension install @jupyterlab/hub-extension +# Jupyter server proxy; install but don't enable + +RUN \ + pip install --no-cache-dir \ + jupyter-server-proxy + +ADD jupyter-server-mapper /tmp/jupyter-server-mapper +RUN \ + cd /tmp/jupyter-server-mapper && \ + python setup.py install && \ + cd - && \ + jupyter serverextension enable --py jupyter_server_mapper --sys-prefix + +ADD jupyter_notebook_config.py /opt/anaconda3/etc/jupyter/. + # Some dummy users RUN \ diff --git a/jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/__init__.py b/jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/__init__.py new file mode 100644 index 0000000..5d357b1 --- /dev/null +++ b/jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/__init__.py @@ -0,0 +1,13 @@ + +from .config import ServerMapper +from .handlers import setup_handlers + +def _jupyter_server_extension_paths(): + return [{ + "module": "jupyter_server_mapper" + }] + +def load_jupyter_server_extension(nbapp): + server_mapper = ServerMapper(parent=nbapp) + + setup_handlers(nbapp.web_app, server_mapper) diff --git a/jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/config.py b/jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/config.py new file mode 100644 index 0000000..586c7cb --- /dev/null +++ b/jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/config.py @@ -0,0 +1,15 @@ + +from traitlets.config import Configurable +from traitlets import Any + +class ServerMapper(Configurable): + + mapper = Any( + lambda key: key, + help="""Function that maps a key to a host. + + By default the key maps to itself. 
This means that if `mapper` is not + configured, the key is treated as a host.""", + config=True + ) + diff --git a/jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/handlers.py b/jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/handlers.py new file mode 100644 index 0000000..c66f76d --- /dev/null +++ b/jupyter-compose/app-notebooks/jupyter-server-mapper/jupyter_server_mapper/handlers.py @@ -0,0 +1,59 @@ + +from jupyter_server_proxy.handlers import ProxyHandler +from notebook.utils import url_path_join + + +class RemoteProxyHandler(ProxyHandler): + """ + A tornado request handler that proxies HTTP and websockets + from a port on a remote system. + """ + async def http_get(self, host, port, proxied_path): + return await self.proxy(host, port, proxied_path) + + async def open(self, host, port, proxied_path): + return await self.proxy_open(host, port, proxied_path) + + def post(self, host, port, proxied_path): + return self.proxy(host, port, proxied_path) + + def put(self, host, port, proxied_path): + return self.proxy(host, port, proxied_path) + + def delete(self, host, port, proxied_path): + return self.proxy(host, port, proxied_path) + + def head(self, host, port, proxied_path): + return self.proxy(host, port, proxied_path) + + def patch(self, host, port, proxied_path): + return self.proxy(host, port, proxied_path) + + def options(self, host, port, proxied_path): + return self.proxy(host, port, proxied_path) + + def proxy(self, host, port, proxied_path): + return super().proxy(host, port, proxied_path) + + +class MapperHandler(RemoteProxyHandler): + + def __init__(self, *args, **kwargs): + self.mapper = kwargs.pop('mapper', lambda key: key) + self.mappings = dict() + super().__init__(*args, **kwargs) + + def proxy(self, key, port, proxied_path): + return super().proxy( + self.mappings.setdefault(key, self.mapper(key)), + port, proxied_path) + + +def setup_handlers(web_app, config): + host_pattern = '.*$' + web_app.add_handlers('.*', [ + (url_path_join(web_app.settings['base_url'], r'/mapper/(.+):(\d+)(.*)'), + MapperHandler, {'absolute_url': False, 'mapper': config.mapper}), + (url_path_join(web_app.settings['base_url'], r'/mapper/absolute/(.+):(\d+)(.*)'), + MapperHandler, {'absolute_url': True, 'mapper': config.mapper}), + ]) diff --git a/jupyter-compose/app-notebooks/jupyter-server-mapper/setup.py b/jupyter-compose/app-notebooks/jupyter-server-mapper/setup.py new file mode 100644 index 0000000..13bf1b5 --- /dev/null +++ b/jupyter-compose/app-notebooks/jupyter-server-mapper/setup.py @@ -0,0 +1,11 @@ + +from distutils.core import setup + +setup( + name='jupyter-server-mapper', + version='0.1.0', + description='Jupyter Server Mapper', + author='R. C. Thomas', + author_email='rcthomas@lbl.gov', + packages=['jupyter_server_mapper'], +) diff --git a/jupyter-compose/app-notebooks/jupyter_notebook_config.py b/jupyter-compose/app-notebooks/jupyter_notebook_config.py new file mode 100644 index 0000000..43dfdd3 --- /dev/null +++ b/jupyter-compose/app-notebooks/jupyter_notebook_config.py @@ -0,0 +1,5 @@ + +def mapper(key): + return "service" + key + +c.ServerMapper.mapper = mapper diff --git a/jupyter-compose/app-service1/index.html b/jupyter-compose/app-service1/index.html new file mode 100644 index 0000000..5db6e1a --- /dev/null +++ b/jupyter-compose/app-service1/index.html @@ -0,0 +1,9 @@ + + + Hello! + + +

+    <h1>This Is Service 1</h1>
+ It's great isn't it? + + diff --git a/jupyter-compose/app-service2/index.html b/jupyter-compose/app-service2/index.html new file mode 100644 index 0000000..5dee931 --- /dev/null +++ b/jupyter-compose/app-service2/index.html @@ -0,0 +1,9 @@ + + + Hello! + + +

+    <h1>This Is Service 2</h1>
+ Totally different service here. + + diff --git a/jupyter-compose/docker-compose.yml b/jupyter-compose/docker-compose.yml index eda1e73..d624fe8 100644 --- a/jupyter-compose/docker-compose.yml +++ b/jupyter-compose/docker-compose.yml @@ -24,3 +24,17 @@ services: - CONFIGPROXY_AUTH_TOKEN=the-sign-pointed-this-way ports: - 8000:8000 + service1: + command: ["python", "-m", "http.server", "--directory", "/content/"] + image: continuumio/miniconda3:latest + ports: + - 9000:8000 + volumes: + - ./app-service1:/content + service2: + command: ["python", "-m", "http.server", "--directory", "/content/"] + image: continuumio/miniconda3:latest + ports: + - 9001:8000 + volumes: + - ./app-service2:/content diff --git a/jupyter-compose/web-announcement/Dockerfile b/jupyter-compose/web-announcement/Dockerfile index e96f1a7..1d05094 100644 --- a/jupyter-compose/web-announcement/Dockerfile +++ b/jupyter-compose/web-announcement/Dockerfile @@ -4,7 +4,7 @@ FROM registry.spin.nersc.gov/das/jupyter-base-${branch}:latest LABEL maintainer="Rollin Thomas " RUN \ - pip install git+https://github.com/rcthomas/jupyterhub-announcement.git@persist-announcements + pip install git+https://github.com/rcthomas/jupyterhub-announcement.git@0.3.1 WORKDIR /srv From cb519ded50886872f0d1813c7ae7da6178539d05 Mon Sep 17 00:00:00 2001 From: Kelly Rowland Date: Wed, 23 Oct 2019 14:19:15 -0700 Subject: [PATCH 14/21] add env file --- jupyter-compose/.env | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 jupyter-compose/.env diff --git a/jupyter-compose/.env b/jupyter-compose/.env new file mode 100644 index 0000000..f42d3fe --- /dev/null +++ b/jupyter-compose/.env @@ -0,0 +1,5 @@ +# Name of JupyterHub container data volume +SSL_VOLUME_HOST=jupyterhub-ssl + +# Data volume container mount point +SSL_VOLUME_CONTAINER=/jupyterhub-ssl From e1cc6781c1291f2af9adfad854a5a7b37917b946 Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Tue, 5 Nov 2019 10:25:32 -0800 Subject: [PATCH 15/21] Move ssl testing to own dir --- .gitignore | 1 + jupyter-compose-ssl/.env | 5 + jupyter-compose-ssl/README.md | 34 + jupyter-compose-ssl/app-notebooks/Dockerfile | 54 + jupyter-compose-ssl/app-notebooks/build.sh | 8 + .../app-notebooks/docker-entrypoint.sh | 11 + jupyter-compose-ssl/app-notebooks/get_port.py | 23 + jupyter-compose-ssl/docker-compose.yml | 67 ++ .../web-announcement/Dockerfile | 14 + .../web-announcement/announcement_config.py | 72 ++ jupyter-compose-ssl/web-announcement/build.sh | 8 + .../web-announcement/docker-entrypoint.sh | 28 + jupyter-compose-ssl/web-jupyterhub/Dockerfile | 30 + jupyter-compose-ssl/web-jupyterhub/build.sh | 8 + .../web-jupyterhub/docker-entrypoint.sh | 10 + .../web-jupyterhub/jupyterhub_config.py | 1015 +++++++++++++++++ .../web-jupyterhub/templates/page.html | 24 + 17 files changed, 1412 insertions(+) create mode 100644 jupyter-compose-ssl/.env create mode 100644 jupyter-compose-ssl/README.md create mode 100644 jupyter-compose-ssl/app-notebooks/Dockerfile create mode 100644 jupyter-compose-ssl/app-notebooks/build.sh create mode 100755 jupyter-compose-ssl/app-notebooks/docker-entrypoint.sh create mode 100755 jupyter-compose-ssl/app-notebooks/get_port.py create mode 100644 jupyter-compose-ssl/docker-compose.yml create mode 100644 jupyter-compose-ssl/web-announcement/Dockerfile create mode 100644 jupyter-compose-ssl/web-announcement/announcement_config.py create mode 100644 jupyter-compose-ssl/web-announcement/build.sh create mode 100644 jupyter-compose-ssl/web-announcement/docker-entrypoint.sh create mode 100644 
jupyter-compose-ssl/web-jupyterhub/Dockerfile create mode 100644 jupyter-compose-ssl/web-jupyterhub/build.sh create mode 100644 jupyter-compose-ssl/web-jupyterhub/docker-entrypoint.sh create mode 100644 jupyter-compose-ssl/web-jupyterhub/jupyterhub_config.py create mode 100644 jupyter-compose-ssl/web-jupyterhub/templates/page.html diff --git a/.gitignore b/.gitignore index f62b410..00a294a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ jupyterhub.sqlite jupyterhub_cookie_secret /jupyter-compose/config +/jupyter-compose-ssl/config diff --git a/jupyter-compose-ssl/.env b/jupyter-compose-ssl/.env new file mode 100644 index 0000000..f42d3fe --- /dev/null +++ b/jupyter-compose-ssl/.env @@ -0,0 +1,5 @@ +# Name of JupyterHub container data volume +SSL_VOLUME_HOST=jupyterhub-ssl + +# Data volume container mount point +SSL_VOLUME_CONTAINER=/jupyterhub-ssl diff --git a/jupyter-compose-ssl/README.md b/jupyter-compose-ssl/README.md new file mode 100644 index 0000000..f03b11b --- /dev/null +++ b/jupyter-compose-ssl/README.md @@ -0,0 +1,34 @@ +# Developing with docker-compose + +These images are designed for local dev/debug/testing using docker-compose. +This requires docker-compose. + +## Preparation + +Install Docker Compose (Just google for it). + +Build the images + + cd web-jupyterhub && bash build.sh && cd .. + cd app-notebooks && bash build.sh && cd .. + +You may want to pull the proxy image ahead of time + + docker pull jupyterhub/configurable-http-proxy + +Generate a key file + + mkdir config + ssh-keygen -t rsa -N '' -C ca@localhost -f config/newkey + ssh-keygen -s config/newkey -h -I localhost config/newkey.pub + +## Bring up the containers + + docker-compose up -d + +## Cleaning up and upgrading + +In general a docker-compose up -d will refresh things. Some times that isn't enough. If all else fails, try... + + docker-compose stop + docker-compose rm -f diff --git a/jupyter-compose-ssl/app-notebooks/Dockerfile b/jupyter-compose-ssl/app-notebooks/Dockerfile new file mode 100644 index 0000000..9a6bbdc --- /dev/null +++ b/jupyter-compose-ssl/app-notebooks/Dockerfile @@ -0,0 +1,54 @@ +ARG branch=unknown + +FROM registry.spin.nersc.gov/das/jupyter-base-${branch}:latest +LABEL maintainer="Rollin Thomas " +WORKDIR /tmp + +RUN \ + apt-get update && \ + apt-get --yes upgrade && \ + apt-get --yes install \ + openssh-server + +# Configure sshd + +RUN \ + mkdir -p /var/run/sshd + +# Python 3 Anaconda and additional packages + +RUN \ + conda update --yes conda && \ + conda install --yes \ + ipykernel \ + ipywidgets \ + jupyterlab \ + notebook && \ + ipython kernel install && \ + conda clean --yes --all + +ADD get_port.py /opt/anaconda3/bin + +# Typical extensions + +RUN \ + jupyter nbextension enable --sys-prefix --py widgetsnbextension + +RUN \ + jupyter labextension install @jupyterlab/hub-extension + +# Some dummy users + +RUN \ + adduser -q --gecos "" --disabled-password torgo && \ + echo torgo:the-master-would-not-approve | chpasswd + +RUN \ + adduser -q --gecos "" --disabled-password master && \ + echo master:you-have-failed-us-torgo | chpasswd + +WORKDIR /srv +ADD docker-entrypoint.sh . 
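+# Note: ENTRYPOINT and CMD below combine, so the container effectively runs
+# `./docker-entrypoint.sh /usr/sbin/sshd -p 22 -D`; the entrypoint installs the
+# shared public key in each user's authorized_keys and then execs sshd.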
+RUN chmod +x docker-entrypoint.sh +ENTRYPOINT [ "./docker-entrypoint.sh" ] +CMD [ "/usr/sbin/sshd", "-p", "22", "-D" ] diff --git a/jupyter-compose-ssl/app-notebooks/build.sh b/jupyter-compose-ssl/app-notebooks/build.sh new file mode 100644 index 0000000..b7f39a2 --- /dev/null +++ b/jupyter-compose-ssl/app-notebooks/build.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +branch=$(git symbolic-ref --short HEAD) + +docker build \ + --build-arg branch=$branch \ + "$@" \ + --tag app-notebooks:latest . diff --git a/jupyter-compose-ssl/app-notebooks/docker-entrypoint.sh b/jupyter-compose-ssl/app-notebooks/docker-entrypoint.sh new file mode 100755 index 0000000..d2cc277 --- /dev/null +++ b/jupyter-compose-ssl/app-notebooks/docker-entrypoint.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +for u in $(ls /home/) ; do + mkdir /home/$u/.ssh/ + cat /config/newkey.pub > /home/$u/.ssh/authorized_keys + chmod 700 /home/$u/.ssh/ + chown -R $u /home/$u/.ssh + chmod 600 /home/$u/.ssh/authorized_keys +done + +exec "$@" diff --git a/jupyter-compose-ssl/app-notebooks/get_port.py b/jupyter-compose-ssl/app-notebooks/get_port.py new file mode 100755 index 0000000..097df4a --- /dev/null +++ b/jupyter-compose-ssl/app-notebooks/get_port.py @@ -0,0 +1,23 @@ +#!/opt/anaconda3/bin/python + +import socket + +def main(): + print(f"{ip()} {port()}") + +def port(): + s = socket.socket() + s.bind(('', 0)) + port = s.getsockname()[1] + s.close() + return port + +def ip(address=("8.8.8.8", 80)): + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.connect(address) + ip = s.getsockname()[0] + s.close() + return ip + +if __name__ == "__main__": + main() diff --git a/jupyter-compose-ssl/docker-compose.yml b/jupyter-compose-ssl/docker-compose.yml new file mode 100644 index 0000000..0dc3ed8 --- /dev/null +++ b/jupyter-compose-ssl/docker-compose.yml @@ -0,0 +1,67 @@ +version: "3" +services: + web: + container_name: web + depends_on: + - proxy + image: web-jupyterhub:latest + environment: + - CONFIGPROXY_AUTH_TOKEN=the-sign-pointed-this-way + - ANNOUNCEMENT_JUPYTERHUB_API_TOKEN=the-hands-of-fate-have-doomed-this-man + - INTERNAL_SSL_PATH=${SSL_VOLUME_CONTAINER} + volumes: + - ./config:/config + - "ssl:${SSL_VOLUME_CONTAINER}:rw" + ports: + - 8081:8081 + restart: unless-stopped + user: root + app: + container_name: app + image: app-notebooks:latest + volumes: + - ./config:/config +# - "ssl:${SSL_VOLUME_CONTAINER}:ro" + announcement: + container_name: announcement + image: web-announcement:latest + environment: + - JUPYTERHUB_API_TOKEN=the-hands-of-fate-have-doomed-this-man + - JUPYTERHUB_API_URL=https://web:8081/hub/api + - JUPYTERHUB_SSL_KEYFILE=/jupyterhub-ssl/hub-internal/hub-internal.key + - JUPYTERHUB_SSL_CERTFILE=/jupyterhub-ssl/hub-internal/hub-internal.crt + - JUPYTERHUB_SSL_CLIENT_CA=/jupyterhub-ssl/hub-ca_trust.crt + restart: always + volumes: + - "ssl:${SSL_VOLUME_CONTAINER}:ro" + proxy: + command: > + configurable-http-proxy + --port 8000 + --api-ip 0.0.0.0 + --api-port 8001 + --error-target https://web:8081/hub/error + --api-ssl-key ${SSL_VOLUME_CONTAINER}/proxy-api/proxy-api.key + --api-ssl-cert ${SSL_VOLUME_CONTAINER}/proxy-api/proxy-api.crt + --api-ssl-ca ${SSL_VOLUME_CONTAINER}/proxy-api-ca_trust.crt + --api-ssl-request-cert + --api-ssl-reject-unauthorized + --client-ssl-key ${SSL_VOLUME_CONTAINER}/proxy-client/proxy-client.key + --client-ssl-cert ${SSL_VOLUME_CONTAINER}/proxy-client/proxy-client.crt + --client-ssl-ca ${SSL_VOLUME_CONTAINER}/proxy-client-ca_trust.crt + --client-ssl-request-cert + --client-ssl-reject-unauthorized + 
container_name: proxy + image: jupyterhub/configurable-http-proxy:latest + environment: + - CONFIGPROXY_AUTH_TOKEN=the-sign-pointed-this-way + ports: + - 8000:8000 + restart: always + user: root + volumes: + - "ssl:${SSL_VOLUME_CONTAINER}:ro" +volumes: + ssl: + external: + name: ${SSL_VOLUME_HOST} diff --git a/jupyter-compose-ssl/web-announcement/Dockerfile b/jupyter-compose-ssl/web-announcement/Dockerfile new file mode 100644 index 0000000..b50af16 --- /dev/null +++ b/jupyter-compose-ssl/web-announcement/Dockerfile @@ -0,0 +1,14 @@ +ARG branch=unknown + +FROM registry.spin.nersc.gov/das/jupyter-base-${branch}:latest +LABEL maintainer="Rollin Thomas " + +RUN \ + pip install git+https://github.com/rcthomas/jupyterhub-announcement.git@0.4.1 + +WORKDIR /srv + +ADD docker-entrypoint.sh announcement_config.py ./ +RUN chmod +x docker-entrypoint.sh +ENTRYPOINT ["./docker-entrypoint.sh"] +CMD ["python", "-m", "jupyterhub_announcement"] diff --git a/jupyter-compose-ssl/web-announcement/announcement_config.py b/jupyter-compose-ssl/web-announcement/announcement_config.py new file mode 100644 index 0000000..ba3fc72 --- /dev/null +++ b/jupyter-compose-ssl/web-announcement/announcement_config.py @@ -0,0 +1,72 @@ +# Configuration file for application. + +#------------------------------------------------------------------------------ +# Application(SingletonConfigurable) configuration +#------------------------------------------------------------------------------ + +## This is an application. + +## The date format used by logging formatters for %(asctime)s +#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S' + +## The Logging format template +#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s' + +## Set the log level by value or name. +#c.Application.log_level = 30 +c.Application.log_level = 0 + +#------------------------------------------------------------------------------ +# AnnouncementService(Application) configuration +#------------------------------------------------------------------------------ + +## This is an application. + +## Config file to load +#c.AnnouncementService.config_file = 'announcement_config.py' + +## Fixed message to show at the top of the page. +# +# A good use for this parameter would be a link to a more general live system +# status page or MOTD. +#c.AnnouncementService.fixed_message = '' + +## Generate default config file +#c.AnnouncementService.generate_config = False + +## Logo path, can be used to override JupyterHub one +#c.AnnouncementService.logo_file = '' + +## Port this service will listen on +#c.AnnouncementService.port = 8888 + +## Announcement service prefix +#c.AnnouncementService.service_prefix = '/services/announcement/' + +## Search paths for jinja templates, coming before default ones +#c.AnnouncementService.template_paths = [] + +#------------------------------------------------------------------------------ +# AnnouncementQueue(LoggingConfigurable) configuration +#------------------------------------------------------------------------------ + +## File path where announcements persist as JSON. +# +# For a persistent announcement queue, this parameter must be set to a non-empty +# value and correspond to a read+write-accessible path. The announcement queue +# is stored as a list of JSON objects. If this parameter is set to a non-empty +# value: +# +# * The persistence file is used to initialize the announcement queue +# at start-up. This is the only time the persistence file is read. 
+# * If the persistence file does not exist at start-up, it is +# created when an announcement is added to the queue. +# * The persistence file is over-written with the contents of the +# announcement queue each time a new announcement is added. +# +# If this parameter is set to an empty value (the default) then the queue is +# just empty at initialization and the queue is ephemeral; announcements will +# not be persisted on updates to the queue. +#c.AnnouncementQueue.persist_path = '' +c.AnnouncementQueue.persist_path = 'announcements.json' + diff --git a/jupyter-compose-ssl/web-announcement/build.sh b/jupyter-compose-ssl/web-announcement/build.sh new file mode 100644 index 0000000..7e48fc6 --- /dev/null +++ b/jupyter-compose-ssl/web-announcement/build.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +branch=$(git symbolic-ref --short HEAD) + +docker build \ + --build-arg branch=$branch \ + "$@" \ + --tag web-announcement:latest . diff --git a/jupyter-compose-ssl/web-announcement/docker-entrypoint.sh b/jupyter-compose-ssl/web-announcement/docker-entrypoint.sh new file mode 100644 index 0000000..a2c1ead --- /dev/null +++ b/jupyter-compose-ssl/web-announcement/docker-entrypoint.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# file_env VAR [DEFAULT] +# ---------------------- +# Treat the value of VAR_FILE as the path to a secrets file and initialize VAR +# with the contents of that file. From postgres docker-entrypoint.sh. + +file_env() { + local var="$1" + local fileVar="${var}_FILE" + local def="${2:-}" + if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then + echo >&2 "error: both $var and $fileVar are set (but are exclusive)" + exit 1 + fi + local val="$def" + if [ "${!var:-}" ]; then + val="${!var}" + elif [ "${!fileVar:-}" ]; then + val="$(< "${!fileVar}")" + fi + export "$var"="$val" + unset "$fileVar" +} + +file_env 'JUPYTERHUB_API_TOKEN' + +exec "$@" diff --git a/jupyter-compose-ssl/web-jupyterhub/Dockerfile b/jupyter-compose-ssl/web-jupyterhub/Dockerfile new file mode 100644 index 0000000..6a52780 --- /dev/null +++ b/jupyter-compose-ssl/web-jupyterhub/Dockerfile @@ -0,0 +1,30 @@ +ARG branch=unknown + +FROM registry.spin.nersc.gov/das/jupyter-base-${branch}:latest +LABEL maintainer="Rollin Thomas " + +# JupyterHub components + +RUN \ + pip install git+https://github.com/kellyrowland/sshspawner.git@ssl +# pip install git+https://github.com/NERSC/sshspawner.git@clean-up + +# Some dummy users + +RUN \ + adduser -q --gecos "" --disabled-password torgo && \ + echo torgo:the-master-would-not-approve | chpasswd + +RUN \ + adduser -q --gecos "" --disabled-password master && \ + echo master:you-have-failed-us-torgo | chpasswd + +WORKDIR /srv +ADD docker-entrypoint.sh . +ADD jupyterhub_config.py . +ADD templates templates +RUN chmod +x docker-entrypoint.sh +RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config + +CMD ["jupyterhub", "--debug"] +ENTRYPOINT ["./docker-entrypoint.sh"] diff --git a/jupyter-compose-ssl/web-jupyterhub/build.sh b/jupyter-compose-ssl/web-jupyterhub/build.sh new file mode 100644 index 0000000..a4954e6 --- /dev/null +++ b/jupyter-compose-ssl/web-jupyterhub/build.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +branch=$(git symbolic-ref --short HEAD) + +docker build \ + --build-arg branch=$branch \ + "$@" \ + --tag web-jupyterhub:latest . 
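The `file_env` helper in web-announcement's docker-entrypoint.sh above implements the common VAR/VAR_FILE convention for Docker secrets: set either the variable itself or the *_FILE path to it, never both. A hypothetical invocation, assuming a token file mounted at /run/secrets/api-token:

    # The entrypoint exports JUPYTERHUB_API_TOKEN from the file's contents
    # before exec'ing the announcement service.
    docker run --rm \
        -v "$(pwd)/secrets:/run/secrets:ro" \
        -e JUPYTERHUB_API_TOKEN_FILE=/run/secrets/api-token \
        web-announcement:latest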
diff --git a/jupyter-compose-ssl/web-jupyterhub/docker-entrypoint.sh b/jupyter-compose-ssl/web-jupyterhub/docker-entrypoint.sh new file mode 100644 index 0000000..073fbae --- /dev/null +++ b/jupyter-compose-ssl/web-jupyterhub/docker-entrypoint.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +for u in $(ls /home/) ; do + cp /config/newkey /tmp/$u.key + chmod 600 /tmp/$u.key + cp /config/newkey-cert.pub /tmp/$u.key-cert.pub + chmod 600 /tmp/$u.key-cert.pub +done + +exec "$@" diff --git a/jupyter-compose-ssl/web-jupyterhub/jupyterhub_config.py b/jupyter-compose-ssl/web-jupyterhub/jupyterhub_config.py new file mode 100644 index 0000000..3b3896e --- /dev/null +++ b/jupyter-compose-ssl/web-jupyterhub/jupyterhub_config.py @@ -0,0 +1,1015 @@ +import os + +# Configuration file for jupyterhub. + +#------------------------------------------------------------------------------ +# Application(SingletonConfigurable) configuration +#------------------------------------------------------------------------------ + +## This is an application. + +## The date format used by logging formatters for %(asctime)s +#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S' + +## The Logging format template +#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s' + +## Set the log level by value or name. +#c.Application.log_level = 30 + +#------------------------------------------------------------------------------ +# JupyterHub(Application) configuration +#------------------------------------------------------------------------------ + +## An Application for starting a Multi-User Jupyter Notebook server. + +## Maximum number of concurrent servers that can be active at a time. +# +# Setting this can limit the total resources your users can consume. +# +# An active server is any server that's not fully stopped. It is considered +# active from the time it has been requested until the time that it has +# completely stopped. +# +# If this many user servers are active, users will not be able to launch new +# servers until a server is shutdown. Spawn requests will be rejected with a 429 +# error asking them to try again. +# +# If set to 0, no limit is enforced. +#c.JupyterHub.active_server_limit = 0 + +## Duration (in seconds) to determine the number of active users. +#c.JupyterHub.active_user_window = 1800 + +## Grant admin users permission to access single-user servers. +# +# Users should be properly informed if this is enabled. +#c.JupyterHub.admin_access = False + +## DEPRECATED since version 0.7.2, use Authenticator.admin_users instead. +#c.JupyterHub.admin_users = set() + +## Allow named single-user servers per user +#c.JupyterHub.allow_named_servers = False + +## Answer yes to any questions (e.g. confirm overwrite) +#c.JupyterHub.answer_yes = False + +## PENDING DEPRECATION: consider using service_tokens +# +# Dict of token:username to be loaded into the database. +# +# Allows ahead-of-time generation of API tokens for use by externally managed +# services, which authenticate as JupyterHub users. +# +# Consider using service_tokens for general services that talk to the JupyterHub +# API. +#c.JupyterHub.api_tokens = {} + +## Authentication for prometheus metrics +#c.JupyterHub.authenticate_prometheus = True + +## Class for authenticating users. 
+# +# This should be a subclass of :class:`jupyterhub.auth.Authenticator` +# +# with an :meth:`authenticate` method that: +# +# - is a coroutine (asyncio or tornado) +# - returns username on success, None on failure +# - takes two arguments: (handler, data), +# where `handler` is the calling web.RequestHandler, +# and `data` is the POST form data from the login page. +# +# .. versionchanged:: 1.0 +# authenticators may be registered via entry points, +# e.g. `c.JupyterHub.authenticator_class = 'pam'` +# +# Currently installed: +# - default: jupyterhub.auth.PAMAuthenticator +# - dummy: jupyterhub.auth.DummyAuthenticator +# - pam: jupyterhub.auth.PAMAuthenticator +#c.JupyterHub.authenticator_class = 'jupyterhub.auth.PAMAuthenticator' + +## The base URL of the entire application. +# +# Add this to the beginning of all JupyterHub URLs. Use base_url to run +# JupyterHub within an existing website. +# +# .. deprecated: 0.9 +# Use JupyterHub.bind_url +#c.JupyterHub.base_url = '/' + +## The public facing URL of the whole JupyterHub application. +# +# This is the address on which the proxy will bind. Sets protocol, ip, base_url +#c.JupyterHub.bind_url = 'http://:8000' + +## Whether to shutdown the proxy when the Hub shuts down. +# +# Disable if you want to be able to teardown the Hub while leaving the proxy +# running. +# +# Only valid if the proxy was starting by the Hub process. +# +# If both this and cleanup_servers are False, sending SIGINT to the Hub will +# only shutdown the Hub, leaving everything else running. +# +# The Hub should be able to resume from database state. +#c.JupyterHub.cleanup_proxy = True + +## Whether to shutdown single-user servers when the Hub shuts down. +# +# Disable if you want to be able to teardown the Hub while leaving the single- +# user servers running. +# +# If both this and cleanup_proxy are False, sending SIGINT to the Hub will only +# shutdown the Hub, leaving everything else running. +# +# The Hub should be able to resume from database state. +#c.JupyterHub.cleanup_servers = True +c.JupyterHub.cleanup_servers = False + +## Maximum number of concurrent users that can be spawning at a time. +# +# Spawning lots of servers at the same time can cause performance problems for +# the Hub or the underlying spawning system. Set this limit to prevent bursts of +# logins from attempting to spawn too many servers at the same time. +# +# This does not limit the number of total running servers. See +# active_server_limit for that. +# +# If more than this many users attempt to spawn at a time, their requests will +# be rejected with a 429 error asking them to try again. Users will have to wait +# for some of the spawning services to finish starting before they can start +# their own. +# +# If set to 0, no limit is enforced. +#c.JupyterHub.concurrent_spawn_limit = 100 + +## The config file to load +#c.JupyterHub.config_file = 'jupyterhub_config.py' + +## DEPRECATED: does nothing +#c.JupyterHub.confirm_no_ssl = False + +## Number of days for a login cookie to be valid. Default is two weeks. +#c.JupyterHub.cookie_max_age_days = 14 + +## The cookie secret to use to encrypt cookies. +# +# Loaded from the JPY_COOKIE_SECRET env variable by default. +# +# Should be exactly 256 bits (32 bytes). +#c.JupyterHub.cookie_secret = b'' + +## File in which to store the cookie secret. +#c.JupyterHub.cookie_secret_file = 'jupyterhub_cookie_secret' + +## The location of jupyterhub data files (e.g. 
/usr/local/share/jupyterhub) +#c.JupyterHub.data_files_path = '/opt/anaconda3/share/jupyterhub' + +## Include any kwargs to pass to the database connection. See +# sqlalchemy.create_engine for details. +#c.JupyterHub.db_kwargs = {} + +## url for the database. e.g. `sqlite:///jupyterhub.sqlite` +#c.JupyterHub.db_url = 'sqlite:///jupyterhub.sqlite' + +## log all database transactions. This has A LOT of output +#c.JupyterHub.debug_db = False + +## DEPRECATED since version 0.8: Use ConfigurableHTTPProxy.debug +#c.JupyterHub.debug_proxy = False + +## The default URL for users when they arrive (e.g. when user directs to "/") +# +# By default, redirects users to their own server. +#c.JupyterHub.default_url = '' + +## Dict authority:dict(files). Specify the key, cert, and/or ca file for an +# authority. This is useful for externally managed proxies that wish to use +# internal_ssl. +# +# The files dict has this format (you must specify at least a cert):: +# +# { +# 'key': '/path/to/key.key', +# 'cert': '/path/to/cert.crt', +# 'ca': '/path/to/ca.crt' +# } +# +# The authorities you can override: 'hub-ca', 'notebooks-ca', 'proxy-api-ca', +# 'proxy-client-ca', and 'services-ca'. +# +# Use with internal_ssl +#c.JupyterHub.external_ssl_authorities = {} + +## Register extra tornado Handlers for jupyterhub. +# +# Should be of the form ``("", Handler)`` +# +# The Hub prefix will be added, so `/my-page` will be served at `/hub/my-page`. +#c.JupyterHub.extra_handlers = [] + +## DEPRECATED: use output redirection instead, e.g. +# +# jupyterhub &>> /var/log/jupyterhub.log +#c.JupyterHub.extra_log_file = '' + +## Extra log handlers to set on JupyterHub logger +#c.JupyterHub.extra_log_handlers = [] + +## Generate certs used for internal ssl +#c.JupyterHub.generate_certs = False + +## Generate default config file +#c.JupyterHub.generate_config = False + +## The URL on which the Hub will listen. This is a private URL for internal +# communication. Typically set in combination with hub_connect_url. If a unix +# socket, hub_connect_url **must** also be set. +# +# For example: +# +# "http://127.0.0.1:8081" +# "unix+http://%2Fsrv%2Fjupyterhub%2Fjupyterhub.sock" +# +# .. versionadded:: 0.9 +#c.JupyterHub.hub_bind_url = '' + +## The ip or hostname for proxies and spawners to use for connecting to the Hub. +# +# Use when the bind address (`hub_ip`) is 0.0.0.0 or otherwise different from +# the connect address. +# +# Default: when `hub_ip` is 0.0.0.0, use `socket.gethostname()`, otherwise use +# `hub_ip`. +# +# Note: Some spawners or proxy implementations might not support hostnames. +# Check your spawner or proxy documentation to see if they have extra +# requirements. +# +# .. versionadded:: 0.8 +#c.JupyterHub.hub_connect_ip = '' + +## DEPRECATED +# +# Use hub_connect_url +# +# .. versionadded:: 0.8 +# +# .. deprecated:: 0.9 +# Use hub_connect_url +#c.JupyterHub.hub_connect_port = 0 + +## The URL for connecting to the Hub. Spawners, services, and the proxy will use +# this URL to talk to the Hub. +# +# Only needs to be specified if the default hub URL is not connectable (e.g. +# using a unix+http:// bind url). +# +# .. seealso:: +# JupyterHub.hub_connect_ip +# JupyterHub.hub_bind_url +# +# .. versionadded:: 0.9 +#c.JupyterHub.hub_connect_url = '' + +## The ip address for the Hub process to *bind* to. +# +# By default, the hub listens on localhost only. This address must be accessible +# from the proxy and user servers. 
You may need to set this to a public ip or '' +# for all interfaces if the proxy or user servers are in containers or on a +# different host. +# +# See `hub_connect_ip` for cases where the bind and connect address should +# differ, or `hub_bind_url` for setting the full bind URL. +#c.JupyterHub.hub_ip = '127.0.0.1' +c.JupyterHub.hub_ip = '0.0.0.0' + +## The internal port for the Hub process. +# +# This is the internal port of the hub itself. It should never be accessed +# directly. See JupyterHub.port for the public port to use when accessing +# jupyterhub. It is rare that this port should be set except in cases of port +# conflict. +# +# See also `hub_ip` for the ip and `hub_bind_url` for setting the full bind URL. +#c.JupyterHub.hub_port = 8081 + +## The location to store certificates automatically created by JupyterHub. +# +# Use with internal_ssl +#c.JupyterHub.internal_certs_location = 'internal-ssl' +c.JupyterHub.internal_certs_location = os.environ.get('INTERNAL_SSL_PATH', 'internal-ssl') + +## Enable SSL for all internal communication +# +# This enables end-to-end encryption between all JupyterHub components. +# JupyterHub will automatically create the necessary certificate authority and +# sign notebook certificates as they're created. +#c.JupyterHub.internal_ssl = False +c.JupyterHub.internal_ssl = True + +## The public facing ip of the whole JupyterHub application (specifically +# referred to as the proxy). +# +# This is the address on which the proxy will listen. The default is to listen +# on all interfaces. This is the only address through which JupyterHub should be +# accessed by users. +# +# .. deprecated: 0.9 +# Use JupyterHub.bind_url +#c.JupyterHub.ip = '' + +## Supply extra arguments that will be passed to Jinja environment. +#c.JupyterHub.jinja_environment_options = {} + +## Interval (in seconds) at which to update last-activity timestamps. +#c.JupyterHub.last_activity_interval = 300 + +## Dict of 'group': ['usernames'] to load at startup. +# +# This strictly *adds* groups and users to groups. +# +# Loading one set of groups, then starting JupyterHub again with a different set +# will not remove users or groups from previous launches. That must be done +# through the API. +#c.JupyterHub.load_groups = {} + +## Specify path to a logo image to override the Jupyter logo in the banner. +#c.JupyterHub.logo_file = '' + +## Maximum number of concurrent named servers that can be created by a user at a +# time. +# +# Setting this can limit the total resources a user can consume. +# +# If set to 0, no limit is enforced. +#c.JupyterHub.named_server_limit_per_user = 0 + +## File to write PID Useful for daemonizing JupyterHub. +#c.JupyterHub.pid_file = '' + +## The public facing port of the proxy. +# +# This is the port on which the proxy will listen. This is the only port through +# which JupyterHub should be accessed by users. +# +# .. deprecated: 0.9 +# Use JupyterHub.bind_url +#c.JupyterHub.port = 8000 + +## DEPRECATED since version 0.8 : Use ConfigurableHTTPProxy.api_url +#c.JupyterHub.proxy_api_ip = '' + +## DEPRECATED since version 0.8 : Use ConfigurableHTTPProxy.api_url +#c.JupyterHub.proxy_api_port = 0 + +## DEPRECATED since version 0.8: Use ConfigurableHTTPProxy.auth_token +#c.JupyterHub.proxy_auth_token = '' + +## Interval (in seconds) at which to check if the proxy is running. +#c.JupyterHub.proxy_check_interval = 30 + +## The class to use for configuring the JupyterHub proxy. +# +# Should be a subclass of :class:`jupyterhub.proxy.Proxy`. +# +# .. 
versionchanged:: 1.0 +# proxies may be registered via entry points, +# e.g. `c.JupyterHub.proxy_class = 'traefik'` +# +# Currently installed: +# - configurable-http-proxy: jupyterhub.proxy.ConfigurableHTTPProxy +# - default: jupyterhub.proxy.ConfigurableHTTPProxy +#c.JupyterHub.proxy_class = 'jupyterhub.proxy.ConfigurableHTTPProxy' + +## DEPRECATED since version 0.8. Use ConfigurableHTTPProxy.command +#c.JupyterHub.proxy_cmd = [] + +## Recreate all certificates used within JupyterHub on restart. +# +# Note: enabling this feature requires restarting all notebook servers. +# +# Use with internal_ssl +#c.JupyterHub.recreate_internal_certs = False + +## Redirect user to server (if running), instead of control panel. +#c.JupyterHub.redirect_to_server = True + +## Purge and reset the database. +#c.JupyterHub.reset_db = False + +## Interval (in seconds) at which to check connectivity of services with web +# endpoints. +#c.JupyterHub.service_check_interval = 60 + +## Dict of token:servicename to be loaded into the database. +# +# Allows ahead-of-time generation of API tokens for use by externally managed +# services. +#c.JupyterHub.service_tokens = {} + +## List of service specification dictionaries. +# +# A service +# +# For instance:: +# +# services = [ +# { +# 'name': 'cull_idle', +# 'command': ['/path/to/cull_idle_servers.py'], +# }, +# { +# 'name': 'formgrader', +# 'url': 'http://127.0.0.1:1234', +# 'api_token': 'super-secret', +# 'environment': +# } +# ] +#c.JupyterHub.services = [] +c.JupyterHub.services = [ + { + 'name': 'announcement', + 'url': 'https://announcement:8888', + 'api_token': os.environ["ANNOUNCEMENT_JUPYTERHUB_API_TOKEN"] + } +] + +## Shuts down all user servers on logout +#c.JupyterHub.shutdown_on_logout = False + +## The class to use for spawning single-user servers. +# +# Should be a subclass of :class:`jupyterhub.spawner.Spawner`. +# +# .. versionchanged:: 1.0 +# spawners may be registered via entry points, +# e.g. `c.JupyterHub.spawner_class = 'localprocess'` +# +# Currently installed: +# - default: jupyterhub.spawner.LocalProcessSpawner +# - localprocess: jupyterhub.spawner.LocalProcessSpawner +# - simple: jupyterhub.spawner.SimpleLocalProcessSpawner +#c.JupyterHub.spawner_class = 'jupyterhub.spawner.LocalProcessSpawner' +c.JupyterHub.spawner_class = 'sshspawner.sshspawner.SSHSpawner' + +## Path to SSL certificate file for the public facing interface of the proxy +# +# When setting this, you should also set ssl_key +#c.JupyterHub.ssl_cert = '' + +## Path to SSL key file for the public facing interface of the proxy +# +# When setting this, you should also set ssl_cert +#c.JupyterHub.ssl_key = '' + +## Host to send statsd metrics to. An empty string (the default) disables sending +# metrics. +#c.JupyterHub.statsd_host = '' + +## Port on which to send statsd metrics about the hub +#c.JupyterHub.statsd_port = 8125 + +## Prefix to use for all metrics sent by jupyterhub to statsd +#c.JupyterHub.statsd_prefix = 'jupyterhub' + +## Run single-user servers on subdomains of this host. +# +# This should be the full `https://hub.domain.tld[:port]`. +# +# Provides additional cross-site protections for javascript served by single- +# user servers. +# +# Requires `.hub.domain.tld` to resolve to the same host as +# `hub.domain.tld`. +# +# In general, this is most easily achieved with wildcard DNS. +# +# When using SSL (i.e. always) this also requires a wildcard SSL certificate. 
+#c.JupyterHub.subdomain_host = '' + +## Paths to search for jinja templates, before using the default templates. +#c.JupyterHub.template_paths = [] +c.JupyterHub.template_paths = ["./templates"] + +## Extra variables to be passed into jinja templates +#c.JupyterHub.template_vars = {} + +## Extra settings overrides to pass to the tornado application. +#c.JupyterHub.tornado_settings = {} + +## Trust user-provided tokens (via JupyterHub.service_tokens) to have good +# entropy. +# +# If you are not inserting additional tokens via configuration file, this flag +# has no effect. +# +# In JupyterHub 0.8, internally generated tokens do not pass through additional +# hashing because the hashing is costly and does not increase the entropy of +# already-good UUIDs. +# +# User-provided tokens, on the other hand, are not trusted to have good entropy +# by default, and are passed through many rounds of hashing to stretch the +# entropy of the key (i.e. user-provided tokens are treated as passwords instead +# of random keys). These keys are more costly to check. +# +# If your inserted tokens are generated by a good-quality mechanism, e.g. +# `openssl rand -hex 32`, then you can set this flag to True to reduce the cost +# of checking authentication tokens. +#c.JupyterHub.trust_user_provided_tokens = False + +## Names to include in the subject alternative name. +# +# These names will be used for server name verification. This is useful if +# JupyterHub is being run behind a reverse proxy or services using ssl are on +# different hosts. +# +# Use with internal_ssl +#c.JupyterHub.trusted_alt_names = [] +c.JupyterHub.trusted_alt_names = ['DNS:proxy', 'DNS:app', 'DNS:web', 'DNS:announcement', + 'IP:172.17.0.1', 'IP:172.17.0.2', 'IP:172.17.0.3', 'IP:172.17.0.4', + 'IP:172.18.0.1', 'IP:172.18.0.2', 'IP:172.18.0.3', 'IP:172.18.0.4', + 'IP:172.19.0.1', 'IP:172.19.0.2', 'IP:172.19.0.3', 'IP:172.19.0.4', + 'IP:172.20.0.1', 'IP:172.20.0.2', 'IP:172.20.0.3', 'IP:172.20.0.4', + 'IP:172.21.0.1', 'IP:172.21.0.2', 'IP:172.21.0.3', 'IP:172.21.0.4', + 'IP:172.22.0.1', 'IP:172.22.0.2', 'IP:172.22.0.3', 'IP:172.22.0.4', + 'IP:172.23.0.1', 'IP:172.23.0.2', 'IP:172.23.0.3', 'IP:172.23.0.4', + 'IP:172.24.0.1', 'IP:172.24.0.2', 'IP:172.24.0.3', 'IP:172.24.0.4',] + +## Downstream proxy IP addresses to trust. +# +# This sets the list of IP addresses that are trusted and skipped when +# processing the `X-Forwarded-For` header. For example, if an external proxy is +# used for TLS termination, its IP address should be added to this list to +# ensure the correct client IP addresses are recorded in the logs instead of the +# proxy server's IP address. +#c.JupyterHub.trusted_downstream_ips = [] + +## Upgrade the database automatically on start. +# +# Only safe if database is regularly backed up. Only SQLite databases will be +# backed up to a local file automatically. +#c.JupyterHub.upgrade_db = False + +#------------------------------------------------------------------------------ +# Spawner(LoggingConfigurable) configuration +#------------------------------------------------------------------------------ + +## Base class for spawning single-user notebook servers. +# +# Subclass this, and override the following methods: +# +# - load_state - get_state - start - stop - poll +# +# As JupyterHub supports multiple users, an instance of the Spawner subclass is +# created for each user. If there are 20 JupyterHub users, there will be 20 +# instances of the subclass. + +## Extra arguments to be passed to the single-user server. 
+# +# Some spawners allow shell-style expansion here, allowing you to use +# environment variables here. Most, including the default, do not. Consult the +# documentation for your spawner to verify! +#c.Spawner.args = [] +c.Spawner.args = ["--transport=ipc"] + +## An optional hook function that you can implement to pass `auth_state` to the +# spawner after it has been initialized but before it starts. The `auth_state` +# dictionary may be set by the `.authenticate()` method of the authenticator. +# This hook enables you to pass some or all of that information to your spawner. +# +# Example:: +# +# def userdata_hook(spawner, auth_state): +# spawner.userdata = auth_state["userdata"] +# +# c.Spawner.auth_state_hook = userdata_hook +#c.Spawner.auth_state_hook = None + +## The command used for starting the single-user server. +# +# Provide either a string or a list containing the path to the startup script +# command. Extra arguments, other than this path, should be provided via `args`. +# +# This is usually set if you want to start the single-user server in a different +# python environment (with virtualenv/conda) than JupyterHub itself. +# +# Some spawners allow shell-style expansion here, allowing you to use +# environment variables. Most, including the default, do not. Consult the +# documentation for your spawner to verify! +#c.Spawner.cmd = ['jupyterhub-singleuser'] +c.Spawner.cmd = ['jupyter-labhub'] + +## Maximum number of consecutive failures to allow before shutting down +# JupyterHub. +# +# This helps JupyterHub recover from a certain class of problem preventing +# launch in contexts where the Hub is automatically restarted (e.g. systemd, +# docker, kubernetes). +# +# A limit of 0 means no limit and consecutive failures will not be tracked. +#c.Spawner.consecutive_failure_limit = 0 + +## Minimum number of cpu-cores a single-user notebook server is guaranteed to +# have available. +# +# If this value is set to 0.5, allows use of 50% of one CPU. If this value is +# set to 2, allows use of up to 2 CPUs. +# +# **This is a configuration setting. Your spawner must implement support for the +# limit to work.** The default spawner, `LocalProcessSpawner`, does **not** +# implement this support. A custom spawner **must** add support for this setting +# for it to be enforced. +#c.Spawner.cpu_guarantee = None + +## Maximum number of cpu-cores a single-user notebook server is allowed to use. +# +# If this value is set to 0.5, allows use of 50% of one CPU. If this value is +# set to 2, allows use of up to 2 CPUs. +# +# The single-user notebook server will never be scheduled by the kernel to use +# more cpu-cores than this. There is no guarantee that it can access this many +# cpu-cores. +# +# **This is a configuration setting. Your spawner must implement support for the +# limit to work.** The default spawner, `LocalProcessSpawner`, does **not** +# implement this support. A custom spawner **must** add support for this setting +# for it to be enforced. +#c.Spawner.cpu_limit = None + +## Enable debug-logging of the single-user server +#c.Spawner.debug = False +c.Spawner.debug = True + +## The URL the single-user server should start in. +# +# `{username}` will be expanded to the user's username +# +# Example uses: +# +# - You can set `notebook_dir` to `/` and `default_url` to `/tree/home/{username}` to allow people to +# navigate the whole filesystem from their notebook server, but still start in their home directory. 
+# - Start with `/notebooks` instead of `/tree` if `default_url` points to a notebook instead of a directory. +# - You can set this to `/lab` to have JupyterLab start by default, rather than Jupyter Notebook. +#c.Spawner.default_url = '' + +## Disable per-user configuration of single-user servers. +# +# When starting the user's single-user server, any config file found in the +# user's $HOME directory will be ignored. +# +# Note: a user could circumvent this if the user modifies their Python +# environment, such as when they have their own conda environments / virtualenvs +# / containers. +#c.Spawner.disable_user_config = False + +## Whitelist of environment variables for the single-user server to inherit from +# the JupyterHub process. +# +# This whitelist is used to ensure that sensitive information in the JupyterHub +# process's environment (such as `CONFIGPROXY_AUTH_TOKEN`) is not passed to the +# single-user server's process. +#c.Spawner.env_keep = ['PATH', 'PYTHONPATH', 'CONDA_ROOT', 'CONDA_DEFAULT_ENV', 'VIRTUAL_ENV', 'LANG', 'LC_ALL'] + +## Extra environment variables to set for the single-user server's process. +# +# Environment variables that end up in the single-user server's process come from 3 sources: +# - This `environment` configurable +# - The JupyterHub process' environment variables that are whitelisted in `env_keep` +# - Variables to establish contact between the single-user notebook and the hub (such as JUPYTERHUB_API_TOKEN) +# +# The `environment` configurable should be set by JupyterHub administrators to +# add installation specific environment variables. It is a dict where the key is +# the name of the environment variable, and the value can be a string or a +# callable. If it is a callable, it will be called with one parameter (the +# spawner instance), and should return a string fairly quickly (no blocking +# operations please!). +# +# Note that the spawner class' interface is not guaranteed to be exactly same +# across upgrades, so if you are using the callable take care to verify it +# continues to work after upgrades! +#c.Spawner.environment = {} + +## Timeout (in seconds) before giving up on a spawned HTTP server +# +# Once a server has successfully been spawned, this is the amount of time we +# wait before assuming that the server is unable to accept connections. +#c.Spawner.http_timeout = 30 + +## The IP address (or hostname) the single-user server should listen on. +# +# The JupyterHub proxy implementation should be able to send packets to this +# interface. +#c.Spawner.ip = '' +c.Spawner.ip = '0.0.0.0' + +## Minimum number of bytes a single-user notebook server is guaranteed to have +# available. +# +# Allows the following suffixes: +# - K -> Kilobytes +# - M -> Megabytes +# - G -> Gigabytes +# - T -> Terabytes +# +# **This is a configuration setting. Your spawner must implement support for the +# limit to work.** The default spawner, `LocalProcessSpawner`, does **not** +# implement this support. A custom spawner **must** add support for this setting +# for it to be enforced. +#c.Spawner.mem_guarantee = None + +## Maximum number of bytes a single-user notebook server is allowed to use. +# +# Allows the following suffixes: +# - K -> Kilobytes +# - M -> Megabytes +# - G -> Gigabytes +# - T -> Terabytes +# +# If the single user server tries to allocate more memory than this, it will +# fail. There is no guarantee that the single-user notebook server will be able +# to allocate this much memory - only that it can not allocate more than this. 
+# +# **This is a configuration setting. Your spawner must implement support for the +# limit to work.** The default spawner, `LocalProcessSpawner`, does **not** +# implement this support. A custom spawner **must** add support for this setting +# for it to be enforced. +#c.Spawner.mem_limit = None + +## Path to the notebook directory for the single-user server. +# +# The user sees a file listing of this directory when the notebook interface is +# started. The current interface does not easily allow browsing beyond the +# subdirectories in this directory's tree. +# +# `~` will be expanded to the home directory of the user, and {username} will be +# replaced with the name of the user. +# +# Note that this does *not* prevent users from accessing files outside of this +# path! They can do so with many other means. +#c.Spawner.notebook_dir = '' + +## An HTML form for options a user can specify on launching their server. +# +# The surrounding `
<form>` element and the submit button are already provided.
+#
+# For example:
+#
+# .. code:: html
+#
+#     Set your key:
+#     <input name="keycode" val="default_key">
+#     <br>
+#     Choose a letter:
+#     <select name="letter" multiple="true">
+#       <option value="A">The letter A</option>
+#       <option value="B">The letter B</option>
+#     </select>
+#
+# The data from this form submission will be passed on to your spawner in
+# `self.user_options`
+#
+# Instead of a form snippet string, this could also be a callable that takes as
+# one parameter the current spawner instance and returns a string. The callable
+# will be called asynchronously if it returns a future, rather than a str. Note
+# that the interface of the spawner class is not deemed stable across versions,
+# so using this functionality might cause your JupyterHub upgrades to break.
+#c.Spawner.options_form = traitlets.Undefined
+
+## Interval (in seconds) on which to poll the spawner for single-user server's
+# status.
+#
+# At every poll interval, each spawner's `.poll` method is called, which checks
+# if the single-user server is still running. If it isn't running, then
+# JupyterHub modifies its own state accordingly and removes appropriate routes
+# from the configurable proxy.
+#c.Spawner.poll_interval = 30
+
+## The port for single-user servers to listen on.
+#
+# Defaults to `0`, which uses a randomly allocated port number each time.
+#
+# If set to a non-zero value, all Spawners will use the same port, which only
+# makes sense if each server is on a different address, e.g. in containers.
+#
+# New in version 0.7.
+#c.Spawner.port = 0
+
+## An optional hook function that you can implement to do work after the spawner
+# stops.
+#
+# This can be set independent of any concrete spawner implementation.
+#c.Spawner.post_stop_hook = None
+
+## An optional hook function that you can implement to do some bootstrapping work
+# before the spawner starts. For example, create a directory for your user or
+# load initial content.
+#
+# This can be set independent of any concrete spawner implementation.
+#
+# This may be a coroutine.
+#
+# Example::
+#
+#     from subprocess import check_call
+#     def my_hook(spawner):
+#         username = spawner.user.name
+#         check_call(['./examples/bootstrap-script/bootstrap.sh', username])
+#
+#     c.Spawner.pre_spawn_hook = my_hook
+#c.Spawner.pre_spawn_hook = None
+
+## List of SSL alt names
+#
+# May be set in config if all spawners should have the same value(s), or set at
+# runtime by Spawners that know their names.
+#c.Spawner.ssl_alt_names = []
+
+## Whether to include DNS:localhost, IP:127.0.0.1 in alt names
+#c.Spawner.ssl_alt_names_include_local = True
+
+## Timeout (in seconds) before giving up on starting a single-user server.
+#
+# This is the timeout for start to return, not the timeout for the server to
+# respond. Callers of spawner.start will assume that startup has failed if it
+# takes longer than this. start should return when the server process is started
+# and its location is known.
+#c.Spawner.start_timeout = 60
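The `options_form` documentation above notes that a callable may be supplied in place of a static HTML snippet. A sketch of that variant, purely illustrative (this configuration leaves `options_form` unset):

    def letter_form(spawner):
        # The spawner instance is passed in, so the form can be user-specific.
        return (
            "Choose a letter, {}:".format(spawner.user.name)
            + '<select name="letter">'
            + '<option value="A">The letter A</option>'
            + '<option value="B">The letter B</option>'
            + "</select>"
        )

    # The submitted fields arrive in `self.user_options` on the spawner.
    # c.Spawner.options_form = letter_form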
+#------------------------------------------------------------------------------
+# Authenticator(LoggingConfigurable) configuration
+#------------------------------------------------------------------------------
+
+## Base class for implementing an authentication provider for JupyterHub
+
+## Set of users that will have admin rights on this JupyterHub.
+#
+# Admin users have extra privileges:
+#  - Use the admin panel to see list of users logged in
+#  - Add / remove users in some authenticators
+#  - Restart / halt the hub
+#  - Start / stop users' single-user servers
+#  - Can access each individual users' single-user server (if configured)
+#
+# Admin access should be treated the same way root access is.
+#
+# Defaults to an empty set, in which case no user has admin access.
+#c.Authenticator.admin_users = set()
+c.Authenticator.admin_users = set(["master"])
+
+## The max age (in seconds) of authentication info before forcing a refresh of
+# user auth info.
+#
+# Refreshing auth info allows, e.g. requesting/re-validating auth tokens.
+#
+# See :meth:`.refresh_user` for what happens when user auth info is refreshed
+# (nothing by default).
+#c.Authenticator.auth_refresh_age = 300
+
+## Automatically begin the login process
+#
+# rather than starting with a "Login with..." link at `/hub/login`
+#
+# To work, `.login_url()` must give a URL other than the default `/hub/login`,
+# such as an oauth handler or another automatic login handler, registered with
+# `.get_handlers()`.
+#
+# .. versionadded:: 0.8
+#c.Authenticator.auto_login = False
+
+## Blacklist of usernames that are not allowed to log in.
+#
+# Use this with supported authenticators to restrict which users can not log in.
+# This is an additional blacklist that further restricts users, beyond whatever
+# restrictions the authenticator has in place.
+#
+# If empty, does not perform any additional restriction.
+#
+# .. versionadded:: 0.9
+#c.Authenticator.blacklist = set()
+
+## Enable persisting auth_state (if available).
+#
+# auth_state will be encrypted and stored in the Hub's database. This can
+# include things like authentication tokens, etc. to be passed to Spawners as
+# environment variables.
+#
+# Encrypting auth_state requires the cryptography package.
+#
+# Additionally, the JUPYTERHUB_CRYPT_KEY environment variable must contain one
+# (or more, separated by ;) 32B encryption keys. These can be either base64 or
+# hex-encoded.
+#
+# If encryption is unavailable, auth_state cannot be persisted.
+#
+# New in JupyterHub 0.8
+#c.Authenticator.enable_auth_state = False
+
+## An optional hook function that you can implement to do some bootstrapping work
+# during authentication. For example, loading user account details from an
+# external system.
+#
+# This function is called after the user has passed all authentication checks
+# and is ready to successfully authenticate. This function must return the
+# authentication dict regardless of changes to it.
+#
+# This may be a coroutine.
+#
+# .. versionadded:: 1.0
+#
+# Example::
+#
+#     import os, pwd
+#     def my_hook(authenticator, handler, authentication):
+#         user_data = pwd.getpwnam(authentication['name'])
+#         spawn_data = {
+#             'pw_data': user_data,
+#             'gid_list': os.getgrouplist(authentication['name'], user_data.pw_gid)
+#         }
+#
+#         if authentication['auth_state'] is None:
+#             authentication['auth_state'] = {}
+#         authentication['auth_state']['spawn_data'] = spawn_data
+#
+#         return authentication
+#
+#     c.Authenticator.post_auth_hook = my_hook
+#c.Authenticator.post_auth_hook = None
+
+## Force refresh of auth prior to spawn.
+#
+# This forces :meth:`.refresh_user` to be called prior to launching a server, to
+# ensure that auth state is up-to-date.
+#
+# This can be important when e.g. auth tokens that may have expired are passed
+# to the spawner via environment variables from auth_state.
+#
+# If refresh_user cannot refresh the user auth data, launch will fail until the
+# user logs in again.
+#c.Authenticator.refresh_pre_spawn = False
+
+## Dictionary mapping authenticator usernames to JupyterHub users.
+#
+# Primarily used to normalize OAuth user names to local users.
+#c.Authenticator.username_map = {}
+
+## Regular expression pattern that all valid usernames must match.
+# +# If a username does not match the pattern specified here, authentication will +# not be attempted. +# +# If not set, allow any username. +#c.Authenticator.username_pattern = '' + +## Whitelist of usernames that are allowed to log in. +# +# Use this with supported authenticators to restrict which users can log in. +# This is an additional whitelist that further restricts users, beyond whatever +# restrictions the authenticator has in place. +# +# If empty, does not perform any additional restriction. +#c.Authenticator.whitelist = set() + +#------------------------------------------------------------------------------ +# CryptKeeper(SingletonConfigurable) configuration +#------------------------------------------------------------------------------ + +## Encapsulate encryption configuration +# +# Use via the encryption_config singleton below. + +## +#c.CryptKeeper.keys = [] + +## The number of threads to allocate for encryption +#c.CryptKeeper.n_threads = 2 + +#------------------------------------------------------------------------------ +# Additional ConfigurableHTTPProxy configuration +#------------------------------------------------------------------------------ + +c.ConfigurableHTTPProxy.should_start = False + +c.ConfigurableHTTPProxy.api_url = 'https://proxy:8001' + +#------------------------------------------------------------------------------ +# SSHSpawner configuration +#------------------------------------------------------------------------------ + +# The remote host to spawn notebooks on +c.SSHSpawner.remote_hosts = ['app'] +c.SSHSpawner.remote_port = '22' + +# The system path for the remote SSH session. Must have the jupyter-singleuser and python executables +c.SSHSpawner.path = '/opt/anaconda3/bin:/usr/bin:/bin:/usr/bin/X11:/usr/games:/usr/lib/mit/bin:/usr/lib/mit/sbin' + +# The command to return an unused port on the target system. See scripts/get_port.py for an example +c.SSHSpawner.remote_port_command = '/opt/anaconda3/bin/get_port.py' + +c.SSHSpawner.ssh_keyfile = '/tmp/{username}.key' + +c.SSHSpawner.hub_api_url = 'https://web:8081/hub/api' diff --git a/jupyter-compose-ssl/web-jupyterhub/templates/page.html b/jupyter-compose-ssl/web-jupyterhub/templates/page.html new file mode 100644 index 0000000..4a3f7c2 --- /dev/null +++ b/jupyter-compose-ssl/web-jupyterhub/templates/page.html @@ -0,0 +1,24 @@ +{% extends "templates/page.html" %} +{% block announcement %} +
<div class="container text-center announcement">
+</div>
+{% endblock %}
+
+{% block script %}
+{{ super() }}
+<script>
+$.get("/services/announcement/latest", function(data) {
+  var announcement = data["announcement"];
+  if (announcement) {
+    $(".announcement").html(announcement);
+  }
+});
+</script>
+{% endblock %}

From 7fa64e934a281458b818dab1dc96652aeeb6c1a9 Mon Sep 17 00:00:00 2001
From: Rollin Thomas 
Date: Tue, 5 Nov 2019 10:27:51 -0800
Subject: [PATCH 16/21] Catch all the methods

---
 jupyter-nersc/web-offline/app.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/jupyter-nersc/web-offline/app.py b/jupyter-nersc/web-offline/app.py
index 273b37e..9417cda 100644
--- a/jupyter-nersc/web-offline/app.py
+++ b/jupyter-nersc/web-offline/app.py
@@ -10,8 +10,8 @@ app = Sanic(__name__)
 
 app.static("/static", "./static")
 
-@app.route('/')
-@app.route('/<path:path>')
+@app.route('/', methods=["GET", "PUT", "POST", "PATCH", "DELETE", "OPTIONS"])
+@app.route('/<path:path>', methods=["GET", "PUT", "POST", "PATCH", "DELETE", "OPTIONS"])
 async def catch_all(request, path=""):
     default_message = "NERSC's Jupyter service is offline. It will return when maintenance is over. Please try again later."
     message = os.environ.get("MESSAGE", default_message).strip()

From 53ddff3e607cbcfca2d4efff29d5cefa4674b0df Mon Sep 17 00:00:00 2001
From: Rollin Thomas 
Date: Wed, 6 Nov 2019 19:28:26 -0800
Subject: [PATCH 17/21] Fix from William

---
 jupyter-nersc/web-nbviewer/nbviewer_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/jupyter-nersc/web-nbviewer/nbviewer_config.py b/jupyter-nersc/web-nbviewer/nbviewer_config.py
index 00b5f36..cd680f8 100644
--- a/jupyter-nersc/web-nbviewer/nbviewer_config.py
+++ b/jupyter-nersc/web-nbviewer/nbviewer_config.py
@@ -1,4 +1,4 @@
-c.NBViewer.handler_settings = {'clone_notebooks' : True}
+c.NBViewer.handler_settings = {'clone_notebooks' : True, 'clone_to_directory' : '/global/homes/{username[0]}/{username}'}
 c.NBViewer.local_handler = "clonenotebooks.renderers.LocalRenderingHandler"
 c.NBViewer.url_handler = "clonenotebooks.renderers.URLRenderingHandler"

From dde07446ddba9288b41941d3f437b90a1d825153 Mon Sep 17 00:00:00 2001
From: Rollin Thomas 
Date: Wed, 6 Nov 2019 19:30:09 -0800
Subject: [PATCH 18/21] Upgrade to 19-11 remote

---
 .../web-jupyterhub/jupyterhub_config.py | 27 +++++++++----------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py
index cbd06c8..76029ba 100644
--- a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py
+++ b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py
@@ -1064,20 +1064,20 @@ def comma_split(string):
     "gerty-shared-node-cpu": (
         "sshspawner.sshspawner.SSHSpawner", {
             "cmd": ["/global/common/cori/das/jupyterhub/jupyter-launcher.sh",
-                "/global/common/cori_cle7/software/jupyter/19-09/bin/jupyter-labhub"],
+                "/global/common/cori_cle7/software/jupyter/19-11/bin/jupyter-labhub"],
             "args": ["--transport=ipc"],
             "environment": {"OMP_NUM_THREADS" : "2"},
             "remote_hosts": ["gerty.nersc.gov"],
             "remote_port_command": "/usr/bin/python /global/common/cori/das/jupyterhub/new-get-port.py --ip",
             "hub_api_url": "http://{}:8081/hub/api".format(ip),
-            "path": "/global/common/cori_cle7/software/jupyter/19-09/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin",
+            "path": "/global/common/cori_cle7/software/jupyter/19-11/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin",
             "ssh_keyfile": '/certs/{username}.key'
         }
     ),
     "gerty-exclusive-node-cpu": (
         "nerscslurmspawner.NERSCExclusiveSlurmSpawner", {
             "cmd": ["/global/common/cori/das/jupyterhub/jupyter-launcher.sh",
-                "/usr/common/software/jupyter/19-09/bin/jupyter-labhub"],
+
"/usr/common/software/jupyter/19-11/bin/jupyter-labhub"], "exec_prefix": "/usr/bin/ssh -q -o StrictHostKeyChecking=no -o preferredauthentications=publickey -l {username} -i /certs/{username}.key {remote_host}", "http_timeout": 300, "startup_poll_interval": 30.0, @@ -1086,26 +1086,26 @@ def comma_split(string): "req_runtime": "240", "req_qos": "regular", "hub_api_url": "http://{}:8081/hub/api".format(ip), - "path": "/usr/common/software/jupyter/19-09/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", + "path": "/usr/common/software/jupyter/19-11/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", } ), "cori-shared-node-cpu": ( "sshspawner.sshspawner.SSHSpawner", { "cmd": ["/global/common/cori/das/jupyterhub/jupyter-launcher.sh", - "/usr/common/software/jupyter/19-09/bin/jupyter-labhub"], + "/usr/common/software/jupyter/19-11/bin/jupyter-labhub"], "args": ["--transport=ipc"], "environment": {"OMP_NUM_THREADS" : "2", "PYTHONFAULTHANDLER": "1"}, "remote_hosts": ["corijupyter.nersc.gov"], "remote_port_command": "/usr/bin/python /global/common/cori/das/jupyterhub/new-get-port.py --ip", "hub_api_url": "http://{}:8081/hub/api".format(ip), - "path": "/usr/common/software/jupyter/19-09/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", + "path": "/usr/common/software/jupyter/19-11/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", "ssh_keyfile": '/certs/{username}.key' } ), "cori-exclusive-node-cpu": ( "nerscslurmspawner.NERSCExclusiveSlurmSpawner", { "cmd": ["/global/common/cori/das/jupyterhub/jupyter-launcher.sh", - "/usr/common/software/jupyter/19-09/bin/jupyter-labhub"], + "/usr/common/software/jupyter/19-11/bin/jupyter-labhub"], "exec_prefix": "/usr/bin/ssh -q -o StrictHostKeyChecking=no -o preferredauthentications=publickey -l {username} -i /certs/{username}.key {remote_host}", "http_timeout": 300, "startup_poll_interval": 30.0, @@ -1113,13 +1113,13 @@ def comma_split(string): "req_homedir": "/tmp", "req_runtime": "240", "hub_api_url": "http://{}:8081/hub/api".format(ip), - "path": "/usr/common/software/jupyter/19-09/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", + "path": "/usr/common/software/jupyter/19-11/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", } ), "cori-exclusive-node-gpu": ( "nerscslurmspawner.NERSCExclusiveGPUSlurmSpawner", { "cmd": ["/global/common/cori/das/jupyterhub/jupyter-launcher.sh", - "/usr/common/software/jupyter/19-09/bin/jupyter-labhub"], + "/usr/common/software/jupyter/19-11/bin/jupyter-labhub"], "args": ["--transport=ipc"], "exec_prefix": "/usr/bin/ssh -q -o StrictHostKeyChecking=no -o preferredauthentications=publickey -l {username} -i /certs/{username}.key {remote_host}", "startup_poll_interval": 30.0, @@ -1127,22 +1127,19 @@ def comma_split(string): "req_homedir": "/tmp", "req_runtime": "240", "hub_api_url": "http://{}:8081/hub/api".format(ip), - "path": "/usr/common/software/jupyter/19-09/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", + "path": "/usr/common/software/jupyter/19-11/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", } ), "spin-shared-node-cpu": ( "sshspawner.sshspawner.SSHSpawner", { "cmd": ["/global/common/cori/das/jupyterhub/jupyter-launcher.sh", - "/global/common/cori_cle7/software/jupyter/19-09/bin/jupyter-labhub"], -# "/opt/anaconda3/bin/jupyter-labhub"], + "/global/common/cori_cle7/software/jupyter/19-11/bin/jupyter-labhub"], "args": 
["--transport=ipc"], "environment": {"OMP_NUM_THREADS" : "2"}, "remote_hosts": ["app-notebooks"], "remote_port_command": "/usr/bin/python /global/common/cori/das/jupyterhub/new-get-port.py --ip", -# "remote_port_command": "/opt/anaconda3/bin/python /global/common/cori/das/jupyterhub/new-get-port.py --ip", "hub_api_url": "http://{}:8081/hub/api".format(ip), - "path": "/global/common/cori_cle7/software/jupyter/19-09/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", -# "path": "/opt/anaconda3/bin:/usr/bin:/usr/local/bin:/bin", + "path": "/global/common/cori_cle7/software/jupyter/19-11/bin:/global/common/cori/das/jupyterhub:/usr/common/usg/bin:/usr/bin:/bin", "ssh_keyfile": '/certs/{username}.key' } ) From c0b0c695fd3378cd045e226c4c8076c5d726315a Mon Sep 17 00:00:00 2001 From: William Krinsman Date: Fri, 8 Nov 2019 10:30:09 -0800 Subject: [PATCH 19/21] Use default_server_name feature name of JupyterHub to better utilize user-redirect, plus no longer need to have two branches of clonenotebooks. --- jupyter-nersc/web-jupyterhub/jupyterhub_config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py index 76029ba..068dfd1 100644 --- a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py +++ b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py @@ -1199,3 +1199,5 @@ def auth_state_hook(spawner, auth_state): ### Prometheus c.JupyterHub.authenticate_prometheus = False + +c.JupyterHub.default_server_name = 'cori-shared-node-cpu' From 8e3df34df27d91647443cec36738b90b60cf73ef Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Mon, 11 Nov 2019 10:37:02 -0800 Subject: [PATCH 20/21] Comment out test change --- jupyter-nersc/web-jupyterhub/jupyterhub_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py index 068dfd1..a2e2517 100644 --- a/jupyter-nersc/web-jupyterhub/jupyterhub_config.py +++ b/jupyter-nersc/web-jupyterhub/jupyterhub_config.py @@ -1200,4 +1200,4 @@ def auth_state_hook(spawner, auth_state): c.JupyterHub.authenticate_prometheus = False -c.JupyterHub.default_server_name = 'cori-shared-node-cpu' +#c.JupyterHub.default_server_name = 'cori-shared-node-cpu' From dcaf08d79b2934cc92481d81b2ee7cacc188aa7b Mon Sep 17 00:00:00 2001 From: Rollin Thomas Date: Mon, 11 Nov 2019 10:37:37 -0800 Subject: [PATCH 21/21] Testing remote proxy --- jupyter-compose/app-notebooks/Dockerfile | 26 ++++++++++++------- .../app-notebooks/jupyter_notebook_config.py | 12 ++++++--- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/jupyter-compose/app-notebooks/Dockerfile b/jupyter-compose/app-notebooks/Dockerfile index 93338f5..e2fd869 100644 --- a/jupyter-compose/app-notebooks/Dockerfile +++ b/jupyter-compose/app-notebooks/Dockerfile @@ -37,21 +37,29 @@ RUN \ RUN \ jupyter labextension install @jupyterlab/hub-extension -# Jupyter server proxy; install but don't enable +# Jupyter server proxy RUN \ pip install --no-cache-dir \ - jupyter-server-proxy - -ADD jupyter-server-mapper /tmp/jupyter-server-mapper -RUN \ - cd /tmp/jupyter-server-mapper && \ - python setup.py install && \ - cd - && \ - jupyter serverextension enable --py jupyter_server_mapper --sys-prefix + git+https://github.com/rcthomas/jupyter-server-proxy.git@allow-remote-proxy ADD jupyter_notebook_config.py /opt/anaconda3/etc/jupyter/. 
+#### # Jupyter server proxy; install but don't enable +#### +#### RUN \ +#### pip install --no-cache-dir \ +#### jupyter-server-proxy +#### +#### ADD jupyter-server-mapper /tmp/jupyter-server-mapper +#### RUN \ +#### cd /tmp/jupyter-server-mapper && \ +#### python setup.py install && \ +#### cd - && \ +#### jupyter serverextension enable --py jupyter_server_mapper --sys-prefix +#### +#### ADD jupyter_notebook_config.py /opt/anaconda3/etc/jupyter/. + # Some dummy users RUN \ diff --git a/jupyter-compose/app-notebooks/jupyter_notebook_config.py b/jupyter-compose/app-notebooks/jupyter_notebook_config.py index 43dfdd3..f138b71 100644 --- a/jupyter-compose/app-notebooks/jupyter_notebook_config.py +++ b/jupyter-compose/app-notebooks/jupyter_notebook_config.py @@ -1,5 +1,11 @@ -def mapper(key): - return "service" + key +### def mapper(key): +### return "service" + key +### +### c.ServerMapper.mapper = mapper -c.ServerMapper.mapper = mapper +def hook(handler, host): + handler.log.info("request to proxy to host " + host) + return host == "service1" + +c.ServerProxy.host_whitelist_hook = hook
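The `host_whitelist_hook` trait above comes from the `allow-remote-proxy` branch of the jupyter-server-proxy fork installed in the Dockerfile, so its exact name and signature are specific to that experiment. Generalizing the hook to more than one backend is just a membership check against an allowlist; a sketch with hypothetical host names:

    ALLOWED_HOSTS = {"service1", "service2"}  # hypothetical backend hostnames

    def hook(handler, host):
        # Log every proxy request, then admit only hosts on the allowlist.
        handler.log.info("request to proxy to host " + host)
        return host in ALLOWED_HOSTS

    c.ServerProxy.host_whitelist_hook = hook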