diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d4fdfbb
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,29 @@
+# bluesky-queueserver
+existing_plans_and_devices.yaml
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*.egg-info/
+
+# Microsoft VisualStudio Code Editor
+.vscode/
+
+# log files
+.logs/
+
+# Jupyter notebook checkpoints
+.ipynb_checkpoints/
+
+# filewriter callback output files
+*.h5
+*.dat
+
+# local developer content
+dev_*
+
+# Sphinx build products
+build/
+
+# Macintosh
+.DS_Store
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..dded86f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,18 @@
+# Bluesky Instrument Template
+
+**Caution**: If you will use the bluesky queueserver (QS), note that _every_
+Python file in this directory will be executed when QS starts the RunEngine.
+Don't add extra Python files to this directory.  Instead, put them in `user/`
+or somewhere else.
+
+Contains:
+
+description | item(s)
+--- | ---
+Introduction | [`intro2bluesky.md`](https://bcda-aps.github.io/bluesky_training/reference/_intro2bluesky.html)
+IPython console startup | [`console/`](console/README.md)
+Bluesky queueserver support | [introduction](qserver.md), `*qs*`
+Instrument configuration | `instrument/`
+Conda environments | [`environments/`](./environments/README.md)
+Unit tests | [`tests/`](./tests/README.md)
+Documentation | [How-to, examples, tutorials, reference](https://bcda-aps.github.io/bluesky_training)
diff --git a/_run_qs.sh b/_run_qs.sh
new file mode 100755
index 0000000..1f29f5c
--- /dev/null
+++ b/_run_qs.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# Start the bluesky queueserver.
+
+#--------------------
+# change the program defaults here
+# CONDA: pre-defined in GitHub Actions workflow
+export CONDA=${CONDA:-/APSshare/miniconda/x86_64}
+export CONDA_ENVIRONMENT="${BLUESKY_CONDA_ENV:-training_2022}"
+export DATABROKER_CATALOG=${DATABROKER_CATALOG:-training}
+export QS_SERVER_HOST=$(hostname)  # or the specific host that must pass the $(hostname) test below
+export QS_UPDATE_PLANS_DEVICES=ENVIRONMENT_OPEN
+export QS_USER_GROUP_PERMISSIONS_FILE="./user_group_permissions.yaml"
+export QS_USER_GROUP_PERMISSIONS_RELOAD=ON_STARTUP
+
+# REDIS_ADDR is __always__ localhost.
+# Override if it is not, but you may encounter access issues; you're on your own then.
+export REDIS_ADDR=localhost
+#--------------------
+
+# QS and redis must be on the same workstation
+if [ "$(hostname)" != "${QS_SERVER_HOST}" ]; then
+    echo "Must run queueserver on ${QS_SERVER_HOST}.  This is $(hostname)"
+    exit 1
+fi
+
+SHELL_SCRIPT_NAME=${BASH_SOURCE:-${0}}
+if [ -z "$STARTUP_DIR" ] ; then
+    # If no startup dir is specified, use the directory with this script
+    export STARTUP_DIR=$(dirname "${SHELL_SCRIPT_NAME}")
+fi
+
+# activate conda command, if needed
+if [ ! -f "${CONDA_EXE}" ]; then
+    CONDA_ROOTS="${CONDA}"  # In GitHub Actions workflow: (miniconda)
+    CONDA_ROOTS+=" /APSshare/miniconda/x86_64"
+    CONDA_ROOTS+=" /opt/miniconda3"
+    for root in ${CONDA_ROOTS}; do
+        if [ -d "${root}" ] && [ -f "${root}/etc/profile.d/conda.sh" ]; then
+            # Found a match!
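+            # conda.sh defines the "conda" shell function in this shell;
+            # sourcing it makes "conda activate" work even when this
+            # script runs non-interactively (e.g. from cron or CI).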
+            source "${root}/etc/profile.d/conda.sh"
+            break
+        fi
+    done
+fi
+
+# In GitHub Actions workflow,
+# $ENV_NAME is an environment variable naming the conda environment to be used
+if [ -z "${ENV_NAME}" ] ; then
+    ENV_NAME="${CONDA_ENVIRONMENT}"
+fi
+
+# echo "conda env list = $(conda env list)"
+
+conda activate "${ENV_NAME}"
+
+# #--------------------
+# echo "Environment: $(env | sort)"
+# echo "------"
+# echo "CONDA_ENVIRONMENT=${CONDA_ENVIRONMENT}"
+# echo "CONDA=${CONDA}"
+# echo "DATABROKER_CATALOG=${DATABROKER_CATALOG}"
+# echo "QS_SERVER_HOST=${QS_SERVER_HOST}"
+# echo "QS_UPDATE_PLANS_DEVICES=${QS_UPDATE_PLANS_DEVICES}"
+# echo "QS_USER_GROUP_PERMISSIONS_FILE=${QS_USER_GROUP_PERMISSIONS_FILE}"
+# echo "QS_USER_GROUP_PERMISSIONS_RELOAD=${QS_USER_GROUP_PERMISSIONS_RELOAD}"
+# echo "REDIS_ADDR=${REDIS_ADDR}"
+# echo "SHELL_SCRIPT_NAME=${SHELL_SCRIPT_NAME}"
+# echo "STARTUP_DIR=${STARTUP_DIR}"
+# #--------------------
+
+# Start the bluesky queueserver (QS)
+start-re-manager \
+    --redis-addr "${REDIS_ADDR}" \
+    --startup-dir "${STARTUP_DIR}" \
+    --update-existing-plans-devices "${QS_UPDATE_PLANS_DEVICES}" \
+    --user-group-permissions "${QS_USER_GROUP_PERMISSIONS_FILE}" \
+    --user-group-permissions-reload "${QS_USER_GROUP_PERMISSIONS_RELOAD}" \
+    --zmq-publish-console ON \
+    --keep-re
diff --git a/blueskyStarter.sh b/blueskyStarter.sh
new file mode 100755
index 0000000..5944bd5
--- /dev/null
+++ b/blueskyStarter.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+# Start a bluesky session in IPython console (default) or Jupyter notebook GUI.
+
+# Get the Python environment name.
+# define fallback if BLUESKY_CONDA_ENV is not found
+DEFAULT_ENV=bluesky_2023_3
+export ENV_NAME="${BLUESKY_CONDA_ENV:-${DEFAULT_ENV}}"
+export IPYTHON_PROFILE=bluesky
+export IPYTHONDIR="${HOME}/.ipython-bluesky"
+
+
+pick () {  # activate ENV_NAME (using conda) from given arg
+
+    ARG="${1}"
+
+    if [ "${ARG}" == "" ]; then
+        return 1
+    fi
+
+    if [ -d "${ARG}" ]; then
+        pick "${ARG}/bin/conda"
+
+        if [ "${cmd_base}" != "" ]; then
+            return 0
+        fi
+        return 1
+    fi
+
+    CMD=$(which ${ARG})  # as executable command
+    if [ "${CMD}" == "" ]; then
+        return 1
+    fi
+
+    if [ -x "${CMD}" ]; then
+        match_env_name=$( \
+            ${CMD} env list \
+            | grep "^[ ]*${ENV_NAME} " \
+            | awk '{print $1}' \
+        )
+        if [ "${match_env_name}" != "" ]; then
+            # found the requested environment name
+            cmd_base=$(basename "${CMD}")
+            case "${cmd_base}" in
+                conda)
+                    source "$(dirname ${CMD})/activate" base
+                    "${cmd_base}" activate "${ENV_NAME}"
+                    return 0
+                    ;;
+                *)
+                    return 1
+                    ;;
+            esac
+        fi
+    fi
+
+    return 2
+}
+
+
+pick_environment_executable () {  # Activate the environment
+    # Pick the first "hit"
+    pick "/APSshare/miniconda/x86_64" \
+        || pick "${HOME}" \
+        || pick "conda" \
+        || pick "/opt/miniconda3" \
+        || pick "${HOME}/Apps/miniconda" \
+        || pick "${HOME}/Apps/anaconda"
+
+    echo "==> CONDA_PREFIX=${CONDA_PREFIX}"
+
+    if [ "${cmd_base}" != "" ]; then
+        echo "$(which python) -- $(python --version)"
+        return 0
+    fi
+
+    echo "Could not activate environment: '${ENV_NAME}'"
+    return 3
+}
+
+
+console_session () {
+    export OPTIONS=""
+    export OPTIONS="${OPTIONS} --profile=${IPYTHON_PROFILE}"
+    export OPTIONS="${OPTIONS} --ipython-dir=${IPYTHONDIR}"
+    export OPTIONS="${OPTIONS} --IPCompleter.use_jedi=False"
+    export OPTIONS="${OPTIONS} --InteractiveShellApp.hide_initial_ns=False"
+
+    pick_environment_executable
+
+    ipython ${OPTIONS}
+}
+
+lab_server () {
+    export OPTIONS=""
+    # export OPTIONS="${OPTIONS} --no-browser"
+    # NOTE: bash defines HOSTNAME (HOST is not set by bash)
+    export OPTIONS="${OPTIONS} --ip=${HOSTNAME}"
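+
+    # The kernel install below registers ${ENV_NAME} with Jupyter so
+    # notebooks can select this conda environment as their kernel.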
+    pick_environment_executable
+
+    python -m ipykernel install --user --name "${ENV_NAME}"
+    jupyter-lab ${OPTIONS}
+}
+
+usage () {
+    echo $"Usage: $0 [console | lab | help]"
+}
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+case $(echo "${1}" | tr '[:upper:]' '[:lower:]') in
+    lab) lab_server ;;
+    "" | console) console_session ;;
+    help) usage ;;
+    *) usage; exit 1
+esac
diff --git a/console/README.md b/console/README.md
new file mode 100644
index 0000000..4499caa
--- /dev/null
+++ b/console/README.md
@@ -0,0 +1,5 @@
+# Bluesky IPython Console Session
+
+Loads the `instrument` [package](https://bcda-aps.github.io/bluesky_training/instrument) for use in an interactive IPython console session (or Jupyter notebook).
+
+Copy or link the `__start_bluesky_instrument__.py` file to the appropriate IPython profile `startup` directory, such as: `~/.ipython-bluesky/profile_bluesky/startup/__start_bluesky_instrument__.py`
diff --git a/console/__start_bluesky_instrument__.py b/console/__start_bluesky_instrument__.py
new file mode 100755
index 0000000..58fd45a
--- /dev/null
+++ b/console/__start_bluesky_instrument__.py
@@ -0,0 +1,26 @@
+"""
+start bluesky in IPython console session
+"""
+
+# start a Bluesky data collection console session
+from IPython import get_ipython
+import pathlib
+import sys
+
+# find the "bluesky/" directory
+BLUESKY_DIRECTORY = pathlib.Path.home() / "bluesky"
+if not BLUESKY_DIRECTORY.exists():
+    # fall back: look for "bluesky/" three directories up from this file
+    BLUESKY_DIRECTORY = pathlib.Path(__file__).absolute().parent.parent.parent
+    BLUESKY_DIRECTORY = BLUESKY_DIRECTORY / "bluesky"
+if not BLUESKY_DIRECTORY.exists():
+    raise FileNotFoundError(
+        f"Cannot find bluesky directory: {BLUESKY_DIRECTORY}"
+    )
+# put bluesky directory on the import path
+sys.path.append(str(BLUESKY_DIRECTORY))
+
+# terse error dumps (Exception tracebacks)
+get_ipython().run_line_magic('xmode', 'Minimal')
+
+from instrument.collection import *
diff --git a/environments/README.md b/environments/README.md
new file mode 100644
index 0000000..c3058c6
--- /dev/null
+++ b/environments/README.md
@@ -0,0 +1,37 @@
+# Environment Definitions
+
+This directory contains the [YAML](https://yaml.org) files that define the
+package requirements (and possibly the acceptable versions) for a conda
+environment.
+
+## YAML files
+
+This directory contains the master source for these YAML files.
+The repository is: https://github.com/BCDA-APS/bluesky_training/
+
+version | file
+--- | ---
+2023_3 (latest) | [`environment_2023_3.yml`](./environment_2023_3.yml)
+2023_2 | [`environment_2023_2.yml`](./environment_2023_2.yml)
+2023_1 | [`environment_2023_1.yml`](./archive/environment_2023_1.yml)
+2022_3 | [`environment_2022_3.yml`](./archive/environment_2022_3.yml)
+2022_2 | [`environment_2022_2.yml`](./archive/environment_2022_2.yml)
+2022_1 | [`environment_2022_1.yml`](./archive/environment_2022_1.yml)
+2021_2 | [`environment_2021_2.yml`](./archive/environment_2021_2.yml)
+2021_1 | [`environment_2021_1.yml`](./archive/environment_2021_1.yml)
+
+_note_: Prior to the 2023_2 version, the master source for these YAML files was the
+[BCDA Bluesky
+configuration](https://github.com/BCDA-APS/use_bluesky/tree/main/install)
+repository.
+
+## Managing environments
+
+First you must activate the conda
+[environment](https://bcda-aps.github.io/bluesky_training/reference/_conda_environment.html)
+you will use (if not already activated).
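+
+If the environment does not exist yet, create it first from the matching
+YAML file in this directory.  The header comments of each YAML file record
+the suggested command; a sketch using the latest file here:
+
+```bash
+conda env create --force -n bluesky_2023_3 -f ./environment_2023_3.yml --solver=libmamba
+```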
+For example, to activate:
+
+```bash
+(base) prjemian@zap:~$ conda activate bluesky_2023_3
+(bluesky_2023_3) prjemian@zap:~$
+```
diff --git a/environments/archive/environment_2021_1.yml b/environments/archive/environment_2021_1.yml
new file mode 100644
index 0000000..4e166da
--- /dev/null
+++ b/environments/archive/environment_2021_1.yml
@@ -0,0 +1,65 @@
+name: bluesky_2021_1
+channels:
+  - defaults
+  - conda-forge
+  - aps-anl-tag
+  - nsls2forge
+  - pydm-tag
+  - pcds-tag
+dependencies:
+  - python>=3.8
+  # remove anaconda package, it makes install and update take much longer or become unsuccessful
+  # - anaconda
+  - aps-dm-api
+  - apstools
+  - area-detector-handlers
+  - black
+  - bluesky>=1.6.2
+  - caproto
+  - conda-build
+  - coverage
+  - databroker>=1.0.6
+  - databroker-pack
+  # - epics-pydb
+  - flake8
+  - h5py
+  - happi
+  - hklpy
+  - imagecodecs-lite
+  - ipython
+  - jupyter
+  - jupyterlab
+  - lxml
+  - nexpy
+  - notebook
+  - ophyd>=1.5.1
+  - pandas
+  - pandoc
+  - pint
+  - pip
+  - psutil
+  - punx
+  - pvview
+  - pydm
+  - pyEpics>=3.4.2
+  - pylint
+  - pymca
+  - pymongo
+  - pyqt=5
+  - pyRestTable
+  - pytest
+  - qt=5
+  - scikit-image
+  - spec2nexus
+  - sphinx
+  - sphinxcontrib-napoleon
+  - stdlogpj
+  - twine
+  - typhos
+  - versioneer
+  - xlrd
+  - pip:
+    - bluesky-live
+    # - ipython-genutils==0.2.0
+    - sphinx-rtd-theme
+    - super-state-machine
diff --git a/environments/archive/environment_2021_2.yml b/environments/archive/environment_2021_2.yml
new file mode 100644
index 0000000..cb91ab4
--- /dev/null
+++ b/environments/archive/environment_2021_2.yml
@@ -0,0 +1,73 @@
+name: bluesky_2021_2
+
+# use:
+#   date; conda env create --force -f ./environment_2021_2.yml; date
+
+channels:
+  - defaults
+  - conda-forge
+  - nsls2forge
+  - aps-anl-tag
+
+dependencies:
+  # --- Python core packages
+  - python>=3.8
+  - ipython
+  - jupyter
+  - jupyterlab
+  - notebook
+  - pip
+  - psutil
+  # --- testing and quality assurance
+  - black
+  - flake8
+  - pylint
+  - pytest
+  # --- Qt
+  - pyqt=5
+  - qt=5
+  # --- support packages
+  - aps-dm-api
+  - epics-base>=7.0.5
+  - h5py
+  - imagecodecs-lite
+  - ipympl
+  - lxml
+  - pandas
+  - pint
+  - pyEpics>=3.4.3
+  - pymongo
+  - scikit-image
+  - xlrd
+  # --- Bluesky framework packages
+  - apstools
+  - area-detector-handlers
+  - bluesky>=1.6.7
+  - bluesky-kafka
+  - databroker-pack
+  - databroker>=1.2.3
+  - happi
+  - hklpy>=0.3.16
+  - ophyd>=1.6.1
+  - pydm
+  # --- user-requested packages
+  - nexpy
+  - punx
+  - pvview
+  - pymca
+  - pyRestTable
+  - spec2nexus
+  - stdlogpj
+  - xrayutilities
+  # --- packaging and publishing
+  - conda-build
+  - coverage
+  - sphinx
+  - sphinxcontrib-napoleon
+  - twine
+  - versioneer
+  - pip:
+    - bluesky-queueserver
+    # - bluesky-webclient is NOT Python software, don't install it this way
+    #   https://github.com/bluesky/bluesky-webclient
+    - bluesky-widgets
diff --git a/environments/archive/environment_2022_1.yml b/environments/archive/environment_2022_1.yml
new file mode 100644
index 0000000..2747b87
--- /dev/null
+++ b/environments/archive/environment_2022_1.yml
@@ -0,0 +1,73 @@
+name: bluesky_2022_1
+
+# use:
+#   date; conda env create --force -f ./environment_2022_1.yml; date
+
+channels:
+  - defaults
+  - conda-forge
+  - aps-anl-tag
+  - nsls2forge
+
+dependencies:
+  # --- Python core packages
+  - python>=3.8
+  - ipython
+  - jupyter
+  - jupyterlab
+  - notebook
+  - pip
+  - psutil
+  # --- testing and quality assurance
+  - black
+  - flake8
+  - pylint
+  - pytest
+  # --- Qt
+  - pyqt=5
+  - qt=5
+  # --- support packages
+  - aps-dm-api
+  - epics-base>=7.0.5
+  - h5py
+  - imagecodecs-lite
+  - ipympl
+  - lxml
+  - pandas
+  - pint
+  - pyEpics>=3.5.0
+  - pymongo
+  - scikit-image
+  - xlrd
+  # --- Bluesky framework packages
+  - apstools
+  - area-detector-handlers
+  - bluesky>=1.8.1
+  - databroker>=1.2.4
+  - databroker-pack
+  - hklpy>=1.0.1
+  - ophyd>=1.6.3
+  - pydm
+  # --- optional Bluesky framework packages for evaluation
+  - bluesky-queueserver
+  - bluesky-widgets
+  # --- user-requested packages
+  - nexpy
+  - punx
+  - pvview
+  - pymca
+  - pyRestTable
+  - spec2nexus
+  - stdlogpj
+  - xrayutilities
+  # --- packaging and publishing
+  - conda-build
+  - coverage
+  - sphinx
+  - sphinxcontrib-napoleon
+  - twine
+  - versioneer
+  - pip:
+    - https://github.com/BCDA-APS/adl2pydm/archive/main.zip
+    # - bluesky-webclient is NOT Python software, don't install it this way
+    #   https://github.com/bluesky/bluesky-webclient
diff --git a/environments/archive/environment_2022_2.yml b/environments/archive/environment_2022_2.yml
new file mode 100644
index 0000000..3232913
--- /dev/null
+++ b/environments/archive/environment_2022_2.yml
@@ -0,0 +1,86 @@
+name: bluesky_2022_2
+
+# use:
+#   date; conda env create --force -f ./environment_2022_2.yml; date
+
+channels:
+  - defaults
+  - conda-forge
+  - aps-anl-tag
+  - nsls2forge
+
+dependencies:
+
+  # --- Python core packages
+  - python=3.9
+  - ipython
+  - jupyter
+  - jupyterlab
+  - notebook
+  - pip
+  - psutil
+
+  # --- testing and quality assurance
+  - black
+  - flake8
+  - pylint
+  - pytest
+
+  # --- Qt
+  - pyqt=5
+  - qt=5
+
+  # --- general support packages
+  - bitshuffle
+  - epics-base>=7.0.5
+  - h5py
+  - hdf5plugin
+  - imagecodecs-lite
+  - ipympl
+  - lxml
+  - pandas
+  - pint
+  - pyEpics>=3.5.0
+  - pymongo
+  - scikit-image
+  - xlrd
+
+  # # --- Bluesky framework packages
+  # all moved to pip install (conda takes additional 10m to solve)
+
+  # --- user-requested packages
+  - aps-dm-api>=5
+  - nexpy
+  - punx
+  - pvview
+  - pymca
+  - pyRestTable
+  - spec2nexus
+  - stdlogpj
+  - xrayutilities
+
+  # # --- packaging and publishing
+  # - conda-build
+  # - coverage
+  # - sphinx
+  # - sphinxcontrib-napoleon
+  # - twine
+  # - versioneer
+
+  - pip:
+    - apstools
+    - area-detector-handlers
+    - bluesky-queueserver
+    - bluesky-queueserver-api
+    - bluesky-widgets
+    - bluesky>=1.8.1
+    - databroker-pack
+    - databroker>=1.2.4,<2
+    - hklpy
+    - ophyd>=1.6.3
+    - pydm
+
+    - https://github.com/BCDA-APS/adl2pydm/archive/main.zip
+    # --- optional Bluesky framework packages for evaluation
+    # - bluesky-webclient is NOT Python software, don't install it this way
+    #   https://github.com/bluesky/bluesky-webclient
diff --git a/environments/archive/environment_2022_3.yml b/environments/archive/environment_2022_3.yml
new file mode 100644
index 0000000..2df9feb
--- /dev/null
+++ b/environments/archive/environment_2022_3.yml
@@ -0,0 +1,105 @@
+name: bluesky_2022_3
+
+# use:
+#   prefer micromamba (conda takes much longer, if ever, to solve)
+#     micromamba installed in ~30s
+#       date; micromamba create -y -n bluesky_2022_3 -f ./environment_2022_3.yml; date
+#       # then activate with:
+#       eval "$(micromamba shell hook --shell=bash)"
+#       micromamba activate bluesky_2022_3
+#     conda installed in ~250s
+#       date; conda env create --force -f ./environment_2022_3.yml; date
+#       # then activate with:
+#       conda activate bluesky_2022_3
+
+channels:
+  - conda-forge
+  - apsu
+  - aps-anl-tag
+  - defaults
+  # - sveseli
+  # - nsls2forge
+
+dependencies:
+
+  # --- Python core packages
+  - python >=3.9
+  - ipython
+  - jupyter
+  - jupyterlab
+  - notebook
+  - pip
+  - psutil
+
+  # --- testing and quality assurance
+  - black
+  - flake8
+  - pylint
+  - pytest
+  - pytest-notebook
+
+  # --- Qt
+  - pyqt =5
+  - qt =5
+
+  # --- general support packages
+  - bitshuffle
+  - epics-base >=7.0.5
+  - h5py
+  - hdf5plugin
+  - imagecodecs-lite
+  - ipympl
+  - lxml
+  - pandas
+  - pint
+  - pyEpics >=3.5.0
+  - pymongo
+  - scikit-image
+  - xlrd
+
+  # --- Bluesky framework packages
+  - adl2pydm
+  - apstools >=1.6.3
+  - area-detector-handlers
+  - bluesky-queueserver
+  - bluesky-queueserver-api
+  - bluesky-widgets
+  - bluesky >=1.8.1
+  - databroker-pack
+  - databroker =1.2
+  - happi
+  - hklpy >=1.0.3  # --- linux-64
+  - ophyd >=1.6.3
+  - pydm
+  - tiled-server
+  # https://github.com/pydata/xarray/issues/6818
+  - xarray !=2022.6.0  # incompatible with numpy=1.20
+
+  # --- user-requested packages
+  - aps-dm-api >=5  # linux-64 osx-64
+  # - c2dataviewer  # --- linux-64
+  - nexpy
+  - punx
+  - pvapy
+  - pvview
+  - pymca
+  - pyRestTable
+  - spec2nexus
+  - stdlogpj
+  - xrayutilities
+
+  # --- packaging and publishing
+  - conda-build
+  - coverage
+  - sphinx
+  - sphinxcontrib-napoleon
+  - twine
+  - versioneer
+  - mamba
+  - micromamba
+  - pygithub
+
+  # - pip:
+  #   # --- optional Bluesky framework packages for evaluation
+  #   # - bluesky-webclient is NOT Python software, don't install it this way
+  #   #   https://github.com/bluesky/bluesky-webclient
diff --git a/environments/archive/environment_2023_1.yml b/environments/archive/environment_2023_1.yml
new file mode 100644
index 0000000..31914bd
--- /dev/null
+++ b/environments/archive/environment_2023_1.yml
@@ -0,0 +1,110 @@
+name: bluesky_2023_1
+
+# use:
+#   date; micromamba create -y -n bluesky_2023_1 -f ./environment_2023_1.yml; date
+#   # then activate with:
+#   eval "$(micromamba shell hook --shell=bash)"
+#   micromamba activate bluesky_2023_1
+
+# download:
+#   wget https://raw.githubusercontent.com/BCDA-APS/bluesky_training/main/bluesky/environments/environment_2023_1.yml
+
+# Add/revise environment variable for default bluesky environment.
+# Might be defined in either ~/.bashrc or ~/.bash_aliases
+#   export BLUESKY_CONDA_ENV=bluesky_2023_1
+
+channels:
+  - conda-forge
+  - apsu
+  - aps-anl-tag
+  - defaults
+  - sveseli
+
+dependencies:
+
+  # --- Python core packages
+  - python >=3.9
+  - ipython
+  - jupyter
+  - jupyterlab
+  - notebook
+  - pip
+  - psutil
+
+  # --- testing and quality assurance
+  - black
+  - flake8
+  - pylint
+  - pytest
+  - pytest-notebook
+
+  # --- Qt
+  - pyqt =5
+  - qt =5
+
+  # --- general support packages
+  - bitshuffle
+  - epics-base >=7.0.5
+  - h5py
+  - hdf5plugin
+  - imagecodecs-lite
+  - ipympl
+  - lxml
+  - pandas
+  - pint
+  - pyEpics >=3.5.0
+  - pymongo
+  - pysumreg
+  - scikit-image
+  - xlrd
+
+  # --- Bluesky framework packages
+  - apstools
+  - area-detector-handlers
+  - bluesky-queueserver
+  - bluesky-queueserver-api
+  - bluesky-widgets
+  - bluesky >=1.8.1
+  - databroker-pack
+  - databroker =1.2
+  - hklpy >=1.0.3  # --- linux-64
+  - ophyd >=1.6.3
+  - pydm
+  # https://github.com/pydata/xarray/issues/6818
+  - xarray !=2022.6.0  # incompatible with numpy=1.20
+
+  # --- user-requested packages
+  - nexpy
+  - punx
+  - pvapy
+  - pvview
+  - pymca
+  - pyRestTable
+  - spec2nexus
+  - stdlogpj
+  - xrayutilities
+
+  # --- packaging and publishing
+  - conda-build
+  - coverage
+  - mamba
+  - micromamba
+  - pydata-sphinx-theme
+  - pygithub
+  - sphinx
+  - sphinxcontrib-napoleon
+  - twine
+  - versioneer
+
+  # if using this environment at APS, then uncomment these (and apsbss below)
+  # - aps-dm-api >=5  # linux-64 osx-64
+  # - c2dataviewer  # --- linux-64
+
+  - pip:
+    # - apsbss  # only works on APS subnet
+    # - happi
+    - tiled[all]
+    - https://github.com/BCDA-APS/adl2pydm/archive/main.zip
+    # --- optional Bluesky framework packages for evaluation
+    # - bluesky-webclient is NOT Python software, don't install it this way
+    #   https://github.com/bluesky/bluesky-webclient
diff --git a/environments/environment_2023_2.yml b/environments/environment_2023_2.yml
new file mode 100644
index 0000000..315d586
--- /dev/null
+++ b/environments/environment_2023_2.yml
@@ -0,0 +1,121 @@
+name: bluesky_2023_2
+
+# download:
+#   wget https://raw.githubusercontent.com/BCDA-APS/bluesky_training/main/bluesky/environments/environment_2023_2.yml
+# create:
+#   conda env create --force -n bluesky_2023_2 -f ./environment_2023_2.yml --solver=libmamba
+# activate:
+#   conda activate bluesky_2023_2
+
+# Add/revise environment variable for default bluesky environment.
+# Add to ~/.bash_aliases (or if that does not exist, ~/.bashrc).
+#   export BLUESKY_CONDA_ENV=bluesky_2023_2
+
+# For more advice about bash environment variables, see:
+# https://github.com/BCDA-APS/bluesky_training/blob/main/bluesky/environments/admin/bluesky.md#bash-environment-variables
+
+channels:
+  # To get epics-base compatible with both PyEpics _and_ pvapy, apsu channel first.
+  # BTW: apsu channel includes all of sveseli channel (no need for the extra).
+  - apsu
+
+  # then conda-forge
+  - conda-forge
+  - aps-anl-tag
+  - defaults
+
+dependencies:
+
+  # --- Python core packages
+  - python >=3.10
+  - ipython
+  - jupyter
+  - jupyterlab
+  - notebook
+  - pip
+  - psutil
+
+  # --- testing and quality assurance
+  - black
+  - flake8
+  - pylint
+  - pytest
+  - pytest-notebook
+
+  # --- Qt
+  - pyqt =5
+  - qt =5
+
+  # --- general support packages
+  - bitshuffle
+  - epics-base >=7.0.5
+  - h5py
+  - hdf5plugin
+  - imagecodecs-lite
+  - ipympl
+  - lxml
+  - pandas
+  - pint
+  - pyEpics >=3.5.0
+  - pymongo
+  - pysumreg
+  - scikit-image
+  - xlrd
+
+  # --- Bluesky framework packages
+  - apstools
+  - area-detector-handlers
+  - bluesky-queueserver
+  - bluesky-queueserver-api
+  - bluesky-widgets
+  - bluesky >=1.8.1
+  - databroker-pack
+  - databroker =1.2
+  - hklpy >=1.0.3  # --- linux-64
+  - httpx >=0.14  # issue #75
+  - ophyd >=1.7
+  - pydm
+  - tiled
+  # https://github.com/pydata/xarray/issues/6818
+  - xarray !=2022.6.0  # incompatible with numpy=1.20
+
+  # --- user-requested packages
+  - nexpy
+  - punx
+  - pvapy
+  - pvview
+  - pymca
+  - pyRestTable
+  - spec2nexus
+  - stdlogpj
+  - xrayutilities
+
+  # --- packaging and publishing
+  - conda-build
+  - coverage
+  - mamba
+  - micromamba
+  - myst-parser
+  - nbsphinx
+  - pydata-sphinx-theme
+  - pygithub
+  - sphinx
+  - sphinx-tabs
+  - sphinxcontrib-napoleon
+  - twine
+  - versioneer
+
+  # if not using this environment at APS, then comment these (and apsbss below)
+  - aps-dm-api >=5  # linux-64 osx-64
+  # - c2dataviewer  # --- linux-64
+  #   LibMambaUnsatisfiableError: Encountered problems while solving:
+  #     - package c2dataviewer-1.7.6-py37_0 requires python >=3.7,<3.8.0a0, but none of the providers can be installed
+
+  - pip:
+    - apsbss  # only works on APS subnet
+    # - happi
+    # - tiled[all]
+    - https://github.com/BCDA-APS/adl2pydm/archive/main.zip
+    # --- optional Bluesky framework packages for evaluation
+    # - bluesky-webclient is NOT Python software, don't install it this way
+    #   https://github.com/bluesky/bluesky-webclient
diff --git a/environments/environment_2023_3.yml b/environments/environment_2023_3.yml
new file mode 100644
index 0000000..0f6d73c
--- /dev/null
+++ b/environments/environment_2023_3.yml
@@ -0,0 +1,126 @@
+name: bluesky_2023_3
+
+# download:
+#   wget https://raw.githubusercontent.com/BCDA-APS/bluesky_training/main/bluesky/environments/environment_2023_3.yml
+# create:
+#   conda env create --force -n bluesky_2023_3 -f ./environment_2023_3.yml --solver=libmamba
+# activate:
+#   conda activate bluesky_2023_3
+
+# Add/revise environment variable for default bluesky environment.
+# Add to ~/.bash_aliases (or if that does not exist, ~/.bashrc).
+#   export BLUESKY_CONDA_ENV=bluesky_2023_3
+
+# For more advice about bash environment variables, see:
+# https://github.com/BCDA-APS/bluesky_training/blob/main/bluesky/environments/admin/bluesky.md#bash-environment-variables
+
+channels:
+  # To get epics-base compatible with both PyEpics _and_ pvapy, apsu channel first.
+  # BTW: apsu channel includes all of sveseli channel (no need for the extra).
+  - apsu
+
+  # then conda-forge
+  - conda-forge
+  - aps-anl-tag
+  - defaults
+
+dependencies:
+
+  # --- Python core packages
+  - python >=3.10
+  - ipython
+  - jupyter
+  - jupyterlab
+  - notebook
+  - pip
+  - psutil
+
+  # --- testing and quality assurance
+  - black
+  - flake8
+  - pylint
+  - pytest
+  - pytest-notebook
+
+  # --- Qt
+  - pyqt =5
+  - qt =5
+
+  # --- general support packages
+  - bitshuffle
+  - epics-base >=7.0.5
+  - h5py
+  - hdf5plugin
+  - httpie
+  - imagecodecs-lite
+  - ipympl
+  - lxml
+  - pandas
+  - pint
+  - pyEpics >=3.5.0
+  - pymongo
+  - pysumreg
+  - scikit-image
+  - xlrd
+
+  # --- Bluesky framework packages
+  - apstools
+  - area-detector-handlers
+  - bluesky >=1.8.1,!=1.11.0
+  - bluesky-httpserver
+  - bluesky-kafka
+  - bluesky-live
+  - bluesky-queueserver
+  - bluesky-queueserver-api
+  - bluesky-widgets
+  - databroker-pack
+  - databroker =1.2
+  - hklpy >=1.0.3  # --- linux-64
+  - httpx >=0.14  # issue #75
+  - ophyd >=1.7
+  - pydm
+  - tiled
+  # https://github.com/pydata/xarray/issues/6818
+  - xarray !=2022.6.0  # incompatible with numpy=1.20
+
+  # --- user-requested packages
+  - nexpy
+  - punx
+  - pvapy
+  - pvview
+  - pymca
+  - pyRestTable
+  - spec2nexus
+  - stdlogpj
+  - xrayutilities
+
+  # --- packaging and publishing
+  - conda-build
+  - coverage
+  - mamba
+  - micromamba
+  - myst-parser
+  - nbsphinx
+  - pydata-sphinx-theme
+  - pygithub
+  - sphinx
+  - sphinx-tabs
+  - sphinxcontrib-napoleon
+  - twine
+  - versioneer
+
+  # if not using this environment at APS, then comment these (and apsbss below)
+  - aps-dm-api >=5  # linux-64 osx-64
+  # - c2dataviewer  # --- linux-64
+  #   LibMambaUnsatisfiableError: Encountered problems while solving:
+  #     - package c2dataviewer-1.7.6-py37_0 requires python >=3.7,<3.8.0a0, but none of the providers can be installed
+
+  - pip:
+    - apsbss  # only works on APS subnet
+    # - haven-spc
+    # - happi
+    # - tiled[all]
+    - https://github.com/BCDA-APS/adl2pydm/archive/main.zip
+    # --- optional Bluesky framework packages for evaluation
+    # - bluesky-webclient is NOT Python software, don't install it this way
+    #   https://github.com/bluesky/bluesky-webclient
diff --git a/instrument/README.md b/instrument/README.md
new file mode 100644
index 0000000..850018d
--- /dev/null
+++ b/instrument/README.md
@@ -0,0 +1,9 @@
+# Bluesky Instrument description
+
+Describes the devices, plans, and other Python code supporting an instrument for data acquisition with Bluesky.
+
+description | configuration file
+--- | ---
+instrument customizations | `iconfig.yml`
+interactive data collection | `collection.py`
+bluesky-queueserver | `queueserver.py`
diff --git a/instrument/__init__.py b/instrument/__init__.py
new file mode 100755
index 0000000..5305868
--- /dev/null
+++ b/instrument/__init__.py
@@ -0,0 +1,2 @@
+# instrument configuration
+from ._iconfig import iconfig
diff --git a/instrument/_iconfig.py b/instrument/_iconfig.py
new file mode 100755
index 0000000..7bd05ab
--- /dev/null
+++ b/instrument/_iconfig.py
@@ -0,0 +1,32 @@
+"""
+Provide information from the iconfig.yml configuration file.
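+
+Usage sketch (this key and default match calls elsewhere in this package)::
+
+    from instrument import iconfig
+    ioc_prefix = iconfig.get("GP_IOC_PREFIX", "gp:")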
+
+Example YAML configuration file::
+
+    # simple key:value pairs
+
+    ADSIM_IOC_PREFIX: "bdpad:"
+    GP_IOC_PREFIX: "bdp:"
+    catalog: bdp2022
+"""
+
+__all__ = ["iconfig", ]
+
+import logging
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+print(__file__)
+
+import pathlib
+import yaml
+
+
+CONFIG_FILE = pathlib.Path(__file__).absolute().parent / "iconfig.yml"
+
+if CONFIG_FILE.exists():
+    iconfig = yaml.load(open(CONFIG_FILE, "r").read(), yaml.Loader)
+else:
+    raise FileNotFoundError(
+        f"Could not find instrument configuration file: {CONFIG_FILE}"
+    )
diff --git a/instrument/callbacks/__init__.py b/instrument/callbacks/__init__.py
new file mode 100755
index 0000000..af72f5d
--- /dev/null
+++ b/instrument/callbacks/__init__.py
@@ -0,0 +1,8 @@
+# this file makes the .py files here importable
+
+from .. import iconfig
+
+if iconfig.get("WRITE_SPEC_DATA_FILES", False):
+    from .spec_data_file_writer import *
+
+del iconfig
diff --git a/instrument/callbacks/spec_data_file_writer.py b/instrument/callbacks/spec_data_file_writer.py
new file mode 100755
index 0000000..bee25d7
--- /dev/null
+++ b/instrument/callbacks/spec_data_file_writer.py
@@ -0,0 +1,78 @@
+"""
+custom callbacks
+"""
+
+__all__ = [
+    "specwriter",
+    "spec_comment",
+    "newSpecFile",
+]
+
+import logging
+
+logger = logging.getLogger(__name__)
+logger.info(__file__)
+
+try:
+    import apstools.callbacks as APS_fw
+except ModuleNotFoundError:
+    import apstools.filewriters as APS_fw
+
+import datetime
+import pathlib
+
+import apstools.utils
+
+from ..framework.initialize import RE
+
+# write scans to SPEC data file
+specwriter = APS_fw.SpecWriterCallback()
+# make the SPEC file in current working directory (assumes it is writable)
+_path = pathlib.Path().cwd()
+specwriter.newfile(_path / specwriter.spec_filename)
+
+try:
+    # feature new in apstools 1.6.14
+    from apstools.plans import label_stream_wrapper
+
+    def motor_start_preprocessor(plan):
+        return label_stream_wrapper(plan, "motor", when="start")
+
+    RE.preprocessors.append(motor_start_preprocessor)
+except Exception:
+    logger.warning("Could not load support to log motor positions.")
+
+
+def spec_comment(comment, doc=None):
+    # supply our specwriter to the standard routine
+    APS_fw.spec_comment(comment, doc, specwriter)
+
+
+def newSpecFile(title, scan_id=None, RE=None):
+    """
+    User choice of the SPEC file name.
+
+    Cleans up title, prepends month and day and appends file extension.
+    If ``RE`` is passed, then resets ``RE.md["scan_id"] = scan_id``.
+
+    If the SPEC file already exists, then ``scan_id`` is ignored and
+    ``RE.md["scan_id"]`` is set to the last scan number in the file.
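+
+    EXAMPLE (hypothetical title; makes a file such as ``02_14_sample1.dat``)::
+
+        newSpecFile("sample1", scan_id=1, RE=RE)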
+ """ + kwargs = {} + if RE is not None: + kwargs["RE"] = RE + + mmdd = str(datetime.datetime.now()).split()[0][5:].replace("-", "_") + clean = apstools.utils.cleanupText(title) + fname = pathlib.Path(f"{mmdd}_{clean}.dat") + if fname.exists(): + logger.warning(f">>> file already exists: {fname} <<<") + handled = "appended" + else: + kwargs["scan_id"] = scan_id or 1 + handled = "created" + + specwriter.newfile(fname, **kwargs) + + logger.info(f"SPEC file name : {specwriter.spec_filename}") + logger.info(f"File will be {handled} at end of next bluesky scan.") diff --git a/instrument/collection.py b/instrument/collection.py new file mode 100755 index 0000000..8e108d5 --- /dev/null +++ b/instrument/collection.py @@ -0,0 +1,56 @@ +""" +configure for data collection in a console session +""" + +from .session_logs import logger + +logger.info(__file__) + +# conda environment name +import os +_conda_prefix = os.environ.get("CONDA_PREFIX") +if _conda_prefix is not None: + logger.info("CONDA_PREFIX = %s", _conda_prefix) +del _conda_prefix + +from . import iconfig +from IPython import get_ipython + +# terse error dumps (Exception tracebacks) +_ip = get_ipython() +if _ip is not None: + _xmode_level = iconfig.get("XMODE_DEBUG_LEVEL", "Minimal") + _ip.run_line_magic('xmode', _xmode_level) + logger.info("xmode exception level: '%s'", _xmode_level) + del _ip + +from . import mpl + +logger.info("#### Bluesky Framework ####") +from .framework import * + +logger.info("#### Devices ####") +from .devices import * + +logger.info("#### Callbacks ####") +from .callbacks import * + +logger.info("#### Plans ####") +from .plans import * + +logger.info("#### Utilities ####") +from .utils import * +from apstools.utils import * + +from ._iconfig import iconfig +if iconfig.get("WRITE_SPEC_DATA_FILES", False): + if specwriter is not None: + RE.subscribe(specwriter.receiver) + logger.info(f"writing to SPEC file: {specwriter.spec_filename}") + logger.info(" >>>> Using default SPEC file name <<<<") + logger.info(" file will be created when bluesky ends its next scan") + logger.info(" to change SPEC file, use command: newSpecFile('title')") + +# last line: ensure we have the console's logger +from .session_logs import logger +logger.info("#### Startup is complete. ####") diff --git a/instrument/devices/__init__.py b/instrument/devices/__init__.py new file mode 100755 index 0000000..fe757b1 --- /dev/null +++ b/instrument/devices/__init__.py @@ -0,0 +1,28 @@ +""" +local, custom Device definitions +""" + +# from ophyd.log import config_ophyd_logging +# config_ophyd_logging(level="DEBUG") +# # 'ophyd' — the logger to which all ophyd log records propagate +# # 'ophyd.objects' — logs records from all devices and signals (that is, OphydObject subclasses) +# # 'ophyd.control_layer' — logs requests issued to the underlying control layer (e.g. 
+# # 'ophyd.control_layer' — logs requests issued to the underlying control layer (e.g. pyepics, caproto)
+# # 'ophyd.event_dispatcher' — issues regular summaries of the backlog of updates from the control layer that are being processed on background threads
+
+# from .aps_source import *
+# from .aps_undulator import *
+
+# from .area_detector import *
+# from .calculation_records import *
+# from .fourc_diffractometer import *
+# from .ioc_stats import *
+# from .kohzu_monochromator import *
+# from .motors import *
+# from .noisy_detector import *
+# from .scaler import *
+# from .shutter_simulator import *
+# from .simulated_fourc import *
+# from .simulated_kappa import *
+# from .slits import *
+# from .sixc_diffractometer import *
+# from .temperature_signal import *
\ No newline at end of file
diff --git a/instrument/devices/aps_source.py b/instrument/devices/aps_source.py
new file mode 100755
index 0000000..f5cb318
--- /dev/null
+++ b/instrument/devices/aps_source.py
@@ -0,0 +1,18 @@
+"""
+APS only: connect with facility information
+"""
+
+__all__ = [
+    "aps",
+]
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+import apstools.devices
+
+
+aps = apstools.devices.ApsMachineParametersDevice(name="aps")
diff --git a/instrument/devices/aps_undulator.py b/instrument/devices/aps_undulator.py
new file mode 100755
index 0000000..7adebf0
--- /dev/null
+++ b/instrument/devices/aps_undulator.py
@@ -0,0 +1,19 @@
+"""
+APS only: insertion device
+"""
+
+__all__ = [
+    "undulator",
+]
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+import apstools.devices
+
+
+undulator = apstools.devices.ApsUndulator("ID45", name="undulator")
+# undulator = apstools.devices.ApsUndulatorDual("ID45", name="undulator")
diff --git a/instrument/devices/area_detector.py b/instrument/devices/area_detector.py
new file mode 100755
index 0000000..43d73d7
--- /dev/null
+++ b/instrument/devices/area_detector.py
@@ -0,0 +1,179 @@
+"""
+EPICS area_detector ADSimDetector
+"""
+
+__all__ = """
+    adsimdet
+    change_ad_simulated_image_parameters
+    dither_ad_peak_position
+    dither_ad_off
+    dither_ad_on
+""".split()
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+from .. import iconfig
+from .calculation_records import calcs
+from apstools.devices import AD_plugin_primed
+from apstools.devices import AD_prime_plugin2
+from apstools.devices import CamMixin_V34
+from apstools.devices import SingleTrigger_V34
+from ophyd import ADComponent
+from ophyd import DetectorBase
+from ophyd import SimDetectorCam
+from ophyd.areadetector.filestore_mixins import FileStoreHDF5IterativeWrite
+from ophyd.areadetector.plugins import HDF5Plugin_V34
+from ophyd.areadetector.plugins import ImagePlugin_V34
+from ophyd.areadetector.plugins import PvaPlugin_V34
+from ophyd.ophydobj import Kind
+import numpy as np
+import pathlib
+
+
+IOC = iconfig.get("ADSIM_IOC_PREFIX", "ad:")
+
+IMAGE_DIR = iconfig["AD_IMAGE_DIR"]
+AD_IOC_MOUNT_PATH = pathlib.Path(iconfig["AD_MOUNT_PATH"])
+BLUESKY_MOUNT_PATH = pathlib.Path(iconfig["BLUESKY_MOUNT_PATH"])
+
+# MUST end with a `/`, pathlib will NOT provide it
+WRITE_PATH_TEMPLATE = f"{AD_IOC_MOUNT_PATH / IMAGE_DIR}/"
+READ_PATH_TEMPLATE = f"{BLUESKY_MOUNT_PATH / IMAGE_DIR}/"
+
+
+class SimDetectorCam_V34(CamMixin_V34, SimDetectorCam):
+    """Revise SimDetectorCam for ADCore revisions."""
+
+
+class MyHDF5Plugin(FileStoreHDF5IterativeWrite, HDF5Plugin_V34):
+    """
+    Add data acquisition methods to HDF5Plugin.
+
+    * ``stage()`` - prepare device PVs before data acquisition
+    * ``unstage()`` - restore device PVs after data acquisition
+    * ``generate_datum()`` - coordinate image storage metadata
+    """
+
+    def stage(self):
+        self.stage_sigs.move_to_end("capture", last=True)
+        super().stage()
+
+
+class SimDetector_V34(SingleTrigger_V34, DetectorBase):
+    """
+    ADSimDetector
+
+    SingleTrigger:
+
+    * stop any current acquisition
+    * sets image_mode to 'Multiple'
+    """
+
+    cam = ADComponent(SimDetectorCam_V34, "cam1:")
+    hdf1 = ADComponent(
+        MyHDF5Plugin,
+        "HDF1:",
+        write_path_template=WRITE_PATH_TEMPLATE,
+        read_path_template=READ_PATH_TEMPLATE,
+    )
+    image = ADComponent(ImagePlugin_V34, "image1:")
+    pva = ADComponent(PvaPlugin_V34, "Pva1:")
+
+
+def change_ad_simulated_image_parameters():
+    """
+    Make the image be a "peak" (simulate a diffraction spot).
+
+    Randomly-placed, random max, random noise
+
+    Not a bluesky plan (uses blocking calls).
+    """
+    cam = adsimdet.cam
+    cam.reset.put(1)
+    cam.sim_mode.put(1)  # Peaks
+    cam.gain.put(100 + 100 * np.random.random())
+    cam.offset.put(10 * np.random.random())
+    cam.noise.put(20 * np.random.random())
+    cam.peak_start.peak_start_x.put(200 + 500 * np.random.random())
+    cam.peak_start.peak_start_y.put(200 + 500 * np.random.random())
+    cam.peak_width.peak_width_x.put(10 + 100 * np.random.random())
+    cam.peak_width.peak_width_y.put(10 + 100 * np.random.random())
+    cam.peak_variation.put(0.5 + 20 * np.random.random())
+
+
+def dither_ad_off():
+    # select: 0 = off (Passive)
+    calcs.calc9.scanning_rate.put(0)
+    calcs.calc10.scanning_rate.put(0)
+
+
+def dither_ad_on(select=6):
+    # select: 6 = 1 Hz (1 second), 9 = 10 Hz (.1 second)
+    calcs.calc9.scanning_rate.put(select)
+    calcs.calc10.scanning_rate.put(select)
+
+
+def dither_ad_peak_position(magnitude=40):
+    """
+    Dither the peak position using swait records.
+    """
+    peak = adsimdet.cam.peak_start
+    formula = f"min(B,max(C,A+{magnitude}*(RNDM-0.5)))"
+    x = calcs.calc9
+    x.description.put("adsimdet peak X dither")
+    x.calculation.put(formula)
+    x.channels.A.input_pv.put(peak.peak_start_x.pvname)
+    x.channels.B.input_value.put(900)  # upper limit
+    x.channels.C.input_value.put(100)  # lower limit
+    x.output_link_pv.put(peak.peak_start_x.setpoint_pvname)
+    y = calcs.calc10
+    y.description.put("adsimdet peak Y dither")
+    y.calculation.put(formula)
+    y.channels.A.input_pv.put(peak.peak_start_y.pvname)
+    y.channels.B.input_value.put(900)  # upper limit
+    y.channels.C.input_value.put(100)  # lower limit
+    y.output_link_pv.put(peak.peak_start_y.setpoint_pvname)
+    dither_ad_on()
+
+
+try:
+    adsimdet = SimDetector_V34(IOC, name="adsimdet", labels=("area_detector",))
+    adsimdet.wait_for_connection(timeout=15)
+except TimeoutError:
+    logger.warning("Did not connect to area detector IOC '%s'", IOC)
+    adsimdet = None
+else:
+    # override default settings from ophyd
+    adsimdet.hdf1.create_directory.put(-5)
+    adsimdet.hdf1.kind = Kind.config | Kind.normal  # Ensure plugin's read is called.
+
+    # The plugins do not block, the cam must wait for the plugins to finish.
+    for det in (adsimdet, ):
+        for nm in det.component_names:
+            obj = getattr(det, nm)
+            if "blocking_callbacks" in dir(obj):  # is it a plugin?
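+                # Non-blocking plugin callbacks run on IOC threads;
+                # the cam then waits for the plugins to finish (see
+                # "wait_for_plugins" below) before acquisition is done.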
+                obj.stage_sigs["blocking_callbacks"] = "No"
+        det.cam.stage_sigs["wait_for_plugins"] = "Yes"
+        det.hdf1.stage_sigs["compression"] = "zlib"
+        det.hdf1.stage_sigs.move_to_end("capture", last=True)
+
+    if iconfig.get("ALLOW_AREA_DETECTOR_WARMUP", False):
+        # Even with `lazy_open=1`, ophyd checks if the area
+        # detector HDF5 plugin has been primed.  We might
+        # need to prime it.  Here's ophyd's test:
+        #     if np.array(adsimdet.hdf1.array_size.get()).sum() == 0:
+        #         logger.info(f"Priming {adsimdet.hdf1.name} ...")
+        #         adsimdet.hdf1.warmup()
+        #         logger.info(f"Enabling {adsimdet.image.name} plugin ...")
+        #         adsimdet.image.enable.put("Enable")
+        # Ophyd's test is not sufficient.
+        # WORKAROUND (involving a few more tests)
+        if not AD_plugin_primed(adsimdet.hdf1):
+            AD_prime_plugin2(adsimdet.hdf1)
+
+    change_ad_simulated_image_parameters()  # new peak parameters
+    dither_ad_peak_position()  # EPICS will dither the peak position
diff --git a/instrument/devices/calculation_records.py b/instrument/devices/calculation_records.py
new file mode 100755
index 0000000..299b4fe
--- /dev/null
+++ b/instrument/devices/calculation_records.py
@@ -0,0 +1,28 @@
+"""
+calculations
+"""
+
+__all__ = ["calcs", "calcouts"]
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+from .. import iconfig
+import apstools.synApps
+
+
+IOC = iconfig.get("GP_IOC_PREFIX", "gp:")
+
+calcs = apstools.synApps.UserCalcsDevice(IOC, name="calcs")
+calcouts = apstools.synApps.UserCalcoutDevice(IOC, name="calcouts")
+
+if iconfig.get("ENABLE_CALCS", False):
+    # Normally, do not do _any_ actions (like these) in the instrument
+    # package since that might affect other simultaneous use.  In this
+    # case, the actions are probably OK.  Most users forget they even exist.
+    # These steps enable all the userCalcN and userCalcoutN records to process.
+    calcs.enable.put(1)
+    calcouts.enable.put(1)
diff --git a/instrument/devices/fourc_diffractometer.py b/instrument/devices/fourc_diffractometer.py
new file mode 100755
index 0000000..2853ab4
--- /dev/null
+++ b/instrument/devices/fourc_diffractometer.py
@@ -0,0 +1,60 @@
+"""
+4-circle diffractometer, connected to EpicsMotor records.
+
+https://github.com/prjemian/epics-docker/tree/main/v1.1/n5_custom_synApps#motor-assignments
+
+=====  ==================
+motor  assignment
+=====  ==================
+m29    4-circle diffractometer M_TTH
+m30    4-circle diffractometer M_TH
+m31    4-circle diffractometer M_CHI
+m32    4-circle diffractometer M_PHI
+=====  ==================
+"""
+
+__all__ = """
+    fourc
+""".split()
+
+import logging
+
+logger = logging.getLogger(__name__)
+logger.info(__file__)
+
+import hkl
+from ophyd import Component
+from ophyd import EpicsMotor
+from ophyd import EpicsSignalRO
+
+from .. import iconfig
+
+IOC = iconfig.get("GP_IOC_PREFIX", "gp:")
+M_TTH = "m29"
+M_TH = "m30"
+M_CHI = "m31"
+M_PHI = "m32"
+
+
+class FourCircle(hkl.SimMixin, hkl.E4CV):
+    """
+    Our 4-circle.  Eulerian, vertical scattering orientation.
+
+    Energy obtained (RO) from monochromator.
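+
+    Example, reading the (read-only) monochromator energy::
+
+        fourc.energy.get()  # units: fourc.energy_units.get()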
+ """ + + # the reciprocal axes are defined by SimMixin + + omega = Component(EpicsMotor, f"{M_TH}", kind="hinted", labels=["motor"]) + chi = Component(EpicsMotor, f"{M_CHI}", kind="hinted", labels=["motor"]) + phi = Component(EpicsMotor, f"{M_PHI}", kind="hinted", labels=["motor"]) + tth = Component(EpicsMotor, f"{M_TTH}", kind="hinted", labels=["motor"]) + + energy = Component(EpicsSignalRO, "BraggERdbkAO", kind="hinted", labels=["energy"]) + energy_units = Component(EpicsSignalRO, "BraggERdbkAO.EGU", kind="config") + + +fourc = FourCircle(IOC, name="fourc") +fourc.wait_for_connection() +fourc._update_calc_energy() diff --git a/instrument/devices/ioc_stats.py b/instrument/devices/ioc_stats.py new file mode 100755 index 0000000..169925d --- /dev/null +++ b/instrument/devices/ioc_stats.py @@ -0,0 +1,35 @@ +""" +IOC statistics: synApps iocStats +""" + +# fmt: off +__all__ = [ + "gp_stats", + # "ad_stats", +] +# fmt: on + +import logging + +logger = logging.getLogger(__name__) + +logger.info(__file__) + +from .. import iconfig +from ophyd import Component, Device, EpicsSignalRO + + +IOC = iconfig.get("GP_IOC_PREFIX", "gp:") + + +class IocInfoDevice(Device): + + iso8601 = Component(EpicsSignalRO, "iso8601") + uptime = Component(EpicsSignalRO, "UPTIME") + + +gp_stats = IocInfoDevice(IOC, name="gp_stats") + +# Too bad, this ADSimDetector does not have iocStats +# IOC = iconfig.get("ADSIM_IOC_PREFIX", "ad:") +# ad_stats = IocInfoDevice(IOC, name="ad_stats") diff --git a/instrument/devices/kohzu_monochromator.py b/instrument/devices/kohzu_monochromator.py new file mode 100755 index 0000000..4c3024b --- /dev/null +++ b/instrument/devices/kohzu_monochromator.py @@ -0,0 +1,64 @@ +""" +Simulated Kohzu Double-Crystal Monochromator (DCM) +""" + +__all__ = [ + "dcm", +] + +import logging + +logger = logging.getLogger(__name__) + +logger.info(__file__) + +from .. import iconfig +from apstools.devices import KohzuSeqCtl_Monochromator +from bluesky import plan_stubs as bps +from ophyd import Component +from ophyd import EpicsMotor + + +IOC = iconfig.get("GP_IOC_PREFIX", "gp:") + + +class MyKohzu(KohzuSeqCtl_Monochromator): + m_theta = Component(EpicsMotor, "m45", kind="normal", labels=["motor"]) + m_y = Component(EpicsMotor, "m46", kind="normal", labels=["motor"]) + m_z = Component(EpicsMotor, "m47", kind="normal", labels=["motor"]) + + def into_control_range(self, p_theta=2, p_y=-15, p_z=90): + """ + Move the Kohzu motors into range so the energy controls will work. + + Written as a bluesky plan so that all motors can be moved + simultaneously. Return early if the motors are already in range. 
+
+        USAGE::
+
+            RE(dcm.into_control_range())
+        """
+        args = []
+        if self.m_theta.position < p_theta:
+            args += [self.m_theta, p_theta]
+        if self.m_y.position > p_y:
+            args += [self.m_y, p_y]
+        if self.m_z.position < p_z:
+            args += [self.m_z, p_z]
+        if (len(args) == 0):
+            # all motors in range, no work to do, MUST yield something
+            yield from bps.null()
+            return
+        yield from bps.mv(*args)  # move the out-of-range motors
+        yield from bps.sleep(1)  # allow IOC to react
+        yield from bps.mv(
+            self.operator_acknowledge, 1,
+            self.mode, "Auto"
+        )
+
+    def stop(self):
+        self.m_theta.stop()
+        self.m_y.stop()
+        self.m_z.stop()
+
+
+dcm = MyKohzu(IOC, name="dcm")
diff --git a/instrument/devices/motors.py b/instrument/devices/motors.py
new file mode 100755
index 0000000..fe9865a
--- /dev/null
+++ b/instrument/devices/motors.py
@@ -0,0 +1,47 @@
+"""
+example motors
+"""
+
+__all__ = """
+    m1  m2  m3  m4
+    m7  m8
+    m9  m10 m11 m12
+    m13 m14 m15 m16
+""".split()
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+from .. import iconfig
+from ophyd import EpicsMotor, Component, EpicsSignal
+
+
+IOC = iconfig.get("GP_IOC_PREFIX", "gp:")
+
+
+class MyEpicsMotor(EpicsMotor):
+
+    steps_per_revolution = Component(EpicsSignal, ".SREV", kind="omitted")
+
+
+m1 = MyEpicsMotor(f"{IOC}m1", name="m1", labels=("motor",))
+m2 = MyEpicsMotor(f"{IOC}m2", name="m2", labels=("motor",))
+m3 = MyEpicsMotor(f"{IOC}m3", name="m3", labels=("motor",))
+m4 = MyEpicsMotor(f"{IOC}m4", name="m4", labels=("motor",))
+# m5 = MyEpicsMotor(f"{IOC}m5", name="m5", labels=("motor",))  # used by Slit1
+# m6 = MyEpicsMotor(f"{IOC}m6", name="m6", labels=("motor",))  # used by Slit1
+m7 = MyEpicsMotor(f"{IOC}m7", name="m7", labels=("motor",))
+m8 = MyEpicsMotor(f"{IOC}m8", name="m8", labels=("motor",))
+m9 = MyEpicsMotor(f"{IOC}m9", name="m9", labels=("motor",))
+m10 = MyEpicsMotor(f"{IOC}m10", name="m10", labels=("motor",))
+m11 = MyEpicsMotor(f"{IOC}m11", name="m11", labels=("motor",))
+m12 = MyEpicsMotor(f"{IOC}m12", name="m12", labels=("motor",))
+m13 = MyEpicsMotor(f"{IOC}m13", name="m13", labels=("motor",))
+m14 = MyEpicsMotor(f"{IOC}m14", name="m14", labels=("motor",))
+m15 = MyEpicsMotor(f"{IOC}m15", name="m15", labels=("motor",))
+m16 = MyEpicsMotor(f"{IOC}m16", name="m16", labels=("motor",))
+
+m1.wait_for_connection()
+m1.steps_per_revolution.put(2000)
diff --git a/instrument/devices/noisy_detector.py b/instrument/devices/noisy_detector.py
new file mode 100755
index 0000000..ad20fee
--- /dev/null
+++ b/instrument/devices/noisy_detector.py
@@ -0,0 +1,48 @@
+"""
+simulated noisy detector
+"""
+
+__all__ = [
+    "noisy",
+    "change_noisy_parameters",
+]
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+from .. import iconfig
+from .calculation_records import calcs
+from .motors import m1
+from ophyd import EpicsSignalRO
+import apstools.devices
+import numpy
+
+
+IOC = iconfig.get("GP_IOC_PREFIX", "gp:")
+
+
+def change_noisy_parameters(fwhm=0.15, peak=10000, noise=0.08):
+    """
+    Setup the swait record with new random numbers.
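+
+    Example (argument values are arbitrary)::
+
+        change_noisy_parameters(fwhm=0.1, peak=5000)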
+
+    BLOCKING calls, not a bluesky plan
+    """
+    calcs.calc1.reset()
+    apstools.devices.setup_lorentzian_swait(
+        calcs.calc1,
+        m1.user_readback,
+        center=2 * numpy.random.random() - 1,
+        width=fwhm * numpy.random.random(),
+        scale=peak * (9 + numpy.random.random()),
+        noise=noise * (0.01 + numpy.random.random()),
+    )
+
+
+# demo: use swait records to make "noisy" detector signals
+noisy = EpicsSignalRO(
+    f"{IOC}userCalc1", name="noisy", labels=("detectors", "simulator")
+)
+change_noisy_parameters()
diff --git a/instrument/devices/scaler.py b/instrument/devices/scaler.py
new file mode 100755
index 0000000..84c7a87
--- /dev/null
+++ b/instrument/devices/scaler.py
@@ -0,0 +1,55 @@
+"""
+example scaler
+"""
+
+__all__ = """
+    scaler1
+    timebase I0 I00 I000 scint diode
+""".split()
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+from .. import iconfig
+from ophyd.scaler import ScalerCH
+import time
+
+
+IOC = iconfig.get("GP_IOC_PREFIX", "gp:")
+
+# make an instance of the entire scaler, for general control
+scaler1 = ScalerCH(f"{IOC}scaler1", name="scaler1", labels=["scalers", "detectors"])
+scaler1.wait_for_connection()
+
+if not len(scaler1.channels.chan01.chname.get()):
+    # CAUTION: define channel names JUST for this simulation.
+    # For a real instrument, the names are assigned when the
+    # detector pulse cables are connected to the scaler channels.
+    logger.info(
+        f"{scaler1.name} has no channel names.  Assigning channel names."
+    )
+    scaler1.channels.chan01.chname.put("timebase")
+    scaler1.channels.chan02.chname.put("I0")
+    scaler1.channels.chan03.chname.put("scint")
+    scaler1.channels.chan04.chname.put("diode")
+    scaler1.channels.chan05.chname.put("I000")
+    scaler1.channels.chan06.chname.put("I00")
+    time.sleep(1)  # wait for IOC
+
+# choose just the channels with EPICS names
+scaler1.select_channels()
+
+# examples: make shortcuts to specific channels assigned in EPICS
+
+timebase = scaler1.channels.chan01.s
+I0 = scaler1.channels.chan02.s
+scint = scaler1.channels.chan03.s
+diode = scaler1.channels.chan04.s
+I000 = scaler1.channels.chan05.s
+I00 = scaler1.channels.chan06.s
+
+for item in (timebase, I0, I00, I000, scint, diode):
+    item._ophyd_labels_ = set(["channel", "counter"])
diff --git a/instrument/devices/shutter_simulator.py b/instrument/devices/shutter_simulator.py
new file mode 100755
index 0000000..0668c2d
--- /dev/null
+++ b/instrument/devices/shutter_simulator.py
@@ -0,0 +1,22 @@
+"""
+shutter
+"""
+
+__all__ = [
+    "shutter",
+]
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+import apstools.devices
+
+shutter = apstools.devices.SimulatedApsPssShutterWithStatus(
+    name="shutter", labels=("shutters",)
+)
+
+# shutter needs short recovery time after moving
+shutter.delay_s = 0.05
diff --git a/instrument/devices/simulated_fourc.py b/instrument/devices/simulated_fourc.py
new file mode 100755
index 0000000..e67aa8f
--- /dev/null
+++ b/instrument/devices/simulated_fourc.py
@@ -0,0 +1,16 @@
+"""
+Simulated 4-circle diffractometer.
+"""
+
+__all__ = """
+    sim4c
+""".split()
+
+import logging
+
+logger = logging.getLogger(__name__)
+logger.info(__file__)
+
+import hkl
+
+sim4c = hkl.SimulatedE4CV("", name="sim4c")
diff --git a/instrument/devices/simulated_kappa.py b/instrument/devices/simulated_kappa.py
new file mode 100755
index 0000000..f0f4065
--- /dev/null
+++ b/instrument/devices/simulated_kappa.py
@@ -0,0 +1,18 @@
+"""
+Simulated kappa 4- & 6-circle diffractometers.
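+
+Example (standard ophyd pseudo-positioner access)::
+
+    simk4c.position  # current (h, k, l) pseudo-position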
+""" + +__all__ = """ + simk4c + simk6c +""".split() + +import logging + +logger = logging.getLogger(__name__) +logger.info(__file__) + +import hkl + +simk4c = hkl.SimulatedK4CV("", name="simk4c") +simk6c = hkl.SimulatedK6C("", name="simk6c") diff --git a/instrument/devices/sixc_diffractometer.py b/instrument/devices/sixc_diffractometer.py new file mode 100755 index 0000000..d4242a0 --- /dev/null +++ b/instrument/devices/sixc_diffractometer.py @@ -0,0 +1,63 @@ +""" +6-circle diffractometer, connected to EpicsMotor records. + +https://github.com/prjemian/epics-docker/tree/main/v1.1/n5_custom_synApps#motor-assignments + + +===== ================== +motor assignment +===== ================== +m29 4-circle diffractometer M_TTH +m30 4-circle diffractometer M_TH +m31 4-circle diffractometer M_CHI +m32 4-circle diffractometer M_PHI +===== ================== +""" + +__all__ = """ + sixc +""".split() + +import logging + +logger = logging.getLogger(__name__) +logger.info(__file__) + +import hkl +from ophyd import Component, EpicsMotor, EpicsSignalRO + +from .. import iconfig + +IOC = iconfig.get("GP_IOC_PREFIX", "gp:") +# No defined motor assignments, pick different than fourc (m29-32) +M_TTH = "m23" +M_OMEGA = "m24" +M_CHI = "m25" +M_PHI = "m26" +M_GAMMA = "m27" +M_MU = "m28" + + +class SixCircle(hkl.SimMixin, hkl.E6C): + """ + Our 6-circle. Eulerian. + + Energy obtained (RO) from monochromator. + """ + + # the reciprocal axes are defined by SimMixin + + mu = Component(EpicsMotor, f"{M_MU}", kind="hinted", labels=["motor"]) + omega = Component(EpicsMotor, f"{M_OMEGA}", kind="hinted", labels=["motor"]) + chi = Component(EpicsMotor, f"{M_CHI}", kind="hinted", labels=["motor"]) + phi = Component(EpicsMotor, f"{M_PHI}", kind="hinted", labels=["motor"]) + gamma = Component(EpicsMotor, f"{M_GAMMA}", kind="hinted", labels=["motor"]) + delta = Component(EpicsMotor, f"{M_TTH}", kind="hinted", labels=["motor"]) + + energy = Component(EpicsSignalRO, "BraggERdbkAO", kind="hinted", labels=["energy"]) + energy_units = Component(EpicsSignalRO, "BraggERdbkAO.EGU", kind="config") + + +sixc = SixCircle(IOC, name="sixc") +sixc.wait_for_connection() +sixc._update_calc_energy() diff --git a/instrument/devices/slits.py b/instrument/devices/slits.py new file mode 100755 index 0000000..9433924 --- /dev/null +++ b/instrument/devices/slits.py @@ -0,0 +1,93 @@ +""" +2D slits, 4 individual motorized blades. + +.. note:: The motor assignments are set in the IOC + when loading the ``2slit.db`` database. + +There are two representations of the same ``2slit.db`` database. +Choose between a hierarchical (``Optics2Slit2D_HV()``) +or flat structure (``Optics2Slit2D_InbOutBotTop()`): + +* ``Optics2Slit2D_HV()`` has a hierarchical structure:: + + slit1 + h + xp, xn, size, center + v + xp, xn, size, center + +* ``Optics2Slit2D_InbOutBotTop()`` has a flat structure:: + + slit1 + top + bot + out + inb + hsize + hcenter + vsize + vcenter + +Coordinates of each representation (viewing from detector towards source):: + + Optics2Slit2D_HV Optics2Slit2D_InbOutBotTop + v.xp top + h.xn h.xp inb out + v.xn bot + +**Motor Assignments** + +This information is for reference only. The Python configuration +here does not need to know the motor assignments. That is part +of the IOC configuration. + +====== ========== ================== +motor position assignment +====== ========== ================== +m41 v.xp Slit1V:mXp +m42 v.xn Slit1V:mXn +m5 (!) h.xp Slit1H:mXp +m6 (!) h.xn Slit1H:mXn +====== ========== ================== + +.. warning: (!) 
+   Some motor assignments in the training IOC are misconfigured.
+   The misconfiguration happens in the IOC configuration in the
+   ``prjemian/custom-synapps-6.2`` docker image.
+   (https://hub.docker.com/r/prjemian/custom-synapps-6.2)
+
+   ==========  ==========  ============
+   in docker   should be   assignment
+   ==========  ==========  ============
+   m5          m43         Slit1H:mXp
+   m6          m44         Slit1H:mXn
+   ==========  ==========  ============
+
+   These assignments will be corrected in a future version of the
+   docker image: ``prjemian/synapps``.
+
+**References**
+
+* https://github.com/epics-modules/optics/blob/master/opticsApp/Db/2slit.db
+* https://bcda-aps.github.io/apstools/latest/api/synApps/_db_2slit.html#apstools.synApps.db_2slit.Optics2Slit2D_InbOutBotTop
+* https://github.com/prjemian/epics-docker/tree/main/v1.1/n5_custom_synApps#motor-assignments
+"""
+
+__all__ = """
+    slit1
+""".split()
+
+import logging
+
+# Choose between alternate interfaces to the same controls:
+from apstools.synApps import Optics2Slit2D_HV
+# from apstools.synApps import Optics2Slit2D_InbOutBotTop
+
+from .. import iconfig
+
+logger = logging.getLogger(__name__)
+logger.info(__file__)
+
+IOC = iconfig.get("GP_IOC_PREFIX", "gp:")
+
+
+slit1 = Optics2Slit2D_HV(f"{IOC}Slit1", name="slit1")
diff --git a/instrument/devices/temperature_signal.py b/instrument/devices/temperature_signal.py
new file mode 100755
index 0000000..7887fdf
--- /dev/null
+++ b/instrument/devices/temperature_signal.py
@@ -0,0 +1,136 @@
+"""
+simulated noisy temperature controller using swait record
+"""
+
+__all__ = [
+    "temperature",
+]
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+from .. import iconfig
+from .calculation_records import calcs
+from ophyd import Component
+from ophyd import EpicsSignal
+from ophyd import PVPositioner
+from ophyd import Signal
+
+
+IOC = iconfig.get("GP_IOC_PREFIX", "gp:")
+
+
+class MyPvPositioner(PVPositioner):
+    # positioner
+    readback = Component(EpicsSignal, ".VAL")  # THIS readback IS writable!
+    setpoint = Component(EpicsSignal, ".B")
+    done = Component(Signal, value=True)
+    done_value = True
+
+    # additional, for the simulator
+    calculation = Component(EpicsSignal, ".CALC", kind="config")
+    description = Component(EpicsSignal, ".DESC", kind="config")
+    max_change = Component(EpicsSignal, ".D", kind="config")
+    noise = Component(EpicsSignal, ".C", kind="config")
+    previous_value_pv = Component(EpicsSignal, ".INAN", kind="config")
+    scanning_rate = Component(EpicsSignal, ".SCAN", kind="config")
+    tolerance = Component(EpicsSignal, ".E", kind="config")
+    report_dmov_changes = Component(Signal, value=True, kind="omitted")
+
+    def cb_readback(self, *args, **kwargs):
+        """
+        Called when readback changes (EPICS CA monitor event).
+        """
+        diff = self.readback.get() - self.setpoint.get()
+        dmov = abs(diff) <= self.tolerance.get()
+        if self.report_dmov_changes.get() and dmov != self.done.get():
+            logger.debug(f"{self.name} reached: {dmov}")
+        self.done.put(dmov)
+
+    def cb_setpoint(self, *args, **kwargs):
+        """
+        Called when setpoint changes (EPICS CA monitor event).
+
+        When the setpoint is changed, force done=False.  For any move,
+        done must go != done_value, then back to done_value (True).
+        Without this response, a small move (within tolerance) will not return.
+        Next update of readback will compute self.done.
+ """ + self.done.put(not self.done_value) + + def __init__(self, *args, **kwargs): + """ + These are the arguments in the full signature: + + self, + prefix, + *, + limits=None, + name=None, + read_attrs=None, + configuration_attrs=None, + parent=None, + egu="", + **kwargs, + """ + super().__init__(*args, **kwargs) + + # setup callbacks on readback and setpoint + self.readback.subscribe(self.cb_readback) + self.setpoint.subscribe(self.cb_setpoint) + + # the readback needs no adjective + self.readback.name = self.name + + @property + def inposition(self): + """ + Report (boolean) if positioner is done. + """ + return self.done.get() == self.done_value + + def stop(self, *, success=False): + """ + Hold the current readback when the stop() method is called and not done. + """ + if not self.done.get(): + self.setpoint.put(self.position) + + def setup_temperature( + self, + setpoint=None, + noise=2, + rate=6, + tol=1, + max_change=2, + report_dmov_changes=True, + ): + """ + Setup the swait record with new random numbers. + + BLOCKING calls, not a bluesky plan + """ + calcs.calc8.reset() # remove any prior configuration + self.description.put("temperature") + self.report_dmov_changes.put(report_dmov_changes) + self.previous_value_pv.put(self.readback.pvname) + if setpoint is not None: + self.setpoint.put(setpoint) + self.readback.put(setpoint) + self.noise.put(noise) + self.max_change.put(max_change) + self.tolerance.put(tol) + self.scanning_rate.put(rate) # 1 second + self.calculation.put("A+max(-D,min(D,(B-A)))+C*(RNDM-0.5)") + + +temperature = MyPvPositioner( + f"{IOC}userCalc8", name="temperature", limits=(-20, 255), egu="C", +) +temperature.wait_for_connection() +temperature.setup_temperature( + setpoint=25, noise=1, rate=5, tol=1, max_change=2, report_dmov_changes=False +) diff --git a/instrument/epics_signal_config.py b/instrument/epics_signal_config.py new file mode 100755 index 0000000..5ddfbf6 --- /dev/null +++ b/instrument/epics_signal_config.py @@ -0,0 +1,58 @@ +""" +Set default timeouts for EPICS and define an EPICS-based scan_id. +""" + +__all__ = """ + epics_scan_id_source + scan_id_epics +""".split() + +import logging + +logger = logging.getLogger(__name__) +logger.info(__file__) + +from . import iconfig +from ophyd.signal import EpicsSignal +from ophyd.signal import EpicsSignalBase + + +# set default timeout for all EpicsSignal connections & communications +# always first, before ANY ophyd EPICS-based signals are created +TIMEOUT = 60 +if not EpicsSignalBase._EpicsSignalBase__any_instantiated: + EpicsSignalBase.set_defaults( + auto_monitor=True, + timeout=iconfig.get("PV_READ_TIMEOUT", TIMEOUT), + write_timeout=iconfig.get("PV_WRITE_TIMEOUT", TIMEOUT), + connection_timeout=iconfig.get("PV_CONNECTION_TIMEOUT", TIMEOUT), + ) + +pvname = iconfig.get("RUN_ENGINE_SCAN_ID_PV") +if pvname is None: + logger.info("Using RunEngine metadata for scan_id") + scan_id_epics = None +else: + logger.info("Using EPICS PV %s for scan_id", pvname) + scan_id_epics = EpicsSignal(pvname, name="scan_id_epics") + + +def epics_scan_id_source(*args, **kwargs): + """ + Callback function for RunEngine. Returns *next* scan_id to be used. + + * Get current scan_id from PV. + * Apply lower limit of zero. + * Increment. + * Set PV with new value. + * Return new value. + """ + if scan_id_epics is None: + raise RuntimeError( + "epics_scan_id_source() called when" + " 'RUN_ENGINE_SCAN_ID_PV' is" + "undefined in 'iconfig.yml' file." 
+ ) + new_scan_id = max(scan_id_epics.get(), 0) + 1 + scan_id_epics.put(new_scan_id) + return new_scan_id diff --git a/instrument/framework/__init__.py b/instrument/framework/__init__.py new file mode 100755 index 0000000..bfeb2c5 --- /dev/null +++ b/instrument/framework/__init__.py @@ -0,0 +1,10 @@ +""" +configure the Bluesky framework +""" + +from .check_python import * +from .check_bluesky import * + +from .initialize import * + +from .metadata import * diff --git a/instrument/framework/check_bluesky.py b/instrument/framework/check_bluesky.py new file mode 100755 index 0000000..d05d738 --- /dev/null +++ b/instrument/framework/check_bluesky.py @@ -0,0 +1,63 @@ +""" +ensure BlueSky is available +""" + +__all__ = [] + +import logging + +logger = logging.getLogger(__name__) + +logger.info(__file__) + +import sys +from .. import iconfig + +# ensure BlueSky is available +try: + import bluesky +except ImportError: + raise ImportError( + "No module named `bluesky`\n" + f"This python is from directory: {sys.prefix}\n" + "\n" + "You should exit now and find a Python with Bluesky." + ) + +# ensure minimum bluesky version + +req_version = tuple(iconfig.get("MINIMUM_BLUESKY_VERSION", (1, 8))) +cur_version = tuple(map(int, bluesky.__version__.split(".")[:2])) +if cur_version < req_version: + ver_str = ".".join((map(str, req_version))) + raise ValueError( + f"Need bluesky version {ver_str} or higher" + f", found version {bluesky.__version__}" + ) + +# ensure minimum ophyd version + +import ophyd + +req_version = tuple(iconfig.get("MINIMUM_OPHYD_VERSION", (1, 6))) +cur_version = tuple(map(int, ophyd.__version__.split(".")[:2])) +if cur_version < req_version: + ver_str = ".".join((map(str, req_version))) + raise ValueError( + f"Need ophyd version {ver_str} or higher" + f", found version {ophyd.__version__}" + ) + + +# ensure minimum databroker version + +import databroker + +req_version = tuple(iconfig.get("MINIMUM_DATABROKER_VERSION", (1, 2))) +cur_version = tuple(map(int, databroker.__version__.split(".")[:2])) +if cur_version < req_version: + ver_str = ".".join((map(str, req_version))) + raise ValueError( + f"Need databroker version {ver_str} or higher" + f", found version {databroker.__version__}" + ) diff --git a/instrument/framework/check_python.py b/instrument/framework/check_python.py new file mode 100755 index 0000000..98ad614 --- /dev/null +++ b/instrument/framework/check_python.py @@ -0,0 +1,28 @@ +""" +make sure we have the software packages we need +""" + +__all__ = [] + +import logging + +logger = logging.getLogger(__name__) + +logger.info(__file__) + +from .. import iconfig +import sys + +# ensure minimum Python version + +req_version = tuple(iconfig.get("MINIMUM_PYTHON_VERSION", (3, 7))) +cur_version = sys.version_info +if cur_version < req_version: + ver_str = ".".join((map(str, req_version))) + raise RuntimeError( + f"Requires Python {ver_str}+ with the Bluesky framework.\n" + f"You have Python {sys.version} from {sys.prefix}\n" + "\n" + "You should exit now and start a Python" + " with the Bluesky framework." 
+ ) diff --git a/instrument/framework/initialize.py b/instrument/framework/initialize.py new file mode 100755 index 0000000..5ca712d --- /dev/null +++ b/instrument/framework/initialize.py @@ -0,0 +1,150 @@ +""" +initialize the bluesky framework +""" + +__all__ = """ + RE cat sd bec peaks + bp bps bpp + summarize_plan + np + """.split() + +import logging + +logger = logging.getLogger(__name__) + +logger.info(__file__) + +import pathlib +import sys + +sys.path.append(str(pathlib.Path(__file__).absolute().parent.parent.parent)) + +from .. import iconfig +from bluesky import RunEngine +from bluesky import SupplementalData +from bluesky.callbacks.best_effort import BestEffortCallback +from bluesky.magics import BlueskyMagics +from bluesky.simulators import summarize_plan +from bluesky.utils import PersistentDict +from bluesky.utils import ProgressBarManager +from bluesky.utils import ts_msg_hook +from IPython import get_ipython +from ophyd.signal import EpicsSignalBase +import databroker +import ophyd +import warnings + +# convenience imports +import bluesky.plans as bp +import bluesky.plan_stubs as bps +import bluesky.preprocessors as bpp +import numpy as np + + +def get_md_path(): + path = iconfig.get("RUNENGINE_MD_PATH") + if path is None: + path = pathlib.Path.home() / "Bluesky_RunEngine_md" + else: + path = pathlib.Path(path) + logger.info("RunEngine metadata saved in directory: %s", str(path)) + return str(path) + + +# Set up a RunEngine and use metadata backed PersistentDict +RE = RunEngine({}) +RE.md = PersistentDict(get_md_path()) + + +# Connect with our mongodb database +catalog_name = iconfig.get("DATABROKER_CATALOG", "training") +# databroker v2 api +try: + cat = databroker.catalog[catalog_name] + logger.info("using databroker catalog '%s'", cat.name) +except KeyError: + cat = databroker.temp().v2 + logger.info("using TEMPORARY databroker catalog '%s'", cat.name) + + +# Subscribe metadatastore to documents. +# If this is removed, data is not saved to metadatastore. +RE.subscribe(cat.v1.insert) + +# Set up SupplementalData. +sd = SupplementalData() +RE.preprocessors.append(sd) + +if iconfig.get("USE_PROGRESS_BAR", False): + # Add a progress bar. + pbar_manager = ProgressBarManager() + RE.waiting_hook = pbar_manager + +# Register bluesky IPython magics. +_ipython = get_ipython() +if _ipython is not None: + _ipython.register_magics(BlueskyMagics) + +# Set up the BestEffortCallback. +bec = BestEffortCallback() +RE.subscribe(bec) +peaks = bec.peaks # just as alias for less typing +bec.disable_baseline() + +# At the end of every run, verify that files were saved and +# print a confirmation message. +# from bluesky.callbacks.broker import verify_files_saved +# RE.subscribe(post_run(verify_files_saved), 'stop') + +# Uncomment the following lines to turn on +# verbose messages for debugging. 
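+# These are ordinary calls to the standard logging API; any of the
+# bluesky/ophyd logger names (see instrument/session_logs.py) works here,
+# for example:
+# logging.getLogger("bluesky.RE.msg").setLevel(logging.DEBUG)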
+# ophyd.logger.setLevel(logging.DEBUG) + +ophyd.set_cl(iconfig.get("OPHYD_CONTROL_LAYER", "PyEpics").lower()) +logger.info(f"using ophyd control layer: {ophyd.cl.name}") + +# diagnostics +# RE.msg_hook = ts_msg_hook + +# set default timeout for all EpicsSignal connections & communications +TIMEOUT = 60 +if not EpicsSignalBase._EpicsSignalBase__any_instantiated: + EpicsSignalBase.set_defaults( + auto_monitor=True, + timeout=iconfig.get("PV_READ_TIMEOUT", TIMEOUT), + write_timeout=iconfig.get("PV_WRITE_TIMEOUT", TIMEOUT), + connection_timeout=iconfig.get("PV_CONNECTION_TIMEOUT", TIMEOUT), + ) + +_pv = iconfig.get("RUN_ENGINE_SCAN_ID_PV") +if _pv is None: + logger.info("Using RunEngine metadata for scan_id") +else: + from ophyd import EpicsSignal + + logger.info("Using EPICS PV %s for scan_id", _pv) + scan_id_epics = EpicsSignal(_pv, name="scan_id_epics") + + def epics_scan_id_source(_md): + """ + Callback function for RunEngine. Returns *next* scan_id to be used. + + * Ignore metadata dictionary passed as argument. + * Get current scan_id from PV. + * Apply lower limit of zero. + * Increment (so that scan_id numbering starts from 1). + * Set PV with new value. + * Return new value. + + Exception will be raised if PV is not connected when next + ``bps.open_run()`` is called. + """ + new_scan_id = max(scan_id_epics.get(), 0) + 1 + scan_id_epics.put(new_scan_id) + return new_scan_id + + # tell RunEngine to use the EPICS PV to provide the scan_id. + RE.scan_id_source = epics_scan_id_source + scan_id_epics.wait_for_connection() + RE.md["scan_id"] = scan_id_epics.get() diff --git a/instrument/framework/metadata.py b/instrument/framework/metadata.py new file mode 100755 index 0000000..551c827 --- /dev/null +++ b/instrument/framework/metadata.py @@ -0,0 +1,65 @@ +""" +define standard experiment metadata +""" + +__all__ = [] + +import logging + +logger = logging.getLogger(__name__) + +logger.info(__file__) + +from .. 
import iconfig + +import getpass +import os +import socket +from datetime import datetime + +import apstools +import databroker +import epics +import h5py +import intake +import matplotlib +import numpy +import ophyd +import pyRestTable +import spec2nexus + +import bluesky + +from .initialize import RE +from .initialize import cat + +HOSTNAME = socket.gethostname() or "localhost" +USERNAME = getpass.getuser() or "Bluesky user" + +# useful diagnostic to record with all data +versions = dict( + apstools=apstools.__version__, + bluesky=bluesky.__version__, + databroker=databroker.__version__, + epics=epics.__version__, + h5py=h5py.__version__, + intake=intake.__version__, + matplotlib=matplotlib.__version__, + numpy=numpy.__version__, + ophyd=ophyd.__version__, + pyRestTable=pyRestTable.__version__, + spec2nexus=spec2nexus.__version__, +) + +# Set up default metadata +RE.md["databroker_catalog"] = cat.name +RE.md["login_id"] = USERNAME + "@" + HOSTNAME +RE.md.update(iconfig.get("RUNENGINE_METADATA", {})) +RE.md["versions"] = versions +RE.md["pid"] = os.getpid() +RE.md["iconfig"] = iconfig + +conda_prefix = os.environ.get("CONDA_PREFIX") +if conda_prefix is not None: + RE.md["conda_prefix"] = conda_prefix +del conda_prefix diff --git a/instrument/iconfig.yml b/instrument/iconfig.yml new file mode 100644 index 0000000..ba6f7ac --- /dev/null +++ b/instrument/iconfig.yml @@ -0,0 +1,79 @@ +# configuration.yml + +# configuration for the instrument package +# simple key:value pairs + +ADSIM_IOC_PREFIX: "ad:" +GP_IOC_PREFIX: "gp:" + +DATABROKER_CATALOG: &databroker_catalog EDIT_CATALOG_NAME_HERE + +# default RunEngine metadata +RUNENGINE_METADATA: + beamline_id: Bluesky_training + instrument_name: BCDA EPICS Bluesky training + proposal_id: training + databroker_catalog: *databroker_catalog + +# Uncomment and edit to define a PV to use for the `scan_id`. +# The default behavior is to use `RE.md["scan_id"]`. +# RUN_ENGINE_SCAN_ID_PV: "IOC:integer" + + +# area detector paths used by training IOC +# These are examples for other use of this instrument. +# see: https://bcda-aps.github.io/apstools/latest/examples/de_0_adsim_hdf5_basic.html#File-Directories +AD_IMAGE_DIR: "adsimdet/%Y/%m/%d" +AD_MOUNT_PATH: /tmp +BLUESKY_MOUNT_PATH: /tmp/docker_ioc/iocad/tmp + +# permissions +ALLOW_AREA_DETECTOR_WARMUP: true +ENABLE_AREA_DETECTOR_IMAGE_PLUGIN: true +ENABLE_CALCS: true +USE_PROGRESS_BAR: false +WRITE_SPEC_DATA_FILES: true + +# ---------------------------------- + +# Directory to "autosave" the RE.md dictionary (uses PersistentDict) +# Uncomment and modify to change from the default. +# RUNENGINE_MD_PATH: /home/USERNAME/Bluesky_RunEngine_md + +# override default control layer for ophyd +# if undefined, defaults to PyEpics +# OPHYD_CONTROL_LAYER: PyEpics +# OPHYD_CONTROL_LAYER: caproto + +# Uncomment any of these to override the defaults +LOGGING: + # MAX_BYTES: 1000000 + NUMBER_OF_PREVIOUS_BACKUPS: 9 + # LOG_PATH: /tmp + # If LOG_PATH undefined, this session will log into PWD/logs/ + # where PWD is present working directory when session is started + +# default timeouts (seconds) +PV_READ_TIMEOUT: &TIMEOUT 15 +PV_WRITE_TIMEOUT: *TIMEOUT +PV_CONNECTION_TIMEOUT: *TIMEOUT + +XMODE_DEBUG_LEVEL: Minimal + +# Minimum software package version checks: MAJOR.minor (no .patch). +# These will be converted to tuple() for comparisons. 
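+# For example, MINIMUM_PYTHON_VERSION below is read as the tuple (3, 8)
+# and compared against sys.version_info in instrument/framework/check_python.py.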
+MINIMUM_PYTHON_VERSION:
+  - 3
+  - 8
+MINIMUM_BLUESKY_VERSION:
+  - 1
+  - 10
+MINIMUM_OPHYD_VERSION:
+  - 1
+  - 7
+MINIMUM_DATABROKER_VERSION:
+  - 1
+  - 2
+
+# identify the version of this iconfig.yml file
+ICONFIG_VERSION: 1.0.1
\ No newline at end of file
diff --git a/instrument/mpl/__init__.py b/instrument/mpl/__init__.py
new file mode 100755
index 0000000..15ece52
--- /dev/null
+++ b/instrument/mpl/__init__.py
@@ -0,0 +1,31 @@
+"""
+configure matplotlib for console or notebook session
+MUST be run BEFORE other initializations
+"""
+
+
+def isnotebook():
+    """
+    Is this session running in a Jupyter notebook (or qtconsole)?
+
+    see: https://stackoverflow.com/a/39662359/1046449
+    """
+    try:
+        from IPython import get_ipython
+
+        _ipython = get_ipython()
+        if _ipython is not None:
+            shell = _ipython.__class__.__name__
+            # "ZMQInteractiveShell": Jupyter notebook or qtconsole
+            # "TerminalInteractiveShell": terminal IPython
+            return shell == "ZMQInteractiveShell"
+        return False
+    except NameError:
+        return False  # Probably standard Python interpreter
+
+
+if isnotebook():
+    from .notebook import *
+else:
+    from .console import *
diff --git a/instrument/mpl/console.py b/instrument/mpl/console.py
new file mode 100755
index 0000000..5f173c2
--- /dev/null
+++ b/instrument/mpl/console.py
@@ -0,0 +1,17 @@
+"""
+Configure matplotlib in interactive mode for IPython console
+"""
+
+__all__ = [
+    "plt",
+]
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+import matplotlib.pyplot as plt
+
+plt.ion()
diff --git a/instrument/mpl/notebook.py b/instrument/mpl/notebook.py
new file mode 100755
index 0000000..2efae01
--- /dev/null
+++ b/instrument/mpl/notebook.py
@@ -0,0 +1,23 @@
+"""
+Configure matplotlib in interactive mode for Jupyter notebook
+"""
+
+__all__ = [
+    "plt",
+]
+
+from IPython import get_ipython
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+# equivalent to the %matplotlib line magic
+_ipython = get_ipython()
+if _ipython is not None:
+    # _ipython.run_line_magic("matplotlib", "notebook")
+    _ipython.run_line_magic("matplotlib", "inline")
+import matplotlib.pyplot as plt
+
+plt.ion()
diff --git a/instrument/plans/__init__.py b/instrument/plans/__init__.py
new file mode 100755
index 0000000..bc13fcd
--- /dev/null
+++ b/instrument/plans/__init__.py
@@ -0,0 +1,6 @@
+"""
+local, custom Bluesky plans (scans) and other functions
+"""
+
+# from .lup_plan import *
+# from .peak_finder_example import *
\ No newline at end of file
diff --git a/instrument/plans/lup_plan.py b/instrument/plans/lup_plan.py
new file mode 100755
index 0000000..2f2d5dd
--- /dev/null
+++ b/instrument/plans/lup_plan.py
@@ -0,0 +1,50 @@
+"""
+lup: lineup
+"""
+
+__all__ = [
+    "lup",
+]
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+logger.info(__file__)
+
+from bluesky import plan_stubs as bps
+from bluesky import plans as bp
+
+from ..framework.initialize import bec
+
+
+def lup(detectors, motor, start, finish, npts=5, key="cen"):
+    """
+    Lineup a positioner.
+
+    Step-scan the motor from start to finish and collect data from the detectors.
+    The **first** detector in the list will be used to assess alignment.
+    The statistical measure is selected by ``key`` with a default of
+    center: ``key="cen"``.
+
+    The bluesky ``BestEffortCallback`` is required, with plots enabled, to
+    collect the data for the statistical measure.
+
+    If the chosen key is reported, the `lup()` plan will move the positioner to
+    the new value at the end of the plan and print the new position.
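+
+    EXAMPLE (a sketch; assumes the simulated ``noisy`` detector and ``m1``
+    motor defined elsewhere in this package, and example scan limits)::
+
+        RE(lup([noisy], m1, -2, 2, npts=21))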
+ """ + det0 = detectors[0].name + print(f"{det0=}") + yield from bp.rel_scan(detectors, motor, start, finish, npts) + + yield from bps.sleep(1) + + if det0 in bec.peaks[key]: + target = bec.peaks[key][det0] + if isinstance(target, tuple): + target = target[0] + print(f"want to move {motor.name} to {target}") + yield from bps.mv(motor, target) + print(f"{motor.name}={motor.position}") + else: + print(f"'{det0}' not found in {bec.peaks[key]}") diff --git a/instrument/plans/peak_finder_example.py b/instrument/plans/peak_finder_example.py new file mode 100755 index 0000000..29107d6 --- /dev/null +++ b/instrument/plans/peak_finder_example.py @@ -0,0 +1,125 @@ +""" +example custom scan plan + +Find the peak of noisy v. m1 in the range of +/- 2. +""" + +__all__ = [ + "two_pass_scan", + "findpeak_multipass", + "repeat_findpeak", +] + +import logging + +logger = logging.getLogger(__name__) + +logger.info(__file__) + +from .. import iconfig +from ..devices import change_noisy_parameters +from ..devices import m1 +from ..devices import noisy +from ..utils.image_analysis import analyze_peak +from bluesky import plans as bp +import pyRestTable + +if iconfig.get("framework", "unknown") == "queueserver": + from ..queueserver_framework import cat +else: + from ..framework import cat + + +def _get_peak_stats(uid, yname, xname): + """(internal) Convenience function.""" + logger.debug("compute scan statistics: '%s(%s)' uid: %s", yname, xname, uid) + try: + ds = cat[uid].primary.read() + return analyze_peak(ds[yname].data, ds[xname].data) + except NameError: + logger.warning("Catalog object not defined. No peak statistics.") + return {} + + +def two_pass_scan(md=None): + """ + Find the peak of noisy v. m1 in the range of +/- 2. + + We know the peak center of the simulated noisy detector + is positioned randomly between -1 to +1. Overscan that + range to find both sides of the peak. + + This is a 2 scan procedure. First scan passes through + the full range. Second scan is centered on the peak + and width of the first scan. + """ + md = md or {} + + change_noisy_parameters() + + sig = 2 + expansion_factor = 1.2 # expand search by FWHM*expansion_factor + m1.move(0) + for i in range(1, 3): + md["scan_sequence"] = i + uid = yield from bp.rel_scan([noisy], m1, -sig, +sig, 23, md=md) + stats = _get_peak_stats(uid, noisy.name, m1.name) + if len(stats) > 0: + sig = stats["fwhm"] * expansion_factor + m1.move(stats["centroid_position"]) + else: + logger.warning("Catalog object not found. No peak statistics.") + break + + +def findpeak_multipass(number_of_scans=4, number_of_points=23, magnify=1.2, md=None): + """ + find peak of noisy v. 
m1 by repeated scans with refinement + + basically:: + + sig = 2.1 + m1.move(0.0) + for _ in range(3): + RE(bp.rel_scan([noisy], m1, -sig, sig, 23)) + stats = _get_peak_stats(uid, noisy.name, m1.name) + sig = stats["fwhm"] + m1.move(stats["centroid_position"]) + """ + md = md or {} + md["number_of_scans"] = number_of_scans + sig = 2.1 / magnify + cen = 0 + results = [] + for _again in range(number_of_scans): + md["scan_sequence_number"] = _again+1 + m1.move(cen) + uid = yield from bp.rel_scan( + [noisy], m1, -magnify * sig, magnify * sig, number_of_points, md=md + ) + stats = _get_peak_stats(uid, noisy.name, m1.name) + if len(stats) > 0: + scan_id = cat[uid].metadata["start"]["scan_id"] + sig = stats["fwhm"] + cen = stats["centroid_position"] + results.append((scan_id, cen, sig)) + else: + break + m1.move(cen) + + tbl = pyRestTable.Table() + tbl.labels = "scan_id center FWHM".split() + for row in results: + tbl.addRow(row) + logger.info("iterative results:\n%s", str(tbl)) + + +def repeat_findpeak(iters=1, md=None): + """Repeat findpeak_multipass() with new parameters each time.""" + md = md or {} + for _i in range(iters): + md["iteration"] = _i+1 + # FIXME: apstools.utils.trim_plot_lines(bec, 4, m1, noisy) + change_noisy_parameters() + yield from findpeak_multipass(md=md) + logger.info("Finished #%d of %d iterations", _i + 1, iters) diff --git a/instrument/queueserver.py b/instrument/queueserver.py new file mode 100755 index 0000000..af153c9 --- /dev/null +++ b/instrument/queueserver.py @@ -0,0 +1,81 @@ +""" +Configure for data collection using bluesky-queueserver. +""" + +import logging + +logger = logging.getLogger(__name__) + +logger.info(__file__) +print(__file__) + +from . import iconfig +from .epics_signal_config import scan_id_epics +from .queueserver_framework import * + +# guides choice of module to import cat +iconfig["framework"] = "queueserver" + +from .devices import * +from .plans import * +from .utils import * +from .callbacks import * + +from bluesky.plans import * +from bluesky.plan_stubs import sleep +from ophyd import Device +from ophyd import Signal +import pyRestTable + + +if iconfig.get("WRITE_SPEC_DATA_FILES", False): + if specwriter is not None: + RE.subscribe(specwriter.receiver) + logger.info(f"writing to SPEC file: {specwriter.spec_filename}") + logger.info(" >>>> Using default SPEC file name <<<<") + logger.info(" file will be created when bluesky ends its next scan") + logger.info(" to change SPEC file, use command: newSpecFile('title')") + + +def print_devices_and_signals(): + """ + Print the Devices and Signals in the current global namespace. + """ + glo = globals().copy() + + table = pyRestTable.Table() + table.labels = "device/object pvprefix/pvname connected?".split() + for k, v in sorted(glo.items()): + if isinstance(v, (Device, Signal)) and not k.startswith("_"): + v.wait_for_connection() + p = "" + for aname in "pvname prefix".split(): + if hasattr(v, aname): + p = getattr(v, aname) + table.addRow((v.name, p, v.connected)) + if len(table.rows) > 0: + print("Table of Devices and signals:") + print(table) + + +def print_plans(): + """ + Print the plans in the current global namespace. 
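+
+    Here, a "plan" is any generator function found in the namespace,
+    the same test applied by inspect.isgeneratorfunction() below.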
+ """ + glo = globals().copy() + # fmt: off + plans = [ + k + for k, v in sorted(glo.items()) + if inspect.isgeneratorfunction(v) + ] + # fmt: on + if len(plans) > 0: + print("List of Plans:") + for k in plans: + print(f"* {k}{inspect.signature(glo[k])}") + print("") + + +if iconfig.get("APS_IN_BASELINE", False): + sd.baseline.append(aps) diff --git a/instrument/queueserver_framework.py b/instrument/queueserver_framework.py new file mode 100755 index 0000000..c1050a6 --- /dev/null +++ b/instrument/queueserver_framework.py @@ -0,0 +1,114 @@ +""" +Define RE for bluesky-queueserver. +""" + +__all__ = """ + cat + make_kv_table + print_instrument_configuration + print_RE_metadata + RE + sd +""".split() + +import logging +logger = logging.getLogger(__name__) + +logger.info(__file__) +print(__file__) + +from . import iconfig +from .epics_signal_config import epics_scan_id_source +from .epics_signal_config import scan_id_epics +import apstools +import bluesky +import bluesky_queueserver +import databroker +import epics +import getpass +import h5py +import matplotlib +import numpy +import ophyd +import os +import pyRestTable +import socket +import spec2nexus + + +HOSTNAME = socket.gethostname() or "localhost" +USERNAME = getpass.getuser() or "queueserver user" + +# useful diagnostic to record with all data +versions = dict( + apstools=apstools.__version__, + bluesky=bluesky.__version__, + bluesky_queueserver=bluesky_queueserver.__version__, + databroker=databroker.__version__, + epics=epics.__version__, + h5py=h5py.__version__, + matplotlib=matplotlib.__version__, + numpy=numpy.__version__, + ophyd=ophyd.__version__, + pyRestTable=pyRestTable.__version__, + spec2nexus=spec2nexus.__version__, +) + +try: + cat = databroker.catalog[iconfig["DATABROKER_CATALOG"]] + logger.info("using databroker catalog '%s'", cat.name) +except KeyError: + cat = databroker.temp().v2 + logger.info("using TEMPORARY databroker catalog '%s'", cat.name) + +if scan_id_epics is None: + RE = bluesky.RunEngine({}) +else: + RE = bluesky.RunEngine({}, scan_id_source=epics_scan_id_source) + logger.info(r"RE 'scan_id' uses EPICS PV: {scan_id_epics.pvname}") +RE.subscribe(cat.v1.insert) + +RE.md["databroker_catalog"] = cat.name +RE.md["login_id"] = USERNAME + "@" + HOSTNAME +RE.md.update(iconfig.get("RUNENGINE_METADATA", {})) +RE.md["versions"] = versions +RE.md["pid"] = os.getpid() +if scan_id_epics is not None: + RE.md["scan_id"] = scan_id_epics.get() + +# Set up SupplementalData. +sd = bluesky.SupplementalData() +RE.preprocessors.append(sd) + +ophyd.set_cl(iconfig.get("OPHYD_CONTROL_LAYER", "PyEpics").lower()) +logger.info(f"using ophyd control layer: {ophyd.cl.name}") + + +def make_kv_table(data): + table = pyRestTable.Table() + table.labels = "key value".split() + for k, v in sorted(data.items()): + if isinstance(v, dict): + table.addRow((k, make_kv_table(v))) + else: + table.addRow((k, v)) + return table + + +def print_instrument_configuration(): + if len(iconfig) > 0: + table = make_kv_table(iconfig) + print("") + print("Instrument configuration (iconfig):") + print(table) + + +def print_RE_metadata(): + """ + Print a table (to the console) with the current RunEngine metadata. + """ + if len(RE.md) > 0: + table = make_kv_table(RE.md) + print("") + print("RunEngine metadata:") + print(table) diff --git a/instrument/session_logs.py b/instrument/session_logs.py new file mode 100755 index 0000000..8e66c89 --- /dev/null +++ b/instrument/session_logs.py @@ -0,0 +1,107 @@ +""" +Configure logging for this session. 
+ +There are many _loggers_ to control the level of detailed logging for some +bluesky/ophyd internals. The next table shows some of the many possible logger +names. Configure the ``ACTIVATE_LOGGERS`` dictionary (below, where the keys are +logger names and the values are logging level, as shown) with any of these +names, or others which you may find useful: + +========================== ==================================================== +logger name description +========================== ==================================================== +``bluesky`` logger to which all bluesky log records propagate +``bluesky.emit_document`` when a Document is emitted. The log record does not contain the full content of the Document. +``bluesky.RE`` Records from a RunEngine. INFO-level notes state changes. DEBUG-level notes when each message from a plan is about to be processed and when a status object has completed. +``bluesky.RE.msg`` when each ``Msg`` is about to be processed. +``bluesky.RE.state`` when the RunEngine’s state changes. +``databroker`` logger to which all databroker log records propagate +``ophyd`` logger to which all ophyd log records propagate +``ophyd.objects`` records from all devices and signals (that is, OphydObject subclasses) +``ophyd.control_layer`` requests issued to the underlying control layer (e.g. pyepics, caproto) +``ophyd.event_dispatcher`` regular summaries of the backlog of updates from the control layer that are being processed on background threads +========================== ==================================================== + +References: + +* https://blueskyproject.io/ophyd/user_v1/reference/logging.html#logger-names +* https://blueskyproject.io/bluesky/debugging.html#logger-names +""" + +__all__ = [ + "logger", +] + +import logging +import pathlib + +from apstools.utils import file_log_handler +from apstools.utils import setup_IPython_console_logging +from apstools.utils import stream_log_handler + +from . 
import iconfig + +SESSION_NAME = "bluesky-session" +IPYTHON_LOGGER = "ipython_logger" + +BYTE = 1 +kB = 1024 * BYTE +MB = 1024 * kB + +CHOICES = dict( + LOG_PATH=None, + MAX_BYTES=1 * MB, + NUMBER_OF_PREVIOUS_BACKUPS=9, +) +CHOICES.update(iconfig.get("LOGGING", {})) +if CHOICES["LOG_PATH"] is not None: + CHOICES["LOG_PATH"] = pathlib.Path(CHOICES["LOG_PATH"]) + +# see the table above for details about this dictionary +ACTIVATE_LOGGERS = { + # "bluesky": "DEBUG", + # "bluesky.emit_document": "DEBUG", + # "bluesky.RE.msg": "DEBUG", + # "ophyd": "DEBUG", + "ophyd.control_layer": "DEBUG", + # "ophyd.objects": "DEBUG", + # "databroker": "DEBUG", +} + + +logger = logging.getLogger(SESSION_NAME) +logger.setLevel(logging.DEBUG) # allow any log content at this level +logger.addHandler(stream_log_handler()) # terse log to the console +logger.addHandler( + file_log_handler( # verbose log to a file + backupCount=CHOICES["NUMBER_OF_PREVIOUS_BACKUPS"], + file_name_base=IPYTHON_LOGGER, + log_path=CHOICES["LOG_PATH"], + maxBytes=CHOICES["MAX_BYTES"], + ) +) +setup_IPython_console_logging(log_path=CHOICES["LOG_PATH"]) + +logger.info("#" * 60 + " startup") +logger.info("logging started") +logger.info(f"logging level = {logger.level}") + +# log messages from the instrument package: '__package__' +_l = logging.getLogger(__package__) +_l.setLevel("DEBUG") +_l.addHandler(stream_log_handler()) # terse log to the console +_l.info(__file__) + + +for logger_name, level in ACTIVATE_LOGGERS.items(): + _l = logging.getLogger(logger_name) + _l.setLevel(logging.DEBUG) # allow any log content at this level + _l.addHandler( + file_log_handler( # logger to a file + backupCount=CHOICES["NUMBER_OF_PREVIOUS_BACKUPS"], + file_name_base=logger_name, + level=level, # filter reporting to this level + log_path=CHOICES["LOG_PATH"], + maxBytes=CHOICES["MAX_BYTES"], + ) + ) diff --git a/instrument/utils/__init__.py b/instrument/utils/__init__.py new file mode 100755 index 0000000..22ea47a --- /dev/null +++ b/instrument/utils/__init__.py @@ -0,0 +1,5 @@ +""" +any extra commands or utility functions here +""" + +from .image_analysis import * diff --git a/instrument/utils/image_analysis.py b/instrument/utils/image_analysis.py new file mode 100755 index 0000000..28702e4 --- /dev/null +++ b/instrument/utils/image_analysis.py @@ -0,0 +1,105 @@ +""" +Statistical peak analysis functions +""" + +__all__ = [ + "analyze_image", + "analyze_peak", +] + +import logging + +logger = logging.getLogger(__name__) + +logger.info(__file__) + + +import numpy as np +import pyRestTable +from scipy.ndimage import center_of_mass +from scipy.integrate import trapz + + +def analyze_peak(y_arr, x_arr=None): + """Measures of peak center & width.""" + # clear all results + center_position = None + centroid_position = None + maximum_position = None + minimum_position = None + crossings = None + fwhm = None + + y = np.array(y_arr) + num_points = len(y) + if x_arr is None: + x = np.arange(num_points) + else: + if len(x_arr) != num_points: + raise ValueError("x and y arrays are not of the same length.") + x = np.array(x_arr) + + # Compute x value at min and max of y + y_max_index = y.argmax() + y_min_index = y.argmin() + maximum_position = (x[y_max_index], y[y_max_index]) + minimum_position = (x[y_min_index], y[y_min_index]) + + (center_position,) = np.interp(center_of_mass(y), np.arange(num_points), x) + + # # for now, assume x is regularly spaced, otherwise, should be integrals + # sumY = sum(y) + # sumXY = sum(x*y) + # sumXXY = sum(x*x*y) + # # weighted_mean is 
same as center_position
+    # # weighted_mean = sumXY / sumY
+    # stdDev = np.sqrt((sumXXY / sumY) - (sumXY / sumY)**2)
+
+    mid = (np.max(y) + np.min(y)) / 2
+    crossings = np.where(np.diff((y > mid).astype(int)))[0]
+    _cen_list = []
+    for cr in crossings.ravel():
+        _x = x[cr : cr + 2]
+        _y = y[cr : cr + 2] - mid
+
+        dx = np.diff(_x)[0]
+        dy = np.diff(_y)[0]
+        m = dy / dx
+        _cen_list.append((-_y[0] / m) + _x[0])
+
+    if _cen_list:
+        centroid_position = np.mean(_cen_list)
+        crossings = np.array(_cen_list)
+        if len(_cen_list) >= 2:
+            fwhm = np.abs(crossings[-1] - crossings[0], dtype=float)
+
+    return dict(
+        centroid_position=centroid_position,
+        fwhm=fwhm,
+        # half_max=mid,
+        crossings=crossings,
+        maximum=maximum_position,
+        center_position=center_position,
+        minimum=minimum_position,
+        # stdDev=stdDev,
+    )
+
+
+def analyze_image(image):
+    horizontal = analyze_peak(image.sum(axis=0))
+    vertical = analyze_peak(image.sum(axis=1))
+
+    table = pyRestTable.Table()
+    table.addLabel("measure")
+    table.addLabel("vertical (dim_1)")
+    table.addLabel("horizontal (dim_2)")
+    for key in horizontal.keys():
+        table.addRow(
+            (
+                key,
+                vertical[key],
+                horizontal[key],
+            )
+        )
+
+    print(table)
diff --git a/qserver.md b/qserver.md
new file mode 100644
index 0000000..c491229
--- /dev/null
+++ b/qserver.md
@@ -0,0 +1,37 @@
+# Introduction to the Bluesky Queueserver
+
+work-in-progress: *very* basic notes for now
+
+- [Introduction to the Bluesky Queueserver](#introduction-to-the-bluesky-queueserver)
+  - [Run the queueserver](#run-the-queueserver)
+    - [operations](#operations)
+    - [diagnostics and testing](#diagnostics-and-testing)
+  - [graphical user interface](#graphical-user-interface)
+
+**IMPORTANT**: When the queueserver starts, it **must** find only one `.py` file in this directory and it must find `instrument/` in the same directory.  Attempts to place the qserver files in a subdirectory result in `'instrument/' directory not found` as queueserver starts.
+
+## Run the queueserver
+
+### operations
+
+Run in a background screen session.
+
+`./qserver.sh start`
+
+Stop this with
+
+`./qserver.sh stop`
+
+### diagnostics and testing
+
+`./qserver.sh run`
+
+## graphical user interface
+
+`queue-monitor &`
+
+- connect to the server
+- open the environment
+- add tasks to the queue
+- run the queue
diff --git a/qserver.sh b/qserver.sh
new file mode 100755
index 0000000..3e47ee8
--- /dev/null
+++ b/qserver.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+
+# Manage the bluesky queueserver process in a screen session.
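+#
+# Typical use (these commands are handled by the case statement at the end):
+#   ./qserver.sh start      # start the queueserver in a detached screen session
+#   ./qserver.sh status     # report whether it is running
+#   ./qserver.sh stop       # stop the running process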
+# Calls _run_qs.sh + +#-------------------- +# change the program defaults here +DATABROKER_CATALOG=training # also defined in _run_qs.sh +DEFAULT_SESSION_NAME="bluesky_queueserver-${DATABROKER_CATALOG}" +#-------------------- + +SHELL_SCRIPT_NAME=${BASH_SOURCE:-${0}} +if [ -z "$STARTUP_DIR" ] ; then + # If no startup dir is specified, use the directory with this script + STARTUP_DIR=$(dirname "${SHELL_SCRIPT_NAME}") +fi + +SELECTION=${1:-usage} +SESSION_NAME=${2:-"${DEFAULT_SESSION_NAME}"} + +PROCESS=_run_qs.sh +STARTUP_COMMAND="${STARTUP_DIR}/${PROCESS}" + +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +# echo "SESSION_NAME = ${SESSION_NAME}" +# echo "SHELL_SCRIPT_NAME = ${SHELL_SCRIPT_NAME}" +# echo "STARTUP_COMMAND = ${STARTUP_COMMAND}" +# echo "STARTUP_DIR = ${STARTUP_DIR}" + +# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +function checkpid() { + MY_UID=$(id -u) + # # The '\$' is needed in the pgrep pattern to select vm7, but not vm7.sh + MY_PID=$(pgrep "${PROCESS}"\$ -u "${MY_UID}") + # #!echo "MY_PID=${MY_PID}" + + if [ "${MY_PID}" != "" ] ; then + # Assume the process is down until proven otherwise + PROCESS_DOWN=1 + SCREEN_PID="" + + # At least one instance of the process is running; + # Find the binary that is associated with this process + for pid in ${MY_PID}; do + # compare directories + BIN_CWD=$(readlink "/proc/${pid}/cwd") + START_CWD=$(readlink -f "${STARTUP_DIR}") + + if [ "$BIN_CWD" = "$START_CWD" ] ; then + # The process is running with PID=$pid from $STARTUP_DIR + P_PID=$(ps -p "${pid}" -o ppid=) + # strip leading (and trailing) whitespace + arr=($P_PID) + P_PID=${arr[0]} + SCREEN_SESSION="${P_PID}.${SESSION_NAME}" + SCREEN_MATCH=$(screen -ls "${SCREEN_SESSION}" | grep "${SESSION_NAME}") + if [ "${SCREEN_MATCH}" != "" ] ; then + # process is running in screen + PROCESS_DOWN=0 + MY_PID=${pid} + SCREEN_PID=${P_PID} + break + fi + fi + done + else + # process is not running + PROCESS_DOWN=1 + fi + + return ${PROCESS_DOWN} +} + +function checkup () { + if ! 
checkpid; then
+        restart
+    fi
+}
+
+function console () {
+    if checkpid; then
+        echo "Connecting to ${SCREEN_SESSION}'s screen session"
+        # The -r flag will only connect if no one is attached to the session
+        #!screen -r "${SESSION_NAME}"
+        # The -x flag will connect even if someone is attached to the session
+        screen -x "${SCREEN_SESSION}"
+    else
+        echo "${SESSION_NAME} is not running"
+    fi
+}
+
+function exit_if_running() {
+    # ensure that multiple, simultaneous processes are not started by this user ID
+    MY_UID=$(id -u)
+    MY_PID=$(pgrep "${SESSION_NAME}"\$ -u "${MY_UID}")
+
+    if [ "" != "${MY_PID}" ] ; then
+        echo "${SESSION_NAME} is already running (PID=${MY_PID}), won't start a new one"
+        exit 1
+    fi
+}
+
+function restart() {
+    stop
+    start
+}
+
+function run_process() {
+    # only use this for diagnostic purposes
+    exit_if_running
+    ${STARTUP_COMMAND}
+}
+
+function screenpid() {
+    if [ -z "${SCREEN_PID}" ] ; then
+        echo
+    else
+        echo " in a screen session (pid=${SCREEN_PID})"
+    fi
+}
+
+function start() {
+    if checkpid; then
+        echo -n "${SCREEN_SESSION} is already running (pid=${MY_PID})"
+        screenpid
+    else
+        echo "Starting ${SESSION_NAME}"
+        cd "${STARTUP_DIR}"
+        # Run SESSION_NAME inside a screen session
+        CMD="screen -dm -S ${SESSION_NAME} -h 5000 ${STARTUP_COMMAND}"
+        ${CMD}
+    fi
+}
+
+function status() {
+    if checkpid; then
+        echo -n "${SCREEN_SESSION} is running (pid=${MY_PID})"
+        screenpid
+    else
+        echo "${SESSION_NAME} is not running"
+    fi
+}
+
+function stop() {
+    if checkpid; then
+        echo "Stopping ${SCREEN_SESSION} (pid=${MY_PID})"
+        kill "${MY_PID}"
+    else
+        echo "${SESSION_NAME} is not running"
+    fi
+}
+
+function usage() {
+    echo "Usage: $(basename "${SHELL_SCRIPT_NAME}") {start|stop|restart|status|checkup|console|run} [NAME]"
+    echo ""
+    echo "    COMMANDS"
+    echo "        console   attach to process console if process is running in screen"
+    echo "        checkup   check that process is running, restart if not"
+    echo "        restart   restart process"
+    echo "        run       run process in console (not screen)"
+    echo "        start     start process"
+    echo "        status    report if process is running"
+    echo "        stop      stop process"
+    echo ""
+    echo "    OPTIONAL TERMS"
+    echo "        NAME      name of process (default: ${DEFAULT_SESSION_NAME})"
+}
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+case ${SELECTION} in
+    start) start ;;
+    stop | kill) stop ;;
+    restart) restart ;;
+    status) status ;;
+    checkup) checkup ;;
+    console) console ;;
+    run) run_process ;;
+    *) usage ;;
+esac
+
+# -----------------------------------------------------------------------------
+# :author:    Pete R. Jemian
+# :email:     jemian@anl.gov
+# :copyright: (c) 2017-2022, UChicago Argonne, LLC
+#
+# Distributed under the terms of the Creative Commons Attribution 4.0 International Public License.
+#
+# The full license is in the file LICENSE.txt, distributed with this software.
+# -----------------------------------------------------------------------------
diff --git a/qstarter.py b/qstarter.py
new file mode 100755
index 0000000..e250982
--- /dev/null
+++ b/qstarter.py
@@ -0,0 +1,31 @@
+"""
+Load devices and plans for bluesky queueserver.
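+
+(This is the single ``.py`` startup file that ``start-re-manager`` finds
+in this directory; see ``qserver.md``.)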
+
+How to start the queueserver process::
+
+    DATABROKER_CATALOG=training
+    STARTUP_DIR=$(pwd)
+
+    start-re-manager \
+        --startup-dir "${STARTUP_DIR}" \
+        --update-existing-plans-devices ENVIRONMENT_OPEN \
+        --zmq-publish-console ON \
+        --databroker-config "${DATABROKER_CATALOG}"
+
+"""
+
+import pathlib
+import sys
+
+sys.path.append(
+    str(pathlib.Path(__file__).absolute().parent)
+)
+
+from instrument import iconfig
+from instrument.queueserver import *
+
+
+print_instrument_configuration()
+print_devices_and_signals()
+print_plans()
+print_RE_metadata()
diff --git a/user/README.md b/user/README.md
new file mode 100644
index 0000000..5f0fb4f
--- /dev/null
+++ b/user/README.md
@@ -0,0 +1,49 @@
+# user directory
+
+- [user directory](#user-directory)
+  - [About files in `user` directory](#about-files-in-user-directory)
+  - [`%run` - Load \& Run a file](#run---load--run-a-file)
+  - [Access symbols in the IPython session namespace](#access-symbols-in-the-ipython-session-namespace)
+
+## About files in `user` directory
+
+Each file should be written like a standard Python module, including all the imports necessary to support the code.
+
+To use symbols from the user command shell (a.k.a. the *IPython session namespace*),
+you'll need to add them explicitly; see the section below for instructions.
+
+Usually, your code should take any necessary symbols as arguments (args) or
+optional keyword arguments (kwargs).
+
+## `%run` - Load & Run a file
+
+To load a Python module (filename without the trailing `.py` extension) from this
+directory into IPython, use a command such as this example:
+
+```bash
+In [1]: %run -im user.quick_hello
+Hello!
+```
+
+**Tip**: The `%run` IPython magic command is comparable to SPEC's `qdo` command.
+
+**Caution**:
+  If you add or modify symbols in the user's command shell (IPython namespace) and those symbols are used in your file (`user/quick_hello.py` as the example shows), you must repeat the `%run` command (above) to load those changes.
+
+Alternatively, this equivalent command loads and runs the `quick_hello.py` file:
+
+```bash
+In [2]: %run -i user/quick_hello.py
+Hello!
+```
+
+## Access symbols in the IPython session namespace

+Add this code block at the top of the file, before anything else:
+
+```py
+# get all the symbols from the IPython shell
+import IPython
+globals().update(IPython.get_ipython().user_ns)
+logger.info(__file__)
+```
diff --git a/user/quick_hello.py b/user/quick_hello.py
new file mode 100755
index 0000000..a6ac9f9
--- /dev/null
+++ b/user/quick_hello.py
@@ -0,0 +1,44 @@
+"""
+Hello, World! demo for bluesky-queueserver testing.
+
+EXAMPLE::
+
+    # Load this code in IPython or Jupyter notebook:
+    %run -i user/quick_hello.py
+
+    # Run the plan with the RunEngine:
+    RE(hello_world())
+"""
+
+__all__ = """
+    hello_world
+""".split()
+
+import logging
+
+logger = logging.getLogger(__name__)
+logger.info(__file__)
+
+from ophyd import Component, Device, Signal
+
+from bluesky import plans as bp
+
+print("Loading 'Hello, World!' example.")
+
+
+class HelloDevice(Device):
+    """Simple ophyd device to provide Hello, World! capability."""
+
+    number = Component(Signal, value=0, kind="hinted")
+    text = Component(Signal, value="", kind="normal")
+
+
+hello_device = HelloDevice(name="hello")
+hello_device.stage_sigs["number"] = 1
+hello_device.stage_sigs["text"] = "Hello, World!"
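+# stage_sigs are applied when the device is staged (as ``bp.count()`` does
+# at the start of a run) and restored when the device is unstaged.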
+hello_device.number.name = hello_device.name
+
+
+def hello_world():
+    """Simple bluesky plan for demonstrating Hello, World!."""
+    yield from bp.count([hello_device], md=dict(title="test QS"))
diff --git a/user_group_permissions.yaml b/user_group_permissions.yaml
new file mode 100644
index 0000000..5db3e47
--- /dev/null
+++ b/user_group_permissions.yaml
@@ -0,0 +1,46 @@
+user_groups:
+  root:  # The group includes all available plans and devices
+    allowed_plans:
+      - null  # Allow all
+    forbidden_plans:
+      - ":^_"  # All plans with names starting with '_'
+    allowed_devices:
+      - null  # Allow all
+    forbidden_devices:
+      - ":^_:?.*"  # All devices with names starting with '_'
+    allowed_functions:
+      - null  # Allow all
+    forbidden_functions:
+      - ":^_"  # All functions with names starting with '_'
+  primary:  # The group includes beamline staff, includes all or most of the plans and devices
+    allowed_plans:
+      - ":.*"  # Different way to allow all plans.
+    forbidden_plans:
+      - null  # Nothing is forbidden
+    allowed_devices:
+      - ":?.*:depth=5"  # Allow all devices and subdevices. Maximum depth for subdevices is 5.
+    forbidden_devices:
+      - null  # Nothing is forbidden
+    allowed_functions:
+      - "function_sleep"  # Explicitly listed name
+  test_user:  # Users with limited access capabilities
+    allowed_plans:
+      - ":^count"  # Use regular expression patterns
+      - ":scan$"
+    forbidden_plans:
+      - ":^adaptive_scan$"  # Use regular expression patterns
+      - ":^inner_product"
+    allowed_devices:
+      - ":^det:?.*"  # Use regular expression patterns
+      - ":^motor:?.*"
+      - ":^sim_bundle_A:?.*"
+    forbidden_devices:
+      - ":^det[3-5]$:?.*"  # Use regular expression patterns
+      - ":^motor\\d+$:?.*"
+    allowed_functions:
+      - ":element$"
+      - ":elements$"
+      - "function_sleep"
+      - "clear_buffer"
+    forbidden_functions:
+      - ":^_"  # All functions with names starting with '_'
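+
+# Pattern notes: items beginning with ":" are regular expressions matched
+# against plan/device/function names (see the bluesky-queueserver docs for
+# the full syntax); plain strings, such as "function_sleep", match a name
+# exactly.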