diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..e69de29b diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..fe0e08e9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,124 @@ +# OS Stuff +Thumbs.db +.DS_Store +.idea + +# Mosaic stuff +mosaic-workspace + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +*.pyc +*.json + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +.token + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +_site + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# Example outputs +*.h5 +!examples/stride/anastasio2D/data/*.h5 +examples/*/*.png + +# Legacy code +legacy/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..41f3e3eb --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,5 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v1.2.3 + 
hooks: + - id: flake8 \ No newline at end of file diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..a1b138f6 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +include setup.py +include README.md +include MANIFEST.in +include environment.yml +include requirements.txt +include requirements-optional.txt diff --git a/README.md b/README.md new file mode 100644 index 00000000..889c798e --- /dev/null +++ b/README.md @@ -0,0 +1,61 @@ + + +# Stride - A (somewhat) general optimisation framework for medical ultrasound imaging + +## Quickstart + +The recommended way to install Stride is through Anaconda's package manager (version >=4.9), which can be downloaded +in [Anaconda](https://www.continuum.io/downloads) or [Miniconda](https://conda.io/miniconda.html). +A Python version above 3.7 is recommended to run Stride. + +To install Stride, follow these steps: + +```sh +git clone git@github.com:trustimaging/stride.git +cd stride +conda env create -f environment.yml +conda activate stride +pip install -e . +``` + +## Running the examples + +To perform a forward run on the alpha2D example: + +```sh +cd examples/stride/alpha2D +mrun python foward.py +``` + +You can control the number of workers and threads per worker by running: + +```sh +mrun -nw 2 -nth 5 python foward.py +``` + +You can configure the devito solvers using environment variables. For example, to run the same code on a GPU with OpenACC you can: + +```sh +export DEVITO_COMPILER=pgcc +export DEVITO_LANGUAGE=openacc +export DEVITO_PLATFORM=nvidiaX +mrun -nw 1 -nth 5 python foward.py +``` + +Once you've run alpha2D forward, you can run the corresponding inverse problem by doing: + +```sh +mrun python inverse.py +``` + + +## Documentation + +You can build and access the documentation by running: + +```sh +cd docs +make html +``` + +and opening the generated ``_build/index.html`` in your browser. 
diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..71613705 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,225 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " applehelp to make an Apple Help Book" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " epub3 to make an epub3" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded 
in the documentation (if enabled)" + @echo " coverage to run coverage check of the documentation (if enabled)" + @echo " dummy to check syntax errors of document sources" + +.PHONY: clean +clean: + rm -rf $(BUILDDIR)/* + +.PHONY: html +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +.PHONY: dirhtml +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +.PHONY: singlehtml +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +.PHONY: pickle +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +.PHONY: json +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +.PHONY: htmlhelp +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +.PHONY: qthelp +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Stride.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Stride.qhc" + +.PHONY: applehelp +applehelp: + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." 
+ +.PHONY: devhelp +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/Stride" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Stride" + @echo "# devhelp" + +.PHONY: epub +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +.PHONY: epub3 +epub3: + $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 + @echo + @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." + +.PHONY: latex +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +.PHONY: latexpdf +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: latexpdfja +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: text +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +.PHONY: man +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +.PHONY: texinfo +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 
+ @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +.PHONY: info +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +.PHONY: gettext +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +.PHONY: changes +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +.PHONY: linkcheck +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +.PHONY: doctest +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +.PHONY: coverage +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +.PHONY: xml +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +.PHONY: pseudoxml +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." + +.PHONY: dummy +dummy: + $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy + @echo + @echo "Build finished. Dummy builder generates no files." 
diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000..eb651cbb --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,281 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=_build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. epub3 to make an epub3 + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. xml to make Docutils-native XML files + echo. pseudoxml to make pseudoxml-XML files for display purposes + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + echo. coverage to run coverage check of the documentation if enabled + echo. 
dummy to check syntax errors of document sources + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + + +REM Check if sphinx-build is available and fallback to Python version if any +%SPHINXBUILD% 1>NUL 2>NUL +if errorlevel 9009 goto sphinx_python +goto sphinx_ok + +:sphinx_python + +set SPHINXBUILD=python -m sphinx.__init__ +%SPHINXBUILD% 2> nul +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +:sphinx_ok + + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. 
+ echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Stride.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Stride.qhc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "epub3" ( + %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub3 file is in %BUILDDIR%/epub3. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdf" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf + cd %~dp0 + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdfja" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf-ja + cd %~dp0 + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. 
+ goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +if "%1" == "coverage" ( + %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage + if errorlevel 1 exit /b 1 + echo. + echo.Testing of coverage in the sources finished, look at the ^ +results in %BUILDDIR%/coverage/python.txt. + goto end +) + +if "%1" == "xml" ( + %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The XML files are in %BUILDDIR%/xml. + goto end +) + +if "%1" == "pseudoxml" ( + %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 
+ goto end +) + +if "%1" == "dummy" ( + %SPHINXBUILD% -b dummy %ALLSPHINXOPTS% %BUILDDIR%/dummy + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. Dummy builder generates no files. + goto end +) + +:end diff --git a/docs/source/_static/style.css b/docs/source/_static/style.css new file mode 100644 index 00000000..4962d255 --- /dev/null +++ b/docs/source/_static/style.css @@ -0,0 +1,9 @@ + +.wy-side-nav-search { + background-color: #343131; +} + +.wy-nav-content { + max-width: 1200px; + min-height: 100vh; +} diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html new file mode 100644 index 00000000..3e44f4a3 --- /dev/null +++ b/docs/source/_templates/layout.html @@ -0,0 +1,4 @@ +{% extends "!layout.html" %} +{% block extrahead %} + +{% endblock %} diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 00000000..ffc2bb5f --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,435 @@ +# -*- coding: utf-8 -*- +# +# Stride documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + 'sphinx.ext.autodoc', + # 'sphinxcontrib.fulltoc', + 'sphinx.ext.todo', + 'sphinx.ext.viewcode', + 'sphinx.ext.githubpages', + 'sphinx.ext.mathjax', + 'sphinx.ext.napoleon' # support for numpydoc +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +# +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Stride' +author = u'TRUST' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = u'1.0' +# The full version, including alpha/beta/rc tags. +# release = u'3.4' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# +# today = '' +# +# Else, today_fmt is used as the format for a strftime call. +# +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. 
+# +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +html_theme_options = { + 'collapse_navigation': True, + 'sticky_navigation': True, + 'navigation_depth': 10, + 'includehidden': True, + 'titles_only': False +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. +# " v documentation" by default. +# +html_title = u'Stride' + +# A shorter title for the navigation bar. Default is the same as html_title. +# +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# +html_logo = '_static/stride_logo.png' + +# The name of an image file (relative to this directory) to use as a favicon of +# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. 
+# +html_favicon = '_static/stride_logo.png' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# +# html_extra_path = [] + +# If not None, a 'Last updated on:' timestamp is inserted at every page +# bottom, using the given strftime format. +# The empty string is equivalent to '%b %d, %Y'. +# +# html_last_updated_fmt = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# +# html_sidebars = {'**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# +# html_additional_pages = {} + +# If false, no module index is generated. +# +# html_domain_indices = True + +# If false, no index is generated. +# +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# +html_show_sphinx = False + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. 
+# +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' +# +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# 'ja' uses this config value. +# 'zh' user can custom change `jieba` dictionary path. +# +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Stridedoc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'Stride.tex', u'Stride Documentation', + u'Stride', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# +# latex_use_parts = False + +# If true, show page references after internal links. +# +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. 
+# +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# +# latex_appendices = [] + +# It false, will not define \strong, \code, itleref, \crossref ... but only +# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added +# packages. +# +# latex_keep_old_macro_names = True + +# If false, no module index is generated. +# +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'stride', u'Stride Documentation', + [author], 1) +] + +# If true, show URL addresses after external links. +# +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'Stride', u'Stride Documentation', + author, 'Stride', u'Stride Documentation.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +# +# texinfo_appendices = [] + +# If false, no module index is generated. +# +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# +# texinfo_no_detailmenu = False + + +# -- Options for Epub output ---------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project +epub_author = author +epub_publisher = author +epub_copyright = copyright + +# The basename for the epub file. It defaults to the project name. +# epub_basename = project + +# The HTML theme for the epub output. 
Since the default themes are not +# optimized for small screen space, using the same theme for HTML and epub +# output is usually not wise. This defaults to 'epub', a theme designed to save +# visual space. +# +# epub_theme = 'epub' + +# The language of the text. It defaults to the language option +# or 'en' if the language is not set. +# +# epub_language = '' + +# The scheme of the identifier. Typical schemes are ISBN or URL. +# epub_scheme = '' + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A tuple containing the cover image and cover page html template filenames. +# +# epub_cover = () + +# A sequence of (type, uri, title) tuples for the guide element of content.opf. +# +# epub_guide = () + +# HTML files that should be inserted before the pages created by sphinx. +# The format is a list of tuples containing the path and title. +# +# epub_pre_files = [] + +# HTML files that should be inserted after the pages created by sphinx. +# The format is a list of tuples containing the path and title. +# +# epub_post_files = [] + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# The depth of the table of contents in toc.ncx. +# +# epub_tocdepth = 3 + +# Allow duplicate toc entries. +# +# epub_tocdup = True + +# Choose between 'default' and 'includehidden'. +# +# epub_tocscope = 'default' + +# Fix unsupported image types using the Pillow. +# +# epub_fix_images = False + +# Scale large images. +# +# epub_max_image_width = 0 + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# +# epub_show_urls = 'inline' + +# If false, no index is generated. 
+# +# epub_use_index = True diff --git a/docs/source/download.rst b/docs/source/download.rst new file mode 100644 index 00000000..4236831a --- /dev/null +++ b/docs/source/download.rst @@ -0,0 +1,21 @@ +======== +Download +======== + +The recommended way to install Stride is through Anaconda's package manager (version >=4.9), which can be downloaded +in: + +.. _Anaconda: https://www.continuum.io/downloads +.. _Miniconda: https://conda.io/miniconda.html + +A Python version above 3.7 is recommended to run Stride. + +To install Stride, follow these steps: + +.. code-block:: shell + + git clone git@github.com:trustimaging/stride.git + cd stride + conda env create -f environment.yml + conda activate stride + pip install -e . diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 00000000..f4e4840b --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,31 @@ + +Stride +====== + +This is the Stride documentation. + +`Stride `_ is a (somewhat) +general optimisation framework for medical ultrasound imaging. + +Getting started +--------------- + +You can get instructions on how to download and install Stride +:doc:`here `. + +You can find tutorials on how to start using Stride on +:doc:`tutorials `. + +You can find the API Reference :doc:`here `. + +For information on the parallelisation library Mosaic, check the specific documentation :doc:`here `. + +.. title:: Stride + +.. toctree:: + :hidden: + + Download + Tutorials + Stride API Reference + Mosaic API Reference diff --git a/docs/source/mosaic/api/api_index.rst b/docs/source/mosaic/api/api_index.rst new file mode 100644 index 00000000..6471213c --- /dev/null +++ b/docs/source/mosaic/api/api_index.rst @@ -0,0 +1,14 @@ +==================== +Mosaic API Reference +==================== + +.. 
toctree:: + + run + runtime/runtime_index + comms/comms_index + core/core_index + types/types_index + file_manipulation/file_manipulation_index + utils/utils_index + diff --git a/docs/source/mosaic/api/comms/comms.rst b/docs/source/mosaic/api/comms/comms.rst new file mode 100644 index 00000000..ae7b9d4b --- /dev/null +++ b/docs/source/mosaic/api/comms/comms.rst @@ -0,0 +1,8 @@ +================== +Comms +================== + +.. autoclass:: mosaic.comms.comms.CommsManager + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/comms/comms_index.rst b/docs/source/mosaic/api/comms/comms_index.rst new file mode 100644 index 00000000..3c0102cc --- /dev/null +++ b/docs/source/mosaic/api/comms/comms_index.rst @@ -0,0 +1,10 @@ +================== +Comms +================== + + +.. toctree:: + + comms + serialisation + compression diff --git a/docs/source/mosaic/api/comms/compression.rst b/docs/source/mosaic/api/comms/compression.rst new file mode 100644 index 00000000..d76151b5 --- /dev/null +++ b/docs/source/mosaic/api/comms/compression.rst @@ -0,0 +1,6 @@ +================== +Compression +================== + +.. autofunction:: mosaic.comms.compression.maybe_compress +.. autofunction:: mosaic.comms.compression.decompress diff --git a/docs/source/mosaic/api/comms/serialisation.rst b/docs/source/mosaic/api/comms/serialisation.rst new file mode 100644 index 00000000..a7648541 --- /dev/null +++ b/docs/source/mosaic/api/comms/serialisation.rst @@ -0,0 +1,6 @@ +================== +Serialisation +================== + +.. autofunction:: mosaic.comms.serialisation.serialise +.. autofunction:: mosaic.comms.serialisation.deserialise diff --git a/docs/source/mosaic/api/core/base.rst b/docs/source/mosaic/api/core/base.rst new file mode 100644 index 00000000..042870ea --- /dev/null +++ b/docs/source/mosaic/api/core/base.rst @@ -0,0 +1,23 @@ +================== +Base +================== + +.. 
autoclass:: mosaic.core.base.CMDBase + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.base.RemoteBase + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.base.ProxyBase + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.base.MonitoredBase + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/core/core_index.rst b/docs/source/mosaic/api/core/core_index.rst new file mode 100644 index 00000000..cf2756c1 --- /dev/null +++ b/docs/source/mosaic/api/core/core_index.rst @@ -0,0 +1,10 @@ +================== +Core +================== + + +.. toctree:: + + tessera + task + base diff --git a/docs/source/mosaic/api/core/task.rst b/docs/source/mosaic/api/core/task.rst new file mode 100644 index 00000000..31b9f40c --- /dev/null +++ b/docs/source/mosaic/api/core/task.rst @@ -0,0 +1,33 @@ +================== +Task +================== + +.. autoclass:: mosaic.core.task.Task + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.task.TaskProxy + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.task.TaskOutputGenerator + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.task.TaskOutput + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.task.TaskDone + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.task.MonitoredTask + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/core/tessera.rst b/docs/source/mosaic/api/core/tessera.rst new file mode 100644 index 00000000..36d3564b --- /dev/null +++ b/docs/source/mosaic/api/core/tessera.rst @@ -0,0 +1,25 @@ +================== +Tessera +================== + +.. autofunction:: mosaic.core.tessera.tessera + +.. autoclass:: mosaic.core.tessera.Tessera + :members: + :undoc-members: + :show-inheritance: + +.. 
autoclass:: mosaic.core.tessera.TesseraProxy + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.tessera.ArrayProxy + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.core.tessera.MonitoredTessera + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/file_manipulation/file_manipulation_index.rst b/docs/source/mosaic/api/file_manipulation/file_manipulation_index.rst new file mode 100644 index 00000000..02579a41 --- /dev/null +++ b/docs/source/mosaic/api/file_manipulation/file_manipulation_index.rst @@ -0,0 +1,8 @@ +================== +File manipulation +================== + + +.. toctree:: + + h5 diff --git a/docs/source/mosaic/api/file_manipulation/h5.rst b/docs/source/mosaic/api/file_manipulation/h5.rst new file mode 100644 index 00000000..e5edebe5 --- /dev/null +++ b/docs/source/mosaic/api/file_manipulation/h5.rst @@ -0,0 +1,10 @@ +================== +HDF5 +================== + +.. autoclass:: mosaic.file_manipulation.h5.HDF5 + :members: + :undoc-members: + :show-inheritance: + +.. autofunction:: mosaic.file_manipulation.h5.file_exists diff --git a/docs/source/mosaic/api/run.rst b/docs/source/mosaic/api/run.rst new file mode 100644 index 00000000..5512523c --- /dev/null +++ b/docs/source/mosaic/api/run.rst @@ -0,0 +1,6 @@ +================== +Running mosaic +================== + +.. autofunction:: mosaic.run +.. autofunction:: mosaic.init diff --git a/docs/source/mosaic/api/runtime/head.rst b/docs/source/mosaic/api/runtime/head.rst new file mode 100644 index 00000000..64eeb5b0 --- /dev/null +++ b/docs/source/mosaic/api/runtime/head.rst @@ -0,0 +1,8 @@ +================== +Head +================== + +.. 
automodule:: mosaic.runtime.head + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/runtime/monitor.rst b/docs/source/mosaic/api/runtime/monitor.rst new file mode 100644 index 00000000..ea230a6f --- /dev/null +++ b/docs/source/mosaic/api/runtime/monitor.rst @@ -0,0 +1,21 @@ +================== +Monitor +================== + +.. autoclass:: mosaic.runtime.monitor.Monitor + :members: + :undoc-members: + :show-inheritance: + +Strategies +---------- + +.. autoclass:: mosaic.runtime.strategies.MonitorStrategy + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: mosaic.runtime.strategies.RoundRobin + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/runtime/node.rst b/docs/source/mosaic/api/runtime/node.rst new file mode 100644 index 00000000..9360246d --- /dev/null +++ b/docs/source/mosaic/api/runtime/node.rst @@ -0,0 +1,8 @@ +================== +Node +================== + +.. automodule:: mosaic.runtime.node + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/runtime/runtime.rst b/docs/source/mosaic/api/runtime/runtime.rst new file mode 100644 index 00000000..844f8bbd --- /dev/null +++ b/docs/source/mosaic/api/runtime/runtime.rst @@ -0,0 +1,8 @@ +================== +Runtime +================== + +.. automodule:: mosaic.runtime.runtime + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/runtime/runtime_index.rst b/docs/source/mosaic/api/runtime/runtime_index.rst new file mode 100644 index 00000000..a1b5bcc5 --- /dev/null +++ b/docs/source/mosaic/api/runtime/runtime_index.rst @@ -0,0 +1,12 @@ +================== +Runtime +================== + + +.. 
toctree:: + + runtime + head + monitor + node + worker diff --git a/docs/source/mosaic/api/runtime/worker.rst b/docs/source/mosaic/api/runtime/worker.rst new file mode 100644 index 00000000..e6882533 --- /dev/null +++ b/docs/source/mosaic/api/runtime/worker.rst @@ -0,0 +1,8 @@ +================== +Worker +================== + +.. automodule:: mosaic.runtime.worker + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/types/config.rst b/docs/source/mosaic/api/types/config.rst new file mode 100644 index 00000000..ec59bc55 --- /dev/null +++ b/docs/source/mosaic/api/types/config.rst @@ -0,0 +1,8 @@ +================== +Config +================== + +.. automodule:: mosaic.types.config + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/types/extensible.rst b/docs/source/mosaic/api/types/extensible.rst new file mode 100644 index 00000000..a760042d --- /dev/null +++ b/docs/source/mosaic/api/types/extensible.rst @@ -0,0 +1,8 @@ +================== +Extensible +================== + +.. automodule:: mosaic.types.extensible + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/types/immutable.rst b/docs/source/mosaic/api/types/immutable.rst new file mode 100644 index 00000000..bdb1da47 --- /dev/null +++ b/docs/source/mosaic/api/types/immutable.rst @@ -0,0 +1,8 @@ +================== +Immutable +================== + +.. automodule:: mosaic.types.immutable + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/types/imported_function.rst b/docs/source/mosaic/api/types/imported_function.rst new file mode 100644 index 00000000..6db37542 --- /dev/null +++ b/docs/source/mosaic/api/types/imported_function.rst @@ -0,0 +1,8 @@ +================== +Imported function +================== + +.. 
automodule:: mosaic.types.imported_function + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/types/struct.rst b/docs/source/mosaic/api/types/struct.rst new file mode 100644 index 00000000..08abe563 --- /dev/null +++ b/docs/source/mosaic/api/types/struct.rst @@ -0,0 +1,8 @@ +================== +Struct +================== + +.. automodule:: mosaic.types.struct + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/types/types_index.rst b/docs/source/mosaic/api/types/types_index.rst new file mode 100644 index 00000000..151912ae --- /dev/null +++ b/docs/source/mosaic/api/types/types_index.rst @@ -0,0 +1,12 @@ +================== +Types +================== + + +.. toctree:: + + extensible + immutable + struct + config + imported_function diff --git a/docs/source/mosaic/api/utils/change_case.rst b/docs/source/mosaic/api/utils/change_case.rst new file mode 100644 index 00000000..28d1ff3c --- /dev/null +++ b/docs/source/mosaic/api/utils/change_case.rst @@ -0,0 +1,8 @@ +================== +Change case +================== + +.. automodule:: mosaic.utils.change_case + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/utils/event_loop.rst b/docs/source/mosaic/api/utils/event_loop.rst new file mode 100644 index 00000000..aaad6fc2 --- /dev/null +++ b/docs/source/mosaic/api/utils/event_loop.rst @@ -0,0 +1,8 @@ +================== +Event loop +================== + +.. automodule:: mosaic.utils.event_loop + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/utils/logger.rst b/docs/source/mosaic/api/utils/logger.rst new file mode 100644 index 00000000..0f30642b --- /dev/null +++ b/docs/source/mosaic/api/utils/logger.rst @@ -0,0 +1,8 @@ +================== +Logger +================== + +.. 
automodule:: mosaic.utils.logger + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/utils/subprocess.rst b/docs/source/mosaic/api/utils/subprocess.rst new file mode 100644 index 00000000..ab9f269c --- /dev/null +++ b/docs/source/mosaic/api/utils/subprocess.rst @@ -0,0 +1,8 @@ +================== +Subprocess +================== + +.. automodule:: mosaic.utils.subprocess + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/utils/utils.rst b/docs/source/mosaic/api/utils/utils.rst new file mode 100644 index 00000000..33e8454a --- /dev/null +++ b/docs/source/mosaic/api/utils/utils.rst @@ -0,0 +1,8 @@ +================== +Utils +================== + +.. automodule:: mosaic.utils.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/mosaic/api/utils/utils_index.rst b/docs/source/mosaic/api/utils/utils_index.rst new file mode 100644 index 00000000..b2c2018d --- /dev/null +++ b/docs/source/mosaic/api/utils/utils_index.rst @@ -0,0 +1,12 @@ +================== +Utils +================== + + +.. toctree:: + + event_loop + subprocess + logger + change_case + utils diff --git a/docs/source/stride/api/api_index.rst b/docs/source/stride/api/api_index.rst new file mode 100644 index 00000000..2dec6b89 --- /dev/null +++ b/docs/source/stride/api/api_index.rst @@ -0,0 +1,12 @@ +==================== +Stride API Reference +==================== + +.. toctree:: + + runner + problem_definition/problem_definition_index + problem_types/problem_types_index + optimisation/optimisation_index + plotting/plotting_index + utils/utils_index diff --git a/docs/source/stride/api/optimisation/functionals.rst b/docs/source/stride/api/optimisation/functionals.rst new file mode 100644 index 00000000..99d39885 --- /dev/null +++ b/docs/source/stride/api/optimisation/functionals.rst @@ -0,0 +1,18 @@ +================== +Functionals +================== + +.. 
autoclass:: stride.optimisation.functionals.functional.FunctionalBase + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.optimisation.functionals.l2_norm_difference.L2NormDifference + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.optimisation.functionals.functional.FunctionalValue + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/stride/api/optimisation/optimisation.rst b/docs/source/stride/api/optimisation/optimisation.rst new file mode 100644 index 00000000..17f92c9e --- /dev/null +++ b/docs/source/stride/api/optimisation/optimisation.rst @@ -0,0 +1,28 @@ +================== +Optimisation +================== + +.. autoclass:: stride.optimisation.optimisation.Optimisation + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.optimisation.optimisation.Block + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.optimisation.optimisation.Iteration + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.optimisation.optimisation.CallableList + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.optimisation.optimisation.VariableList + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/optimisation/optimisation_index.rst b/docs/source/stride/api/optimisation/optimisation_index.rst new file mode 100644 index 00000000..67e47e12 --- /dev/null +++ b/docs/source/stride/api/optimisation/optimisation_index.rst @@ -0,0 +1,11 @@ +================== +Optimisation +================== + + +.. toctree:: + + optimisation + variables + functionals + pipelines diff --git a/docs/source/stride/api/optimisation/pipelines.rst b/docs/source/stride/api/optimisation/pipelines.rst new file mode 100644 index 00000000..59ee6bae --- /dev/null +++ b/docs/source/stride/api/optimisation/pipelines.rst @@ -0,0 +1,54 @@ +================== +Pipelines +================== + +.. 
autoclass:: stride.optimisation.pipelines.pipeline.Pipeline + :members: + :undoc-members: + :show-inheritance: + +Default pipelines +----------------- + +.. automodule:: stride.optimisation.pipelines.default_pipelines + :members: + :undoc-members: + :show-inheritance: + +Steps +----- + +.. automodule:: stride.optimisation.pipelines.steps.filter_traces + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: stride.optimisation.pipelines.steps.filter_wavelets + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: stride.optimisation.pipelines.steps.norm_per_shot + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: stride.optimisation.pipelines.steps.smooth_field + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: stride.optimisation.pipelines.steps.norm_field + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: stride.optimisation.pipelines.steps.clip + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: stride.optimisation.pipelines.steps.mask + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/optimisation/variables.rst b/docs/source/stride/api/optimisation/variables.rst new file mode 100644 index 00000000..aef1d487 --- /dev/null +++ b/docs/source/stride/api/optimisation/variables.rst @@ -0,0 +1,8 @@ +================== +Variables +================== + +.. autoclass:: stride.optimisation.variables.Vp + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/stride/api/plotting/fields.rst b/docs/source/stride/api/plotting/fields.rst new file mode 100644 index 00000000..0a81afd1 --- /dev/null +++ b/docs/source/stride/api/plotting/fields.rst @@ -0,0 +1,5 @@ +================== +Field plotting +================== + +.. 
autofunction:: stride.plotting.plot_fields.plot_scalar_field diff --git a/docs/source/stride/api/plotting/plotting_index.rst b/docs/source/stride/api/plotting/plotting_index.rst new file mode 100644 index 00000000..9418e72e --- /dev/null +++ b/docs/source/stride/api/plotting/plotting_index.rst @@ -0,0 +1,11 @@ +================== +Plotting +================== + + +.. toctree:: + + fields + points + traces + show diff --git a/docs/source/stride/api/plotting/points.rst b/docs/source/stride/api/plotting/points.rst new file mode 100644 index 00000000..bd64fb9b --- /dev/null +++ b/docs/source/stride/api/plotting/points.rst @@ -0,0 +1,5 @@ +================== +Point plotting +================== + +.. autofunction:: stride.plotting.plot_points.plot_points diff --git a/docs/source/stride/api/plotting/show.rst b/docs/source/stride/api/plotting/show.rst new file mode 100644 index 00000000..c5c2f4cc --- /dev/null +++ b/docs/source/stride/api/plotting/show.rst @@ -0,0 +1,5 @@ +================== +Show +================== + +.. autofunction:: stride.plotting.plot_show.show diff --git a/docs/source/stride/api/plotting/traces.rst b/docs/source/stride/api/plotting/traces.rst new file mode 100644 index 00000000..d55944bc --- /dev/null +++ b/docs/source/stride/api/plotting/traces.rst @@ -0,0 +1,6 @@ +================== +Trace plotting +================== + +.. autofunction:: stride.plotting.plot_traces.plot_trace +.. autofunction:: stride.plotting.plot_traces.plot_gather diff --git a/docs/source/stride/api/problem_definition/acquisitions.rst b/docs/source/stride/api/problem_definition/acquisitions.rst new file mode 100644 index 00000000..4cabddc3 --- /dev/null +++ b/docs/source/stride/api/problem_definition/acquisitions.rst @@ -0,0 +1,13 @@ +================== +Acquisitions +================== + +.. autoclass:: stride.problem_definition.acquisitions.Shot + :members: + :undoc-members: + :show-inheritance: + +.. 
autoclass:: stride.problem_definition.acquisitions.Acquisitions + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_definition/base.rst b/docs/source/stride/api/problem_definition/base.rst new file mode 100644 index 00000000..537e867d --- /dev/null +++ b/docs/source/stride/api/problem_definition/base.rst @@ -0,0 +1,23 @@ +================== +Base +================== + +.. autoclass:: stride.problem_definition.base.Gridded + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.base.Saved + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.base.GriddedSaved + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.base.ProblemBase + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_definition/data.rst b/docs/source/stride/api/problem_definition/data.rst new file mode 100644 index 00000000..d29f8388 --- /dev/null +++ b/docs/source/stride/api/problem_definition/data.rst @@ -0,0 +1,28 @@ +================== +Data +================== + +.. autoclass:: stride.problem_definition.data.Data + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.data.StructuredData + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.data.ScalarField + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.data.VectorField + :members: + :undoc-members: + :show-inheritance: + +.. 
autoclass:: stride.problem_definition.data.Traces + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_definition/domain.rst b/docs/source/stride/api/problem_definition/domain.rst new file mode 100644 index 00000000..8ec85ba4 --- /dev/null +++ b/docs/source/stride/api/problem_definition/domain.rst @@ -0,0 +1,13 @@ +================== +Domain +================== + +.. autoclass:: stride.problem_definition.domain.Space + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.domain.Time + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_definition/geometry.rst b/docs/source/stride/api/problem_definition/geometry.rst new file mode 100644 index 00000000..118ede2a --- /dev/null +++ b/docs/source/stride/api/problem_definition/geometry.rst @@ -0,0 +1,13 @@ +================== +Geometry +================== + +.. autoclass:: stride.problem_definition.geometry.TransducerLocation + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.geometry.Geometry + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_definition/medium.rst b/docs/source/stride/api/problem_definition/medium.rst new file mode 100644 index 00000000..8cfcac8c --- /dev/null +++ b/docs/source/stride/api/problem_definition/medium.rst @@ -0,0 +1,8 @@ +================== +Medium +================== + +.. autoclass:: stride.problem_definition.medium.Medium + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_definition/problem.rst b/docs/source/stride/api/problem_definition/problem.rst new file mode 100644 index 00000000..9a55aaf1 --- /dev/null +++ b/docs/source/stride/api/problem_definition/problem.rst @@ -0,0 +1,13 @@ +================== +Problem +================== + +.. 
autoclass:: stride.problem_definition.problem.Problem + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.problem.SubProblem + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_definition/problem_definition_index.rst b/docs/source/stride/api/problem_definition/problem_definition_index.rst new file mode 100644 index 00000000..2ba7b622 --- /dev/null +++ b/docs/source/stride/api/problem_definition/problem_definition_index.rst @@ -0,0 +1,15 @@ +================== +Problem definition +================== + + +.. toctree:: + + domain + base + data + problem + medium + transducers + geometry + acquisitions diff --git a/docs/source/stride/api/problem_definition/transducers.rst b/docs/source/stride/api/problem_definition/transducers.rst new file mode 100644 index 00000000..8c6edbde --- /dev/null +++ b/docs/source/stride/api/problem_definition/transducers.rst @@ -0,0 +1,21 @@ +================== +Transducers +================== + +.. autoclass:: stride.problem_definition.transducers.Transducers + :members: + :undoc-members: + :show-inheritance: + +Transducer types +---------------- + +.. autoclass:: stride.problem_definition.transducer_types.transducer.Transducer + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_definition.transducer_types.point_transducer.PointTransducer + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_types/acoustic.rst b/docs/source/stride/api/problem_types/acoustic.rst new file mode 100644 index 00000000..2703066e --- /dev/null +++ b/docs/source/stride/api/problem_types/acoustic.rst @@ -0,0 +1,11 @@ +================== +Acoustic +================== + +Devito +------ + +.. 
autoclass:: stride.problem_types.acoustic.devito.AcousticDevito + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_types/operators.rst b/docs/source/stride/api/problem_types/operators.rst new file mode 100644 index 00000000..ca340f6c --- /dev/null +++ b/docs/source/stride/api/problem_types/operators.rst @@ -0,0 +1,16 @@ +================== +Operators +================== + +Devito +------ + +.. autoclass:: stride.problem_types.operators.devito.GridDevito + :members: + :undoc-members: + :show-inheritance: + +.. autoclass:: stride.problem_types.operators.devito.OperatorDevito + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_types/problem_type.rst b/docs/source/stride/api/problem_types/problem_type.rst new file mode 100644 index 00000000..11073dfa --- /dev/null +++ b/docs/source/stride/api/problem_types/problem_type.rst @@ -0,0 +1,8 @@ +================== +Problem type +================== + +.. autoclass:: stride.problem_types.problem_type.ProblemTypeBase + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/problem_types/problem_types_index.rst b/docs/source/stride/api/problem_types/problem_types_index.rst new file mode 100644 index 00000000..825f106d --- /dev/null +++ b/docs/source/stride/api/problem_types/problem_types_index.rst @@ -0,0 +1,11 @@ +================== +Problem Types +================== + + +.. toctree:: + + problem_type + acoustic + operators + diff --git a/docs/source/stride/api/runner.rst b/docs/source/stride/api/runner.rst new file mode 100644 index 00000000..69ec06a8 --- /dev/null +++ b/docs/source/stride/api/runner.rst @@ -0,0 +1,8 @@ +================== +Runner +================== + +.. 
autoclass:: stride.runner.Runner + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/stride/api/utils/fetch.rst b/docs/source/stride/api/utils/fetch.rst new file mode 100644 index 00000000..056a38e3 --- /dev/null +++ b/docs/source/stride/api/utils/fetch.rst @@ -0,0 +1,5 @@ +================== +Fetch +================== + +.. autofunction:: stride.utils.fetch.fetch diff --git a/docs/source/stride/api/utils/fft.rst b/docs/source/stride/api/utils/fft.rst new file mode 100644 index 00000000..ff7238b8 --- /dev/null +++ b/docs/source/stride/api/utils/fft.rst @@ -0,0 +1,6 @@ +================== +FFT +================== + +.. autofunction:: stride.utils.fft.magnitude_spectrum +.. autofunction:: stride.utils.fft.bandwidth diff --git a/docs/source/stride/api/utils/filters.rst b/docs/source/stride/api/utils/filters.rst new file mode 100644 index 00000000..1aa80af4 --- /dev/null +++ b/docs/source/stride/api/utils/filters.rst @@ -0,0 +1,17 @@ +================== +Filters +================== + +Butterworth +------------ + +.. autofunction:: stride.utils.filters.bandpass_filter_butterworth +.. autofunction:: stride.utils.filters.lowpass_filter_butterworth +.. autofunction:: stride.utils.filters.highpass_filter_butterworth + +FIR +---- + +.. autofunction:: stride.utils.filters.bandpass_filter_fir +.. autofunction:: stride.utils.filters.lowpass_filter_fir +.. autofunction:: stride.utils.filters.highpass_filter_fir diff --git a/docs/source/stride/api/utils/geometries.rst b/docs/source/stride/api/utils/geometries.rst new file mode 100644 index 00000000..146f79ee --- /dev/null +++ b/docs/source/stride/api/utils/geometries.rst @@ -0,0 +1,6 @@ +===================== +Predefined geometries +===================== + +.. autofunction:: stride.utils.geometries.elliptical +.. 
autofunction:: stride.utils.geometries.ellipsoidal diff --git a/docs/source/stride/api/utils/utils_index.rst b/docs/source/stride/api/utils/utils_index.rst new file mode 100644 index 00000000..d4e3b1e2 --- /dev/null +++ b/docs/source/stride/api/utils/utils_index.rst @@ -0,0 +1,12 @@ +================== +Utils +================== + + +.. toctree:: + + filters + fft + wavelets + geometries + fetch diff --git a/docs/source/stride/api/utils/wavelets.rst b/docs/source/stride/api/utils/wavelets.rst new file mode 100644 index 00000000..48c32e80 --- /dev/null +++ b/docs/source/stride/api/utils/wavelets.rst @@ -0,0 +1,6 @@ +================== +Wavelets +================== + +.. autofunction:: stride.utils.wavelets.tone_burst +.. autofunction:: stride.utils.wavelets.ricker diff --git a/docs/source/tutorials.rst b/docs/source/tutorials.rst new file mode 100644 index 00000000..22e39930 --- /dev/null +++ b/docs/source/tutorials.rst @@ -0,0 +1,5 @@ +========= +Tutorials +========= + +TODO diff --git a/environment.yml b/environment.yml new file mode 100644 index 00000000..1dbae0cf --- /dev/null +++ b/environment.yml @@ -0,0 +1,36 @@ +name: stride +channels: + - conda-forge + - defaults +dependencies: + - python>=3.7 + - blosc + - cached-property + - click + - cloudpickle>=1.6 + - cython + - flake8 + - gputil + - h5py>=3.1 + - matplotlib==3.1 + - numpy>=1.19 + - pickle5 + - pip + - pre-commit + - psutil + - pyflakes + - pytest + - pytest-cov + - pyyaml + - pytest-runner + - pyzmq>=20.0 + - scikit-image + - scipy>=1.6 + - sphinx + - sphinx_rtd_theme + - traitsui + - wxPython + - zlib + - pip: + - devito + - python-daemon diff --git a/examples/mosaic/basic_example.py b/examples/mosaic/basic_example.py new file mode 100644 index 00000000..e58ccdb5 --- /dev/null +++ b/examples/mosaic/basic_example.py @@ -0,0 +1,101 @@ + +import time +import numpy as np + +import mosaic +from mosaic import tessera + + +@tessera +class Solver1: + def __init__(self, data): + self.data = data + + def 
solve(self, data): + print('Solve 1') + self.data = self.data + data + + time.sleep(10) + + return self.data + + def solve_more(self): + print('Solve More 1') + time.sleep(5) + + +@tessera +class Solver2: + def __init__(self): + self.data = 0 + + def solve(self, data): + print('Solve 2') + self.data = data*2 + + time.sleep(10) + + return self.data + + def solve_more(self): + print('Solve More 2') + time.sleep(5) + + +async def main(runtime): + array = np.zeros((1024, 1024, 1), dtype=np.float32) + + # These objects will be created remotely + solver_1 = await Solver1.remote(array) + solver_2 = await Solver2.remote() + + # These will run in parallel + # The calls will return immediately by creating a remote + # task + start = time.time() + task_1 = await solver_1.solve(array) + task_2 = await solver_2.solve(array) + + # Do some other work + + # Wait until the remote tasks are finished + await task_1 + await task_2 + + # The results of the tasks stay in the remote worker + # until we request it back + result_1 = await task_1.result() + result_2 = await task_2.result() + + print(result_1.shape) + print(result_2.shape) + print(time.time() - start) + + # These will wait for each other because + # their results depend on each other + start = time.time() + task_1 = await solver_1.solve(array) + task_2 = await solver_2.solve(task_1) + + # Do some other work + + # Wait until the remote tasks are finished + # Now we only need to wait for the second task + await task_2 + print(time.time() - start) + + # These will also wait for each other + start = time.time() + task_1 = await solver_1.solve_more() + task_2 = await solver_2.solve_more(task_1.outputs.done) + + # Do some other work + + # Wait until the remote tasks are finished + # Now we only need to wait for the second task + await task_2 + print(time.time() - start) + + +if __name__ == '__main__': + mosaic.run(main) diff --git a/examples/stride/anastasio2D/data/anastasio2D-TrueModel.h5 
b/examples/stride/anastasio2D/data/anastasio2D-TrueModel.h5 new file mode 100644 index 00000000..e413a670 Binary files /dev/null and b/examples/stride/anastasio2D/data/anastasio2D-TrueModel.h5 differ diff --git a/examples/stride/anastasio2D/forward.ipynb b/examples/stride/anastasio2D/forward.ipynb new file mode 100644 index 00000000..f4bf8c68 --- /dev/null +++ b/examples/stride/anastasio2D/forward.ipynb @@ -0,0 +1,4402 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "electrical-ferry", + "metadata": {}, + "outputs": [], + "source": [ + "import mosaic\n", + "\n", + "from stride import *\n", + "from stride.utils import wavelets\n", + "\n", + "%matplotlib notebook" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "lightweight-folder", + "metadata": {}, + "outputs": [], + "source": [ + "# Create the grid\n", + "shape = (356, 385)\n", + "extra = (50, 50)\n", + "absorbing = (40, 40)\n", + "spacing = (0.5e-3, 0.5e-3)\n", + "\n", + "space = Space(shape=shape,\n", + " extra=extra,\n", + " absorbing=absorbing,\n", + " spacing=spacing)\n", + "\n", + "start = 0.\n", + "step = 0.08e-6\n", + "num = 2500\n", + "\n", + "time = Time(start=start,\n", + " step=step,\n", + " num=num)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "funded-belize", + "metadata": {}, + "outputs": [], + "source": [ + "# Create problem\n", + "problem = Problem(name='anastasio2D',\n", + " space=space, time=time)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "sublime-leader", + "metadata": {}, + "outputs": [], + "source": [ + "# Create medium\n", + "vp = ScalarField('vp', grid=problem.grid)\n", + "vp.load('data/anastasio2D-TrueModel.h5')\n", + "\n", + "problem.medium.add(vp)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "stunning-leisure", + "metadata": {}, + "outputs": [], + "source": [ + "# Create transducers\n", + "problem.transducers.default()" + ] + }, + { + "cell_type": "code", + 
"execution_count": 6, + "id": "chronic-insight", + "metadata": {}, + "outputs": [], + "source": [ + "# Create geometry\n", + "num_locations = 128\n", + "problem.geometry.default('elliptical', num_locations)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "sustainable-pittsburgh", + "metadata": {}, + "outputs": [], + "source": [ + "# Create acquisitions\n", + "problem.acquisitions.default()\n", + "\n", + "# Create wavelets\n", + "f_centre = 0.50e6\n", + "n_cycles = 3\n", + "\n", + "for shot in problem.acquisitions.shots:\n", + " shot.wavelets.data[0, :] = wavelets.tone_burst(f_centre, n_cycles,\n", + " time.num, time.step)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "recovered-madrid", + "metadata": {}, + "outputs": [ + { + "data": { + "application/javascript": [ + "/* Put everything inside the global mpl namespace */\n", + "window.mpl = {};\n", + "\n", + "\n", + "mpl.get_websocket_type = function() {\n", + " if (typeof(WebSocket) !== 'undefined') {\n", + " return WebSocket;\n", + " } else if (typeof(MozWebSocket) !== 'undefined') {\n", + " return MozWebSocket;\n", + " } else {\n", + " alert('Your browser does not have WebSocket support. ' +\n", + " 'Please try Chrome, Safari or Firefox ≥ 6. ' +\n", + " 'Firefox 4 and 5 are also supported but you ' +\n", + " 'have to enable WebSockets in about:config.');\n", + " };\n", + "}\n", + "\n", + "mpl.figure = function(figure_id, websocket, ondownload, parent_element) {\n", + " this.id = figure_id;\n", + "\n", + " this.ws = websocket;\n", + "\n", + " this.supports_binary = (this.ws.binaryType != undefined);\n", + "\n", + " if (!this.supports_binary) {\n", + " var warnings = document.getElementById(\"mpl-warnings\");\n", + " if (warnings) {\n", + " warnings.style.display = 'block';\n", + " warnings.textContent = (\n", + " \"This browser does not support binary websocket messages. 
\" +\n", + " \"Performance may be slow.\");\n", + " }\n", + " }\n", + "\n", + " this.imageObj = new Image();\n", + "\n", + " this.context = undefined;\n", + " this.message = undefined;\n", + " this.canvas = undefined;\n", + " this.rubberband_canvas = undefined;\n", + " this.rubberband_context = undefined;\n", + " this.format_dropdown = undefined;\n", + "\n", + " this.image_mode = 'full';\n", + "\n", + " this.root = $('
');\n", + " this._root_extra_style(this.root)\n", + " this.root.attr('style', 'display: inline-block');\n", + "\n", + " $(parent_element).append(this.root);\n", + "\n", + " this._init_header(this);\n", + " this._init_canvas(this);\n", + " this._init_toolbar(this);\n", + "\n", + " var fig = this;\n", + "\n", + " this.waiting = false;\n", + "\n", + " this.ws.onopen = function () {\n", + " fig.send_message(\"supports_binary\", {value: fig.supports_binary});\n", + " fig.send_message(\"send_image_mode\", {});\n", + " if (mpl.ratio != 1) {\n", + " fig.send_message(\"set_dpi_ratio\", {'dpi_ratio': mpl.ratio});\n", + " }\n", + " fig.send_message(\"refresh\", {});\n", + " }\n", + "\n", + " this.imageObj.onload = function() {\n", + " if (fig.image_mode == 'full') {\n", + " // Full images could contain transparency (where diff images\n", + " // almost always do), so we need to clear the canvas so that\n", + " // there is no ghosting.\n", + " fig.context.clearRect(0, 0, fig.canvas.width, fig.canvas.height);\n", + " }\n", + " fig.context.drawImage(fig.imageObj, 0, 0);\n", + " };\n", + "\n", + " this.imageObj.onunload = function() {\n", + " fig.ws.close();\n", + " }\n", + "\n", + " this.ws.onmessage = this._make_on_message_function(this);\n", + "\n", + " this.ondownload = ondownload;\n", + "}\n", + "\n", + "mpl.figure.prototype._init_header = function() {\n", + " var titlebar = $(\n", + " '
');\n", + " var titletext = $(\n", + " '
');\n", + " titlebar.append(titletext)\n", + " this.root.append(titlebar);\n", + " this.header = titletext[0];\n", + "}\n", + "\n", + "\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(canvas_div) {\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._init_canvas = function() {\n", + " var fig = this;\n", + "\n", + " var canvas_div = $('
');\n", + "\n", + " canvas_div.attr('style', 'position: relative; clear: both; outline: 0');\n", + "\n", + " function canvas_keyboard_event(event) {\n", + " return fig.key_event(event, event['data']);\n", + " }\n", + "\n", + " canvas_div.keydown('key_press', canvas_keyboard_event);\n", + " canvas_div.keyup('key_release', canvas_keyboard_event);\n", + " this.canvas_div = canvas_div\n", + " this._canvas_extra_style(canvas_div)\n", + " this.root.append(canvas_div);\n", + "\n", + " var canvas = $('');\n", + " canvas.addClass('mpl-canvas');\n", + " canvas.attr('style', \"left: 0; top: 0; z-index: 0; outline: 0\")\n", + "\n", + " this.canvas = canvas[0];\n", + " this.context = canvas[0].getContext(\"2d\");\n", + "\n", + " var backingStore = this.context.backingStorePixelRatio ||\n", + "\tthis.context.webkitBackingStorePixelRatio ||\n", + "\tthis.context.mozBackingStorePixelRatio ||\n", + "\tthis.context.msBackingStorePixelRatio ||\n", + "\tthis.context.oBackingStorePixelRatio ||\n", + "\tthis.context.backingStorePixelRatio || 1;\n", + "\n", + " mpl.ratio = (window.devicePixelRatio || 1) / backingStore;\n", + "\n", + " var rubberband = $('');\n", + " rubberband.attr('style', \"position: absolute; left: 0; top: 0; z-index: 1;\")\n", + "\n", + " var pass_mouse_events = true;\n", + "\n", + " canvas_div.resizable({\n", + " start: function(event, ui) {\n", + " pass_mouse_events = false;\n", + " },\n", + " resize: function(event, ui) {\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " stop: function(event, ui) {\n", + " pass_mouse_events = true;\n", + " fig.request_resize(ui.size.width, ui.size.height);\n", + " },\n", + " });\n", + "\n", + " function mouse_event_fn(event) {\n", + " if (pass_mouse_events)\n", + " return fig.mouse_event(event, event['data']);\n", + " }\n", + "\n", + " rubberband.mousedown('button_press', mouse_event_fn);\n", + " rubberband.mouseup('button_release', mouse_event_fn);\n", + " // Throttle sequential mouse events to 1 every 
20ms.\n", + " rubberband.mousemove('motion_notify', mouse_event_fn);\n", + "\n", + " rubberband.mouseenter('figure_enter', mouse_event_fn);\n", + " rubberband.mouseleave('figure_leave', mouse_event_fn);\n", + "\n", + " canvas_div.on(\"wheel\", function (event) {\n", + " event = event.originalEvent;\n", + " event['data'] = 'scroll'\n", + " if (event.deltaY < 0) {\n", + " event.step = 1;\n", + " } else {\n", + " event.step = -1;\n", + " }\n", + " mouse_event_fn(event);\n", + " });\n", + "\n", + " canvas_div.append(canvas);\n", + " canvas_div.append(rubberband);\n", + "\n", + " this.rubberband = rubberband;\n", + " this.rubberband_canvas = rubberband[0];\n", + " this.rubberband_context = rubberband[0].getContext(\"2d\");\n", + " this.rubberband_context.strokeStyle = \"#000000\";\n", + "\n", + " this._resize_canvas = function(width, height) {\n", + " // Keep the size of the canvas, canvas container, and rubber band\n", + " // canvas in synch.\n", + " canvas_div.css('width', width)\n", + " canvas_div.css('height', height)\n", + "\n", + " canvas.attr('width', width * mpl.ratio);\n", + " canvas.attr('height', height * mpl.ratio);\n", + " canvas.attr('style', 'width: ' + width + 'px; height: ' + height + 'px;');\n", + "\n", + " rubberband.attr('width', width);\n", + " rubberband.attr('height', height);\n", + " }\n", + "\n", + " // Set the figure to an initial 600x600px, this will subsequently be updated\n", + " // upon first draw.\n", + " this._resize_canvas(600, 600);\n", + "\n", + " // Disable right mouse context menu.\n", + " $(this.rubberband_canvas).bind(\"contextmenu\",function(e){\n", + " return false;\n", + " });\n", + "\n", + " function set_focus () {\n", + " canvas.focus();\n", + " canvas_div.focus();\n", + " }\n", + "\n", + " window.setTimeout(set_focus, 100);\n", + "}\n", + "\n", + "mpl.figure.prototype._init_toolbar = function() {\n", + " var fig = this;\n", + "\n", + " var nav_element = $('
');\n", + " nav_element.attr('style', 'width: 100%');\n", + " this.root.append(nav_element);\n", + "\n", + " // Define a callback function for later on.\n", + " function toolbar_event(event) {\n", + " return fig.toolbar_button_onclick(event['data']);\n", + " }\n", + " function toolbar_mouse_event(event) {\n", + " return fig.toolbar_button_onmouseover(event['data']);\n", + " }\n", + "\n", + " for(var toolbar_ind in mpl.toolbar_items) {\n", + " var name = mpl.toolbar_items[toolbar_ind][0];\n", + " var tooltip = mpl.toolbar_items[toolbar_ind][1];\n", + " var image = mpl.toolbar_items[toolbar_ind][2];\n", + " var method_name = mpl.toolbar_items[toolbar_ind][3];\n", + "\n", + " if (!name) {\n", + " // put a spacer in here.\n", + " continue;\n", + " }\n", + " var button = $('');\n", + " button.click(method_name, toolbar_event);\n", + " button.mouseover(tooltip, toolbar_mouse_event);\n", + " nav_element.append(button);\n", + " }\n", + "\n", + " // Add the status bar.\n", + " var status_bar = $('');\n", + " nav_element.append(status_bar);\n", + " this.message = status_bar[0];\n", + "\n", + " // Add the close button to the window.\n", + " var buttongrp = $('
');\n", + " var button = $('');\n", + " button.click(function (evt) { fig.handle_close(fig, {}); } );\n", + " button.mouseover('Stop Interaction', toolbar_mouse_event);\n", + " buttongrp.append(button);\n", + " var titlebar = this.root.find($('.ui-dialog-titlebar'));\n", + " titlebar.prepend(buttongrp);\n", + "}\n", + "\n", + "mpl.figure.prototype._root_extra_style = function(el){\n", + " var fig = this\n", + " el.on(\"remove\", function(){\n", + "\tfig.close_ws(fig, {});\n", + " });\n", + "}\n", + "\n", + "mpl.figure.prototype._canvas_extra_style = function(el){\n", + " // this is important to make the div 'focusable\n", + " el.attr('tabindex', 0)\n", + " // reach out to IPython and tell the keyboard manager to turn it's self\n", + " // off when our div gets focus\n", + "\n", + " // location in version 3\n", + " if (IPython.notebook.keyboard_manager) {\n", + " IPython.notebook.keyboard_manager.register_events(el);\n", + " }\n", + " else {\n", + " // location in version 2\n", + " IPython.keyboard_manager.register_events(el);\n", + " }\n", + "\n", + "}\n", + "\n", + "mpl.figure.prototype._key_event_extra = function(event, name) {\n", + " var manager = IPython.notebook.keyboard_manager;\n", + " if (!manager)\n", + " manager = IPython.keyboard_manager;\n", + "\n", + " // Check for shift+enter\n", + " if (event.shiftKey && event.which == 13) {\n", + " this.canvas_div.blur();\n", + " event.shiftKey = false;\n", + " // Send a \"J\" for go to next cell\n", + " event.which = 74;\n", + " event.keyCode = 74;\n", + " manager.command_mode();\n", + " manager.handle_keydown(event);\n", + " }\n", + "}\n", + "\n", + "mpl.figure.prototype.handle_save = function(fig, msg) {\n", + " fig.ondownload(fig, null);\n", + "}\n", + "\n", + "\n", + "mpl.find_output_cell = function(html_output) {\n", + " // Return the cell and output element which can be found *uniquely* in the notebook.\n", + " // Note - this is a bit hacky, but it is done because the \"notebook_saving.Notebook\"\n", + " // 
IPython event is triggered only after the cells have been serialised, which for\n", + " // our purposes (turning an active figure into a static one), is too late.\n", + " var cells = IPython.notebook.get_cells();\n", + " var ncells = cells.length;\n", + " for (var i=0; i= 3 moved mimebundle to data attribute of output\n", + " data = data.data;\n", + " }\n", + " if (data['text/html'] == html_output) {\n", + " return [cell, data, j];\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Register the function which deals with the matplotlib target/channel.\n", + "// The kernel may be null if the page has been refreshed.\n", + "if (IPython.notebook.kernel != null) {\n", + " IPython.notebook.kernel.comm_manager.register_target('matplotlib', mpl.mpl_figure_comm);\n", + "}\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Plot\n", + "problem.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "civic-bidding", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:25:21,550 - INFO HEAD Listening at \n", + "2021-02-25 16:25:21,580 - INFO MONITOR Listening at \n", + "2021-02-25 16:25:21,613 - INFO NODE:0 Listening at \n", + "2021-02-25 16:25:21,660 - INFO WORKER:0:0 Listening at \n" + ] + } + ], + "source": [ + "await mosaic.interactive('on')" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "logical-aquarium", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:25:26,569 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:26,571 - INFO HEAD Giving shot 0 to worker:0:0\n", + "2021-02-25 16:25:26,707 - INFO WORKER:0:0 (ShotID 0) Preparing to run shot\n", + "2021-02-25 16:25:26,708 - INFO WORKER:0:0 Estimated 
bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:26,708 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:26,709 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:28,810 - INFO WORKER:0:0 Operator `acoustic_iso_state` configuration:\n", + "2021-02-25 16:25:28,813 - INFO WORKER:0:0 \t * autotuning=['aggressive', 'runtime']\n", + "2021-02-25 16:25:28,813 - INFO WORKER:0:0 \t * develop-mode=False\n", + "2021-02-25 16:25:28,813 - INFO WORKER:0:0 \t * mpi=False\n", + "2021-02-25 16:25:28,814 - INFO WORKER:0:0 \t * log-level=DEBUG\n", + "2021-02-25 16:25:28,814 - INFO WORKER:0:0 \t * subs={h_x: 0.0005, h_y: 0.0005}\n", + "2021-02-25 16:25:28,814 - INFO WORKER:0:0 \t * opt=advanced\n", + "2021-02-25 16:25:28,815 - INFO WORKER:0:0 \t * platform=None\n", + "2021-02-25 16:25:28,815 - INFO WORKER:0:0 \t * language=openmp\n", + "2021-02-25 16:25:28,815 - INFO WORKER:0:0 \t * compiler=None\n", + "2021-02-25 16:25:28,815 - INFO WORKER:0:0 Operator `acoustic_iso_state` generated in 0.98 s\n", + "2021-02-25 16:25:28,816 - INFO WORKER:0:0 * lowering.Expressions: 0.41 s (42.1 %)\n", + "2021-02-25 16:25:28,816 - INFO WORKER:0:0 * lowering.Clusters: 0.29 s (29.8 %)\n", + "2021-02-25 16:25:28,819 - INFO WORKER:0:0 * lowering.IET: 0.24 s (24.7 %)\n", + "2021-02-25 16:25:28,819 - INFO WORKER:0:0 Flops reduction after symbolic optimization: [109 --> 59]\n", + "2021-02-25 16:25:28,819 - INFO WORKER:0:0 Operator `acoustic_iso_state` fetched `/tmp/devito-jitcache-uid1000/078e36ed081c6c013a98205b41b0e33d5afc5e09.c` in 0.08 s from jit-cache\n", + "2021-02-25 16:25:28,820 - INFO WORKER:0:0 (ShotID 0) Running state equation for shot\n", + "2021-02-25 16:25:28,820 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:28,820 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.68 s\n", + "2021-02-25 16:25:28,820 - INFO WORKER:0:0 Global performance: [OI=2.90, 48.07 GFlops/s, 
0.82 GPts/s]\n", + "2021-02-25 16:25:28,821 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:28,821 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.47 s [OI=2.90, 70.91 GFlops/s, 1.21 GPts/s]\n", + "2021-02-25 16:25:28,821 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.16 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:28,822 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.07 s [OI=3.84, 0.24 GFlops/s]\n", + "2021-02-25 16:25:28,822 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:28,822 - INFO WORKER:0:0 (ShotID 0) Completed state equation run for shot\n", + "2021-02-25 16:25:28,926 - INFO HEAD Shot 0 retrieved\n", + "2021-02-25 16:25:30,029 - INFO HEAD Appended traces for shot 0 to observed file\n", + "2021-02-25 16:25:30,182 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:30,183 - INFO HEAD Giving shot 1 to worker:0:0\n", + "2021-02-25 16:25:30,365 - INFO WORKER:0:0 (ShotID 1) Preparing to run shot\n", + "2021-02-25 16:25:30,366 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:30,366 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:30,366 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:30,799 - INFO WORKER:0:0 (ShotID 1) Running state equation for shot\n", + "2021-02-25 16:25:30,801 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:30,805 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:25:30,807 - INFO WORKER:0:0 Global performance: [OI=2.90, 96.42 GFlops/s, 1.64 GPts/s]\n", + "2021-02-25 16:25:30,810 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:30,812 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 116.56 GFlops/s, 1.98 GPts/s]\n", + "2021-02-25 
16:25:30,817 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.05 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:30,819 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.00 GFlops/s]\n", + "2021-02-25 16:25:30,824 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:30,828 - INFO WORKER:0:0 (ShotID 1) Completed state equation run for shot\n", + "2021-02-25 16:25:30,901 - INFO HEAD Shot 1 retrieved\n", + "2021-02-25 16:25:31,162 - INFO HEAD Appended traces for shot 1 to observed file\n", + "2021-02-25 16:25:31,322 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:31,323 - INFO HEAD Giving shot 2 to worker:0:0\n", + "2021-02-25 16:25:31,492 - INFO WORKER:0:0 (ShotID 2) Preparing to run shot\n", + "2021-02-25 16:25:31,492 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:31,492 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:31,493 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:32,035 - INFO WORKER:0:0 (ShotID 2) Running state equation for shot\n", + "2021-02-25 16:25:32,035 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:32,036 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.44 s\n", + "2021-02-25 16:25:32,036 - INFO WORKER:0:0 Global performance: [OI=2.90, 75.66 GFlops/s, 1.29 GPts/s]\n", + "2021-02-25 16:25:32,036 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:32,037 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.33 s [OI=2.90, 98.86 GFlops/s, 1.68 GPts/s]\n", + "2021-02-25 16:25:32,037 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.08 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:32,037 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.57 GFlops/s]\n", + "2021-02-25 16:25:32,038 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:32,038 - INFO WORKER:0:0 (ShotID 2) Completed state equation run for shot\n", + "2021-02-25 16:25:32,138 - INFO HEAD Shot 2 retrieved\n", + "2021-02-25 16:25:32,389 - INFO HEAD Appended traces for shot 2 to observed file\n", + "2021-02-25 16:25:32,540 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:32,541 - INFO HEAD Giving shot 3 to worker:0:0\n", + "2021-02-25 16:25:32,703 - INFO WORKER:0:0 (ShotID 3) Preparing to run shot\n", + "2021-02-25 16:25:32,704 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:32,704 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:32,704 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:33,138 - INFO WORKER:0:0 (ShotID 3) Running state equation for shot\n", + "2021-02-25 16:25:33,146 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:33,149 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:25:33,154 - INFO WORKER:0:0 Global performance: [OI=2.90, 96.40 GFlops/s, 1.64 GPts/s]\n", + "2021-02-25 16:25:33,159 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:33,163 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 105.48 GFlops/s, 1.79 GPts/s]\n", + "2021-02-25 16:25:33,168 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:25:33,173 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.49 GFlops/s]\n", + "2021-02-25 16:25:33,175 - INFO 
WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:33,177 - INFO WORKER:0:0 (ShotID 3) Completed state equation run for shot\n", + "2021-02-25 16:25:33,240 - INFO HEAD Shot 3 retrieved\n", + "2021-02-25 16:25:33,497 - INFO HEAD Appended traces for shot 3 to observed file\n", + "2021-02-25 16:25:33,665 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:33,665 - INFO HEAD Giving shot 4 to worker:0:0\n", + "2021-02-25 16:25:33,833 - INFO WORKER:0:0 (ShotID 4) Preparing to run shot\n", + "2021-02-25 16:25:33,834 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:33,834 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:33,835 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:34,207 - INFO WORKER:0:0 (ShotID 4) Running state equation for shot\n", + "2021-02-25 16:25:34,219 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:34,223 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.28 s\n", + "2021-02-25 16:25:34,225 - INFO WORKER:0:0 Global performance: [OI=2.90, 117.12 GFlops/s, 1.99 GPts/s]\n", + "2021-02-25 16:25:34,226 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:34,228 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 122.30 GFlops/s, 2.08 GPts/s]\n", + "2021-02-25 16:25:34,230 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:34,241 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.50 GFlops/s]\n", + "2021-02-25 16:25:34,244 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:34,246 - INFO WORKER:0:0 (ShotID 4) Completed state equation run for shot\n", + "2021-02-25 16:25:34,310 - INFO 
HEAD Shot 4 retrieved\n", + "2021-02-25 16:25:34,581 - INFO HEAD Appended traces for shot 4 to observed file\n", + "2021-02-25 16:25:34,733 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:34,733 - INFO HEAD Giving shot 5 to worker:0:0\n", + "2021-02-25 16:25:34,895 - INFO WORKER:0:0 (ShotID 5) Preparing to run shot\n", + "2021-02-25 16:25:34,895 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:34,895 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:34,896 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:35,408 - INFO WORKER:0:0 (ShotID 5) Running state equation for shot\n", + "2021-02-25 16:25:35,408 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:35,409 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.42 s\n", + "2021-02-25 16:25:35,409 - INFO WORKER:0:0 Global performance: [OI=2.90, 78.81 GFlops/s, 1.34 GPts/s]\n", + "2021-02-25 16:25:35,410 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:35,410 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.35 s [OI=2.90, 95.69 GFlops/s, 1.63 GPts/s]\n", + "2021-02-25 16:25:35,410 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.07 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:35,411 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.16 GFlops/s]\n", + "2021-02-25 16:25:35,411 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:35,411 - INFO WORKER:0:0 (ShotID 5) Completed state equation run for shot\n", + "2021-02-25 16:25:35,511 - INFO HEAD Shot 5 retrieved\n", + "2021-02-25 16:25:35,767 - INFO HEAD Appended traces for shot 5 to observed file\n", + "2021-02-25 16:25:35,921 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:35,922 - INFO HEAD Giving shot 6 to 
worker:0:0\n", + "2021-02-25 16:25:36,094 - INFO WORKER:0:0 (ShotID 6) Preparing to run shot\n", + "2021-02-25 16:25:36,094 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:36,095 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:36,095 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:36,633 - INFO WORKER:0:0 (ShotID 6) Running state equation for shot\n", + "2021-02-25 16:25:36,634 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:36,634 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.44 s\n", + "2021-02-25 16:25:36,635 - INFO WORKER:0:0 Global performance: [OI=2.90, 74.94 GFlops/s, 1.27 GPts/s]\n", + "2021-02-25 16:25:36,635 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:36,636 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.39 s [OI=2.90, 84.30 GFlops/s, 1.43 GPts/s]\n", + "2021-02-25 16:25:36,636 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:36,636 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.51 GFlops/s]\n", + "2021-02-25 16:25:36,637 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:36,637 - INFO WORKER:0:0 (ShotID 6) Completed state equation run for shot\n", + "2021-02-25 16:25:36,738 - INFO HEAD Shot 6 retrieved\n", + "2021-02-25 16:25:36,991 - INFO HEAD Appended traces for shot 6 to observed file\n", + "2021-02-25 16:25:37,139 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:37,139 - INFO HEAD Giving shot 7 to worker:0:0\n", + "2021-02-25 16:25:37,303 - INFO WORKER:0:0 (ShotID 7) Preparing to run shot\n", + "2021-02-25 16:25:37,303 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 
16:25:37,304 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:37,304 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:37,739 - INFO WORKER:0:0 (ShotID 7) Running state equation for shot\n", + "2021-02-25 16:25:37,741 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:37,741 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:25:37,741 - INFO WORKER:0:0 Global performance: [OI=2.90, 96.18 GFlops/s, 1.63 GPts/s]\n", + "2021-02-25 16:25:37,742 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:37,742 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 117.47 GFlops/s, 2.00 GPts/s]\n", + "2021-02-25 16:25:37,743 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.05 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:37,743 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.81 GFlops/s]\n", + "2021-02-25 16:25:37,743 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:37,744 - INFO WORKER:0:0 (ShotID 7) Completed state equation run for shot\n", + "2021-02-25 16:25:37,843 - INFO HEAD Shot 7 retrieved\n", + "2021-02-25 16:25:38,091 - INFO HEAD Appended traces for shot 7 to observed file\n", + "2021-02-25 16:25:38,245 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:38,245 - INFO HEAD Giving shot 8 to worker:0:0\n", + "2021-02-25 16:25:38,411 - INFO WORKER:0:0 (ShotID 8) Preparing to run shot\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:25:38,411 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:38,412 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:38,412 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", 
+ "2021-02-25 16:25:38,892 - INFO WORKER:0:0 (ShotID 8) Running state equation for shot\n", + "2021-02-25 16:25:38,892 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:38,893 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.39 s\n", + "2021-02-25 16:25:38,893 - INFO WORKER:0:0 Global performance: [OI=2.90, 85.29 GFlops/s, 1.45 GPts/s]\n", + "2021-02-25 16:25:38,894 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:38,894 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 108.19 GFlops/s, 1.84 GPts/s]\n", + "2021-02-25 16:25:38,894 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.06 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:38,895 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.67 GFlops/s]\n", + "2021-02-25 16:25:38,895 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:38,895 - INFO WORKER:0:0 (ShotID 8) Completed state equation run for shot\n", + "2021-02-25 16:25:38,995 - INFO HEAD Shot 8 retrieved\n", + "2021-02-25 16:25:39,235 - INFO HEAD Appended traces for shot 8 to observed file\n", + "2021-02-25 16:25:39,387 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:39,388 - INFO HEAD Giving shot 9 to worker:0:0\n", + "2021-02-25 16:25:39,551 - INFO WORKER:0:0 (ShotID 9) Preparing to run shot\n", + "2021-02-25 16:25:39,552 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:39,552 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:39,553 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:40,074 - INFO WORKER:0:0 (ShotID 9) Running state equation for shot\n", + "2021-02-25 16:25:40,075 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:40,076 - INFO WORKER:0:0 
Operator `acoustic_iso_state` ran in 0.43 s\n", + "2021-02-25 16:25:40,076 - INFO WORKER:0:0 Global performance: [OI=2.90, 76.74 GFlops/s, 1.31 GPts/s]\n", + "2021-02-25 16:25:40,076 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:40,077 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.36 s [OI=2.90, 91.15 GFlops/s, 1.55 GPts/s]\n", + "2021-02-25 16:25:40,077 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:40,078 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.52 GFlops/s]\n", + "2021-02-25 16:25:40,078 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:40,078 - INFO WORKER:0:0 (ShotID 9) Completed state equation run for shot\n", + "2021-02-25 16:25:40,178 - INFO HEAD Shot 9 retrieved\n", + "2021-02-25 16:25:40,424 - INFO HEAD Appended traces for shot 9 to observed file\n", + "2021-02-25 16:25:40,580 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:40,581 - INFO HEAD Giving shot 10 to worker:0:0\n", + "2021-02-25 16:25:40,742 - INFO WORKER:0:0 (ShotID 10) Preparing to run shot\n", + "2021-02-25 16:25:40,743 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:40,743 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:40,744 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:41,193 - INFO WORKER:0:0 (ShotID 10) Running state equation for shot\n", + "2021-02-25 16:25:41,199 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:41,201 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.36 s\n", + "2021-02-25 16:25:41,203 - INFO WORKER:0:0 Global performance: [OI=2.90, 92.57 GFlops/s, 1.57 GPts/s]\n", + "2021-02-25 16:25:41,205 - INFO WORKER:0:0 Local performance:\n", + 
"2021-02-25 16:25:41,207 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 105.89 GFlops/s, 1.80 GPts/s]\n", + "2021-02-25 16:25:41,209 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:41,211 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.27 GFlops/s]\n", + "2021-02-25 16:25:41,213 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:41,214 - INFO WORKER:0:0 (ShotID 10) Completed state equation run for shot\n", + "2021-02-25 16:25:41,295 - INFO HEAD Shot 10 retrieved\n", + "2021-02-25 16:25:41,541 - INFO HEAD Appended traces for shot 10 to observed file\n", + "2021-02-25 16:25:41,694 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:41,695 - INFO HEAD Giving shot 11 to worker:0:0\n", + "2021-02-25 16:25:41,865 - INFO WORKER:0:0 (ShotID 11) Preparing to run shot\n", + "2021-02-25 16:25:41,865 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:41,866 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:41,866 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:42,358 - INFO WORKER:0:0 (ShotID 11) Running state equation for shot\n", + "2021-02-25 16:25:42,365 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:42,369 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.40 s\n", + "2021-02-25 16:25:42,371 - INFO WORKER:0:0 Global performance: [OI=2.90, 82.88 GFlops/s, 1.41 GPts/s]\n", + "2021-02-25 16:25:42,373 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:42,386 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 107.37 GFlops/s, 1.82 GPts/s]\n", + "2021-02-25 16:25:42,394 - INFO WORKER:0:0 * 
section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.07 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:42,396 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.67 GFlops/s]\n", + "2021-02-25 16:25:42,402 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:42,405 - INFO WORKER:0:0 (ShotID 11) Completed state equation run for shot\n", + "2021-02-25 16:25:42,470 - INFO HEAD Shot 11 retrieved\n", + "2021-02-25 16:25:42,706 - INFO HEAD Appended traces for shot 11 to observed file\n", + "2021-02-25 16:25:42,857 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:42,858 - INFO HEAD Giving shot 12 to worker:0:0\n", + "2021-02-25 16:25:43,032 - INFO WORKER:0:0 (ShotID 12) Preparing to run shot\n", + "2021-02-25 16:25:43,032 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:43,033 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:43,034 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:43,469 - INFO WORKER:0:0 (ShotID 12) Running state equation for shot\n", + "2021-02-25 16:25:43,469 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:43,470 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:25:43,470 - INFO WORKER:0:0 Global performance: [OI=2.90, 96.68 GFlops/s, 1.64 GPts/s]\n", + "2021-02-25 16:25:43,471 - INFO WORKER:0:0 Local performance:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:25:43,471 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 107.75 GFlops/s, 1.83 GPts/s]\n", + "2021-02-25 16:25:43,472 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:43,472 - INFO 
WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.76 GFlops/s]\n", + "2021-02-25 16:25:43,473 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:43,473 - INFO WORKER:0:0 (ShotID 12) Completed state equation run for shot\n", + "2021-02-25 16:25:43,572 - INFO HEAD Shot 12 retrieved\n", + "2021-02-25 16:25:43,860 - INFO HEAD Appended traces for shot 12 to observed file\n", + "2021-02-25 16:25:44,017 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:44,017 - INFO HEAD Giving shot 13 to worker:0:0\n", + "2021-02-25 16:25:44,191 - INFO WORKER:0:0 (ShotID 13) Preparing to run shot\n", + "2021-02-25 16:25:44,192 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:44,192 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:44,192 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:44,664 - INFO WORKER:0:0 (ShotID 13) Running state equation for shot\n", + "2021-02-25 16:25:44,665 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:44,665 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.37 s\n", + "2021-02-25 16:25:44,666 - INFO WORKER:0:0 Global performance: [OI=2.90, 88.29 GFlops/s, 1.50 GPts/s]\n", + "2021-02-25 16:25:44,667 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:44,667 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.32 s [OI=2.90, 103.15 GFlops/s, 1.75 GPts/s]\n", + "2021-02-25 16:25:44,668 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:44,668 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.08 GFlops/s]\n", + "2021-02-25 16:25:44,669 - INFO WORKER:0:0 Performance[mode=advanced] arguments: 
{'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:44,669 - INFO WORKER:0:0 (ShotID 13) Completed state equation run for shot\n", + "2021-02-25 16:25:44,768 - INFO HEAD Shot 13 retrieved\n", + "2021-02-25 16:25:45,017 - INFO HEAD Appended traces for shot 13 to observed file\n", + "2021-02-25 16:25:45,172 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:45,172 - INFO HEAD Giving shot 14 to worker:0:0\n", + "2021-02-25 16:25:45,342 - INFO WORKER:0:0 (ShotID 14) Preparing to run shot\n", + "2021-02-25 16:25:45,342 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:45,343 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:45,343 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:46,173 - INFO WORKER:0:0 (ShotID 14) Running state equation for shot\n", + "2021-02-25 16:25:46,173 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:46,174 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.73 s\n", + "2021-02-25 16:25:46,174 - INFO WORKER:0:0 Global performance: [OI=2.90, 44.90 GFlops/s, 0.77 GPts/s]\n", + "2021-02-25 16:25:46,175 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:46,175 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.49 s [OI=2.90, 66.77 GFlops/s, 1.14 GPts/s]\n", + "2021-02-25 16:25:46,175 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.18 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:46,176 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.07 s [OI=3.84, 0.22 GFlops/s]\n", + "2021-02-25 16:25:46,176 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:46,177 - INFO WORKER:0:0 (ShotID 14) Completed state equation run for shot\n", + "2021-02-25 16:25:46,275 - INFO HEAD Shot 14 retrieved\n", + "2021-02-25 
16:25:46,526 - INFO HEAD Appended traces for shot 14 to observed file\n", + "2021-02-25 16:25:46,673 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:46,673 - INFO HEAD Giving shot 15 to worker:0:0\n", + "2021-02-25 16:25:46,838 - INFO WORKER:0:0 (ShotID 15) Preparing to run shot\n", + "2021-02-25 16:25:46,839 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:46,839 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:46,840 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:47,333 - INFO WORKER:0:0 (ShotID 15) Running state equation for shot\n", + "2021-02-25 16:25:47,333 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:47,334 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.40 s\n", + "2021-02-25 16:25:47,334 - INFO WORKER:0:0 Global performance: [OI=2.90, 82.29 GFlops/s, 1.40 GPts/s]\n", + "2021-02-25 16:25:47,334 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:47,335 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.34 s [OI=2.90, 96.20 GFlops/s, 1.64 GPts/s]\n", + "2021-02-25 16:25:47,335 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.05 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:47,335 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.19 GFlops/s]\n", + "2021-02-25 16:25:47,336 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:47,336 - INFO WORKER:0:0 (ShotID 15) Completed state equation run for shot\n", + "2021-02-25 16:25:47,435 - INFO HEAD Shot 15 retrieved\n", + "2021-02-25 16:25:47,673 - INFO HEAD Appended traces for shot 15 to observed file\n", + "2021-02-25 16:25:47,819 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:47,820 - INFO HEAD Giving shot 16 to worker:0:0\n", + "2021-02-25 16:25:47,985 
- INFO WORKER:0:0 (ShotID 16) Preparing to run shot\n", + "2021-02-25 16:25:47,986 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:47,986 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:47,986 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:48,493 - INFO WORKER:0:0 (ShotID 16) Running state equation for shot\n", + "2021-02-25 16:25:48,493 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:48,494 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.42 s\n", + "2021-02-25 16:25:48,494 - INFO WORKER:0:0 Global performance: [OI=2.90, 79.05 GFlops/s, 1.34 GPts/s]\n", + "2021-02-25 16:25:48,494 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:48,495 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.37 s [OI=2.90, 89.99 GFlops/s, 1.53 GPts/s]\n", + "2021-02-25 16:25:48,495 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:48,495 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.09 GFlops/s]\n", + "2021-02-25 16:25:48,495 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:48,496 - INFO WORKER:0:0 (ShotID 16) Completed state equation run for shot\n", + "2021-02-25 16:25:48,597 - INFO HEAD Shot 16 retrieved\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:25:48,847 - INFO HEAD Appended traces for shot 16 to observed file\n", + "2021-02-25 16:25:48,997 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:48,998 - INFO HEAD Giving shot 17 to worker:0:0\n", + "2021-02-25 16:25:49,159 - INFO WORKER:0:0 (ShotID 17) Preparing to run shot\n", + "2021-02-25 16:25:49,160 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 
0.255-0.735 MHz\n", + "2021-02-25 16:25:49,160 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:49,161 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:49,719 - INFO WORKER:0:0 (ShotID 17) Running state equation for shot\n", + "2021-02-25 16:25:49,726 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:49,728 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.47 s\n", + "2021-02-25 16:25:49,730 - INFO WORKER:0:0 Global performance: [OI=2.90, 70.71 GFlops/s, 1.20 GPts/s]\n", + "2021-02-25 16:25:49,737 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:49,739 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.35 s [OI=2.90, 95.31 GFlops/s, 1.62 GPts/s]\n", + "2021-02-25 16:25:49,740 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.08 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:49,742 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.05 s [OI=3.84, 0.32 GFlops/s]\n", + "2021-02-25 16:25:49,743 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:49,745 - INFO WORKER:0:0 (ShotID 17) Completed state equation run for shot\n", + "2021-02-25 16:25:49,822 - INFO HEAD Shot 17 retrieved\n", + "2021-02-25 16:25:50,063 - INFO HEAD Appended traces for shot 17 to observed file\n", + "2021-02-25 16:25:50,211 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:50,211 - INFO HEAD Giving shot 18 to worker:0:0\n", + "2021-02-25 16:25:50,372 - INFO WORKER:0:0 (ShotID 18) Preparing to run shot\n", + "2021-02-25 16:25:50,373 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:50,373 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:50,373 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:51,150 - 
INFO WORKER:0:0 (ShotID 18) Running state equation for shot\n", + "2021-02-25 16:25:51,151 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:51,151 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.69 s\n", + "2021-02-25 16:25:51,151 - INFO WORKER:0:0 Global performance: [OI=2.90, 47.95 GFlops/s, 0.82 GPts/s]\n", + "2021-02-25 16:25:51,152 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:51,152 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.45 s [OI=2.90, 72.51 GFlops/s, 1.23 GPts/s]\n", + "2021-02-25 16:25:51,153 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.15 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:51,153 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.09 s [OI=3.84, 0.19 GFlops/s]\n", + "2021-02-25 16:25:51,153 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:51,154 - INFO WORKER:0:0 (ShotID 18) Completed state equation run for shot\n", + "2021-02-25 16:25:51,253 - INFO HEAD Shot 18 retrieved\n", + "2021-02-25 16:25:51,512 - INFO HEAD Appended traces for shot 18 to observed file\n", + "2021-02-25 16:25:51,675 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:51,675 - INFO HEAD Giving shot 19 to worker:0:0\n", + "2021-02-25 16:25:51,840 - INFO WORKER:0:0 (ShotID 19) Preparing to run shot\n", + "2021-02-25 16:25:51,841 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:51,841 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:51,842 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:52,280 - INFO WORKER:0:0 (ShotID 19) Running state equation for shot\n", + "2021-02-25 16:25:52,281 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:52,281 - INFO WORKER:0:0 Operator 
`acoustic_iso_state` ran in 0.35 s\n", + "2021-02-25 16:25:52,281 - INFO WORKER:0:0 Global performance: [OI=2.90, 95.21 GFlops/s, 1.62 GPts/s]\n", + "2021-02-25 16:25:52,282 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:52,282 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.34 GFlops/s, 1.93 GPts/s]\n", + "2021-02-25 16:25:52,283 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:52,283 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.57 GFlops/s]\n", + "2021-02-25 16:25:52,284 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:52,284 - INFO WORKER:0:0 (ShotID 19) Completed state equation run for shot\n", + "2021-02-25 16:25:52,384 - INFO HEAD Shot 19 retrieved\n", + "2021-02-25 16:25:52,645 - INFO HEAD Appended traces for shot 19 to observed file\n", + "2021-02-25 16:25:52,796 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:52,796 - INFO HEAD Giving shot 20 to worker:0:0\n", + "2021-02-25 16:25:52,960 - INFO WORKER:0:0 (ShotID 20) Preparing to run shot\n", + "2021-02-25 16:25:52,961 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:52,961 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:52,961 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:53,417 - INFO WORKER:0:0 (ShotID 20) Running state equation for shot\n", + "2021-02-25 16:25:53,424 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:53,427 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.37 s\n", + "2021-02-25 16:25:53,428 - INFO WORKER:0:0 Global performance: [OI=2.90, 90.39 GFlops/s, 1.54 GPts/s]\n", + "2021-02-25 16:25:53,434 - INFO WORKER:0:0 Local performance:\n", + 
"2021-02-25 16:25:53,437 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.34 s [OI=2.90, 97.56 GFlops/s, 1.66 GPts/s]\n", + "2021-02-25 16:25:53,439 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:53,441 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.32 GFlops/s]\n", + "2021-02-25 16:25:53,443 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:53,444 - INFO WORKER:0:0 (ShotID 20) Completed state equation run for shot\n", + "2021-02-25 16:25:53,521 - INFO HEAD Shot 20 retrieved\n", + "2021-02-25 16:25:53,770 - INFO HEAD Appended traces for shot 20 to observed file\n", + "2021-02-25 16:25:53,916 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:53,917 - INFO HEAD Giving shot 21 to worker:0:0\n", + "2021-02-25 16:25:54,079 - INFO WORKER:0:0 (ShotID 21) Preparing to run shot\n", + "2021-02-25 16:25:54,080 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:54,080 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:54,080 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:54,530 - INFO WORKER:0:0 (ShotID 21) Running state equation for shot\n", + "2021-02-25 16:25:54,538 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:25:54,541 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.36 s\n", + "2021-02-25 16:25:54,543 - INFO WORKER:0:0 Global performance: [OI=2.90, 92.33 GFlops/s, 1.57 GPts/s]\n", + "2021-02-25 16:25:54,545 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:54,546 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.33 s [OI=2.90, 98.89 GFlops/s, 1.68 GPts/s]\n", + "2021-02-25 16:25:54,551 - 
INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:54,551 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.89 GFlops/s]\n", + "2021-02-25 16:25:54,552 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:54,552 - INFO WORKER:0:0 (ShotID 21) Completed state equation run for shot\n", + "2021-02-25 16:25:54,632 - INFO HEAD Shot 21 retrieved\n", + "2021-02-25 16:25:54,885 - INFO HEAD Appended traces for shot 21 to observed file\n", + "2021-02-25 16:25:55,033 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:55,034 - INFO HEAD Giving shot 22 to worker:0:0\n", + "2021-02-25 16:25:55,194 - INFO WORKER:0:0 (ShotID 22) Preparing to run shot\n", + "2021-02-25 16:25:55,194 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:55,195 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:55,195 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:55,612 - INFO WORKER:0:0 (ShotID 22) Running state equation for shot\n", + "2021-02-25 16:25:55,613 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:55,613 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:25:55,615 - INFO WORKER:0:0 Global performance: [OI=2.90, 100.93 GFlops/s, 1.71 GPts/s]\n", + "2021-02-25 16:25:55,615 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:55,615 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 109.34 GFlops/s, 1.86 GPts/s]\n", + "2021-02-25 16:25:55,616 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:55,616 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.50 GFlops/s]\n", + "2021-02-25 16:25:55,617 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:55,617 - INFO WORKER:0:0 (ShotID 22) Completed state equation run for shot\n", + "2021-02-25 16:25:55,719 - INFO HEAD Shot 22 retrieved\n", + "2021-02-25 16:25:55,964 - INFO HEAD Appended traces for shot 22 to observed file\n", + "2021-02-25 16:25:56,119 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:56,119 - INFO HEAD Giving shot 23 to worker:0:0\n", + "2021-02-25 16:25:56,286 - INFO WORKER:0:0 (ShotID 23) Preparing to run shot\n", + "2021-02-25 16:25:56,287 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:56,287 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:56,287 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:56,730 - INFO WORKER:0:0 (ShotID 23) Running state equation for shot\n", + "2021-02-25 16:25:56,731 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:56,732 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.35 s\n", + "2021-02-25 16:25:56,732 - INFO WORKER:0:0 Global performance: [OI=2.90, 93.71 GFlops/s, 1.59 GPts/s]\n", + "2021-02-25 16:25:56,732 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:56,733 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.32 s [OI=2.90, 104.63 GFlops/s, 1.78 GPts/s]\n", + "2021-02-25 16:25:56,733 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:56,733 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.82 GFlops/s]\n", + "2021-02-25 16:25:56,734 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 
'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:56,734 - INFO WORKER:0:0 (ShotID 23) Completed state equation run for shot\n", + "2021-02-25 16:25:56,833 - INFO HEAD Shot 23 retrieved\n", + "2021-02-25 16:25:57,068 - INFO HEAD Appended traces for shot 23 to observed file\n", + "2021-02-25 16:25:57,218 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:57,219 - INFO HEAD Giving shot 24 to worker:0:0\n", + "2021-02-25 16:25:57,381 - INFO WORKER:0:0 (ShotID 24) Preparing to run shot\n", + "2021-02-25 16:25:57,381 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:57,382 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:57,382 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:57,812 - INFO WORKER:0:0 (ShotID 24) Running state equation for shot\n", + "2021-02-25 16:25:57,813 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:57,813 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:25:57,814 - INFO WORKER:0:0 Global performance: [OI=2.90, 98.21 GFlops/s, 1.67 GPts/s]\n", + "2021-02-25 16:25:57,814 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:57,814 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.32 s [OI=2.90, 102.26 GFlops/s, 1.74 GPts/s]\n", + "2021-02-25 16:25:57,815 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:57,815 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.33 GFlops/s]\n", + "2021-02-25 16:25:57,816 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:57,816 - INFO WORKER:0:0 (ShotID 24) Completed state equation run for shot\n", + "2021-02-25 16:25:57,918 - INFO HEAD Shot 24 retrieved\n", + "2021-02-25 16:25:58,161 - INFO 
HEAD Appended traces for shot 24 to observed file\n", + "2021-02-25 16:25:58,315 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:58,316 - INFO HEAD Giving shot 25 to worker:0:0\n", + "2021-02-25 16:25:58,478 - INFO WORKER:0:0 (ShotID 25) Preparing to run shot\n", + "2021-02-25 16:25:58,478 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:58,479 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:58,479 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:58,903 - INFO WORKER:0:0 (ShotID 25) Running state equation for shot\n", + "2021-02-25 16:25:58,910 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:58,917 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:25:58,921 - INFO WORKER:0:0 Global performance: [OI=2.90, 99.46 GFlops/s, 1.69 GPts/s]\n", + "2021-02-25 16:25:58,923 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:58,924 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 107.79 GFlops/s, 1.83 GPts/s]\n", + "2021-02-25 16:25:58,926 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:58,928 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.09 GFlops/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:25:58,930 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:58,936 - INFO WORKER:0:0 (ShotID 25) Completed state equation run for shot\n", + "2021-02-25 16:25:59,005 - INFO HEAD Shot 25 retrieved\n", + "2021-02-25 16:25:59,255 - INFO HEAD Appended traces for shot 25 to observed file\n", + "2021-02-25 16:25:59,403 - INFO HEAD \n", + "\n", + "2021-02-25 16:25:59,404 - INFO HEAD Giving 
shot 26 to worker:0:0\n", + "2021-02-25 16:25:59,567 - INFO WORKER:0:0 (ShotID 26) Preparing to run shot\n", + "2021-02-25 16:25:59,568 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:25:59,568 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:25:59,568 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:25:59,939 - INFO WORKER:0:0 (ShotID 26) Running state equation for shot\n", + "2021-02-25 16:25:59,940 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:25:59,940 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.28 s\n", + "2021-02-25 16:25:59,941 - INFO WORKER:0:0 Global performance: [OI=2.90, 119.25 GFlops/s, 2.03 GPts/s]\n", + "2021-02-25 16:25:59,941 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:25:59,941 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.25 s [OI=2.90, 134.02 GFlops/s, 2.28 GPts/s]\n", + "2021-02-25 16:25:59,942 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:25:59,942 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.63 GFlops/s]\n", + "2021-02-25 16:25:59,943 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:25:59,943 - INFO WORKER:0:0 (ShotID 26) Completed state equation run for shot\n", + "2021-02-25 16:26:00,042 - INFO HEAD Shot 26 retrieved\n", + "2021-02-25 16:26:00,272 - INFO HEAD Appended traces for shot 26 to observed file\n", + "2021-02-25 16:26:00,423 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:00,423 - INFO HEAD Giving shot 27 to worker:0:0\n", + "2021-02-25 16:26:00,585 - INFO WORKER:0:0 (ShotID 27) Preparing to run shot\n", + "2021-02-25 16:26:00,586 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + 
"2021-02-25 16:26:00,586 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:00,586 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:01,004 - INFO WORKER:0:0 (ShotID 27) Running state equation for shot\n", + "2021-02-25 16:26:01,004 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:01,004 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:26:01,005 - INFO WORKER:0:0 Global performance: [OI=2.90, 106.01 GFlops/s, 1.80 GPts/s]\n", + "2021-02-25 16:26:01,005 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:01,006 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 121.17 GFlops/s, 2.06 GPts/s]\n", + "2021-02-25 16:26:01,006 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:01,006 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.28 GFlops/s]\n", + "2021-02-25 16:26:01,007 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:01,007 - INFO WORKER:0:0 (ShotID 27) Completed state equation run for shot\n", + "2021-02-25 16:26:01,106 - INFO HEAD Shot 27 retrieved\n", + "2021-02-25 16:26:01,339 - INFO HEAD Appended traces for shot 27 to observed file\n", + "2021-02-25 16:26:01,486 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:01,486 - INFO HEAD Giving shot 28 to worker:0:0\n", + "2021-02-25 16:26:01,654 - INFO WORKER:0:0 (ShotID 28) Preparing to run shot\n", + "2021-02-25 16:26:01,655 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:01,655 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:01,655 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:02,091 - INFO WORKER:0:0 
(ShotID 28) Running state equation for shot\n", + "2021-02-25 16:26:02,092 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:02,092 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:26:02,093 - INFO WORKER:0:0 Global performance: [OI=2.90, 96.04 GFlops/s, 1.63 GPts/s]\n", + "2021-02-25 16:26:02,093 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:02,093 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.16 GFlops/s, 1.92 GPts/s]\n", + "2021-02-25 16:26:02,094 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:02,094 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.85 GFlops/s]\n", + "2021-02-25 16:26:02,094 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:02,095 - INFO WORKER:0:0 (ShotID 28) Completed state equation run for shot\n", + "2021-02-25 16:26:02,195 - INFO HEAD Shot 28 retrieved\n", + "2021-02-25 16:26:02,449 - INFO HEAD Appended traces for shot 28 to observed file\n", + "2021-02-25 16:26:02,597 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:02,597 - INFO HEAD Giving shot 29 to worker:0:0\n", + "2021-02-25 16:26:02,760 - INFO WORKER:0:0 (ShotID 29) Preparing to run shot\n", + "2021-02-25 16:26:02,761 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:02,761 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:02,762 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:03,181 - INFO WORKER:0:0 (ShotID 29) Running state equation for shot\n", + "2021-02-25 16:26:03,188 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:03,191 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 
s\n", + "2021-02-25 16:26:03,193 - INFO WORKER:0:0 Global performance: [OI=2.90, 100.94 GFlops/s, 1.72 GPts/s]\n", + "2021-02-25 16:26:03,195 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:03,201 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 114.68 GFlops/s, 1.95 GPts/s]\n", + "2021-02-25 16:26:03,204 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:03,206 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.63 GFlops/s]\n", + "2021-02-25 16:26:03,207 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:03,209 - INFO WORKER:0:0 (ShotID 29) Completed state equation run for shot\n", + "2021-02-25 16:26:03,286 - INFO HEAD Shot 29 retrieved\n", + "2021-02-25 16:26:03,541 - INFO HEAD Appended traces for shot 29 to observed file\n", + "2021-02-25 16:26:03,692 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:03,692 - INFO HEAD Giving shot 30 to worker:0:0\n", + "2021-02-25 16:26:03,855 - INFO WORKER:0:0 (ShotID 30) Preparing to run shot\n", + "2021-02-25 16:26:03,855 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:03,856 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:03,856 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:04,284 - INFO WORKER:0:0 (ShotID 30) Running state equation for shot\n", + "2021-02-25 16:26:04,284 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:04,284 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:26:04,285 - INFO WORKER:0:0 Global performance: [OI=2.90, 98.11 GFlops/s, 1.67 GPts/s]\n", + "2021-02-25 16:26:04,285 - INFO 
WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:04,286 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 119.48 GFlops/s, 2.03 GPts/s]\n", + "2021-02-25 16:26:04,286 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.05 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:04,287 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.82 GFlops/s]\n", + "2021-02-25 16:26:04,287 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:04,288 - INFO WORKER:0:0 (ShotID 30) Completed state equation run for shot\n", + "2021-02-25 16:26:04,387 - INFO HEAD Shot 30 retrieved\n", + "2021-02-25 16:26:04,634 - INFO HEAD Appended traces for shot 30 to observed file\n", + "2021-02-25 16:26:04,780 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:04,781 - INFO HEAD Giving shot 31 to worker:0:0\n", + "2021-02-25 16:26:04,944 - INFO WORKER:0:0 (ShotID 31) Preparing to run shot\n", + "2021-02-25 16:26:04,944 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:04,945 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:04,945 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:05,337 - INFO WORKER:0:0 (ShotID 31) Running state equation for shot\n", + "2021-02-25 16:26:05,344 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:05,346 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:26:05,351 - INFO WORKER:0:0 Global performance: [OI=2.90, 110.26 GFlops/s, 1.87 GPts/s]\n", + "2021-02-25 16:26:05,353 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:05,355 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 124.28 GFlops/s, 2.11 GPts/s]\n", + "2021-02-25 16:26:05,356 - INFO WORKER:0:0 * 
section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:05,358 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.37 GFlops/s]\n", + "2021-02-25 16:26:05,360 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:05,361 - INFO WORKER:0:0 (ShotID 31) Completed state equation run for shot\n", + "2021-02-25 16:26:05,441 - INFO HEAD Shot 31 retrieved\n", + "2021-02-25 16:26:05,711 - INFO HEAD Appended traces for shot 31 to observed file\n", + "2021-02-25 16:26:05,858 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:05,858 - INFO HEAD Giving shot 32 to worker:0:0\n", + "2021-02-25 16:26:06,020 - INFO WORKER:0:0 (ShotID 32) Preparing to run shot\n", + "2021-02-25 16:26:06,020 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:06,021 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:06,021 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:06,431 - INFO WORKER:0:0 (ShotID 32) Running state equation for shot\n", + "2021-02-25 16:26:06,438 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:06,441 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:26:06,443 - INFO WORKER:0:0 Global performance: [OI=2.90, 103.11 GFlops/s, 1.75 GPts/s]\n", + "2021-02-25 16:26:06,445 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:06,447 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 119.55 GFlops/s, 2.03 GPts/s]\n", + "2021-02-25 16:26:06,448 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:06,449 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.92 GFlops/s]\n", + "2021-02-25 16:26:06,454 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:06,455 - INFO WORKER:0:0 (ShotID 32) Completed state equation run for shot\n", + "2021-02-25 16:26:06,535 - INFO HEAD Shot 32 retrieved\n", + "2021-02-25 16:26:06,777 - INFO HEAD Appended traces for shot 32 to observed file\n", + "2021-02-25 16:26:06,924 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:06,924 - INFO HEAD Giving shot 33 to worker:0:0\n", + "2021-02-25 16:26:07,088 - INFO WORKER:0:0 (ShotID 33) Preparing to run shot\n", + "2021-02-25 16:26:07,089 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:07,089 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:07,090 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:07,506 - INFO WORKER:0:0 (ShotID 33) Running state equation for shot\n", + "2021-02-25 16:26:07,508 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:07,508 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:26:07,509 - INFO WORKER:0:0 Global performance: [OI=2.90, 101.08 GFlops/s, 1.72 GPts/s]\n", + "2021-02-25 16:26:07,509 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:07,509 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 114.65 GFlops/s, 1.95 GPts/s]\n", + "2021-02-25 16:26:07,510 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:07,510 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.06 GFlops/s]\n", + "2021-02-25 16:26:07,511 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 
'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:07,511 - INFO WORKER:0:0 (ShotID 33) Completed state equation run for shot\n", + "2021-02-25 16:26:07,623 - INFO HEAD Shot 33 retrieved\n", + "2021-02-25 16:26:07,852 - INFO HEAD Appended traces for shot 33 to observed file\n", + "2021-02-25 16:26:08,004 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:08,004 - INFO HEAD Giving shot 34 to worker:0:0\n", + "2021-02-25 16:26:08,166 - INFO WORKER:0:0 (ShotID 34) Preparing to run shot\n", + "2021-02-25 16:26:08,167 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:08,167 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:08,168 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:08,584 - INFO WORKER:0:0 (ShotID 34) Running state equation for shot\n", + "2021-02-25 16:26:08,584 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:08,585 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:26:08,585 - INFO WORKER:0:0 Global performance: [OI=2.90, 101.46 GFlops/s, 1.72 GPts/s]\n", + "2021-02-25 16:26:08,586 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:08,586 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.51 GFlops/s, 1.93 GPts/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:08,586 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:08,587 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.45 GFlops/s]\n", + "2021-02-25 16:26:08,587 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:08,587 - INFO WORKER:0:0 (ShotID 34) Completed state equation run for shot\n", + "2021-02-25 
16:26:08,687 - INFO HEAD Shot 34 retrieved\n", + "2021-02-25 16:26:08,934 - INFO HEAD Appended traces for shot 34 to observed file\n", + "2021-02-25 16:26:09,083 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:09,084 - INFO HEAD Giving shot 35 to worker:0:0\n", + "2021-02-25 16:26:09,253 - INFO WORKER:0:0 (ShotID 35) Preparing to run shot\n", + "2021-02-25 16:26:09,253 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:09,254 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:09,254 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:09,690 - INFO WORKER:0:0 (ShotID 35) Running state equation for shot\n", + "2021-02-25 16:26:09,696 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:09,702 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.35 s\n", + "2021-02-25 16:26:09,705 - INFO WORKER:0:0 Global performance: [OI=2.90, 95.56 GFlops/s, 1.62 GPts/s]\n", + "2021-02-25 16:26:09,706 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:09,708 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.32 s [OI=2.90, 103.73 GFlops/s, 1.76 GPts/s]\n", + "2021-02-25 16:26:09,710 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:09,711 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.99 GFlops/s]\n", + "2021-02-25 16:26:09,717 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:09,718 - INFO WORKER:0:0 (ShotID 35) Completed state equation run for shot\n", + "2021-02-25 16:26:09,792 - INFO HEAD Shot 35 retrieved\n", + "2021-02-25 16:26:10,042 - INFO HEAD Appended traces for shot 35 to observed file\n", + "2021-02-25 16:26:10,191 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:10,192 - INFO 
HEAD Giving shot 36 to worker:0:0\n", + "2021-02-25 16:26:10,355 - INFO WORKER:0:0 (ShotID 36) Preparing to run shot\n", + "2021-02-25 16:26:10,355 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:10,356 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:10,356 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:10,755 - INFO WORKER:0:0 (ShotID 36) Running state equation for shot\n", + "2021-02-25 16:26:10,761 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:10,768 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:26:10,772 - INFO WORKER:0:0 Global performance: [OI=2.90, 109.20 GFlops/s, 1.85 GPts/s]\n", + "2021-02-25 16:26:10,773 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:10,774 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 121.77 GFlops/s, 2.07 GPts/s]\n", + "2021-02-25 16:26:10,776 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:10,777 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.23 GFlops/s]\n", + "2021-02-25 16:26:10,778 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:10,779 - INFO WORKER:0:0 (ShotID 36) Completed state equation run for shot\n", + "2021-02-25 16:26:10,857 - INFO HEAD Shot 36 retrieved\n", + "2021-02-25 16:26:11,114 - INFO HEAD Appended traces for shot 36 to observed file\n", + "2021-02-25 16:26:11,260 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:11,261 - INFO HEAD Giving shot 37 to worker:0:0\n", + "2021-02-25 16:26:11,425 - INFO WORKER:0:0 (ShotID 37) Preparing to run shot\n", + "2021-02-25 16:26:11,425 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 
0.255-0.735 MHz\n", + "2021-02-25 16:26:11,426 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:11,426 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:11,803 - INFO WORKER:0:0 (ShotID 37) Running state equation for shot\n", + "2021-02-25 16:26:11,804 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:11,804 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.28 s\n", + "2021-02-25 16:26:11,804 - INFO WORKER:0:0 Global performance: [OI=2.90, 116.92 GFlops/s, 1.99 GPts/s]\n", + "2021-02-25 16:26:11,805 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:11,805 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 122.78 GFlops/s, 2.09 GPts/s]\n", + "2021-02-25 16:26:11,806 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:11,806 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.39 GFlops/s]\n", + "2021-02-25 16:26:11,806 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:11,807 - INFO WORKER:0:0 (ShotID 37) Completed state equation run for shot\n", + "2021-02-25 16:26:11,905 - INFO HEAD Shot 37 retrieved\n", + "2021-02-25 16:26:12,162 - INFO HEAD Appended traces for shot 37 to observed file\n", + "2021-02-25 16:26:12,310 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:12,311 - INFO HEAD Giving shot 38 to worker:0:0\n", + "2021-02-25 16:26:12,481 - INFO WORKER:0:0 (ShotID 38) Preparing to run shot\n", + "2021-02-25 16:26:12,482 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:12,482 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:12,483 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:12,893 - 
INFO WORKER:0:0 (ShotID 38) Running state equation for shot\n", + "2021-02-25 16:26:12,894 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:12,896 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:26:12,897 - INFO WORKER:0:0 Global performance: [OI=2.90, 103.48 GFlops/s, 1.76 GPts/s]\n", + "2021-02-25 16:26:12,898 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:12,899 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 117.98 GFlops/s, 2.00 GPts/s]\n", + "2021-02-25 16:26:12,901 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:12,903 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.93 GFlops/s]\n", + "2021-02-25 16:26:12,905 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:12,906 - INFO WORKER:0:0 (ShotID 38) Completed state equation run for shot\n", + "2021-02-25 16:26:12,996 - INFO HEAD Shot 38 retrieved\n", + "2021-02-25 16:26:13,235 - INFO HEAD Appended traces for shot 38 to observed file\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:13,398 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:13,398 - INFO HEAD Giving shot 39 to worker:0:0\n", + "2021-02-25 16:26:13,562 - INFO WORKER:0:0 (ShotID 39) Preparing to run shot\n", + "2021-02-25 16:26:13,562 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:13,563 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:13,563 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:13,966 - INFO WORKER:0:0 (ShotID 39) Running state equation for shot\n", + "2021-02-25 16:26:13,968 - WARNING WORKER:0:0 AutoTuner: could not perform any 
runs\n", + "2021-02-25 16:26:13,970 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:26:13,971 - INFO WORKER:0:0 Global performance: [OI=2.90, 106.49 GFlops/s, 1.81 GPts/s]\n", + "2021-02-25 16:26:13,973 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:13,974 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 114.54 GFlops/s, 1.95 GPts/s]\n", + "2021-02-25 16:26:13,976 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:13,977 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.38 GFlops/s]\n", + "2021-02-25 16:26:13,979 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:13,981 - INFO WORKER:0:0 (ShotID 39) Completed state equation run for shot\n", + "2021-02-25 16:26:14,068 - INFO HEAD Shot 39 retrieved\n", + "2021-02-25 16:26:14,324 - INFO HEAD Appended traces for shot 39 to observed file\n", + "2021-02-25 16:26:14,472 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:14,473 - INFO HEAD Giving shot 40 to worker:0:0\n", + "2021-02-25 16:26:14,641 - INFO WORKER:0:0 (ShotID 40) Preparing to run shot\n", + "2021-02-25 16:26:14,642 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:14,642 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:14,643 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:15,023 - INFO WORKER:0:0 (ShotID 40) Running state equation for shot\n", + "2021-02-25 16:26:15,028 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:15,035 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.29 s\n", + "2021-02-25 16:26:15,038 - INFO WORKER:0:0 Global performance: [OI=2.90, 114.94 GFlops/s, 1.95 GPts/s]\n", + 
"2021-02-25 16:26:15,039 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:15,040 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 124.17 GFlops/s, 2.11 GPts/s]\n", + "2021-02-25 16:26:15,042 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:15,043 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.58 GFlops/s]\n", + "2021-02-25 16:26:15,044 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:15,045 - INFO WORKER:0:0 (ShotID 40) Completed state equation run for shot\n", + "2021-02-25 16:26:15,125 - INFO HEAD Shot 40 retrieved\n", + "2021-02-25 16:26:15,389 - INFO HEAD Appended traces for shot 40 to observed file\n", + "2021-02-25 16:26:15,538 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:15,538 - INFO HEAD Giving shot 41 to worker:0:0\n", + "2021-02-25 16:26:15,698 - INFO WORKER:0:0 (ShotID 41) Preparing to run shot\n", + "2021-02-25 16:26:15,698 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:15,699 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:15,699 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:16,101 - INFO WORKER:0:0 (ShotID 41) Running state equation for shot\n", + "2021-02-25 16:26:16,102 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:16,102 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:26:16,102 - INFO WORKER:0:0 Global performance: [OI=2.90, 106.77 GFlops/s, 1.81 GPts/s]\n", + "2021-02-25 16:26:16,103 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:16,103 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 119.39 GFlops/s, 2.03 GPts/s]\n", + "2021-02-25 16:26:16,103 - 
INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:16,104 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.20 GFlops/s]\n", + "2021-02-25 16:26:16,104 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:16,104 - INFO WORKER:0:0 (ShotID 41) Completed state equation run for shot\n", + "2021-02-25 16:26:16,220 - INFO HEAD Shot 41 retrieved\n", + "2021-02-25 16:26:16,465 - INFO HEAD Appended traces for shot 41 to observed file\n", + "2021-02-25 16:26:16,615 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:16,616 - INFO HEAD Giving shot 42 to worker:0:0\n", + "2021-02-25 16:26:16,777 - INFO WORKER:0:0 (ShotID 42) Preparing to run shot\n", + "2021-02-25 16:26:16,778 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:16,778 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:16,779 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:17,184 - INFO WORKER:0:0 (ShotID 42) Running state equation for shot\n", + "2021-02-25 16:26:17,185 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:17,185 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:26:17,186 - INFO WORKER:0:0 Global performance: [OI=2.90, 104.45 GFlops/s, 1.77 GPts/s]\n", + "2021-02-25 16:26:17,186 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:17,186 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.11 GFlops/s, 1.92 GPts/s]\n", + "2021-02-25 16:26:17,187 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:17,187 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.62 GFlops/s]\n", + "2021-02-25 16:26:17,188 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:17,188 - INFO WORKER:0:0 (ShotID 42) Completed state equation run for shot\n", + "2021-02-25 16:26:17,286 - INFO HEAD Shot 42 retrieved\n", + "2021-02-25 16:26:17,523 - INFO HEAD Appended traces for shot 42 to observed file\n", + "2021-02-25 16:26:17,673 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:17,673 - INFO HEAD Giving shot 43 to worker:0:0\n", + "2021-02-25 16:26:17,834 - INFO WORKER:0:0 (ShotID 43) Preparing to run shot\n", + "2021-02-25 16:26:17,835 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:17,835 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:17,835 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:18,235 - INFO WORKER:0:0 (ShotID 43) Running state equation for shot\n", + "2021-02-25 16:26:18,245 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:18,251 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:18,255 - INFO WORKER:0:0 Global performance: [OI=2.90, 106.52 GFlops/s, 1.81 GPts/s]\n", + "2021-02-25 16:26:18,257 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:18,259 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.25 GFlops/s, 1.92 GPts/s]\n", + "2021-02-25 16:26:18,261 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:18,262 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.85 GFlops/s]\n", + "2021-02-25 16:26:18,269 - INFO 
WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:18,271 - INFO WORKER:0:0 (ShotID 43) Completed state equation run for shot\n", + "2021-02-25 16:26:18,338 - INFO HEAD Shot 43 retrieved\n", + "2021-02-25 16:26:18,594 - INFO HEAD Appended traces for shot 43 to observed file\n", + "2021-02-25 16:26:18,741 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:18,741 - INFO HEAD Giving shot 44 to worker:0:0\n", + "2021-02-25 16:26:18,908 - INFO WORKER:0:0 (ShotID 44) Preparing to run shot\n", + "2021-02-25 16:26:18,908 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:18,909 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:18,909 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:19,351 - INFO WORKER:0:0 (ShotID 44) Running state equation for shot\n", + "2021-02-25 16:26:19,357 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:19,360 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.35 s\n", + "2021-02-25 16:26:19,361 - INFO WORKER:0:0 Global performance: [OI=2.90, 94.49 GFlops/s, 1.61 GPts/s]\n", + "2021-02-25 16:26:19,367 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:19,369 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.33 s [OI=2.90, 99.09 GFlops/s, 1.68 GPts/s]\n", + "2021-02-25 16:26:19,371 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:19,372 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.00 GFlops/s]\n", + "2021-02-25 16:26:19,374 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:19,376 - INFO WORKER:0:0 (ShotID 44) Completed state equation run for shot\n", + "2021-02-25 16:26:19,453 - 
INFO HEAD Shot 44 retrieved\n", + "2021-02-25 16:26:19,697 - INFO HEAD Appended traces for shot 44 to observed file\n", + "2021-02-25 16:26:19,844 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:19,844 - INFO HEAD Giving shot 45 to worker:0:0\n", + "2021-02-25 16:26:20,006 - INFO WORKER:0:0 (ShotID 45) Preparing to run shot\n", + "2021-02-25 16:26:20,006 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:20,007 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:20,007 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:20,369 - INFO WORKER:0:0 (ShotID 45) Running state equation for shot\n", + "2021-02-25 16:26:20,370 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:20,370 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.27 s\n", + "2021-02-25 16:26:20,371 - INFO WORKER:0:0 Global performance: [OI=2.90, 122.41 GFlops/s, 2.08 GPts/s]\n", + "2021-02-25 16:26:20,371 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:20,372 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.24 s [OI=2.90, 136.76 GFlops/s, 2.32 GPts/s]\n", + "2021-02-25 16:26:20,372 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:20,373 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.73 GFlops/s]\n", + "2021-02-25 16:26:20,373 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:20,374 - INFO WORKER:0:0 (ShotID 45) Completed state equation run for shot\n", + "2021-02-25 16:26:20,488 - INFO HEAD Shot 45 retrieved\n", + "2021-02-25 16:26:20,735 - INFO HEAD Appended traces for shot 45 to observed file\n", + "2021-02-25 16:26:20,885 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:20,886 - INFO HEAD Giving shot 
46 to worker:0:0\n", + "2021-02-25 16:26:21,048 - INFO WORKER:0:0 (ShotID 46) Preparing to run shot\n", + "2021-02-25 16:26:21,048 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:21,049 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:21,049 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:21,448 - INFO WORKER:0:0 (ShotID 46) Running state equation for shot\n", + "2021-02-25 16:26:21,448 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:21,449 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:26:21,449 - INFO WORKER:0:0 Global performance: [OI=2.90, 107.85 GFlops/s, 1.83 GPts/s]\n", + "2021-02-25 16:26:21,450 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:21,450 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 115.22 GFlops/s, 1.96 GPts/s]\n", + "2021-02-25 16:26:21,450 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:21,450 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.00 GFlops/s]\n", + "2021-02-25 16:26:21,451 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:21,451 - INFO WORKER:0:0 (ShotID 46) Completed state equation run for shot\n", + "2021-02-25 16:26:21,550 - INFO HEAD Shot 46 retrieved\n", + "2021-02-25 16:26:21,809 - INFO HEAD Appended traces for shot 46 to observed file\n", + "2021-02-25 16:26:21,963 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:21,964 - INFO HEAD Giving shot 47 to worker:0:0\n", + "2021-02-25 16:26:22,128 - INFO WORKER:0:0 (ShotID 47) Preparing to run shot\n", + "2021-02-25 16:26:22,129 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + 
"2021-02-25 16:26:22,129 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:22,130 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:22,567 - INFO WORKER:0:0 (ShotID 47) Running state equation for shot\n", + "2021-02-25 16:26:22,574 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:22,577 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.35 s\n", + "2021-02-25 16:26:22,583 - INFO WORKER:0:0 Global performance: [OI=2.90, 95.39 GFlops/s, 1.62 GPts/s]\n", + "2021-02-25 16:26:22,585 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:22,587 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 108.10 GFlops/s, 1.84 GPts/s]\n", + "2021-02-25 16:26:22,589 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:22,591 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.61 GFlops/s]\n", + "2021-02-25 16:26:22,593 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:22,594 - INFO WORKER:0:0 (ShotID 47) Completed state equation run for shot\n", + "2021-02-25 16:26:22,669 - INFO HEAD Shot 47 retrieved\n", + "2021-02-25 16:26:22,910 - INFO HEAD Appended traces for shot 47 to observed file\n", + "2021-02-25 16:26:23,058 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:23,059 - INFO HEAD Giving shot 48 to worker:0:0\n", + "2021-02-25 16:26:23,226 - INFO WORKER:0:0 (ShotID 48) Preparing to run shot\n", + "2021-02-25 16:26:23,227 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:23,227 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:23,228 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 
limit (0.136 μs)\n", + "2021-02-25 16:26:23,660 - INFO WORKER:0:0 (ShotID 48) Running state equation for shot\n", + "2021-02-25 16:26:23,668 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:23,670 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:26:23,672 - INFO WORKER:0:0 Global performance: [OI=2.90, 96.95 GFlops/s, 1.65 GPts/s]\n", + "2021-02-25 16:26:23,673 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:23,675 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 112.17 GFlops/s, 1.91 GPts/s]\n", + "2021-02-25 16:26:23,677 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:23,678 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.68 GFlops/s]\n", + "2021-02-25 16:26:23,685 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:23,687 - INFO WORKER:0:0 (ShotID 48) Completed state equation run for shot\n", + "2021-02-25 16:26:23,762 - INFO HEAD Shot 48 retrieved\n", + "2021-02-25 16:26:24,016 - INFO HEAD Appended traces for shot 48 to observed file\n", + "2021-02-25 16:26:24,163 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:24,164 - INFO HEAD Giving shot 49 to worker:0:0\n", + "2021-02-25 16:26:24,324 - INFO WORKER:0:0 (ShotID 49) Preparing to run shot\n", + "2021-02-25 16:26:24,325 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:24,325 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:24,325 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:24,903 - INFO WORKER:0:0 (ShotID 49) Running state equation for shot\n", + "2021-02-25 16:26:24,905 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 
16:26:24,905 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.49 s\n", + "2021-02-25 16:26:24,906 - INFO WORKER:0:0 Global performance: [OI=2.90, 67.63 GFlops/s, 1.15 GPts/s]\n", + "2021-02-25 16:26:24,906 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:24,906 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.37 s [OI=2.90, 88.71 GFlops/s, 1.51 GPts/s]\n", + "2021-02-25 16:26:24,907 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.08 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:24,907 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.05 s [OI=3.84, 0.35 GFlops/s]\n", + "2021-02-25 16:26:24,907 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:24,908 - INFO WORKER:0:0 (ShotID 49) Completed state equation run for shot\n", + "2021-02-25 16:26:25,006 - INFO HEAD Shot 49 retrieved\n", + "2021-02-25 16:26:25,241 - INFO HEAD Appended traces for shot 49 to observed file\n", + "2021-02-25 16:26:25,401 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:25,402 - INFO HEAD Giving shot 50 to worker:0:0\n", + "2021-02-25 16:26:25,561 - INFO WORKER:0:0 (ShotID 50) Preparing to run shot\n", + "2021-02-25 16:26:25,562 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:25,562 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:25,563 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:25,975 - INFO WORKER:0:0 (ShotID 50) Running state equation for shot\n", + "2021-02-25 16:26:25,976 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:25,976 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:26:25,977 - INFO WORKER:0:0 Global performance: [OI=2.90, 102.75 GFlops/s, 1.75 GPts/s]\n", + "2021-02-25 16:26:25,977 - INFO 
WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:25,977 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.77 GFlops/s, 1.93 GPts/s]\n", + "2021-02-25 16:26:25,978 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:25,978 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.44 GFlops/s]\n", + "2021-02-25 16:26:25,979 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:25,979 - INFO WORKER:0:0 (ShotID 50) Completed state equation run for shot\n", + "2021-02-25 16:26:26,077 - INFO HEAD Shot 50 retrieved\n", + "2021-02-25 16:26:26,312 - INFO HEAD Appended traces for shot 50 to observed file\n", + "2021-02-25 16:26:26,464 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:26,465 - INFO HEAD Giving shot 51 to worker:0:0\n", + "2021-02-25 16:26:26,629 - INFO WORKER:0:0 (ShotID 51) Preparing to run shot\n", + "2021-02-25 16:26:26,630 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:26,630 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:26,631 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:27,027 - INFO WORKER:0:0 (ShotID 51) Running state equation for shot\n", + "2021-02-25 16:26:27,034 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:27,037 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:26:27,038 - INFO WORKER:0:0 Global performance: [OI=2.90, 110.14 GFlops/s, 1.87 GPts/s]\n", + "2021-02-25 16:26:27,040 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:27,042 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 115.84 GFlops/s, 1.97 GPts/s]\n", + "2021-02-25 16:26:27,044 - INFO WORKER:0:0 * 
section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:27,050 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.39 GFlops/s]\n", + "2021-02-25 16:26:27,052 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:27,054 - INFO WORKER:0:0 (ShotID 51) Completed state equation run for shot\n", + "2021-02-25 16:26:27,129 - INFO HEAD Shot 51 retrieved\n", + "2021-02-25 16:26:27,369 - INFO HEAD Appended traces for shot 51 to observed file\n", + "2021-02-25 16:26:27,517 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:27,517 - INFO HEAD Giving shot 52 to worker:0:0\n", + "2021-02-25 16:26:27,682 - INFO WORKER:0:0 (ShotID 52) Preparing to run shot\n", + "2021-02-25 16:26:27,683 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:27,683 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:27,684 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:28,057 - INFO WORKER:0:0 (ShotID 52) Running state equation for shot\n", + "2021-02-25 16:26:28,057 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:28,058 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.28 s\n", + "2021-02-25 16:26:28,058 - INFO WORKER:0:0 Global performance: [OI=2.90, 119.74 GFlops/s, 2.03 GPts/s]\n", + "2021-02-25 16:26:28,058 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:28,059 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 125.16 GFlops/s, 2.13 GPts/s]\n", + "2021-02-25 16:26:28,059 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:28,060 - INFO 
WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.50 GFlops/s]\n", + "2021-02-25 16:26:28,060 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:28,060 - INFO WORKER:0:0 (ShotID 52) Completed state equation run for shot\n", + "2021-02-25 16:26:28,159 - INFO HEAD Shot 52 retrieved\n", + "2021-02-25 16:26:28,397 - INFO HEAD Appended traces for shot 52 to observed file\n", + "2021-02-25 16:26:28,546 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:28,546 - INFO HEAD Giving shot 53 to worker:0:0\n", + "2021-02-25 16:26:28,711 - INFO WORKER:0:0 (ShotID 53) Preparing to run shot\n", + "2021-02-25 16:26:28,711 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:28,712 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:28,712 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:29,074 - INFO WORKER:0:0 (ShotID 53) Running state equation for shot\n", + "2021-02-25 16:26:29,075 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:29,075 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.27 s\n", + "2021-02-25 16:26:29,076 - INFO WORKER:0:0 Global performance: [OI=2.90, 122.20 GFlops/s, 2.08 GPts/s]\n", + "2021-02-25 16:26:29,076 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:29,077 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.24 s [OI=2.90, 136.62 GFlops/s, 2.32 GPts/s]\n", + "2021-02-25 16:26:29,077 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:29,077 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.16 GFlops/s]\n", + "2021-02-25 16:26:29,078 - INFO WORKER:0:0 Performance[mode=advanced] arguments: 
{'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:29,078 - INFO WORKER:0:0 (ShotID 53) Completed state equation run for shot\n", + "2021-02-25 16:26:29,176 - INFO HEAD Shot 53 retrieved\n", + "2021-02-25 16:26:29,444 - INFO HEAD Appended traces for shot 53 to observed file\n", + "2021-02-25 16:26:29,593 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:29,593 - INFO HEAD Giving shot 54 to worker:0:0\n", + "2021-02-25 16:26:29,764 - INFO WORKER:0:0 (ShotID 54) Preparing to run shot\n", + "2021-02-25 16:26:29,764 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:29,765 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:29,765 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:30,187 - INFO WORKER:0:0 (ShotID 54) Running state equation for shot\n", + "2021-02-25 16:26:30,193 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:30,199 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:26:30,202 - INFO WORKER:0:0 Global performance: [OI=2.90, 100.84 GFlops/s, 1.71 GPts/s]\n", + "2021-02-25 16:26:30,204 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:30,205 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 108.51 GFlops/s, 1.84 GPts/s]\n", + "2021-02-25 16:26:30,207 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:30,209 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.51 GFlops/s]\n", + "2021-02-25 16:26:30,211 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:30,216 - INFO WORKER:0:0 (ShotID 54) Completed state equation run for shot\n", + "2021-02-25 16:26:30,290 - INFO HEAD Shot 54 retrieved\n", + "2021-02-25 
16:26:30,530 - INFO HEAD Appended traces for shot 54 to observed file\n", + "2021-02-25 16:26:30,680 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:30,681 - INFO HEAD Giving shot 55 to worker:0:0\n", + "2021-02-25 16:26:30,843 - INFO WORKER:0:0 (ShotID 55) Preparing to run shot\n", + "2021-02-25 16:26:30,844 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:30,844 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:30,844 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:31,259 - INFO WORKER:0:0 (ShotID 55) Running state equation for shot\n", + "2021-02-25 16:26:31,260 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:31,261 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:26:31,261 - INFO WORKER:0:0 Global performance: [OI=2.90, 102.00 GFlops/s, 1.73 GPts/s]\n", + "2021-02-25 16:26:31,261 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:31,262 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 110.99 GFlops/s, 1.89 GPts/s]\n", + "2021-02-25 16:26:31,262 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:31,263 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.41 GFlops/s]\n", + "2021-02-25 16:26:31,263 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:31,263 - INFO WORKER:0:0 (ShotID 55) Completed state equation run for shot\n", + "2021-02-25 16:26:31,362 - INFO HEAD Shot 55 retrieved\n", + "2021-02-25 16:26:31,602 - INFO HEAD Appended traces for shot 55 to observed file\n", + "2021-02-25 16:26:31,752 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:31,752 - INFO HEAD Giving shot 56 to worker:0:0\n", + "2021-02-25 
16:26:31,915 - INFO WORKER:0:0 (ShotID 56) Preparing to run shot\n", + "2021-02-25 16:26:31,915 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:31,915 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:31,916 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:32,341 - INFO WORKER:0:0 (ShotID 56) Running state equation for shot\n", + "2021-02-25 16:26:32,349 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:32,352 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:26:32,354 - INFO WORKER:0:0 Global performance: [OI=2.90, 98.58 GFlops/s, 1.68 GPts/s]\n", + "2021-02-25 16:26:32,355 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:32,357 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.28 GFlops/s, 1.92 GPts/s]\n", + "2021-02-25 16:26:32,359 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:32,361 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.08 GFlops/s]\n", + "2021-02-25 16:26:32,367 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:32,369 - INFO WORKER:0:0 (ShotID 56) Completed state equation run for shot\n", + "2021-02-25 16:26:32,444 - INFO HEAD Shot 56 retrieved\n", + "2021-02-25 16:26:32,677 - INFO HEAD Appended traces for shot 56 to observed file\n", + "2021-02-25 16:26:32,826 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:32,827 - INFO HEAD Giving shot 57 to worker:0:0\n", + "2021-02-25 16:26:32,993 - INFO WORKER:0:0 (ShotID 57) Preparing to run shot\n", + "2021-02-25 16:26:32,993 - INFO WORKER:0:0 Estimated bandwidth for the propagated 
wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:32,994 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:32,994 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:33,387 - INFO WORKER:0:0 (ShotID 57) Running state equation for shot\n", + "2021-02-25 16:26:33,395 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:33,402 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:26:33,404 - INFO WORKER:0:0 Global performance: [OI=2.90, 109.79 GFlops/s, 1.86 GPts/s]\n", + "2021-02-25 16:26:33,406 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:33,408 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 120.68 GFlops/s, 2.05 GPts/s]\n", + "2021-02-25 16:26:33,409 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:33,411 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.40 GFlops/s]\n", + "2021-02-25 16:26:33,418 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:33,420 - INFO WORKER:0:0 (ShotID 57) Completed state equation run for shot\n", + "2021-02-25 16:26:33,489 - INFO HEAD Shot 57 retrieved\n", + "2021-02-25 16:26:33,749 - INFO HEAD Appended traces for shot 57 to observed file\n", + "2021-02-25 16:26:33,899 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:33,899 - INFO HEAD Giving shot 58 to worker:0:0\n", + "2021-02-25 16:26:34,062 - INFO WORKER:0:0 (ShotID 58) Preparing to run shot\n", + "2021-02-25 16:26:34,063 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:34,063 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:34,064 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 
16:26:34,528 - INFO WORKER:0:0 (ShotID 58) Running state equation for shot\n", + "2021-02-25 16:26:34,536 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:34,538 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.37 s\n", + "2021-02-25 16:26:34,540 - INFO WORKER:0:0 Global performance: [OI=2.90, 88.94 GFlops/s, 1.51 GPts/s]\n", + "2021-02-25 16:26:34,541 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:34,543 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.33 s [OI=2.90, 100.17 GFlops/s, 1.70 GPts/s]\n", + "2021-02-25 16:26:34,548 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:34,552 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.83 GFlops/s]\n", + "2021-02-25 16:26:34,554 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:34,556 - INFO WORKER:0:0 (ShotID 58) Completed state equation run for shot\n", + "2021-02-25 16:26:34,630 - INFO HEAD Shot 58 retrieved\n", + "2021-02-25 16:26:34,867 - INFO HEAD Appended traces for shot 58 to observed file\n", + "2021-02-25 16:26:35,014 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:35,014 - INFO HEAD Giving shot 59 to worker:0:0\n", + "2021-02-25 16:26:35,176 - INFO WORKER:0:0 (ShotID 59) Preparing to run shot\n", + "2021-02-25 16:26:35,177 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:35,177 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:35,177 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:35,576 - INFO WORKER:0:0 (ShotID 59) Running state equation for shot\n", + "2021-02-25 16:26:35,577 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:35,577 - INFO WORKER:0:0 Operator 
`acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:26:35,578 - INFO WORKER:0:0 Global performance: [OI=2.90, 107.20 GFlops/s, 1.82 GPts/s]\n", + "2021-02-25 16:26:35,578 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:35,579 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 119.04 GFlops/s, 2.02 GPts/s]\n", + "2021-02-25 16:26:35,579 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:35,580 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.68 GFlops/s]\n", + "2021-02-25 16:26:35,580 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:35,580 - INFO WORKER:0:0 (ShotID 59) Completed state equation run for shot\n", + "2021-02-25 16:26:35,687 - INFO HEAD Shot 59 retrieved\n", + "2021-02-25 16:26:35,939 - INFO HEAD Appended traces for shot 59 to observed file\n", + "2021-02-25 16:26:36,088 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:36,088 - INFO HEAD Giving shot 60 to worker:0:0\n", + "2021-02-25 16:26:36,251 - INFO WORKER:0:0 (ShotID 60) Preparing to run shot\n", + "2021-02-25 16:26:36,251 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:36,252 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:36,252 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:36,622 - INFO WORKER:0:0 (ShotID 60) Running state equation for shot\n", + "2021-02-25 16:26:36,624 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:36,624 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.28 s\n", + "2021-02-25 16:26:36,625 - INFO WORKER:0:0 Global performance: [OI=2.90, 119.23 GFlops/s, 2.02 GPts/s]\n", + "2021-02-25 16:26:36,625 - INFO WORKER:0:0 Local performance:\n", + 
"2021-02-25 16:26:36,626 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 125.04 GFlops/s, 2.12 GPts/s]\n", + "2021-02-25 16:26:36,626 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:36,627 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.50 GFlops/s]\n", + "2021-02-25 16:26:36,627 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:36,628 - INFO WORKER:0:0 (ShotID 60) Completed state equation run for shot\n", + "2021-02-25 16:26:36,725 - INFO HEAD Shot 60 retrieved\n", + "2021-02-25 16:26:36,964 - INFO HEAD Appended traces for shot 60 to observed file\n", + "2021-02-25 16:26:37,111 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:37,111 - INFO HEAD Giving shot 61 to worker:0:0\n", + "2021-02-25 16:26:37,272 - INFO WORKER:0:0 (ShotID 61) Preparing to run shot\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:37,273 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:37,273 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:37,274 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:37,644 - INFO WORKER:0:0 (ShotID 61) Running state equation for shot\n", + "2021-02-25 16:26:37,645 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:37,645 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.28 s\n", + "2021-02-25 16:26:37,646 - INFO WORKER:0:0 Global performance: [OI=2.90, 119.93 GFlops/s, 2.04 GPts/s]\n", + "2021-02-25 16:26:37,646 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:37,647 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.24 s [OI=2.90, 137.43 GFlops/s, 2.33 GPts/s]\n", + "2021-02-25 16:26:37,647 
- INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:37,648 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.98 GFlops/s]\n", + "2021-02-25 16:26:37,648 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:37,649 - INFO WORKER:0:0 (ShotID 61) Completed state equation run for shot\n", + "2021-02-25 16:26:37,746 - INFO HEAD Shot 61 retrieved\n", + "2021-02-25 16:26:38,009 - INFO HEAD Appended traces for shot 61 to observed file\n", + "2021-02-25 16:26:38,160 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:38,161 - INFO HEAD Giving shot 62 to worker:0:0\n", + "2021-02-25 16:26:38,323 - INFO WORKER:0:0 (ShotID 62) Preparing to run shot\n", + "2021-02-25 16:26:38,323 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:38,324 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:38,324 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:38,729 - INFO WORKER:0:0 (ShotID 62) Running state equation for shot\n", + "2021-02-25 16:26:38,730 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:38,730 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:26:38,731 - INFO WORKER:0:0 Global performance: [OI=2.90, 105.18 GFlops/s, 1.79 GPts/s]\n", + "2021-02-25 16:26:38,731 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:38,732 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 119.44 GFlops/s, 2.03 GPts/s]\n", + "2021-02-25 16:26:38,732 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:38,732 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.88 GFlops/s]\n", + "2021-02-25 16:26:38,733 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:38,733 - INFO WORKER:0:0 (ShotID 62) Completed state equation run for shot\n", + "2021-02-25 16:26:38,832 - INFO HEAD Shot 62 retrieved\n", + "2021-02-25 16:26:39,088 - INFO HEAD Appended traces for shot 62 to observed file\n", + "2021-02-25 16:26:39,237 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:39,238 - INFO HEAD Giving shot 63 to worker:0:0\n", + "2021-02-25 16:26:39,402 - INFO WORKER:0:0 (ShotID 63) Preparing to run shot\n", + "2021-02-25 16:26:39,402 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:39,402 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:39,403 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:39,799 - INFO WORKER:0:0 (ShotID 63) Running state equation for shot\n", + "2021-02-25 16:26:39,800 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:39,800 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:26:39,801 - INFO WORKER:0:0 Global performance: [OI=2.90, 108.60 GFlops/s, 1.84 GPts/s]\n", + "2021-02-25 16:26:39,801 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:39,803 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 118.87 GFlops/s, 2.02 GPts/s]\n", + "2021-02-25 16:26:39,803 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:39,804 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.37 GFlops/s]\n", + "2021-02-25 16:26:39,804 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 
'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:39,804 - INFO WORKER:0:0 (ShotID 63) Completed state equation run for shot\n", + "2021-02-25 16:26:39,902 - INFO HEAD Shot 63 retrieved\n", + "2021-02-25 16:26:40,153 - INFO HEAD Appended traces for shot 63 to observed file\n", + "2021-02-25 16:26:40,303 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:40,304 - INFO HEAD Giving shot 64 to worker:0:0\n", + "2021-02-25 16:26:40,470 - INFO WORKER:0:0 (ShotID 64) Preparing to run shot\n", + "2021-02-25 16:26:40,470 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:40,470 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:40,471 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:40,907 - INFO WORKER:0:0 (ShotID 64) Running state equation for shot\n", + "2021-02-25 16:26:40,914 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:40,916 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:26:40,918 - INFO WORKER:0:0 Global performance: [OI=2.90, 97.11 GFlops/s, 1.65 GPts/s]\n", + "2021-02-25 16:26:40,920 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:40,921 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 115.78 GFlops/s, 1.97 GPts/s]\n", + "2021-02-25 16:26:40,923 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:40,925 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.73 GFlops/s]\n", + "2021-02-25 16:26:40,927 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:40,928 - INFO WORKER:0:0 (ShotID 64) Completed state equation run for shot\n", + "2021-02-25 16:26:41,009 - INFO HEAD Shot 64 retrieved\n", + "2021-02-25 16:26:41,252 - INFO 
HEAD Appended traces for shot 64 to observed file\n", + "2021-02-25 16:26:41,411 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:41,412 - INFO HEAD Giving shot 65 to worker:0:0\n", + "2021-02-25 16:26:41,574 - INFO WORKER:0:0 (ShotID 65) Preparing to run shot\n", + "2021-02-25 16:26:41,575 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:41,575 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:41,576 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:41,993 - INFO WORKER:0:0 (ShotID 65) Running state equation for shot\n", + "2021-02-25 16:26:41,994 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:41,994 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:26:41,994 - INFO WORKER:0:0 Global performance: [OI=2.90, 106.75 GFlops/s, 1.81 GPts/s]\n", + "2021-02-25 16:26:41,995 - INFO WORKER:0:0 Local performance:\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:41,995 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 115.82 GFlops/s, 1.97 GPts/s]\n", + "2021-02-25 16:26:41,995 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:41,996 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.48 GFlops/s]\n", + "2021-02-25 16:26:41,996 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:41,996 - INFO WORKER:0:0 (ShotID 65) Completed state equation run for shot\n", + "2021-02-25 16:26:42,096 - INFO HEAD Shot 65 retrieved\n", + "2021-02-25 16:26:42,337 - INFO HEAD Appended traces for shot 65 to observed file\n", + "2021-02-25 16:26:42,486 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:42,487 - INFO HEAD Giving 
shot 66 to worker:0:0\n", + "2021-02-25 16:26:42,648 - INFO WORKER:0:0 (ShotID 66) Preparing to run shot\n", + "2021-02-25 16:26:42,649 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:42,649 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:42,650 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:43,036 - INFO WORKER:0:0 (ShotID 66) Running state equation for shot\n", + "2021-02-25 16:26:43,044 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:43,051 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:26:43,053 - INFO WORKER:0:0 Global performance: [OI=2.90, 112.29 GFlops/s, 1.91 GPts/s]\n", + "2021-02-25 16:26:43,055 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:43,056 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 118.79 GFlops/s, 2.02 GPts/s]\n", + "2021-02-25 16:26:43,058 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:43,060 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.44 GFlops/s]\n", + "2021-02-25 16:26:43,061 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:43,067 - INFO WORKER:0:0 (ShotID 66) Completed state equation run for shot\n", + "2021-02-25 16:26:43,138 - INFO HEAD Shot 66 retrieved\n", + "2021-02-25 16:26:43,387 - INFO HEAD Appended traces for shot 66 to observed file\n", + "2021-02-25 16:26:43,534 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:43,535 - INFO HEAD Giving shot 67 to worker:0:0\n", + "2021-02-25 16:26:43,696 - INFO WORKER:0:0 (ShotID 67) Preparing to run shot\n", + "2021-02-25 16:26:43,697 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + 
"2021-02-25 16:26:43,697 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:43,698 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:44,077 - INFO WORKER:0:0 (ShotID 67) Running state equation for shot\n", + "2021-02-25 16:26:44,077 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:44,077 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.29 s\n", + "2021-02-25 16:26:44,078 - INFO WORKER:0:0 Global performance: [OI=2.90, 115.29 GFlops/s, 1.96 GPts/s]\n", + "2021-02-25 16:26:44,078 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:44,078 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.26 s [OI=2.90, 129.49 GFlops/s, 2.20 GPts/s]\n", + "2021-02-25 16:26:44,078 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:44,079 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.34 GFlops/s]\n", + "2021-02-25 16:26:44,079 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:44,079 - INFO WORKER:0:0 (ShotID 67) Completed state equation run for shot\n", + "2021-02-25 16:26:44,179 - INFO HEAD Shot 67 retrieved\n", + "2021-02-25 16:26:44,410 - INFO HEAD Appended traces for shot 67 to observed file\n", + "2021-02-25 16:26:44,560 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:44,560 - INFO HEAD Giving shot 68 to worker:0:0\n", + "2021-02-25 16:26:44,726 - INFO WORKER:0:0 (ShotID 68) Preparing to run shot\n", + "2021-02-25 16:26:44,726 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:44,727 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:44,727 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:45,158 - INFO WORKER:0:0 
(ShotID 68) Running state equation for shot\n", + "2021-02-25 16:26:45,165 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:45,172 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:26:45,174 - INFO WORKER:0:0 Global performance: [OI=2.90, 96.83 GFlops/s, 1.65 GPts/s]\n", + "2021-02-25 16:26:45,176 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:45,178 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 107.04 GFlops/s, 1.82 GPts/s]\n", + "2021-02-25 16:26:45,184 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:45,186 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.64 GFlops/s]\n", + "2021-02-25 16:26:45,188 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:45,189 - INFO WORKER:0:0 (ShotID 68) Completed state equation run for shot\n", + "2021-02-25 16:26:45,261 - INFO HEAD Shot 68 retrieved\n", + "2021-02-25 16:26:45,498 - INFO HEAD Appended traces for shot 68 to observed file\n", + "2021-02-25 16:26:45,646 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:45,646 - INFO HEAD Giving shot 69 to worker:0:0\n", + "2021-02-25 16:26:45,809 - INFO WORKER:0:0 (ShotID 69) Preparing to run shot\n", + "2021-02-25 16:26:45,810 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:45,810 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:45,810 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:46,205 - INFO WORKER:0:0 (ShotID 69) Running state equation for shot\n", + "2021-02-25 16:26:46,206 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:46,207 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 
s\n", + "2021-02-25 16:26:46,214 - INFO WORKER:0:0 Global performance: [OI=2.90, 109.71 GFlops/s, 1.86 GPts/s]\n", + "2021-02-25 16:26:46,214 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:46,215 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 115.37 GFlops/s, 1.96 GPts/s]\n", + "2021-02-25 16:26:46,216 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:46,217 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.37 GFlops/s]\n", + "2021-02-25 16:26:46,217 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:46,218 - INFO WORKER:0:0 (ShotID 69) Completed state equation run for shot\n", + "2021-02-25 16:26:46,308 - INFO HEAD Shot 69 retrieved\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:46,546 - INFO HEAD Appended traces for shot 69 to observed file\n", + "2021-02-25 16:26:46,695 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:46,696 - INFO HEAD Giving shot 70 to worker:0:0\n", + "2021-02-25 16:26:46,860 - INFO WORKER:0:0 (ShotID 70) Preparing to run shot\n", + "2021-02-25 16:26:46,860 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:46,861 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:46,861 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:47,265 - INFO WORKER:0:0 (ShotID 70) Running state equation for shot\n", + "2021-02-25 16:26:47,271 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:47,273 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:26:47,275 - INFO WORKER:0:0 Global performance: [OI=2.90, 106.04 GFlops/s, 1.80 GPts/s]\n", + "2021-02-25 16:26:47,277 - INFO 
WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:47,281 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 120.20 GFlops/s, 2.04 GPts/s]\n", + "2021-02-25 16:26:47,283 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:47,286 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.69 GFlops/s]\n", + "2021-02-25 16:26:47,288 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:47,289 - INFO WORKER:0:0 (ShotID 70) Completed state equation run for shot\n", + "2021-02-25 16:26:47,367 - INFO HEAD Shot 70 retrieved\n", + "2021-02-25 16:26:47,609 - INFO HEAD Appended traces for shot 70 to observed file\n", + "2021-02-25 16:26:47,757 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:47,758 - INFO HEAD Giving shot 71 to worker:0:0\n", + "2021-02-25 16:26:47,924 - INFO WORKER:0:0 (ShotID 71) Preparing to run shot\n", + "2021-02-25 16:26:47,924 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:47,925 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:47,925 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:48,592 - INFO WORKER:0:0 (ShotID 71) Running state equation for shot\n", + "2021-02-25 16:26:48,598 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:48,600 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.57 s\n", + "2021-02-25 16:26:48,602 - INFO WORKER:0:0 Global performance: [OI=2.90, 57.29 GFlops/s, 0.98 GPts/s]\n", + "2021-02-25 16:26:48,604 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:48,605 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.45 s [OI=2.90, 73.90 GFlops/s, 1.26 GPts/s]\n", + "2021-02-25 16:26:48,607 - INFO WORKER:0:0 * 
section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.10 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:48,609 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.04 s [OI=3.84, 0.40 GFlops/s]\n", + "2021-02-25 16:26:48,611 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:48,616 - INFO WORKER:0:0 (ShotID 71) Completed state equation run for shot\n", + "2021-02-25 16:26:48,701 - INFO HEAD Shot 71 retrieved\n", + "2021-02-25 16:26:48,962 - INFO HEAD Appended traces for shot 71 to observed file\n", + "2021-02-25 16:26:49,109 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:49,110 - INFO HEAD Giving shot 72 to worker:0:0\n", + "2021-02-25 16:26:49,271 - INFO WORKER:0:0 (ShotID 72) Preparing to run shot\n", + "2021-02-25 16:26:49,272 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:49,272 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:49,272 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:49,707 - INFO WORKER:0:0 (ShotID 72) Running state equation for shot\n", + "2021-02-25 16:26:49,708 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:49,708 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.35 s\n", + "2021-02-25 16:26:49,709 - INFO WORKER:0:0 Global performance: [OI=2.90, 95.96 GFlops/s, 1.63 GPts/s]\n", + "2021-02-25 16:26:49,709 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:49,710 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.32 s [OI=2.90, 103.41 GFlops/s, 1.76 GPts/s]\n", + "2021-02-25 16:26:49,710 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:49,711 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.86 GFlops/s]\n", + "2021-02-25 16:26:49,712 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:49,712 - INFO WORKER:0:0 (ShotID 72) Completed state equation run for shot\n", + "2021-02-25 16:26:49,810 - INFO HEAD Shot 72 retrieved\n", + "2021-02-25 16:26:50,053 - INFO HEAD Appended traces for shot 72 to observed file\n", + "2021-02-25 16:26:50,202 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:50,203 - INFO HEAD Giving shot 73 to worker:0:0\n", + "2021-02-25 16:26:50,364 - INFO WORKER:0:0 (ShotID 73) Preparing to run shot\n", + "2021-02-25 16:26:50,365 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:50,365 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:50,365 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:50,741 - INFO WORKER:0:0 (ShotID 73) Running state equation for shot\n", + "2021-02-25 16:26:50,743 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:50,743 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.29 s\n", + "2021-02-25 16:26:50,744 - INFO WORKER:0:0 Global performance: [OI=2.90, 116.11 GFlops/s, 1.97 GPts/s]\n", + "2021-02-25 16:26:50,744 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:50,744 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.26 s [OI=2.90, 129.93 GFlops/s, 2.21 GPts/s]\n", + "2021-02-25 16:26:50,745 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:50,745 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.05 GFlops/s]\n", + "2021-02-25 16:26:50,745 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 
'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:50,746 - INFO WORKER:0:0 (ShotID 73) Completed state equation run for shot\n", + "2021-02-25 16:26:50,844 - INFO HEAD Shot 73 retrieved\n", + "2021-02-25 16:26:51,080 - INFO HEAD Appended traces for shot 73 to observed file\n", + "2021-02-25 16:26:51,228 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:51,228 - INFO HEAD Giving shot 74 to worker:0:0\n", + "2021-02-25 16:26:51,392 - INFO WORKER:0:0 (ShotID 74) Preparing to run shot\n", + "2021-02-25 16:26:51,392 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:51,393 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:51,393 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:51,803 - INFO WORKER:0:0 (ShotID 74) Running state equation for shot\n", + "2021-02-25 16:26:51,811 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:51,814 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:26:51,816 - INFO WORKER:0:0 Global performance: [OI=2.90, 104.04 GFlops/s, 1.77 GPts/s]\n", + "2021-02-25 16:26:51,817 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:51,819 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 116.59 GFlops/s, 1.98 GPts/s]\n", + "2021-02-25 16:26:51,821 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:51,822 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.25 GFlops/s]\n", + "2021-02-25 16:26:51,824 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:51,826 - INFO WORKER:0:0 (ShotID 74) Completed state equation run for shot\n", + "2021-02-25 
16:26:51,905 - INFO HEAD Shot 74 retrieved\n", + "2021-02-25 16:26:52,154 - INFO HEAD Appended traces for shot 74 to observed file\n", + "2021-02-25 16:26:52,305 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:52,306 - INFO HEAD Giving shot 75 to worker:0:0\n", + "2021-02-25 16:26:52,469 - INFO WORKER:0:0 (ShotID 75) Preparing to run shot\n", + "2021-02-25 16:26:52,470 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:52,471 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:52,471 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:52,973 - INFO WORKER:0:0 (ShotID 75) Running state equation for shot\n", + "2021-02-25 16:26:52,973 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:52,973 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.41 s\n", + "2021-02-25 16:26:52,974 - INFO WORKER:0:0 Global performance: [OI=2.90, 81.28 GFlops/s, 1.38 GPts/s]\n", + "2021-02-25 16:26:52,974 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:52,975 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.34 s [OI=2.90, 98.43 GFlops/s, 1.67 GPts/s]\n", + "2021-02-25 16:26:52,975 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.06 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:52,975 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.94 GFlops/s]\n", + "2021-02-25 16:26:52,975 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:52,976 - INFO WORKER:0:0 (ShotID 75) Completed state equation run for shot\n", + "2021-02-25 16:26:53,076 - INFO HEAD Shot 75 retrieved\n", + "2021-02-25 16:26:53,329 - INFO HEAD Appended traces for shot 75 to observed file\n", + "2021-02-25 16:26:53,483 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:53,484 - INFO HEAD 
Giving shot 76 to worker:0:0\n", + "2021-02-25 16:26:53,646 - INFO WORKER:0:0 (ShotID 76) Preparing to run shot\n", + "2021-02-25 16:26:53,647 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:53,647 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:53,648 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:54,217 - INFO WORKER:0:0 (ShotID 76) Running state equation for shot\n", + "2021-02-25 16:26:54,217 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:54,217 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.48 s\n", + "2021-02-25 16:26:54,218 - INFO WORKER:0:0 Global performance: [OI=2.90, 68.84 GFlops/s, 1.17 GPts/s]\n", + "2021-02-25 16:26:54,218 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:54,219 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.41 s [OI=2.90, 80.08 GFlops/s, 1.36 GPts/s]\n", + "2021-02-25 16:26:54,219 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.05 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:54,219 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.66 GFlops/s]\n", + "2021-02-25 16:26:54,220 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:54,220 - INFO WORKER:0:0 (ShotID 76) Completed state equation run for shot\n", + "2021-02-25 16:26:54,319 - INFO HEAD Shot 76 retrieved\n", + "2021-02-25 16:26:54,558 - INFO HEAD Appended traces for shot 76 to observed file\n", + "2021-02-25 16:26:54,711 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:54,712 - INFO HEAD Giving shot 77 to worker:0:0\n", + "2021-02-25 16:26:54,875 - INFO WORKER:0:0 (ShotID 77) Preparing to run shot\n", + "2021-02-25 16:26:54,876 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 
MHz\n", + "2021-02-25 16:26:54,876 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:54,877 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:55,296 - INFO WORKER:0:0 (ShotID 77) Running state equation for shot\n", + "2021-02-25 16:26:55,297 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:55,297 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:26:55,297 - INFO WORKER:0:0 Global performance: [OI=2.90, 101.02 GFlops/s, 1.72 GPts/s]\n", + "2021-02-25 16:26:55,298 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:55,298 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.49 GFlops/s, 1.93 GPts/s]\n", + "2021-02-25 16:26:55,298 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:55,299 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.05 GFlops/s]\n", + "2021-02-25 16:26:55,299 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:55,300 - INFO WORKER:0:0 (ShotID 77) Completed state equation run for shot\n", + "2021-02-25 16:26:55,399 - INFO HEAD Shot 77 retrieved\n", + "2021-02-25 16:26:55,649 - INFO HEAD Appended traces for shot 77 to observed file\n", + "2021-02-25 16:26:55,796 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:55,797 - INFO HEAD Giving shot 78 to worker:0:0\n", + "2021-02-25 16:26:55,963 - INFO WORKER:0:0 (ShotID 78) Preparing to run shot\n", + "2021-02-25 16:26:55,964 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:55,964 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:55,964 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:56,438 - INFO 
WORKER:0:0 (ShotID 78) Running state equation for shot\n", + "2021-02-25 16:26:56,446 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:56,448 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.38 s\n", + "2021-02-25 16:26:56,450 - INFO WORKER:0:0 Global performance: [OI=2.90, 86.66 GFlops/s, 1.47 GPts/s]\n", + "2021-02-25 16:26:56,452 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:56,454 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 106.81 GFlops/s, 1.82 GPts/s]\n", + "2021-02-25 16:26:56,455 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.05 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:56,457 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.58 GFlops/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:26:56,459 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:56,466 - INFO WORKER:0:0 (ShotID 78) Completed state equation run for shot\n", + "2021-02-25 16:26:56,541 - INFO HEAD Shot 78 retrieved\n", + "2021-02-25 16:26:56,802 - INFO HEAD Appended traces for shot 78 to observed file\n", + "2021-02-25 16:26:56,951 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:56,952 - INFO HEAD Giving shot 79 to worker:0:0\n", + "2021-02-25 16:26:57,118 - INFO WORKER:0:0 (ShotID 79) Preparing to run shot\n", + "2021-02-25 16:26:57,118 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:57,119 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:57,119 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:57,583 - INFO WORKER:0:0 (ShotID 79) Running state equation for shot\n", + "2021-02-25 16:26:57,584 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + 
"2021-02-25 16:26:57,584 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.37 s\n", + "2021-02-25 16:26:57,585 - INFO WORKER:0:0 Global performance: [OI=2.90, 88.31 GFlops/s, 1.50 GPts/s]\n", + "2021-02-25 16:26:57,585 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:57,585 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 110.42 GFlops/s, 1.88 GPts/s]\n", + "2021-02-25 16:26:57,586 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.05 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:57,586 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.55 GFlops/s]\n", + "2021-02-25 16:26:57,587 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:57,587 - INFO WORKER:0:0 (ShotID 79) Completed state equation run for shot\n", + "2021-02-25 16:26:57,686 - INFO HEAD Shot 79 retrieved\n", + "2021-02-25 16:26:57,935 - INFO HEAD Appended traces for shot 79 to observed file\n", + "2021-02-25 16:26:58,088 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:58,088 - INFO HEAD Giving shot 80 to worker:0:0\n", + "2021-02-25 16:26:58,253 - INFO WORKER:0:0 (ShotID 80) Preparing to run shot\n", + "2021-02-25 16:26:58,254 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:58,254 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:58,255 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:58,753 - INFO WORKER:0:0 (ShotID 80) Running state equation for shot\n", + "2021-02-25 16:26:58,754 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:58,755 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.41 s\n", + "2021-02-25 16:26:58,755 - INFO WORKER:0:0 Global performance: [OI=2.90, 81.01 GFlops/s, 1.38 GPts/s]\n", + "2021-02-25 
16:26:58,755 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:58,756 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 106.07 GFlops/s, 1.80 GPts/s]\n", + "2021-02-25 16:26:58,756 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.08 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:58,756 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.62 GFlops/s]\n", + "2021-02-25 16:26:58,757 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:58,757 - INFO WORKER:0:0 (ShotID 80) Completed state equation run for shot\n", + "2021-02-25 16:26:58,858 - INFO HEAD Shot 80 retrieved\n", + "2021-02-25 16:26:59,104 - INFO HEAD Appended traces for shot 80 to observed file\n", + "2021-02-25 16:26:59,257 - INFO HEAD \n", + "\n", + "2021-02-25 16:26:59,257 - INFO HEAD Giving shot 81 to worker:0:0\n", + "2021-02-25 16:26:59,422 - INFO WORKER:0:0 (ShotID 81) Preparing to run shot\n", + "2021-02-25 16:26:59,423 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:26:59,423 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:26:59,423 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:26:59,838 - INFO WORKER:0:0 (ShotID 81) Running state equation for shot\n", + "2021-02-25 16:26:59,844 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:26:59,848 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:26:59,849 - INFO WORKER:0:0 Global performance: [OI=2.90, 102.76 GFlops/s, 1.75 GPts/s]\n", + "2021-02-25 16:26:59,852 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:26:59,854 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 114.05 GFlops/s, 1.94 GPts/s]\n", + "2021-02-25 16:26:59,855 - INFO 
WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:26:59,857 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.18 GFlops/s]\n", + "2021-02-25 16:26:59,859 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:26:59,864 - INFO WORKER:0:0 (ShotID 81) Completed state equation run for shot\n", + "2021-02-25 16:26:59,940 - INFO HEAD Shot 81 retrieved\n", + "2021-02-25 16:27:00,190 - INFO HEAD Appended traces for shot 81 to observed file\n", + "2021-02-25 16:27:00,345 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:00,346 - INFO HEAD Giving shot 82 to worker:0:0\n", + "2021-02-25 16:27:00,509 - INFO WORKER:0:0 (ShotID 82) Preparing to run shot\n", + "2021-02-25 16:27:00,509 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:00,510 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:00,510 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:00,881 - INFO WORKER:0:0 (ShotID 82) Running state equation for shot\n", + "2021-02-25 16:27:00,883 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:00,883 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.28 s\n", + "2021-02-25 16:27:00,884 - INFO WORKER:0:0 Global performance: [OI=2.90, 118.18 GFlops/s, 2.01 GPts/s]\n", + "2021-02-25 16:27:00,884 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:00,885 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.24 s [OI=2.90, 136.01 GFlops/s, 2.31 GPts/s]\n", + "2021-02-25 16:27:00,885 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:00,886 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.80 GFlops/s]\n", + "2021-02-25 16:27:00,886 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:00,887 - INFO WORKER:0:0 (ShotID 82) Completed state equation run for shot\n", + "2021-02-25 16:27:00,986 - INFO HEAD Shot 82 retrieved\n", + "2021-02-25 16:27:01,225 - INFO HEAD Appended traces for shot 82 to observed file\n", + "2021-02-25 16:27:01,379 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:01,379 - INFO HEAD Giving shot 83 to worker:0:0\n", + "2021-02-25 16:27:01,547 - INFO WORKER:0:0 (ShotID 83) Preparing to run shot\n", + "2021-02-25 16:27:01,548 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:01,548 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:01,548 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:01,964 - INFO WORKER:0:0 (ShotID 83) Running state equation for shot\n", + "2021-02-25 16:27:01,969 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:01,974 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:01,975 - INFO WORKER:0:0 Global performance: [OI=2.90, 102.24 GFlops/s, 1.74 GPts/s]\n", + "2021-02-25 16:27:01,977 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:01,981 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 106.43 GFlops/s, 1.81 GPts/s]\n", + "2021-02-25 16:27:01,983 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:01,985 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.52 GFlops/s]\n", + "2021-02-25 16:27:01,987 - INFO 
WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:01,988 - INFO WORKER:0:0 (ShotID 83) Completed state equation run for shot\n", + "2021-02-25 16:27:02,067 - INFO HEAD Shot 83 retrieved\n", + "2021-02-25 16:27:02,326 - INFO HEAD Appended traces for shot 83 to observed file\n", + "2021-02-25 16:27:02,486 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:02,486 - INFO HEAD Giving shot 84 to worker:0:0\n", + "2021-02-25 16:27:02,652 - INFO WORKER:0:0 (ShotID 84) Preparing to run shot\n", + "2021-02-25 16:27:02,653 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:02,653 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:02,653 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:03,054 - INFO WORKER:0:0 (ShotID 84) Running state equation for shot\n", + "2021-02-25 16:27:03,055 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:03,056 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:27:03,056 - INFO WORKER:0:0 Global performance: [OI=2.90, 106.59 GFlops/s, 1.81 GPts/s]\n", + "2021-02-25 16:27:03,057 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:03,057 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 115.34 GFlops/s, 1.96 GPts/s]\n", + "2021-02-25 16:27:03,057 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:03,058 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.46 GFlops/s]\n", + "2021-02-25 16:27:03,058 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:03,059 - INFO WORKER:0:0 (ShotID 84) Completed state equation run for shot\n", + "2021-02-25 16:27:03,158 - 
INFO HEAD Shot 84 retrieved\n", + "2021-02-25 16:27:03,413 - INFO HEAD Appended traces for shot 84 to observed file\n", + "2021-02-25 16:27:03,560 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:03,561 - INFO HEAD Giving shot 85 to worker:0:0\n", + "2021-02-25 16:27:03,723 - INFO WORKER:0:0 (ShotID 85) Preparing to run shot\n", + "2021-02-25 16:27:03,724 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:03,724 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:03,724 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:04,135 - INFO WORKER:0:0 (ShotID 85) Running state equation for shot\n", + "2021-02-25 16:27:04,141 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:04,147 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:04,150 - INFO WORKER:0:0 Global performance: [OI=2.90, 103.75 GFlops/s, 1.76 GPts/s]\n", + "2021-02-25 16:27:04,151 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:04,153 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 109.27 GFlops/s, 1.86 GPts/s]\n", + "2021-02-25 16:27:04,155 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:04,156 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.99 GFlops/s]\n", + "2021-02-25 16:27:04,158 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:04,160 - INFO WORKER:0:0 (ShotID 85) Completed state equation run for shot\n", + "2021-02-25 16:27:04,241 - INFO HEAD Shot 85 retrieved\n", + "2021-02-25 16:27:04,488 - INFO HEAD Appended traces for shot 85 to observed file\n", + "2021-02-25 16:27:04,636 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:04,637 - INFO HEAD Giving shot 
86 to worker:0:0\n", + "2021-02-25 16:27:04,804 - INFO WORKER:0:0 (ShotID 86) Preparing to run shot\n", + "2021-02-25 16:27:04,805 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:04,805 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:04,806 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:05,216 - INFO WORKER:0:0 (ShotID 86) Running state equation for shot\n", + "2021-02-25 16:27:05,217 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:05,217 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:05,217 - INFO WORKER:0:0 Global performance: [OI=2.90, 103.19 GFlops/s, 1.75 GPts/s]\n", + "2021-02-25 16:27:05,218 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:05,218 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 108.73 GFlops/s, 1.85 GPts/s]\n", + "2021-02-25 16:27:05,219 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:05,219 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.37 GFlops/s]\n", + "2021-02-25 16:27:05,219 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:05,220 - INFO WORKER:0:0 (ShotID 86) Completed state equation run for shot\n", + "2021-02-25 16:27:05,319 - INFO HEAD Shot 86 retrieved\n", + "2021-02-25 16:27:05,579 - INFO HEAD Appended traces for shot 86 to observed file\n", + "2021-02-25 16:27:05,730 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:05,730 - INFO HEAD Giving shot 87 to worker:0:0\n", + "2021-02-25 16:27:05,892 - INFO WORKER:0:0 (ShotID 87) Preparing to run shot\n", + "2021-02-25 16:27:05,892 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + 
"2021-02-25 16:27:05,893 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:05,893 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:06,325 - INFO WORKER:0:0 (ShotID 87) Running state equation for shot\n", + "2021-02-25 16:27:06,332 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:06,334 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:27:06,336 - INFO WORKER:0:0 Global performance: [OI=2.90, 97.60 GFlops/s, 1.66 GPts/s]\n", + "2021-02-25 16:27:06,338 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:06,340 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 109.78 GFlops/s, 1.87 GPts/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:06,341 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:06,343 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.54 GFlops/s]\n", + "2021-02-25 16:27:06,350 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:06,352 - INFO WORKER:0:0 (ShotID 87) Completed state equation run for shot\n", + "2021-02-25 16:27:06,427 - INFO HEAD Shot 87 retrieved\n", + "2021-02-25 16:27:06,662 - INFO HEAD Appended traces for shot 87 to observed file\n", + "2021-02-25 16:27:06,811 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:06,811 - INFO HEAD Giving shot 88 to worker:0:0\n", + "2021-02-25 16:27:06,973 - INFO WORKER:0:0 (ShotID 88) Preparing to run shot\n", + "2021-02-25 16:27:06,973 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:06,974 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:06,974 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 
limit (0.136 μs)\n", + "2021-02-25 16:27:07,389 - INFO WORKER:0:0 (ShotID 88) Running state equation for shot\n", + "2021-02-25 16:27:07,392 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:07,392 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:07,392 - INFO WORKER:0:0 Global performance: [OI=2.90, 102.38 GFlops/s, 1.74 GPts/s]\n", + "2021-02-25 16:27:07,393 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:07,393 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 112.12 GFlops/s, 1.91 GPts/s]\n", + "2021-02-25 16:27:07,393 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:07,394 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.09 GFlops/s]\n", + "2021-02-25 16:27:07,394 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:07,395 - INFO WORKER:0:0 (ShotID 88) Completed state equation run for shot\n", + "2021-02-25 16:27:07,492 - INFO HEAD Shot 88 retrieved\n", + "2021-02-25 16:27:07,722 - INFO HEAD Appended traces for shot 88 to observed file\n", + "2021-02-25 16:27:07,874 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:07,875 - INFO HEAD Giving shot 89 to worker:0:0\n", + "2021-02-25 16:27:08,043 - INFO WORKER:0:0 (ShotID 89) Preparing to run shot\n", + "2021-02-25 16:27:08,043 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:08,044 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:08,044 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:08,478 - INFO WORKER:0:0 (ShotID 89) Running state equation for shot\n", + "2021-02-25 16:27:08,480 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 
16:27:08,480 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:27:08,481 - INFO WORKER:0:0 Global performance: [OI=2.90, 97.73 GFlops/s, 1.66 GPts/s]\n", + "2021-02-25 16:27:08,481 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:08,481 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 109.97 GFlops/s, 1.87 GPts/s]\n", + "2021-02-25 16:27:08,482 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:08,482 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.24 GFlops/s]\n", + "2021-02-25 16:27:08,483 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:08,483 - INFO WORKER:0:0 (ShotID 89) Completed state equation run for shot\n", + "2021-02-25 16:27:08,582 - INFO HEAD Shot 89 retrieved\n", + "2021-02-25 16:27:08,812 - INFO HEAD Appended traces for shot 89 to observed file\n", + "2021-02-25 16:27:08,959 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:08,959 - INFO HEAD Giving shot 90 to worker:0:0\n", + "2021-02-25 16:27:09,130 - INFO WORKER:0:0 (ShotID 90) Preparing to run shot\n", + "2021-02-25 16:27:09,131 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:09,131 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:09,132 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:09,695 - INFO WORKER:0:0 (ShotID 90) Running state equation for shot\n", + "2021-02-25 16:27:09,696 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:09,696 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.47 s\n", + "2021-02-25 16:27:09,697 - INFO WORKER:0:0 Global performance: [OI=2.90, 70.02 GFlops/s, 1.19 GPts/s]\n", + "2021-02-25 16:27:09,697 - INFO 
WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:09,697 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.37 s [OI=2.90, 88.84 GFlops/s, 1.51 GPts/s]\n", + "2021-02-25 16:27:09,698 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.07 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:09,698 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.04 s [OI=3.84, 0.42 GFlops/s]\n", + "2021-02-25 16:27:09,698 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:09,699 - INFO WORKER:0:0 (ShotID 90) Completed state equation run for shot\n", + "2021-02-25 16:27:09,802 - INFO HEAD Shot 90 retrieved\n", + "2021-02-25 16:27:10,045 - INFO HEAD Appended traces for shot 90 to observed file\n", + "2021-02-25 16:27:10,193 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:10,193 - INFO HEAD Giving shot 91 to worker:0:0\n", + "2021-02-25 16:27:10,358 - INFO WORKER:0:0 (ShotID 91) Preparing to run shot\n", + "2021-02-25 16:27:10,359 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:10,359 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:10,359 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:10,766 - INFO WORKER:0:0 (ShotID 91) Running state equation for shot\n", + "2021-02-25 16:27:10,775 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:10,782 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:10,785 - INFO WORKER:0:0 Global performance: [OI=2.90, 104.74 GFlops/s, 1.78 GPts/s]\n", + "2021-02-25 16:27:10,787 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:10,789 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 118.44 GFlops/s, 2.01 GPts/s]\n", + "2021-02-25 16:27:10,790 - INFO WORKER:0:0 * 
section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:10,792 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.70 GFlops/s]\n", + "2021-02-25 16:27:10,794 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:10,801 - INFO WORKER:0:0 (ShotID 91) Completed state equation run for shot\n", + "2021-02-25 16:27:10,869 - INFO HEAD Shot 91 retrieved\n", + "2021-02-25 16:27:11,100 - INFO HEAD Appended traces for shot 91 to observed file\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:11,252 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:11,252 - INFO HEAD Giving shot 92 to worker:0:0\n", + "2021-02-25 16:27:11,420 - INFO WORKER:0:0 (ShotID 92) Preparing to run shot\n", + "2021-02-25 16:27:11,420 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:11,421 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:11,421 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:11,818 - INFO WORKER:0:0 (ShotID 92) Running state equation for shot\n", + "2021-02-25 16:27:11,826 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:11,833 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:27:11,835 - INFO WORKER:0:0 Global performance: [OI=2.90, 107.91 GFlops/s, 1.83 GPts/s]\n", + "2021-02-25 16:27:11,837 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:11,838 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 112.87 GFlops/s, 1.92 GPts/s]\n", + "2021-02-25 16:27:11,840 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:11,842 - INFO 
WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.36 GFlops/s]\n", + "2021-02-25 16:27:11,843 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:11,848 - INFO WORKER:0:0 (ShotID 92) Completed state equation run for shot\n", + "2021-02-25 16:27:11,921 - INFO HEAD Shot 92 retrieved\n", + "2021-02-25 16:27:12,158 - INFO HEAD Appended traces for shot 92 to observed file\n", + "2021-02-25 16:27:12,314 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:12,315 - INFO HEAD Giving shot 93 to worker:0:0\n", + "2021-02-25 16:27:12,481 - INFO WORKER:0:0 (ShotID 93) Preparing to run shot\n", + "2021-02-25 16:27:12,482 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:12,482 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:12,483 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:12,874 - INFO WORKER:0:0 (ShotID 93) Running state equation for shot\n", + "2021-02-25 16:27:12,875 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:12,875 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:27:12,876 - INFO WORKER:0:0 Global performance: [OI=2.90, 110.28 GFlops/s, 1.87 GPts/s]\n", + "2021-02-25 16:27:12,876 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:12,877 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 123.58 GFlops/s, 2.10 GPts/s]\n", + "2021-02-25 16:27:12,877 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:12,877 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.17 GFlops/s]\n", + "2021-02-25 16:27:12,878 - INFO WORKER:0:0 Performance[mode=advanced] arguments: 
{'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:12,878 - INFO WORKER:0:0 (ShotID 93) Completed state equation run for shot\n", + "2021-02-25 16:27:12,978 - INFO HEAD Shot 93 retrieved\n", + "2021-02-25 16:27:13,220 - INFO HEAD Appended traces for shot 93 to observed file\n", + "2021-02-25 16:27:13,385 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:13,385 - INFO HEAD Giving shot 94 to worker:0:0\n", + "2021-02-25 16:27:13,547 - INFO WORKER:0:0 (ShotID 94) Preparing to run shot\n", + "2021-02-25 16:27:13,548 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:13,548 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:13,549 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:13,954 - INFO WORKER:0:0 (ShotID 94) Running state equation for shot\n", + "2021-02-25 16:27:13,961 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:13,964 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:27:13,965 - INFO WORKER:0:0 Global performance: [OI=2.90, 105.63 GFlops/s, 1.79 GPts/s]\n", + "2021-02-25 16:27:13,967 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:13,969 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 121.27 GFlops/s, 2.06 GPts/s]\n", + "2021-02-25 16:27:13,971 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:13,972 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.74 GFlops/s]\n", + "2021-02-25 16:27:13,974 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:13,976 - INFO WORKER:0:0 (ShotID 94) Completed state equation run for shot\n", + "2021-02-25 16:27:14,057 - INFO HEAD Shot 94 retrieved\n", + "2021-02-25 
16:27:14,299 - INFO HEAD Appended traces for shot 94 to observed file\n", + "2021-02-25 16:27:14,451 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:14,452 - INFO HEAD Giving shot 95 to worker:0:0\n", + "2021-02-25 16:27:14,616 - INFO WORKER:0:0 (ShotID 95) Preparing to run shot\n", + "2021-02-25 16:27:14,616 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:14,616 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:14,617 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:15,001 - INFO WORKER:0:0 (ShotID 95) Running state equation for shot\n", + "2021-02-25 16:27:15,001 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:15,002 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.29 s\n", + "2021-02-25 16:27:15,002 - INFO WORKER:0:0 Global performance: [OI=2.90, 113.18 GFlops/s, 1.92 GPts/s]\n", + "2021-02-25 16:27:15,003 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:15,003 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 118.41 GFlops/s, 2.01 GPts/s]\n", + "2021-02-25 16:27:15,003 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:15,004 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.54 GFlops/s]\n", + "2021-02-25 16:27:15,004 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:15,004 - INFO WORKER:0:0 (ShotID 95) Completed state equation run for shot\n", + "2021-02-25 16:27:15,104 - INFO HEAD Shot 95 retrieved\n", + "2021-02-25 16:27:15,332 - INFO HEAD Appended traces for shot 95 to observed file\n", + "2021-02-25 16:27:15,483 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:15,483 - INFO HEAD Giving shot 96 to worker:0:0\n", + "2021-02-25 
16:27:15,644 - INFO WORKER:0:0 (ShotID 96) Preparing to run shot\n", + "2021-02-25 16:27:15,644 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:15,645 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:15,645 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:16,050 - INFO WORKER:0:0 (ShotID 96) Running state equation for shot\n", + "2021-02-25 16:27:16,051 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:16,051 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:16,051 - INFO WORKER:0:0 Global performance: [OI=2.90, 105.99 GFlops/s, 1.80 GPts/s]\n", + "2021-02-25 16:27:16,052 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:16,052 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 116.45 GFlops/s, 1.98 GPts/s]\n", + "2021-02-25 16:27:16,053 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:16,053 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.65 GFlops/s]\n", + "2021-02-25 16:27:16,053 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:16,054 - INFO WORKER:0:0 (ShotID 96) Completed state equation run for shot\n", + "2021-02-25 16:27:16,152 - INFO HEAD Shot 96 retrieved\n", + "2021-02-25 16:27:16,390 - INFO HEAD Appended traces for shot 96 to observed file\n", + "2021-02-25 16:27:16,540 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:16,541 - INFO HEAD Giving shot 97 to worker:0:0\n", + "2021-02-25 16:27:16,702 - INFO WORKER:0:0 (ShotID 97) Preparing to run shot\n", + "2021-02-25 16:27:16,703 - INFO WORKER:0:0 Estimated bandwidth for the propagated 
wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:16,703 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:16,704 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:17,127 - INFO WORKER:0:0 (ShotID 97) Running state equation for shot\n", + "2021-02-25 16:27:17,133 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:17,136 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:27:17,137 - INFO WORKER:0:0 Global performance: [OI=2.90, 99.37 GFlops/s, 1.69 GPts/s]\n", + "2021-02-25 16:27:17,139 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:17,141 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 121.51 GFlops/s, 2.06 GPts/s]\n", + "2021-02-25 16:27:17,142 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:17,147 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.70 GFlops/s]\n", + "2021-02-25 16:27:17,149 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:17,150 - INFO WORKER:0:0 (ShotID 97) Completed state equation run for shot\n", + "2021-02-25 16:27:17,230 - INFO HEAD Shot 97 retrieved\n", + "2021-02-25 16:27:17,467 - INFO HEAD Appended traces for shot 97 to observed file\n", + "2021-02-25 16:27:17,614 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:17,615 - INFO HEAD Giving shot 98 to worker:0:0\n", + "2021-02-25 16:27:17,778 - INFO WORKER:0:0 (ShotID 98) Preparing to run shot\n", + "2021-02-25 16:27:17,779 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:17,779 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:17,780 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 
16:27:18,172 - INFO WORKER:0:0 (ShotID 98) Running state equation for shot\n", + "2021-02-25 16:27:18,173 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:18,173 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:27:18,174 - INFO WORKER:0:0 Global performance: [OI=2.90, 110.41 GFlops/s, 1.88 GPts/s]\n", + "2021-02-25 16:27:18,174 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:18,175 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 117.23 GFlops/s, 1.99 GPts/s]\n", + "2021-02-25 16:27:18,175 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:18,175 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.68 GFlops/s]\n", + "2021-02-25 16:27:18,176 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:18,176 - INFO WORKER:0:0 (ShotID 98) Completed state equation run for shot\n", + "2021-02-25 16:27:18,274 - INFO HEAD Shot 98 retrieved\n", + "2021-02-25 16:27:18,519 - INFO HEAD Appended traces for shot 98 to observed file\n", + "2021-02-25 16:27:18,666 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:18,666 - INFO HEAD Giving shot 99 to worker:0:0\n", + "2021-02-25 16:27:18,831 - INFO WORKER:0:0 (ShotID 99) Preparing to run shot\n", + "2021-02-25 16:27:18,831 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:18,832 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:18,832 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:19,246 - INFO WORKER:0:0 (ShotID 99) Running state equation for shot\n", + "2021-02-25 16:27:19,247 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:19,247 - INFO WORKER:0:0 Operator 
`acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:19,247 - INFO WORKER:0:0 Global performance: [OI=2.90, 103.15 GFlops/s, 1.75 GPts/s]\n", + "2021-02-25 16:27:19,248 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:19,248 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 109.72 GFlops/s, 1.86 GPts/s]\n", + "2021-02-25 16:27:19,249 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:19,249 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.73 GFlops/s]\n", + "2021-02-25 16:27:19,249 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:19,250 - INFO WORKER:0:0 (ShotID 99) Completed state equation run for shot\n", + "2021-02-25 16:27:19,349 - INFO HEAD Shot 99 retrieved\n", + "2021-02-25 16:27:19,590 - INFO HEAD Appended traces for shot 99 to observed file\n", + "2021-02-25 16:27:19,744 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:19,744 - INFO HEAD Giving shot 100 to worker:0:0\n", + "2021-02-25 16:27:19,909 - INFO WORKER:0:0 (ShotID 100) Preparing to run shot\n", + "2021-02-25 16:27:19,910 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:19,910 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:19,910 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:20,300 - INFO WORKER:0:0 (ShotID 100) Running state equation for shot\n", + "2021-02-25 16:27:20,300 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:20,301 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:27:20,301 - INFO WORKER:0:0 Global performance: [OI=2.90, 112.05 GFlops/s, 1.90 GPts/s]\n", + "2021-02-25 16:27:20,302 - INFO WORKER:0:0 Local performance:\n", + 
"2021-02-25 16:27:20,302 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.26 s [OI=2.90, 126.08 GFlops/s, 2.14 GPts/s]\n", + "2021-02-25 16:27:20,302 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:20,303 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.40 GFlops/s]\n", + "2021-02-25 16:27:20,303 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:20,303 - INFO WORKER:0:0 (ShotID 100) Completed state equation run for shot\n", + "2021-02-25 16:27:20,404 - INFO HEAD Shot 100 retrieved\n", + "2021-02-25 16:27:20,647 - INFO HEAD Appended traces for shot 100 to observed file\n", + "2021-02-25 16:27:20,796 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:20,797 - INFO HEAD Giving shot 101 to worker:0:0\n", + "2021-02-25 16:27:20,960 - INFO WORKER:0:0 (ShotID 101) Preparing to run shot\n", + "2021-02-25 16:27:20,961 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:20,962 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:20,962 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:21,378 - INFO WORKER:0:0 (ShotID 101) Running state equation for shot\n", + "2021-02-25 16:27:21,379 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:21,379 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:27:21,380 - INFO WORKER:0:0 Global performance: [OI=2.90, 101.87 GFlops/s, 1.73 GPts/s]\n", + "2021-02-25 16:27:21,380 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:21,381 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 106.38 GFlops/s, 1.81 GPts/s]\n", + "2021-02-25 
16:27:21,381 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:21,381 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.43 GFlops/s]\n", + "2021-02-25 16:27:21,382 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:21,382 - INFO WORKER:0:0 (ShotID 101) Completed state equation run for shot\n", + "2021-02-25 16:27:21,481 - INFO HEAD Shot 101 retrieved\n", + "2021-02-25 16:27:21,713 - INFO HEAD Appended traces for shot 101 to observed file\n", + "2021-02-25 16:27:21,862 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:21,862 - INFO HEAD Giving shot 102 to worker:0:0\n", + "2021-02-25 16:27:22,026 - INFO WORKER:0:0 (ShotID 102) Preparing to run shot\n", + "2021-02-25 16:27:22,027 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:22,027 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:22,028 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:22,425 - INFO WORKER:0:0 (ShotID 102) Running state equation for shot\n", + "2021-02-25 16:27:22,431 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:22,434 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:27:22,435 - INFO WORKER:0:0 Global performance: [OI=2.90, 108.42 GFlops/s, 1.84 GPts/s]\n", + "2021-02-25 16:27:22,437 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:22,439 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 114.90 GFlops/s, 1.95 GPts/s]\n", + "2021-02-25 16:27:22,441 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:22,443 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.40 GFlops/s]\n", + "2021-02-25 16:27:22,447 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:22,450 - INFO WORKER:0:0 (ShotID 102) Completed state equation run for shot\n", + "2021-02-25 16:27:22,529 - INFO HEAD Shot 102 retrieved\n", + "2021-02-25 16:27:22,773 - INFO HEAD Appended traces for shot 102 to observed file\n", + "2021-02-25 16:27:22,927 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:22,928 - INFO HEAD Giving shot 103 to worker:0:0\n", + "2021-02-25 16:27:23,096 - INFO WORKER:0:0 (ShotID 103) Preparing to run shot\n", + "2021-02-25 16:27:23,097 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:23,097 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:23,098 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:23,507 - INFO WORKER:0:0 (ShotID 103) Running state equation for shot\n", + "2021-02-25 16:27:23,508 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:23,508 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:23,508 - INFO WORKER:0:0 Global performance: [OI=2.90, 104.31 GFlops/s, 1.77 GPts/s]\n", + "2021-02-25 16:27:23,509 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:23,509 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 112.73 GFlops/s, 1.92 GPts/s]\n", + "2021-02-25 16:27:23,509 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:23,510 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.13 GFlops/s]\n", + "2021-02-25 16:27:23,510 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 
12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:23,511 - INFO WORKER:0:0 (ShotID 103) Completed state equation run for shot\n", + "2021-02-25 16:27:23,610 - INFO HEAD Shot 103 retrieved\n", + "2021-02-25 16:27:23,852 - INFO HEAD Appended traces for shot 103 to observed file\n", + "2021-02-25 16:27:24,002 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:24,003 - INFO HEAD Giving shot 104 to worker:0:0\n", + "2021-02-25 16:27:24,165 - INFO WORKER:0:0 (ShotID 104) Preparing to run shot\n", + "2021-02-25 16:27:24,166 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:24,166 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:24,166 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:24,589 - INFO WORKER:0:0 (ShotID 104) Running state equation for shot\n", + "2021-02-25 16:27:24,597 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:24,600 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:27:24,602 - INFO WORKER:0:0 Global performance: [OI=2.90, 99.94 GFlops/s, 1.70 GPts/s]\n", + "2021-02-25 16:27:24,603 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:24,605 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.42 GFlops/s, 1.93 GPts/s]\n", + "2021-02-25 16:27:24,607 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:24,609 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.61 GFlops/s]\n", + "2021-02-25 16:27:24,616 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:24,618 - INFO WORKER:0:0 (ShotID 104) Completed state equation run for shot\n", + "2021-02-25 16:27:24,693 - INFO HEAD Shot 104 retrieved\n", + "2021-02-25 
16:27:24,931 - INFO HEAD Appended traces for shot 104 to observed file\n", + "2021-02-25 16:27:25,078 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:25,079 - INFO HEAD Giving shot 105 to worker:0:0\n", + "2021-02-25 16:27:25,241 - INFO WORKER:0:0 (ShotID 105) Preparing to run shot\n", + "2021-02-25 16:27:25,241 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:25,242 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:25,243 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:25,656 - INFO WORKER:0:0 (ShotID 105) Running state equation for shot\n", + "2021-02-25 16:27:25,662 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:25,664 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:25,666 - INFO WORKER:0:0 Global performance: [OI=2.90, 103.55 GFlops/s, 1.76 GPts/s]\n", + "2021-02-25 16:27:25,668 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:25,669 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 114.32 GFlops/s, 1.94 GPts/s]\n", + "2021-02-25 16:27:25,671 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:25,672 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.83 GFlops/s]\n", + "2021-02-25 16:27:25,674 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:25,676 - INFO WORKER:0:0 (ShotID 105) Completed state equation run for shot\n", + "2021-02-25 16:27:25,758 - INFO HEAD Shot 105 retrieved\n", + "2021-02-25 16:27:25,992 - INFO HEAD Appended traces for shot 105 to observed file\n", + "2021-02-25 16:27:26,140 - INFO HEAD \n", + "\n", + "2021-02-25 
16:27:26,140 - INFO HEAD Giving shot 106 to worker:0:0\n", + "2021-02-25 16:27:26,306 - INFO WORKER:0:0 (ShotID 106) Preparing to run shot\n", + "2021-02-25 16:27:26,306 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:26,307 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:26,307 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:26,737 - INFO WORKER:0:0 (ShotID 106) Running state equation for shot\n", + "2021-02-25 16:27:26,738 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:26,738 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:27:26,739 - INFO WORKER:0:0 Global performance: [OI=2.90, 97.75 GFlops/s, 1.66 GPts/s]\n", + "2021-02-25 16:27:26,739 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:26,740 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 112.62 GFlops/s, 1.91 GPts/s]\n", + "2021-02-25 16:27:26,740 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:26,741 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.62 GFlops/s]\n", + "2021-02-25 16:27:26,741 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:26,742 - INFO WORKER:0:0 (ShotID 106) Completed state equation run for shot\n", + "2021-02-25 16:27:26,839 - INFO HEAD Shot 106 retrieved\n", + "2021-02-25 16:27:27,083 - INFO HEAD Appended traces for shot 106 to observed file\n", + "2021-02-25 16:27:27,230 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:27,230 - INFO HEAD Giving shot 107 to worker:0:0\n", + "2021-02-25 16:27:27,394 - INFO WORKER:0:0 (ShotID 107) Preparing to run shot\n", + "2021-02-25 16:27:27,394 - INFO WORKER:0:0 Estimated bandwidth for the 
propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:27,395 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:27,395 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:27,825 - INFO WORKER:0:0 (ShotID 107) Running state equation for shot\n", + "2021-02-25 16:27:27,826 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:27,826 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:27:27,826 - INFO WORKER:0:0 Global performance: [OI=2.90, 98.64 GFlops/s, 1.68 GPts/s]\n", + "2021-02-25 16:27:27,827 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:27,827 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.32 s [OI=2.90, 102.35 GFlops/s, 1.74 GPts/s]\n", + "2021-02-25 16:27:27,827 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:27,828 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.43 GFlops/s]\n", + "2021-02-25 16:27:27,828 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:27,829 - INFO WORKER:0:0 (ShotID 107) Completed state equation run for shot\n", + "2021-02-25 16:27:27,927 - INFO HEAD Shot 107 retrieved\n", + "2021-02-25 16:27:28,180 - INFO HEAD Appended traces for shot 107 to observed file\n", + "2021-02-25 16:27:28,327 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:28,328 - INFO HEAD Giving shot 108 to worker:0:0\n", + "2021-02-25 16:27:28,492 - INFO WORKER:0:0 (ShotID 108) Preparing to run shot\n", + "2021-02-25 16:27:28,493 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:28,493 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:28,493 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + 
"2021-02-25 16:27:28,901 - INFO WORKER:0:0 (ShotID 108) Running state equation for shot\n", + "2021-02-25 16:27:28,902 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:28,902 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:27:28,903 - INFO WORKER:0:0 Global performance: [OI=2.90, 111.36 GFlops/s, 1.89 GPts/s]\n", + "2021-02-25 16:27:28,904 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:28,904 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 122.29 GFlops/s, 2.08 GPts/s]\n", + "2021-02-25 16:27:28,905 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:28,905 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.38 GFlops/s]\n", + "2021-02-25 16:27:28,906 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:28,906 - INFO WORKER:0:0 (ShotID 108) Completed state equation run for shot\n", + "2021-02-25 16:27:29,008 - INFO HEAD Shot 108 retrieved\n", + "2021-02-25 16:27:29,254 - INFO HEAD Appended traces for shot 108 to observed file\n", + "2021-02-25 16:27:29,405 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:29,405 - INFO HEAD Giving shot 109 to worker:0:0\n", + "2021-02-25 16:27:29,575 - INFO WORKER:0:0 (ShotID 109) Preparing to run shot\n", + "2021-02-25 16:27:29,575 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:29,576 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:29,576 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:29,952 - INFO WORKER:0:0 (ShotID 109) Running state equation for shot\n", + "2021-02-25 16:27:29,960 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:29,962 - INFO 
WORKER:0:0 Operator `acoustic_iso_state` ran in 0.28 s\n", + "2021-02-25 16:27:29,964 - INFO WORKER:0:0 Global performance: [OI=2.90, 118.37 GFlops/s, 2.01 GPts/s]\n", + "2021-02-25 16:27:29,965 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:29,966 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 123.64 GFlops/s, 2.10 GPts/s]\n", + "2021-02-25 16:27:29,967 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.03 GFlops/s, 0.01 GPts/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:29,969 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.33 GFlops/s]\n", + "2021-02-25 16:27:29,970 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:29,971 - INFO WORKER:0:0 (ShotID 109) Completed state equation run for shot\n", + "2021-02-25 16:27:30,056 - INFO HEAD Shot 109 retrieved\n", + "2021-02-25 16:27:30,307 - INFO HEAD Appended traces for shot 109 to observed file\n", + "2021-02-25 16:27:30,457 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:30,458 - INFO HEAD Giving shot 110 to worker:0:0\n", + "2021-02-25 16:27:30,622 - INFO WORKER:0:0 (ShotID 110) Preparing to run shot\n", + "2021-02-25 16:27:30,623 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:30,623 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:30,624 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:31,025 - INFO WORKER:0:0 (ShotID 110) Running state equation for shot\n", + "2021-02-25 16:27:31,026 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:31,026 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:27:31,026 - INFO WORKER:0:0 Global performance: [OI=2.90, 106.68 
GFlops/s, 1.81 GPts/s]\n", + "2021-02-25 16:27:31,027 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:31,027 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 115.83 GFlops/s, 1.97 GPts/s]\n", + "2021-02-25 16:27:31,028 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:31,028 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.11 GFlops/s]\n", + "2021-02-25 16:27:31,029 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:31,029 - INFO WORKER:0:0 (ShotID 110) Completed state equation run for shot\n", + "2021-02-25 16:27:31,128 - INFO HEAD Shot 110 retrieved\n", + "2021-02-25 16:27:31,403 - INFO HEAD Appended traces for shot 110 to observed file\n", + "2021-02-25 16:27:31,565 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:31,565 - INFO HEAD Giving shot 111 to worker:0:0\n", + "2021-02-25 16:27:31,733 - INFO WORKER:0:0 (ShotID 111) Preparing to run shot\n", + "2021-02-25 16:27:31,733 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:31,734 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:31,734 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:32,110 - INFO WORKER:0:0 (ShotID 111) Running state equation for shot\n", + "2021-02-25 16:27:32,111 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:32,111 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.28 s\n", + "2021-02-25 16:27:32,112 - INFO WORKER:0:0 Global performance: [OI=2.90, 117.08 GFlops/s, 1.99 GPts/s]\n", + "2021-02-25 16:27:32,112 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:32,112 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 124.42 GFlops/s, 2.11 
GPts/s]\n", + "2021-02-25 16:27:32,113 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:32,113 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.50 GFlops/s]\n", + "2021-02-25 16:27:32,114 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:32,114 - INFO WORKER:0:0 (ShotID 111) Completed state equation run for shot\n", + "2021-02-25 16:27:32,213 - INFO HEAD Shot 111 retrieved\n", + "2021-02-25 16:27:32,460 - INFO HEAD Appended traces for shot 111 to observed file\n", + "2021-02-25 16:27:32,606 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:32,607 - INFO HEAD Giving shot 112 to worker:0:0\n", + "2021-02-25 16:27:32,770 - INFO WORKER:0:0 (ShotID 112) Preparing to run shot\n", + "2021-02-25 16:27:32,771 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:32,771 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:32,772 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:33,202 - INFO WORKER:0:0 (ShotID 112) Running state equation for shot\n", + "2021-02-25 16:27:33,209 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:33,212 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:27:33,214 - INFO WORKER:0:0 Global performance: [OI=2.90, 97.83 GFlops/s, 1.66 GPts/s]\n", + "2021-02-25 16:27:33,216 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:33,217 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 109.70 GFlops/s, 1.86 GPts/s]\n", + "2021-02-25 16:27:33,219 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:33,221 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.02 GFlops/s]\n", + "2021-02-25 16:27:33,223 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:33,224 - INFO WORKER:0:0 (ShotID 112) Completed state equation run for shot\n", + "2021-02-25 16:27:33,305 - INFO HEAD Shot 112 retrieved\n", + "2021-02-25 16:27:33,548 - INFO HEAD Appended traces for shot 112 to observed file\n", + "2021-02-25 16:27:33,695 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:33,696 - INFO HEAD Giving shot 113 to worker:0:0\n", + "2021-02-25 16:27:33,860 - INFO WORKER:0:0 (ShotID 113) Preparing to run shot\n", + "2021-02-25 16:27:33,861 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:33,861 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:33,861 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:34,257 - INFO WORKER:0:0 (ShotID 113) Running state equation for shot\n", + "2021-02-25 16:27:34,258 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:34,258 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:27:34,259 - INFO WORKER:0:0 Global performance: [OI=2.90, 109.58 GFlops/s, 1.86 GPts/s]\n", + "2021-02-25 16:27:34,259 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:34,259 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 120.04 GFlops/s, 2.04 GPts/s]\n", + "2021-02-25 16:27:34,260 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:34,260 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.38 GFlops/s]\n", + "2021-02-25 16:27:34,260 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 
12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:34,261 - INFO WORKER:0:0 (ShotID 113) Completed state equation run for shot\n", + "2021-02-25 16:27:34,360 - INFO HEAD Shot 113 retrieved\n", + "2021-02-25 16:27:34,614 - INFO HEAD Appended traces for shot 113 to observed file\n", + "2021-02-25 16:27:34,761 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:34,762 - INFO HEAD Giving shot 114 to worker:0:0\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:34,925 - INFO WORKER:0:0 (ShotID 114) Preparing to run shot\n", + "2021-02-25 16:27:34,925 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:34,926 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:34,926 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:35,360 - INFO WORKER:0:0 (ShotID 114) Running state equation for shot\n", + "2021-02-25 16:27:35,360 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:35,361 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:27:35,361 - INFO WORKER:0:0 Global performance: [OI=2.90, 97.37 GFlops/s, 1.65 GPts/s]\n", + "2021-02-25 16:27:35,361 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:35,362 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 115.64 GFlops/s, 1.96 GPts/s]\n", + "2021-02-25 16:27:35,362 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:35,362 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.73 GFlops/s]\n", + "2021-02-25 16:27:35,363 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:35,363 - INFO WORKER:0:0 (ShotID 114) Completed state equation run for shot\n", + "2021-02-25 
16:27:35,463 - INFO HEAD Shot 114 retrieved\n", + "2021-02-25 16:27:35,710 - INFO HEAD Appended traces for shot 114 to observed file\n", + "2021-02-25 16:27:35,860 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:35,860 - INFO HEAD Giving shot 115 to worker:0:0\n", + "2021-02-25 16:27:36,026 - INFO WORKER:0:0 (ShotID 115) Preparing to run shot\n", + "2021-02-25 16:27:36,026 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:36,027 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:36,027 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:36,434 - INFO WORKER:0:0 (ShotID 115) Running state equation for shot\n", + "2021-02-25 16:27:36,434 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:36,435 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:36,435 - INFO WORKER:0:0 Global performance: [OI=2.90, 104.90 GFlops/s, 1.78 GPts/s]\n", + "2021-02-25 16:27:36,435 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:36,436 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 116.63 GFlops/s, 1.98 GPts/s]\n", + "2021-02-25 16:27:36,436 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:36,437 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.65 GFlops/s]\n", + "2021-02-25 16:27:36,437 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:36,437 - INFO WORKER:0:0 (ShotID 115) Completed state equation run for shot\n", + "2021-02-25 16:27:36,537 - INFO HEAD Shot 115 retrieved\n", + "2021-02-25 16:27:36,767 - INFO HEAD Appended traces for shot 115 to observed file\n", + "2021-02-25 16:27:36,922 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:36,922 - 
INFO HEAD Giving shot 116 to worker:0:0\n", + "2021-02-25 16:27:37,087 - INFO WORKER:0:0 (ShotID 116) Preparing to run shot\n", + "2021-02-25 16:27:37,088 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:37,088 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:37,088 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:37,500 - INFO WORKER:0:0 (ShotID 116) Running state equation for shot\n", + "2021-02-25 16:27:37,501 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:37,501 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:37,501 - INFO WORKER:0:0 Global performance: [OI=2.90, 103.33 GFlops/s, 1.76 GPts/s]\n", + "2021-02-25 16:27:37,502 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:37,502 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 119.80 GFlops/s, 2.04 GPts/s]\n", + "2021-02-25 16:27:37,503 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:37,503 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.26 GFlops/s]\n", + "2021-02-25 16:27:37,503 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:37,504 - INFO WORKER:0:0 (ShotID 116) Completed state equation run for shot\n", + "2021-02-25 16:27:37,603 - INFO HEAD Shot 116 retrieved\n", + "2021-02-25 16:27:37,844 - INFO HEAD Appended traces for shot 116 to observed file\n", + "2021-02-25 16:27:37,993 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:37,993 - INFO HEAD Giving shot 117 to worker:0:0\n", + "2021-02-25 16:27:38,155 - INFO WORKER:0:0 (ShotID 117) Preparing to run shot\n", + "2021-02-25 16:27:38,155 - INFO WORKER:0:0 Estimated bandwidth for the propagated 
wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:38,156 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:38,157 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:38,551 - INFO WORKER:0:0 (ShotID 117) Running state equation for shot\n", + "2021-02-25 16:27:38,552 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:38,552 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.30 s\n", + "2021-02-25 16:27:38,552 - INFO WORKER:0:0 Global performance: [OI=2.90, 109.40 GFlops/s, 1.86 GPts/s]\n", + "2021-02-25 16:27:38,553 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:38,553 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 114.54 GFlops/s, 1.95 GPts/s]\n", + "2021-02-25 16:27:38,554 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.01 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:38,554 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.42 GFlops/s]\n", + "2021-02-25 16:27:38,554 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:38,555 - INFO WORKER:0:0 (ShotID 117) Completed state equation run for shot\n", + "2021-02-25 16:27:38,654 - INFO HEAD Shot 117 retrieved\n", + "2021-02-25 16:27:38,885 - INFO HEAD Appended traces for shot 117 to observed file\n", + "2021-02-25 16:27:39,048 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:39,048 - INFO HEAD Giving shot 118 to worker:0:0\n", + "2021-02-25 16:27:39,214 - INFO WORKER:0:0 (ShotID 118) Preparing to run shot\n", + "2021-02-25 16:27:39,214 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:39,214 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:39,215 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 
16:27:39,751 - INFO WORKER:0:0 (ShotID 118) Running state equation for shot\n", + "2021-02-25 16:27:39,759 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:39,762 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.44 s\n", + "2021-02-25 16:27:39,763 - INFO WORKER:0:0 Global performance: [OI=2.90, 74.49 GFlops/s, 1.27 GPts/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:39,765 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:39,767 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.37 s [OI=2.90, 90.29 GFlops/s, 1.54 GPts/s]\n", + "2021-02-25 16:27:39,769 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.05 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:39,770 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.53 GFlops/s]\n", + "2021-02-25 16:27:39,772 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:39,774 - INFO WORKER:0:0 (ShotID 118) Completed state equation run for shot\n", + "2021-02-25 16:27:39,855 - INFO HEAD Shot 118 retrieved\n", + "2021-02-25 16:27:40,100 - INFO HEAD Appended traces for shot 118 to observed file\n", + "2021-02-25 16:27:40,248 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:40,249 - INFO HEAD Giving shot 119 to worker:0:0\n", + "2021-02-25 16:27:40,417 - INFO WORKER:0:0 (ShotID 119) Preparing to run shot\n", + "2021-02-25 16:27:40,417 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:40,418 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:40,418 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:40,850 - INFO WORKER:0:0 (ShotID 119) Running state equation for shot\n", + "2021-02-25 16:27:40,851 - WARNING WORKER:0:0 AutoTuner: could not 
perform any runs\n", + "2021-02-25 16:27:40,851 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:27:40,852 - INFO WORKER:0:0 Global performance: [OI=2.90, 97.12 GFlops/s, 1.65 GPts/s]\n", + "2021-02-25 16:27:40,852 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:40,853 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.31 s [OI=2.90, 107.55 GFlops/s, 1.83 GPts/s]\n", + "2021-02-25 16:27:40,853 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:40,854 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.65 GFlops/s]\n", + "2021-02-25 16:27:40,854 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:40,855 - INFO WORKER:0:0 (ShotID 119) Completed state equation run for shot\n", + "2021-02-25 16:27:40,953 - INFO HEAD Shot 119 retrieved\n", + "2021-02-25 16:27:41,200 - INFO HEAD Appended traces for shot 119 to observed file\n", + "2021-02-25 16:27:41,352 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:41,353 - INFO HEAD Giving shot 120 to worker:0:0\n", + "2021-02-25 16:27:41,519 - INFO WORKER:0:0 (ShotID 120) Preparing to run shot\n", + "2021-02-25 16:27:41,519 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:41,520 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:41,520 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:41,918 - INFO WORKER:0:0 (ShotID 120) Running state equation for shot\n", + "2021-02-25 16:27:41,920 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:41,920 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:27:41,921 - INFO WORKER:0:0 Global performance: [OI=2.90, 107.65 GFlops/s, 1.83 
GPts/s]\n", + "2021-02-25 16:27:41,921 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:41,922 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 121.94 GFlops/s, 2.07 GPts/s]\n", + "2021-02-25 16:27:41,922 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:41,923 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.03 s [OI=3.84, 0.59 GFlops/s]\n", + "2021-02-25 16:27:41,923 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:41,924 - INFO WORKER:0:0 (ShotID 120) Completed state equation run for shot\n", + "2021-02-25 16:27:42,022 - INFO HEAD Shot 120 retrieved\n", + "2021-02-25 16:27:42,261 - INFO HEAD Appended traces for shot 120 to observed file\n", + "2021-02-25 16:27:42,420 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:42,420 - INFO HEAD Giving shot 121 to worker:0:0\n", + "2021-02-25 16:27:42,588 - INFO WORKER:0:0 (ShotID 121) Preparing to run shot\n", + "2021-02-25 16:27:42,589 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:42,589 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:42,589 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:42,991 - INFO WORKER:0:0 (ShotID 121) Running state equation for shot\n", + "2021-02-25 16:27:42,992 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:42,992 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:27:42,992 - INFO WORKER:0:0 Global performance: [OI=2.90, 108.51 GFlops/s, 1.84 GPts/s]\n", + "2021-02-25 16:27:42,993 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:42,999 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.28 s [OI=2.90, 120.81 GFlops/s, 2.05 GPts/s]\n", + 
"2021-02-25 16:27:43,000 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:43,000 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.39 GFlops/s]\n", + "2021-02-25 16:27:43,000 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:43,001 - INFO WORKER:0:0 (ShotID 121) Completed state equation run for shot\n", + "2021-02-25 16:27:43,095 - INFO HEAD Shot 121 retrieved\n", + "2021-02-25 16:27:43,337 - INFO HEAD Appended traces for shot 121 to observed file\n", + "2021-02-25 16:27:43,486 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:43,486 - INFO HEAD Giving shot 122 to worker:0:0\n", + "2021-02-25 16:27:43,652 - INFO WORKER:0:0 (ShotID 122) Preparing to run shot\n", + "2021-02-25 16:27:43,653 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:43,653 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:43,654 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:44,061 - INFO WORKER:0:0 (ShotID 122) Running state equation for shot\n", + "2021-02-25 16:27:44,063 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:44,063 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.32 s\n", + "2021-02-25 16:27:44,064 - INFO WORKER:0:0 Global performance: [OI=2.90, 104.96 GFlops/s, 1.78 GPts/s]\n", + "2021-02-25 16:27:44,064 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:44,065 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.26 s [OI=2.90, 126.28 GFlops/s, 2.15 GPts/s]\n", + "2021-02-25 16:27:44,065 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:44,066 - INFO WORKER:0:0 * 
section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 0.99 GFlops/s]\n", + "2021-02-25 16:27:44,066 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:44,067 - INFO WORKER:0:0 (ShotID 122) Completed state equation run for shot\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:44,164 - INFO HEAD Shot 122 retrieved\n", + "2021-02-25 16:27:44,397 - INFO HEAD Appended traces for shot 122 to observed file\n", + "2021-02-25 16:27:44,546 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:44,547 - INFO HEAD Giving shot 123 to worker:0:0\n", + "2021-02-25 16:27:44,712 - INFO WORKER:0:0 (ShotID 123) Preparing to run shot\n", + "2021-02-25 16:27:44,713 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:44,713 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:44,713 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:45,098 - INFO WORKER:0:0 (ShotID 123) Running state equation for shot\n", + "2021-02-25 16:27:45,099 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:45,099 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.29 s\n", + "2021-02-25 16:27:45,100 - INFO WORKER:0:0 Global performance: [OI=2.90, 113.92 GFlops/s, 1.93 GPts/s]\n", + "2021-02-25 16:27:45,100 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:45,100 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.26 s [OI=2.90, 128.50 GFlops/s, 2.18 GPts/s]\n", + "2021-02-25 16:27:45,101 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.03 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:45,101 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.45 GFlops/s]\n", + "2021-02-25 16:27:45,102 
- INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:45,102 - INFO WORKER:0:0 (ShotID 123) Completed state equation run for shot\n", + "2021-02-25 16:27:45,201 - INFO HEAD Shot 123 retrieved\n", + "2021-02-25 16:27:45,439 - INFO HEAD Appended traces for shot 123 to observed file\n", + "2021-02-25 16:27:45,588 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:45,589 - INFO HEAD Giving shot 124 to worker:0:0\n", + "2021-02-25 16:27:45,751 - INFO WORKER:0:0 (ShotID 124) Preparing to run shot\n", + "2021-02-25 16:27:45,751 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:45,752 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:45,752 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:46,169 - INFO WORKER:0:0 (ShotID 124) Running state equation for shot\n", + "2021-02-25 16:27:46,170 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:46,170 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.33 s\n", + "2021-02-25 16:27:46,170 - INFO WORKER:0:0 Global performance: [OI=2.90, 101.86 GFlops/s, 1.73 GPts/s]\n", + "2021-02-25 16:27:46,171 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:46,171 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 113.34 GFlops/s, 1.93 GPts/s]\n", + "2021-02-25 16:27:46,171 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:46,172 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.15 GFlops/s]\n", + "2021-02-25 16:27:46,172 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:46,173 - INFO WORKER:0:0 (ShotID 124) Completed state equation run for shot\n", + "2021-02-25 
16:27:46,273 - INFO HEAD Shot 124 retrieved\n", + "2021-02-25 16:27:46,521 - INFO HEAD Appended traces for shot 124 to observed file\n", + "2021-02-25 16:27:46,678 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:46,679 - INFO HEAD Giving shot 125 to worker:0:0\n", + "2021-02-25 16:27:46,841 - INFO WORKER:0:0 (ShotID 125) Preparing to run shot\n", + "2021-02-25 16:27:46,841 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:46,842 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:46,842 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:47,274 - INFO WORKER:0:0 (ShotID 125) Running state equation for shot\n", + "2021-02-25 16:27:47,274 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:47,275 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.34 s\n", + "2021-02-25 16:27:47,275 - INFO WORKER:0:0 Global performance: [OI=2.90, 97.19 GFlops/s, 1.65 GPts/s]\n", + "2021-02-25 16:27:47,276 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:47,276 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.30 s [OI=2.90, 112.05 GFlops/s, 1.90 GPts/s]\n", + "2021-02-25 16:27:47,276 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.04 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:47,277 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 2.53 GFlops/s]\n", + "2021-02-25 16:27:47,277 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:47,277 - INFO WORKER:0:0 (ShotID 125) Completed state equation run for shot\n", + "2021-02-25 16:27:47,376 - INFO HEAD Shot 125 retrieved\n", + "2021-02-25 16:27:47,607 - INFO HEAD Appended traces for shot 125 to observed file\n", + "2021-02-25 16:27:47,756 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:47,757 - 
INFO HEAD Giving shot 126 to worker:0:0\n", + "2021-02-25 16:27:47,922 - INFO WORKER:0:0 (ShotID 126) Preparing to run shot\n", + "2021-02-25 16:27:47,922 - INFO WORKER:0:0 Estimated bandwidth for the propagated wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:47,923 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:47,923 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:48,331 - INFO WORKER:0:0 (ShotID 126) Running state equation for shot\n", + "2021-02-25 16:27:48,331 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:48,332 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.31 s\n", + "2021-02-25 16:27:48,332 - INFO WORKER:0:0 Global performance: [OI=2.90, 105.34 GFlops/s, 1.79 GPts/s]\n", + "2021-02-25 16:27:48,332 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:48,333 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.29 s [OI=2.90, 112.90 GFlops/s, 1.92 GPts/s]\n", + "2021-02-25 16:27:48,333 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.02 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:48,334 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.01 s [OI=3.84, 1.69 GFlops/s]\n", + "2021-02-25 16:27:48,334 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:48,334 - INFO WORKER:0:0 (ShotID 126) Completed state equation run for shot\n", + "2021-02-25 16:27:48,433 - INFO HEAD Shot 126 retrieved\n", + "2021-02-25 16:27:48,680 - INFO HEAD Appended traces for shot 126 to observed file\n", + "2021-02-25 16:27:48,832 - INFO HEAD \n", + "\n", + "2021-02-25 16:27:48,832 - INFO HEAD Giving shot 127 to worker:0:0\n", + "2021-02-25 16:27:49,009 - INFO WORKER:0:0 (ShotID 127) Preparing to run shot\n", + "2021-02-25 16:27:49,010 - INFO WORKER:0:0 Estimated bandwidth for the propagated 
wavelet 0.255-0.735 MHz\n", + "2021-02-25 16:27:49,010 - INFO WORKER:0:0 Using DRP scheme\n", + "2021-02-25 16:27:49,011 - INFO WORKER:0:0 Time grid spacing (0.080 μs) is below OT2 limit (0.136 μs)\n", + "2021-02-25 16:27:49,397 - INFO WORKER:0:0 (ShotID 127) Running state equation for shot\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2021-02-25 16:27:49,404 - WARNING WORKER:0:0 AutoTuner: could not perform any runs\n", + "2021-02-25 16:27:49,407 - INFO WORKER:0:0 Operator `acoustic_iso_state` ran in 0.29 s\n", + "2021-02-25 16:27:49,412 - INFO WORKER:0:0 Global performance: [OI=2.90, 112.70 GFlops/s, 1.91 GPts/s]\n", + "2021-02-25 16:27:49,414 - INFO WORKER:0:0 Local performance:\n", + "2021-02-25 16:27:49,416 - INFO WORKER:0:0 * section0<2500,456,485> ran in 0.27 s [OI=2.90, 123.48 GFlops/s, 2.10 GPts/s]\n", + "2021-02-25 16:27:49,417 - INFO WORKER:0:0 * section1<<2500,1>,<2500,1>,<2500,1>,<2500,1>,<2500,1>> ran in 0.02 s [OI=3.57, 0.01 GFlops/s, 0.01 GPts/s]\n", + "2021-02-25 16:27:49,419 - INFO WORKER:0:0 * section2<<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>,<2500,128>> ran in 0.02 s [OI=3.84, 1.40 GFlops/s]\n", + "2021-02-25 16:27:49,421 - INFO WORKER:0:0 Performance[mode=advanced] arguments: {'nthreads': 12, 'nthreads_nonaffine': 12}\n", + "2021-02-25 16:27:49,423 - INFO WORKER:0:0 (ShotID 127) Completed state equation run for shot\n", + "2021-02-25 16:27:49,501 - INFO HEAD Shot 127 retrieved\n", + "2021-02-25 16:27:49,757 - INFO HEAD Appended traces for shot 127 to observed file\n" + ] + } + ], + "source": [ + "# Run\n", + "await problem.forward()" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "postal-celtic", + "metadata": {}, + "outputs": [], + "source": [ + "await mosaic.interactive('off')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + 
"version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.8" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/stride/anastasio2D/forward.py b/examples/stride/anastasio2D/forward.py new file mode 100644 index 00000000..d026dec5 --- /dev/null +++ b/examples/stride/anastasio2D/forward.py @@ -0,0 +1,64 @@ + +import mosaic + +from stride import * +from stride.utils import wavelets + + +async def main(runtime): + # Create the grid + shape = (356, 385) + extra = (50, 50) + absorbing = (40, 40) + spacing = (0.5e-3, 0.5e-3) + + space = Space(shape=shape, + extra=extra, + absorbing=absorbing, + spacing=spacing) + + start = 0. + step = 0.08e-6 + num = 2500 + + time = Time(start=start, + step=step, + num=num) + + # Create problem + problem = Problem(name='anastasio2D', + space=space, time=time) + + # Create medium + vp = ScalarField('vp', grid=problem.grid) + vp.load('data/anastasio2D-TrueModel.h5') + + problem.medium.add(vp) + + # Create transducers + problem.transducers.default() + + # Create geometry + num_locations = 128 + problem.geometry.default('elliptical', num_locations) + + # Create acquisitions + problem.acquisitions.default() + + # Create wavelets + f_centre = 0.50e6 + n_cycles = 3 + + for shot in problem.acquisitions.shots: + shot.wavelets.data[0, :] = wavelets.tone_burst(f_centre, n_cycles, + time.num, time.step) + + # Plot + problem.plot() + + # Run + await problem.forward() + + +if __name__ == '__main__': + mosaic.run(main) diff --git a/examples/stride/anastasio2D/inverse.py b/examples/stride/anastasio2D/inverse.py new file mode 100644 index 00000000..e3f3299e --- /dev/null +++ b/examples/stride/anastasio2D/inverse.py @@ -0,0 +1,77 @@ + +import mosaic + +from stride import * + + +async def main(runtime): + # Create the grid + shape = (356, 385) + extra = (50, 50) + absorbing = (40, 40) + spacing = (0.5e-3, 0.5e-3) + + 
space = Space(shape=shape, + extra=extra, + absorbing=absorbing, + spacing=spacing) + + start = 0. + step = 0.08e-6 + num = 2500 + + time = Time(start=start, + step=step, + num=num) + + # Create problem + problem = Problem(name='anastasio2D', + space=space, time=time) + + # Create medium + vp = ScalarField('vp', grid=problem.grid) + vp.fill(1500.) + + problem.medium.add(vp) + + # Create transducers + problem.transducers.default() + + # Create geometry + num_locations = 128 + problem.geometry.default('elliptical', num_locations) + + # Create acquisitions + problem.acquisitions.load(path=problem.output_folder, + project_name=problem.name, version=0) + + # Plot + problem.plot() + + # Create optimisation variable + optimisation = Optimisation() + + optim_vp = Vp('vp', grid=problem.grid) + optim_vp.extended_data[:] = vp.extended_data[:] + + # Create optimiser + step_size = 10 + optimiser = GradientDescent(optim_vp, step=step_size) + + optimisation.add(optim_vp, optimiser) + + # Run optimisation + max_freqs = [0.3e6, 0.4e6, 0.5e6, 0.6e6] + + for freq, block in zip(max_freqs, optimisation.blocks(4)): + block.config(num_iterations=8, + f_min=0.05e6, f_max=freq, + min=1400., max=1700., + select_shots={'num': 16, 'randomly': True}) + + await optimisation.run(block, problem) + + optim_vp.plot() + +if __name__ == '__main__': + mosaic.run(main) diff --git a/examples/stride/anastasio3D/forward.py b/examples/stride/anastasio3D/forward.py new file mode 100644 index 00000000..8045fc6c --- /dev/null +++ b/examples/stride/anastasio3D/forward.py @@ -0,0 +1,78 @@ + +import numpy as np + +import mosaic + +from stride import * +from stride.utils import fetch, wavelets + + +async def main(runtime): + # Create the grid + shape = (356, 385, 160) + extra = (50, 50, 50) + absorbing = (40, 40, 40) + spacing = (0.5e-3, 0.5e-3, 0.5e-3) + + space = Space(shape=shape, + extra=extra, + absorbing=absorbing, + spacing=spacing) + + start = 0. 
+ step = 0.08e-6 + num = 2500 + + time = Time(start=start, + step=step, + num=num) + + # Create problem + problem = Problem(name='anastasio3D', + space=space, time=time) + + # Create medium + vp = ScalarField('vp', grid=problem.grid) + fetch('anastasio3D', + token='', + dest='data/anastasio3D-TrueModel.h5') + vp.load('data/anastasio3D-TrueModel.h5') + + problem.medium.add(vp) + + # Create transducers + problem.transducers.default() + + # Create geometry + radius = ((space.limit[0] - 30e-3) / 2, + (space.limit[1] - 05e-3) / 2, + (space.limit[2] - 05e-3)) + centre = (space.limit[0] / 2, + space.limit[1] / 2, + space.limit[2]) + + num_locations = 1024 + + problem.geometry.default('ellipsoidal', num_locations, radius, centre, + theta=np.pi, threshold=0.5) + + # Create acquisitions + problem.acquisitions.default() + + # Create wavelets + f_centre = 0.50e6 + n_cycles = 3 + + for shot in problem.acquisitions.shots: + shot.wavelets.data[0, :] = wavelets.tone_burst(f_centre, n_cycles, + time.num, time.step) + + # Plot + problem.plot() + + # Run + await problem.forward() + + +if __name__ == '__main__': + mosaic.run(main) diff --git a/examples/stride/anastasio3D/inverse.py b/examples/stride/anastasio3D/inverse.py new file mode 100644 index 00000000..3705cf03 --- /dev/null +++ b/examples/stride/anastasio3D/inverse.py @@ -0,0 +1,88 @@ + +import numpy as np + +import mosaic + +from stride import * + + +async def main(runtime): + # Create the grid + shape = (356, 385, 160) + extra = (50, 50, 50) + absorbing = (40, 40, 40) + spacing = (0.5e-3, 0.5e-3, 0.5e-3) + + space = Space(shape=shape, + extra=extra, + absorbing=absorbing, + spacing=spacing) + + start = 0. + step = 0.08e-6 + num = 2500 + + time = Time(start=start, + step=step, + num=num) + + # Create problem + problem = Problem(name='anastasio3D', + space=space, time=time) + + # Create medium + vp = ScalarField('vp', grid=problem.grid) + vp.fill(1500.) 
+ + problem.medium.add(vp) + + # Create transducers + problem.transducers.default() + + # Create geometry + radius = ((space.limit[0] - 30e-3) / 2, + (space.limit[1] - 05e-3) / 2, + (space.limit[2] - 05e-3)) + centre = (space.limit[0] / 2, + space.limit[1] / 2, + space.limit[2]) + + num_locations = 1024 + + problem.geometry.default('ellipsoidal', num_locations, radius, centre, + theta=np.pi, threshold=0.5) + + # Create acquisitions + problem.acquisitions.load(path=problem.output_folder, + project_name=problem.name, version=0) + + # Plot + problem.plot() + + # Create optimisation variable + optimisation = Optimisation() + + optim_vp = Vp('vp', grid=problem.grid) + optim_vp.extended_data[:] = vp.extended_data[:] + + # Create optimiser + step_size = 10 + optimiser = GradientDescent(optim_vp, step=step_size) + + optimisation.add(optim_vp, optimiser) + + # Run optimisation + max_freqs = [0.3e6, 0.4e6, 0.5e6, 0.6e6] + + for freq, block in zip(max_freqs, optimisation.blocks(4)): + block.config(num_iterations=8, + f_min=0.05e6, f_max=freq, + min=1400., max=1700., + select_shots={'num': 16, 'randomly': True}) + + await optimisation.run(block, problem) + + optim_vp.plot() + +if __name__ == '__main__': + mosaic.run(main) diff --git a/mosaic/__init__.py b/mosaic/__init__.py new file mode 100644 index 00000000..d8584e83 --- /dev/null +++ b/mosaic/__init__.py @@ -0,0 +1,264 @@ + +__version__ = '1.0' + +import asyncio + +from .core import tessera +from .runtime import Head, Monitor, Node, Worker +from .utils.subprocess import subprocess +from .utils import logger as mlogger +from .utils import gather +from .file_manipulation import yaml, h5 + + +_runtime = None +_runtime_types = { + 'head': Head, + 'monitor': Monitor, + 'node': Node, + 'worker': Worker, +} + + +def init(runtime_type='head', runtime_indices=(), + address=None, port=None, + parent_id=None, parent_address=None, parent_port=None, + monitor_address=None, monitor_port=None, + num_workers=None, num_threads=None, + 
mode='local', monitor_strategy='round-robin', + log_level='info', node_list=None, + asyncio_loop=None, wait=False, + **kwargs): + """ + Starts the global mosaic runtime. + + Parameters + ---------- + runtime_type : str, optional + Type of runtime to instantiate, defaults to ``head``. + runtime_indices : tuple, optional + Indices associated with the runtime, defaults to None. + address : str, optional + Address to use for the runtime, defaults to None. If None, the comms will + try to guess the address. + port : int, optional + Port to use for the runtime, defaults to None. If None, the comms will + test ports until one becomes available. + parent_id : str, optional + UID of the parent runtime, if any. + parent_address : str, optional + Address of the parent runtime, if any. + parent_port : int, optional + Port of the parent runtime, if any. + monitor_address : str, optional + Address of the monitor to connect to. + monitor_port : int, optional + Port of the monitor to connect to. + num_workers : int, optional + Number of workers to instantiate in each node, defaults to 1. + num_threads : int, optional + Number of threads to assign to each worker, defaults to the number of + available cores over ``num_workers``. + mode : str, optional + Mode of the runtime, defaults to ``local``. + monitor_strategy : str, optional + Strategy used by the monitor to allocate tessera, defaults to round robin. + log_level : str, optional + Log level, defaults to ``info``. + node_list : list, optional + List of available node addresses to connect to. + asyncio_loop: object, optional + Async loop to use in our mosaic event loop, defaults to new loop. + wait : bool, optional + Whether or not to return control to calling frame, defaults to False. + kwargs : optional + Extra keyword arguments. 
+ + Returns + ------- + + """ + global _runtime + + if _runtime is not None: + return _runtime + + mlogger.log_level = log_level + + runtime_config = { + 'runtime_indices': runtime_indices, + 'mode': mode, + 'monitor_strategy': monitor_strategy, + 'num_workers': num_workers, + 'num_threads': num_threads, + 'log_level': log_level, + 'node_list': node_list, + } + + if address is not None and port is not None: + runtime_config['address'] = address + runtime_config['port'] = port + + if parent_id is not None and parent_address is not None and parent_port is not None: + runtime_config['parent_id'] = parent_id + runtime_config['parent_address'] = parent_address + runtime_config['parent_port'] = parent_port + + elif monitor_address is not None and monitor_port is not None: + runtime_config['monitor_address'] = monitor_address + runtime_config['monitor_port'] = monitor_port + + elif runtime_type != 'head': + raise ValueError('Either parent address:port or the monitor address:port are needed to ' + 'init a %s' % runtime_type) + + # Create global runtime + try: + _runtime = _runtime_types[runtime_type](**runtime_config) + except KeyError: + raise KeyError('Endpoint type is not recognised, available types are head, ' + 'monitor, node and worker') + + loop = _runtime.get_event_loop(asyncio_loop=asyncio_loop) + result = loop.run(_runtime.init, kwargs=runtime_config, wait=True) + + if wait is True: + try: + loop.run_forever() + + finally: + loop.stop() + + return result + + +def __getattr__(key): + global _runtime + + try: + return getattr(_runtime, key) + + except AttributeError: + raise AttributeError('module mosaic has no attribute %s' % key) + + +def clear_runtime(): + """ + Clear the global runtime. + + Returns + ------- + + """ + global _runtime + + if _runtime is not None: + mlogger.clear_logger() + + del _runtime + _runtime = None + + +def runtime(): + """ + Access the global runtime. 
+ + Returns + ------- + + """ + global _runtime + + return _runtime + + +def stop(): + """ + Stop the global runtime. + + Returns + ------- + + """ + global _runtime + + loop = _runtime.get_event_loop() + + try: + loop.run(_runtime.stop, args=(), kwargs={}, wait=True) + + finally: + loop.stop() + clear_runtime() + + +def run(main, *args, **kwargs): + """ + Initialise the runtime and then run the ``main`` in it. + + Parameters + ---------- + main : callable + Entry point for mosaic. + args : tuple, optional + Arguments to `mosaic.init`. + kwargs : optional + Keyword arguments to `mosaic.init`. + + Returns + ------- + + """ + global _runtime + + init(*args, **kwargs) + + loop = _runtime.get_event_loop() + + try: + loop.run(main, args=(_runtime,), kwargs={}, wait=True) + + finally: + stop() + + +async def interactive(switch, *args, **kwargs): + """ + Initialise the runtime interactively. + + Parameters + ---------- + switch : str + Whether to switch interactive mode ``on`` or ``off``. + args : tuple, optional + Arguments to `mosaic.init`. + kwargs : optional + Keyword arguments to `mosaic.init`. + + Returns + ------- + + """ + global _runtime + + if switch == 'on': + if _runtime is not None: + return + + fut = init(*args, **kwargs, + mode='interactive', + asyncio_loop=asyncio.get_event_loop()) + + await fut + + else: + if _runtime is None: + return + + loop = _runtime.get_event_loop() + + try: + await loop.run(_runtime.stop, args=(), kwargs={}) + + finally: + clear_runtime() diff --git a/mosaic/cli/clusters/__init__.py b/mosaic/cli/clusters/__init__.py new file mode 100644 index 00000000..bf1d6538 --- /dev/null +++ b/mosaic/cli/clusters/__init__.py @@ -0,0 +1,3 @@ + + +from . 
import sge diff --git a/mosaic/cli/clusters/sge.py b/mosaic/cli/clusters/sge.py new file mode 100644 index 00000000..e33194bc --- /dev/null +++ b/mosaic/cli/clusters/sge.py @@ -0,0 +1,113 @@ + +import os + + +__all__ = ['node_list', 'submission_script'] + + +def node_list(host_name): + """ + Attempt to find a node list for SGE clusters. + + Parameters + ---------- + host_name + + Returns + ------- + + """ + sge_nodes = os.environ.get('PE_HOSTFILE', None) + + if sge_nodes is None: + return + + sge_list = [] + with open(sge_nodes, 'r') as file: + lines = file.readlines() + + for line in lines: + line = line.split(' ') + + if line[0] != host_name: + sge_list.append(line[0]) + + return sge_list + + +def submission_script(name, num_nodes, num_workers, num_threads, node_memory): + """ + Generate a submission script for SGE clusters. + + Parameters + ---------- + name + num_nodes + num_workers + num_threads + node_memory + + Returns + ------- + str + + """ + + return f"""#!/bin/bash -l + +name={name} +num_nodes={num_nodes} +num_workers_per_node={num_workers} +num_threads_per_worker={num_threads} + +#$ -P +#$ -A + +# only allow C nodes +#$ -ac allow=C + +# wall clock time (format hours:minutes:seconds). +#$ -l h_rt=48:00:00 + +# amount of RAM per core (must be an integer) +# node_memory/(num_threads_per_worker*num_workers_per_node) +#$ -l mem={int(node_memory/(num_threads*num_workers))}G + +# set the name of the job. +#$ -N {name} + +# select the MPI parallel environment and number of cores. 
+# num_threads_per_worker*num_workers_per_node*(num_nodes+1) +#$ -pe mpi {num_threads*num_workers*(num_nodes+1)} + +# set the working directory +#$ -cwd + +# set output logs +#$ -o out.log +#$ -e err.log + +# load any modules before activating the conda env +# for example: +# module load compilers/intel/2020/release + +# activate conda environment +conda activate stride + +# set number of threads per process +# use $(ppn) to use one worker per node and as many threads per worker as cores in the node +export OMP_NUM_THREADS={num_workers*num_threads} + +# set any environment variables +# for example: +# export DEVITO_COMPILER=icc + +# run our job +ls -l +date +mrun -n $num_nodes -nw $num_workers_per_node -nth $num_threads_per_worker python forward.py &> {name}-output.log +date + +stat=$? +echo "Exit status: $stat" >> "{name}-output.log" +""" diff --git a/mosaic/cli/mrun.py b/mosaic/cli/mrun.py new file mode 100644 index 00000000..ae37d6a7 --- /dev/null +++ b/mosaic/cli/mrun.py @@ -0,0 +1,164 @@ + +import os +import click +import subprocess as cmd_subprocess + +from . import clusters +from .. 
import init, stop, runtime +from ..comms import get_hostname +from ..utils import subprocess +from ..utils.logger import _stdout, _stderr + + +@click.command() +@click.argument('cmd', required=False, nargs=-1) +# runtime type +@click.option('--head', 'runtime_type', flag_value='head', show_default=True, + help='start the head runtime') +@click.option('--monitor', 'runtime_type', flag_value='monitor', show_default=True, + help='start the monitor runtime') +@click.option('--node', 'runtime_type', flag_value='node', show_default=True, + help='start the node runtime') +@click.option('--indices', '-i', type=str, required=False, show_default=True, + help='runtime indices if any') +@click.option('--daemon/--inproc', type=bool, default=False, required=True, show_default=True, + help='whether to run as a daemon') +# network config +@click.option('--nnodes', '-n', type=int, required=False, default=1, show_default=True, + help='number of nodes to be generated') +@click.option('--nworkers', '-nw', type=int, required=False, default=1, show_default=True, + help='number of workers to be spawned') +@click.option('--nthreads', '-nth', type=int, required=False, show_default=True, + help='number of threads per worker') +# comms config +@click.option('--address', type=str, required=False, show_default=True, + help='IP address to use for the runtime') +@click.option('--port', type=int, required=False, show_default=True, + help='port to use for the runtime') +@click.option('--monitor-address', type=str, required=False, show_default=True, + help='IP address of the monitor') +@click.option('--monitor-port', type=int, required=False, show_default=True, + help='port of the monitor') +# cluster options +@click.option('--local/--cluster', '-l/-c', default=False, required=True, show_default=True, + help='whether to run mosaic locally or in a cluster system') +# log level +@click.option('--info', 'log_level', flag_value='info', default='info', show_default=True, + help='set log level to INFO') 
+@click.option('--debug', 'log_level', flag_value='debug', show_default=True, + help='set log level to DEBUG') +@click.option('--error', 'log_level', flag_value='error', show_default=True, + help='set log level to ERROR') +@click.version_option() +def go(cmd=None, **kwargs): + runtime_type = kwargs.get('runtime_type', None) + runtime_indices = kwargs.get('indices', None) + local = kwargs.get('local', False) + + if runtime_indices is not None: + runtime_indices = tuple(runtime_indices.split(':')) + + if not local: + num_nodes = kwargs.get('nnodes', 1) + else: + num_nodes = 1 + num_workers = kwargs.get('nworkers', 1) + num_threads = kwargs.get('nthreads', None) + log_level = kwargs.get('log_level', 'info') + + # If not in local mode, find the node list + node_list = None + if not local and runtime_type in [None, 'monitor']: + # sun grid engine - PE_HOSTFILE + # slurm - SLURM_JOB_NODELIST + # pbs/torque - PBS_NODEFILE + + host_name = get_hostname() + + sge_nodes = clusters.sge.node_list(host_name) + if sge_nodes is not None: + node_list = sge_nodes + num_nodes = len(node_list) + + else: + local = True + + runtime_config = { + 'runtime_indices': runtime_indices, + 'address': kwargs.get('address', None), + 'port': kwargs.get('port', None), + 'monitor_address': kwargs.get('monitor_address', None), + 'monitor_port': kwargs.get('monitor_port', None), + 'num_nodes': num_nodes, + 'num_workers': num_workers, + 'num_threads': num_threads, + 'mode': 'local' if local is True else 'cluster', + 'log_level': log_level, + 'node_list': node_list, + } + + # Initialise the runtime + if runtime_type is not None: + if kwargs.get('daemon', False): + def start_runtime(*args, **extra_kwargs): + extra_kwargs.update(runtime_config) + + init(runtime_type, **extra_kwargs, wait=True) + + runtime_subprocess = subprocess(start_runtime)(runtime_type, daemon=True) + runtime_subprocess.start_process() + + else: + init(runtime_type, **runtime_config, wait=True) + + return + + else: + init('monitor', 
**runtime_config, wait=False) + _runtime = runtime() + + # Get the initialised runtime + loop = _runtime.get_event_loop() + runtime_id = _runtime.uid + runtime_address = _runtime.address + runtime_port = _runtime.port + + # Store runtime ID, address and port in a tmp file for the + # head to use + path = os.path.join(os.getcwd(), 'mosaic-workspace') + if not os.path.exists(path): + os.makedirs(path) + + filename = os.path.join(path, 'monitor.key') + with open(filename, 'w') as file: + file.write('[ADDRESS]\n') + file.write('UID=%s\n' % runtime_id) + file.write('ADD=%s\n' % runtime_address) + file.write('PRT=%s\n' % runtime_port) + + def run_head(): + process = cmd_subprocess.run(cmd, + stdout=_stdout, + stderr=_stderr) + + _runtime.logger.info('Process ended with code: %d' % process.returncode) + + async def main(): + await loop.run_in_executor(run_head, args=(), kwargs={}) + + try: + loop.run(main, args=(), kwargs={}, wait=True) + + finally: + stop() + + try: + os.remove(filename) + os.rmdir(path) + + except Exception: + pass + + +if __name__ == '__main__': + go(auto_envvar_prefix='MOSAIC') diff --git a/mosaic/cli/mscript.py b/mosaic/cli/mscript.py new file mode 100644 index 00000000..b25df314 --- /dev/null +++ b/mosaic/cli/mscript.py @@ -0,0 +1,41 @@ + +import click + +from . 
@click.command()
@click.argument('cluster_type', required=True, nargs=1)
@click.argument('name', required=True, nargs=1)
@click.option('--nnodes', '-n', type=int, required=True, show_default=True,
              help='number of nodes to be generated')
@click.option('--nworkers', '-nw', type=int, required=True, show_default=True,
              help='number of workers to be spawned')
@click.option('--nthreads', '-nth', type=int, required=True, show_default=True,
              help='number of threads per worker')
@click.option('--memory', '-m', type=int, required=True, show_default=True,
              help='available memory per node (in GBytes)')
@click.version_option()
def go(cluster_type, name, **kwargs):
    """
    Generate a cluster submission script for the given cluster type and
    job name, writing it to ``<cluster_type>_run.sh`` in the working
    directory.
    """
    cluster_type = cluster_type.lower()

    # All four options are declared required, so the keys are present;
    # .get keeps the original defensive style.
    num_nodes = kwargs.get('nnodes', None)
    num_workers = kwargs.get('nworkers', None)
    num_threads = kwargs.get('nthreads', None)
    node_memory = kwargs.get('memory', None)

    valid_clusters = ['sge']

    if cluster_type not in valid_clusters:
        raise ValueError('Cluster type %s is not valid (%s).'
                         % (cluster_type, ', '.join(valid_clusters)))

    # Each supported cluster module exposes a submission_script factory.
    cluster_module = getattr(clusters, cluster_type)
    submission_script = cluster_module.submission_script

    run_file = submission_script(name, num_nodes, num_workers,
                                 num_threads, node_memory)

    script_name = cluster_type + '_run.sh'
    with open(script_name, 'w') as file:
        file.write(run_file)


if __name__ == '__main__':
    go(auto_envvar_prefix='MOSAIC')
def join_address(address, port, protocol='tcp'):
    """
    Format an address and port as a single endpoint string,
    e.g. ``tcp://127.0.0.1:3000``.
    """
    endpoint = '%s://%s:%d' % (protocol, address, port)
    return endpoint


def validate_address(address, port=False):
    """
    Check that ``address`` (and, when given, ``port``) describe a usable
    IPv4 endpoint, raising ``ValueError`` otherwise.
    """
    if type(address) is not str:
        raise ValueError('Address %s is not valid' % (address,))

    if port is False:
        error_msg = 'Address %s is not valid' % (address,)
    else:
        error_msg = 'Address and port combination %s:%d is not valid' % (address, port)

    try:
        try:
            socket.inet_pton(socket.AF_INET, address)
        except AttributeError:
            # Platforms without inet_pton fall back to the legacy parser.
            socket.inet_aton(address)
    except socket.error:
        raise ValueError(error_msg)

    if port is not False:
        # Only non-privileged ports are accepted.
        valid_port = type(port) is int and 1024 <= port <= 65535
        if not valid_port:
            raise ValueError(error_msg)


def get_hostname():
    """
    Return the fully-qualified domain name of the local host.
    """
    local_name = socket.gethostname()
    return socket.getfqdn(local_name)
class CMD:
    """
    Container for a CMD for the runtime.

    Parameters
    ----------
    cmd : dict
        Dictionary description of the CMD. Must contain the keys
        ``type``, ``uid``, ``method``, ``args`` and ``kwargs``.

    """

    def __init__(self, cmd):
        # A missing key means the sender built a malformed command, so a
        # KeyError is the appropriate failure mode here.
        self.type = cmd['type']
        self.uid = cmd['uid']
        self.method = cmd['method']
        self.args = cmd['args']
        self.kwargs = cmd['kwargs']


class Message:
    """
    Container for a received message from another comms.

    Parameters
    ----------
    sender_id : str
        Identity of the message sender.
    msg : dict
        Dictionary description of the message.

    """

    def __init__(self, sender_id, msg):
        self.method = msg['method']
        self.sender_id = sender_id
        self.runtime_id = msg['runtime_id']
        self.kwargs = msg['kwargs']
        self.reply = msg['reply']

        # Fix: the previous check (``if cmd is not None``) raised KeyError
        # whenever the 'cmd' key was absent, because the old ``{}`` default
        # is not None yet has none of the keys CMD requires. A missing or
        # empty cmd is now treated the same as an explicit None.
        cmd = msg.get('cmd', None)
        self.cmd = CMD(cmd) if cmd else None


class Reply(Future):
    """
    Future-like object to asynchronously wait for a comms reply.

    """
    pass
class Connection:
    """
    A single socket connection, through ZMQ, between two runtimes.

    Parameters
    ----------
    uid : str
        UID of the current runtime.
    address : str
        IP address of the connection.
    port : int
        Port to use for the connection.
    runtime : Runtime, optional
        Current runtime, defaults to global runtime.
    comms : CommsManager, optional
        Comms to which the connection belongs, defaults to global comms.
    in_node : bool, optional
        Whether the connection is inside the node or not, defaults to False.
    context : zmq.Context, optional
        ZMQ socket context, defaults to global context.
    loop : EventLoop, optional
        Event loop to use, defaults to global event loop.

    """

    def __init__(self, uid, address, port,
                 runtime=None, comms=None, in_node=False, context=None, loop=None):
        # Fall back on the process-global singletons when no explicit
        # collaborators are handed in.
        self._runtime = runtime or mosaic.runtime()
        self._comms = comms or mosaic.get_comms()
        self._loop = loop or mosaic.get_event_loop()
        self._zmq_context = context or mosaic.get_zmq_context()

        # Endpoint description.
        self._uid = uid
        self._address = address
        self._port = port
        self._in_node = in_node

        # Concrete subclasses create the actual ZMQ socket.
        self._socket = None
        self._state = 'disconnected'

    def __repr__(self):
        details = (self.__class__.__name__, id(self),
                   self.address, self.port, self.state)
        return "<%s object at %s, address=%s, port=%d, state=%s>" % details

    @property
    def uid(self):
        """
        UID of the runtime at the other end of this connection.

        """
        return self._uid

    @property
    def address(self):
        """
        IP address of the connection.

        """
        return self._address

    @property
    def port(self):
        """
        Port of the connection.

        """
        return self._port

    @property
    def socket(self):
        """
        Underlying ZMQ socket of the connection.

        """
        return self._socket

    @property
    def state(self):
        """
        Current connection state (``connected`` or ``disconnected``).

        """
        return self._state

    @property
    def connect_address(self):
        """
        Full formatted address used when connecting the socket.

        Connections within the same node always go through loopback.

        """
        target = '127.0.0.1' if self._in_node is True else self.address
        return join_address(target, self.port)

    @property
    def bind_address(self):
        """
        Full formatted address used when binding the socket.

        """
        return join_address('*', self.port)

    @property
    def logger(self):
        """
        Logger of the current runtime.

        """
        return self._runtime.logger

    def disconnect(self):
        """
        Close the socket if it is currently connected; otherwise do nothing.

        Returns
        -------

        """
        if self._state != 'connected':
            return

        self._socket.close()
        self._state = 'disconnected'
class InboundConnection(Connection):
    """
    Object encapsulating an incoming connection to the CommsManager.

    Parameters
    ----------
    uid : str
        UID of the current runtime.
    address : str
        IP address of the connection.
    port : int
        Port to use for the connection.
    runtime : Runtime, optional
        Current runtime, defaults to global runtime.
    comms : CommsManager, optional
        Comms to which the connection belongs, defaults to global comms.
    in_node : bool, optional
        Whether the connection is inside the node or not, defaults to False.
    context : zmq.Context, optional
        ZMQ socket context, defaults to global context.
    loop : EventLoop, optional
        Event loop to use, defaults to global event loop.

    """

    def __init__(self, uid, address, port=None,
                 runtime=None, comms=None, in_node=False, context=None, loop=None):
        super().__init__(uid, address, port,
                         runtime=runtime, comms=comms, in_node=in_node, context=context, loop=loop)

        # ROUTER socket: multiple DEALER peers connect here and are
        # distinguished by the identity frame ZMQ prepends to each message.
        self._socket = self._zmq_context.socket(zmq.ROUTER,
                                                copy_threshold=zmq.COPY_THRESHOLD,
                                                io_loop=self._loop.get_event_loop())

    @property
    def address(self):
        """
        Connection address.

        If no address is set, it will try to discover it.

        """
        if self._address is None:
            # Open a UDP socket towards a public resolver purely to learn
            # which local interface the OS would route through; connect() on
            # a datagram socket sends no traffic.
            address, port = '8.8.8.8', '53'
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                # This command will raise an exception if there is no internet
                # connection.
                s.connect((address, int(port)))
                self._address = s.getsockname()[0]
            except OSError as e:
                self._address = '127.0.0.1'
                # [Errno 101] Network is unreachable
                if e.errno == errno.ENETUNREACH:
                    try:
                        # try get node ip address from host name
                        host_name = get_hostname()
                        self._address = socket.gethostbyname(host_name)
                    except Exception:
                        pass
            finally:
                s.close()

        return self._address

    def connect(self):
        """
        Connect the socket.

        Returns
        -------

        """
        if self._state != 'disconnected':
            return

        if self._port is None:
            # No port requested: probe upwards from 3000 for the first port
            # not already in use on this host.
            self._port = 3000
            existing_ports = [each.laddr.port for each in psutil.net_connections()]
            while self._port in existing_ports:
                self._port += 1

        self._socket.bind(self.bind_address)

        self._state = 'connected'

    async def recv(self):
        """
        Asynchronously receive on the socket.

        Returns
        -------
        str
            Sender UID.
        Message
            Message object.

        """
        if self._state == 'disconnected':
            warnings.warn('Trying to receive in a disconnected InboundConnection "%s"' % self.uid, Warning)
            return

        multipart_msg = await self._socket.recv_multipart(copy=False)

        # Wire layout after the ROUTER identity frame:
        # [sender uid, num parts, header, body, *extra payload frames],
        # where num parts counts everything from itself onwards.
        sender_id = multipart_msg[1]
        multipart_msg = multipart_msg[2:]
        num_parts = int(multipart_msg[0])

        if len(multipart_msg) != num_parts:
            raise ValueError('Wrong number of parts')

        sender_id = str(sender_id)
        header = deserialise(multipart_msg[1], [])

        if num_parts > 3:
            compressed_msg = [multipart_msg[2], multipart_msg[3:]]
        else:
            compressed_msg = [multipart_msg[2], []]

        msg = []

        # Decompress the main body, then each extra payload frame, using the
        # per-frame compression markers recorded in the header.
        _msg = decompress(header['compression'][0], compressed_msg[0])
        msg.append(_msg)

        _msg = [decompress(compression, payload)
                for compression, payload in zip(header['compression'][1], compressed_msg[1])]
        msg.append(_msg)

        msg = deserialise(msg[0], msg[1])
        msg = Message(sender_id, msg)

        # Log and monitoring chatter is excluded from debug logging to avoid
        # generating log traffic about log traffic.
        if not msg.method.startswith('log') and not msg.method.startswith('update_monitored_node'):
            if msg.method == 'cmd':
                self.logger.debug('Received cmd %s %s from %s at %s (%s)' % (msg.method, msg.cmd.method,
                                                                             sender_id, self._runtime.uid,
                                                                             msg.cmd.uid))
            else:
                self.logger.debug('Received msg %s from %s at %s' % (msg.method, sender_id, self._runtime.uid))

        return sender_id, msg
class OutboundConnection(Connection):
    """
    Object encapsulating an outgoing connection from the CommsManager.

    Parameters
    ----------
    uid : str
        UID of the current runtime.
    address : str
        IP address of the connection.
    port : int
        Port to use for the connection.
    runtime : Runtime, optional
        Current runtime, defaults to global runtime.
    comms : CommsManager, optional
        Comms to which the connection belongs, defaults to global comms.
    in_node : bool, optional
        Whether the connection is inside the node or not, defaults to False.
    context : zmq.Context, optional
        ZMQ socket context, defaults to global context.
    loop : EventLoop, optional
        Event loop to use, defaults to global event loop.

    """

    def __init__(self, uid, address, port,
                 runtime=None, comms=None, in_node=False, context=None, loop=None):
        super().__init__(uid, address, port,
                         runtime=runtime, comms=comms, in_node=in_node, context=context, loop=loop)

        validate_address(address, port)

        # DEALER socket: pairs with the ROUTER held by the remote CommsManager.
        self._socket = self._zmq_context.socket(zmq.DEALER,
                                                copy_threshold=zmq.COPY_THRESHOLD,
                                                io_loop=self._loop.get_event_loop())

        # Heartbeat bookkeeping (only active on the monitor runtime).
        self._heartbeat_timeout = None
        self._heartbeat_attempts = 0
        self._heartbeat_max_attempts = 5
        self._heartbeat_interval = 30  # seconds

        self._shaken = False

    @property
    def shaken(self):
        """
        Whether or not the handshake has happened.

        """
        return self._shaken

    def connect(self):
        """
        Connect the socket.

        Returns
        -------

        """
        if self._state != 'disconnected':
            return

        self._socket.connect(self.connect_address)
        self.start_heartbeat()

        self._state = 'connected'

    def shake(self):
        """
        Complete the handshake.

        Returns
        -------

        """
        self._shaken = True

    def start_heartbeat(self):
        """
        Start the heartbeat procedure with the remote endpoint.

        After 5 failed heartbeat attempts, the endpoint is considered disconnected.

        The heartbeat only operates if this is the monitor runtime.

        Returns
        -------

        """
        # if not self._runtime.is_monitor or not self.uid.startswith('node'):
        if not self._runtime.is_monitor:
            return

        if self._heartbeat_timeout is not None:
            self._heartbeat_timeout.cancel()

        # max_attempts + 1 so that the first decrement in heart() still
        # leaves the full number of retries.
        self._heartbeat_attempts = self._heartbeat_max_attempts + 1

        self._heartbeat_timeout = self._loop.timeout(self.heart, timeout=self._heartbeat_interval)

    def stop_heartbeat(self):
        """
        Stop the heartbeat.

        Returns
        -------

        """
        if self._heartbeat_timeout is not None:
            self._heartbeat_timeout.cancel()
            self._heartbeat_timeout = None

    async def heart(self):
        """
        Send heart signal

        Returns
        -------

        """
        self._heartbeat_attempts -= 1

        if self._heartbeat_attempts == 0:
            # Retries exhausted: consider the endpoint dead and tear it down.
            await self._comms.disconnect(self.uid, self.uid, notify=True)
            await self._loop.run(self._runtime.disconnect, args=(self.uid, self.uid))
            return

        # NOTE(review): the interval grows as attempts run out
        # (30s, 37.5s, 50s, 75s, 150s) — confirm this backoff direction
        # is intended rather than a shrinking retry interval.
        interval = self._heartbeat_interval * self._heartbeat_max_attempts/self._heartbeat_attempts
        self._heartbeat_timeout = self._loop.timeout(self.heart, timeout=interval)

        await self.send(method='heart')

    async def beat(self):
        """
        Process beat signal

        Returns
        -------

        """
        # A beat from the peer resets the retry budget and restarts the timer.
        self._heartbeat_attempts = self._heartbeat_max_attempts + 1

        self.stop_heartbeat()
        self.start_heartbeat()

    async def send(self, method, cmd=None, reply=False, **kwargs):
        """
        Send message through the connection.

        Parameters
        ----------
        method : str
            Remote method.
        cmd : dict, optional
            If the method is ``cmd`` a description of the command has to be provided.
        reply : bool, optional
            Whether the connection should wait for a reply, defaults to False.
        kwargs : optional
            Keyword arguments for the remote method.

        Returns
        -------
        Reply or None
            Depending on whether a reply is expected or not.

        """
        if self._state == 'disconnected':
            warnings.warn('Trying to send in a disconnected OutboundConnection "%s"' % self.uid, Warning)
            return

        if reply is True:
            # Replace the boolean with the UID of a registered Reply future
            # so the remote side can address its response.
            reply_future = Reply(name=method)
            self._comms.register_reply_future(reply_future)
            reply = reply_future.uid

        else:
            reply_future = None

        # NOTE(review): 'runtime_id' carries the destination UID here
        # (self.uid is the remote runtime) — confirm that is the intended
        # wire semantics.
        msg = {
            'method': method,
            'runtime_id': self.uid,
            'kwargs': kwargs,
            'reply': reply,
            'cmd': cmd,
        }

        if not method.startswith('log') and not method.startswith('update_monitored_node'):
            if method == 'cmd':
                self.logger.debug('Sending cmd %s %s to %s (%s) from %s' % (method, cmd['method'],
                                                                            self.uid, cmd['uid'],
                                                                            self._runtime.uid))
            else:
                self.logger.debug('Sending msg %s to %s from %s' % (method, self.uid,
                                                                    self._runtime.uid))

        msg = serialise(msg)
        msg_size = sizeof(msg)

        # Compress the body and each extra payload frame independently,
        # recording the per-frame compression markers for the header.
        compression = []
        compressed_msg = []

        _compression, _compressed_msg = maybe_compress(msg[0])
        compression.append(_compression)
        compressed_msg.append(_compressed_msg)

        if len(msg[1]) > 0:
            _compression, _compressed_msg = zip(*map(maybe_compress, msg[1]))
            compression.append(_compression)
            compressed_msg.append(_compressed_msg)

        else:
            compression.append([])
            compressed_msg.append([])

        header = {
            'version': _protocol_version,
            'compression': compression,
        }

        header = serialise(header)[0]

        # Wire layout (see InboundConnection.recv):
        # [sender uid, num parts, header, body, *extra payload frames].
        multipart_msg = [self._runtime.uid.encode()]
        multipart_msg += [str(3 + len(compressed_msg[1])).encode()]
        multipart_msg += [header]
        multipart_msg += [compressed_msg[0]]
        multipart_msg += compressed_msg[1]

        # Small messages are copied; large ones are sent zero-copy.
        await self._socket.send_multipart(multipart_msg, copy=msg_size < zmq.COPY_THRESHOLD)

        return reply_future
class CircularConnection(Connection):
    """
    Object encapsulating a circular connection to itself.

    Parameters
    ----------
    uid : str
        UID of the current runtime.
    address : str
        IP address of the connection.
    port : int
        Port to use for the connection.
    runtime : Runtime, optional
        Current runtime, defaults to global runtime.
    comms : CommsManager, optional
        Comms to which the connection belongs, defaults to global comms.
    in_node : bool, optional
        Whether the connection is inside the node or not, defaults to False.
    context : zmq.Context, optional
        ZMQ socket context, defaults to global context.
    loop : EventLoop, optional
        Event loop to use, defaults to global event loop.

    """

    def __init__(self, uid, address, port,
                 runtime=None, comms=None, in_node=False, context=None, loop=None):
        super().__init__(uid, address, port,
                         runtime=runtime, comms=comms, in_node=in_node, context=context, loop=loop)

        # No real socket: self-addressed messages are handed straight back
        # to the local comms, so the connection is always 'connected'.
        self._socket = None
        self._state = 'connected'
        self._shaken = True

    def connect(self):
        """
        Connect the socket.

        Returns
        -------

        """
        # Nothing to do: there is no underlying socket.
        return

    async def send(self, method, cmd=None, reply=False, **kwargs):
        """
        Send message through the connection.

        Parameters
        ----------
        method : str
            Remote method.
        cmd : dict, optional
            If the method is ``cmd`` a description of the command has to be provided.
        reply : bool, optional
            Whether the connection should wait for a reply, defaults to False.
        kwargs : optional
            Keyword arguments for the remote method.

        Returns
        -------
        Reply or None
            Depending on whether a reply is expected or not.

        """
        if self._state == 'disconnected':
            # NOTE(review): the warning text says OutboundConnection — looks
            # like a copy-paste from OutboundConnection.send; confirm before
            # changing the runtime string.
            warnings.warn('Trying to send in a disconnected OutboundConnection "%s"' % self.uid, Warning)
            return

        if reply is True:
            reply_future = Reply(name=method)
            self._comms.register_reply_future(reply_future)
            reply = reply_future.uid

        else:
            reply_future = None

        msg = {
            'method': method,
            'runtime_id': self.uid,
            'kwargs': kwargs,
            'reply': reply,
            'cmd': cmd,
        }

        if not method.startswith('log'):
            if method == 'cmd':
                self.logger.debug('Sending cmd %s %s to %s (%s) from %s' % (method, cmd['method'],
                                                                            self.uid, cmd['uid'], self._runtime.uid))
            else:
                self.logger.debug('Sending msg %s to %s from %s' % (method, self.uid, self._runtime.uid))

        # Serialisation and compression are skipped entirely: the dict is
        # wrapped in a Message and processed by the local comms directly.
        msg = Message(self._runtime.uid, msg)

        if not msg.method.startswith('log'):
            if msg.method == 'cmd':
                self.logger.debug('Received cmd %s %s from %s at %s (%s)' % (msg.method, msg.cmd.method,
                                                                             self._runtime.uid, self._runtime.uid,
                                                                             msg.cmd.uid))
            else:
                self.logger.debug('Received msg %s from %s at %s' % (msg.method, self._runtime.uid, self._runtime.uid))

        await self._comms.process_msg(self._runtime.uid, msg)

        return reply_future
class CommsManager:
    """
    Objects of this type manage the connections and message passing between different
    runtimes.

    Parameters
    ----------
    runtime : Runtime, optional
        Current runtime, defaults to global runtime.
    address : str
        IP address of the connection.
    port : int
        Port to use for the connection.
    context : zmq.Context, optional
        ZMQ socket context, defaults to global context.
    loop : EventLoop, optional
        Event loop to use, defaults to global event loop.

    """

    # Methods handled by the comms itself rather than forwarded to the runtime.
    _comms_methods = ['hand', 'shake', 'heart', 'beat', 'stop', 'connect', 'disconnect', 'reply']

    def __init__(self, runtime=None, address=None, port=None, context=None, loop=None):
        self._runtime = runtime or mosaic.runtime()
        self._loop = loop or mosaic.get_event_loop()
        self._zmq_context = context or mosaic.get_zmq_context()

        # Single inbound ROUTER socket; our runtime UID doubles as the
        # ZMQ identity so peers can address us.
        self._recv_socket = InboundConnection(self._runtime.uid, address, port,
                                              runtime=self._runtime,
                                              comms=self,
                                              in_node=False,
                                              context=self._zmq_context,
                                              loop=self._loop)
        self._recv_socket.socket.setsockopt(zmq.IDENTITY, self._runtime.uid.encode())
        self._recv_socket.socket.setsockopt(zmq.RCVHWM, 0)

        # One outbound connection per connected remote runtime, keyed by UID,
        # plus a loop-back connection for messages addressed to ourselves.
        self._send_socket = dict()
        self._circ_socket = CircularConnection(self._runtime.uid, self.address, self.port,
                                               runtime=self._runtime,
                                               comms=self,
                                               in_node=False,
                                               context=self._zmq_context,
                                               loop=self._loop)

        self._listen_future = None
        # Fix: a weakref.WeakValueDictionary was assigned here and then
        # immediately overwritten by this plain dict; the dead assignment has
        # been removed. A plain dict keeps pending Reply futures alive until
        # they are resolved.
        self._reply_futures = dict()

        self._state = 'disconnected'

    def __repr__(self):
        # Fix: the format string had been lost (it read ``"" % ...``, which
        # raises TypeError); reconstructed following Connection.__repr__.
        return "<CommsManager object at %s, uid=%s, address=%s, port=%s, state=%s>" % \
               (id(self), self._runtime.uid, self._recv_socket.address, self._recv_socket.port, self._state)

    def __await__(self):
        if self._listen_future is None:
            raise RuntimeError('Cannot wait for comms that has not started listening')

        future = self._loop.wrap_future(self._listen_future)
        return (yield from future.__await__())

    def wait(self):
        """
        Wait until the listening loop of the comms is done.

        Returns
        -------

        """
        if self._listen_future is None:
            raise RuntimeError('Cannot wait for comms that has not started listening')

        try:
            self._listen_future.result()

        except CancelledError:
            # Cancellation of the listening loop is a normal shutdown path.
            pass

    @property
    def address(self):
        """
        Connection address.

        """
        return self._recv_socket.address

    @property
    def port(self):
        """
        Connection port.

        """
        return self._recv_socket.port

    @property
    def logger(self):
        """
        Runtime logger.

        """
        return self._runtime.logger
    def uid_address(self, uid):
        """
        Find remote address given UID.

        Parameters
        ----------
        uid : str
            Remote UID.

        Returns
        -------
        str
            Address.

        """
        return self._send_socket[uid].address

    def uid_port(self, uid):
        """
        Find remote port given UID.

        Parameters
        ----------
        uid : str
            Remote UID.

        Returns
        -------
        int
            Port.

        """
        return self._send_socket[uid].port

    def connect_recv(self):
        """
        Connect inbound connection.

        Returns
        -------

        """
        if self._state != 'disconnected':
            return

        self._recv_socket.connect()
        self._circ_socket.connect()

        self._state = 'connected'

    def connect_send(self, uid, address, port):
        """
        Create and connect outbound connection for a remote runtime,
        with a given address and port.

        Parameters
        ----------
        uid : str
            Remote UID.
        address : str
            Remote address.
        port : int
            Remote port.

        Returns
        -------

        """
        validate_address(address, port)

        # Never open an outbound socket to ourselves (the circular connection
        # covers that) and never connect the same remote twice.
        if uid not in self._send_socket.keys() and uid != self._runtime.uid:
            self._send_socket[uid] = OutboundConnection(uid, address, port,
                                                        runtime=self._runtime,
                                                        comms=self,
                                                        in_node=False,
                                                        context=self._zmq_context,
                                                        loop=self._loop)
            self._send_socket[uid].socket.setsockopt(zmq.IDENTITY, self._runtime.uid.encode())
            self._send_socket[uid].socket.setsockopt(zmq.SNDHWM, 0)
            self._send_socket[uid].connect()

    def connected(self, uid):
        """
        Check whether remote UID is connected.

        Parameters
        ----------
        uid : str
            Remote UID.

        Returns
        -------

        """
        # A runtime is always considered connected to itself.
        return uid in self._send_socket.keys() or uid == self._runtime.uid

    def shaken(self, uid):
        """
        Check whether remote UID has completed handshake.

        Parameters
        ----------
        uid : str
            Remote UID.

        Returns
        -------

        """
        return self.connected(uid) and self._send_socket[uid].shaken

    def disconnect_recv(self):
        """
        Disconnect inbound connection.

        Returns
        -------

        """
        self._recv_socket.socket.close()

    def disconnect_send(self):
        """
        Disconnect all outbound connections.

        Returns
        -------

        """
        for sender_id, connection in self._send_socket.items():
            connection.socket.close()

    def send(self, *args, **kwargs):
        """
        Synchronously send message to remote runtime.

        For arguments and return values check ``Comms.send_async``.

        """
        wait = kwargs.pop('wait', True)

        return self._loop.run(self.send_async, args=args, kwargs=kwargs, wait=wait)

    def cmd(self, *args, **kwargs):
        """
        Synchronously send command to remote runtime.

        For arguments and return values check ``Comms.cmd_async``.

        """
        wait = kwargs.pop('wait', True)

        return self._loop.run(self.cmd_async, args=args, kwargs=kwargs, wait=wait)

    def recv(self, **kwargs):
        """
        Synchronously receive message from remote runtime.

        For arguments and return values check ``Comms.recv_async``.

        """
        wait = kwargs.pop('wait', True)

        return self._loop.run(self.recv_async, wait=wait)

    def send_recv(self, *args, **kwargs):
        """
        Synchronously send message to remote runtime and wait for reply.

        For arguments and return values check ``Comms.send_async``.

        """
        wait = kwargs.pop('wait', True)
        kwargs['reply'] = True

        # The send itself always completes synchronously; ``wait`` only
        # controls whether we block on the reply future.
        future = self._loop.run(self.send_async, args=args, kwargs=kwargs,
                                wait=True)

        if wait is True:
            return future.result()

        else:
            return future

    def cmd_recv(self, *args, **kwargs):
        """
        Synchronously send command to remote runtime and wait for reply.

        For arguments and return values check ``Comms.cmd_async``.

        """
        wait = kwargs.pop('wait', True)
        kwargs['reply'] = True

        future = self._loop.run(self.cmd_async, args=args, kwargs=kwargs,
                                wait=True)

        if wait is True:
            return future.result()

        else:
            return future
    def reply(self, sender_id, uid, result):
        """
        Process reply from remote runtime.

        Parameters
        ----------
        sender_id : str
            UID of the remote endpoint.
        uid : str
            UID of the associated Reply.
        result : object
            Result of the reply.

        Returns
        -------

        """
        # Replies for unknown (e.g. already-dropped) futures are ignored.
        if uid not in self._reply_futures.keys():
            return

        # NOTE(review): resolved futures are never removed from
        # _reply_futures — looks like they accumulate for the lifetime of
        # the comms; confirm whether cleanup happens elsewhere.
        self._reply_futures[uid].set_result(result)

    def register_reply_future(self, future):
        """
        Register a Reply to be accessible later on.

        Parameters
        ----------
        future : Reply

        Returns
        -------

        """
        self._reply_futures[future.uid] = future

    def listen(self):
        """
        Start the listening loop.

        Returns
        -------
        concurrent.futures.Future
            Future associated with the running loop.

        """
        if self._state != 'connected':
            return

        def done(fut):
            # Surface any exception from the listening loop instead of
            # letting it be silently swallowed by the future.
            try:
                exception = fut.exception()

            except Exception:
                return

            if exception is not None:
                raise exception

        self._listen_future = self._loop.run(self.listen_async)
        self._listen_future.add_done_callback(done)

        return self._listen_future

    async def listen_async(self):
        """
        Asynchronous listening loop.

        The loop waits on messages from the incoming connection, then
        processes them and, if necessary, passes them to the runtime.

        Returns
        -------

        """
        if self._state != 'connected':
            return

        self._state = 'listening'

        self.logger.info('Listening at %s' % self)

        while self._state != 'disconnected':
            sender_id, msg = await self.recv_async()
            await self.process_msg(sender_id, msg)

            # A 'stop' message ends the loop after being processed.
            if msg.method == 'stop':
                break

    async def process_msg(self, sender_id, msg):
        """
        Process a received message to decide what to do with it.

        Parameters
        ----------
        sender_id : str
            UID of the remote endpoint.
        msg : Message
            Message object.

        Returns
        -------

        """
        if self._state == 'disconnected':
            return

        # A message may target a runtime method, a comms method, or both.
        runtime = self._runtime
        method = getattr(runtime, msg.method, False)
        comms_method = getattr(self, msg.method, False)

        # Any traffic from a peer counts as a heartbeat for that peer.
        await self.beat(sender_id)

        # Exception-propagating and stop messages run unguarded; everything
        # else runs in the safe wrapper that reports errors to the sender.
        if msg.method.startswith('raise') or msg.method.startswith('stop'):
            call = self.call
        else:
            call = self.call_safe

        async with self.send_exception(sender_id):
            if msg.method not in self._comms_methods:
                if method is False:
                    raise AttributeError('Class %s does not have method %s' % (runtime.__class__.__name__,
                                                                               msg.method))

                if not callable(method):
                    raise ValueError('Method %s of class %s is not callable' % (msg.method,
                                                                                runtime.__class__.__name__))

            if method is not False:
                if msg.cmd is not None:
                    msg.kwargs['cmd'] = msg.cmd

                future = self._loop.run(call,
                                        args=(sender_id, method, msg.reply),
                                        kwargs=msg.kwargs)

                # When the comms also handles this method, wait for the
                # runtime side to finish first.
                if comms_method is not False:
                    await future

            if comms_method is not False and msg.method in self._comms_methods:
                self._loop.run(call,
                               args=(sender_id, comms_method, False),
                               kwargs=msg.kwargs)

    async def call(self, sender_id, method, reply, **kwargs):
        """
        Run method in the loop.

        Parameters
        ----------
        sender_id : str
            UID of the remote endpoint.
        method : callable
            Method to execute
        reply : False or str
            Whether a reply is needed and, if so, the UID of the reply.
        kwargs : optional
            Keyword arguments for the method.

        Returns
        -------

        """
        if self._state == 'disconnected':
            return

        args = (sender_id,)

        await self._loop.run(method, args=args, kwargs=kwargs)
    async def call_safe(self, sender_id, method, reply, **kwargs):
        """
        Run method in the loop, and within an exception handler that
        will process exceptions and send them back to the sender.

        Parameters
        ----------
        sender_id : str
            UID of the remote endpoint.
        method : callable
            Method to execute
        reply : False or str
            Whether a reply is needed and, if so, the UID of the reply.
        kwargs : optional
            Keyword arguments for the method.

        Returns
        -------

        """
        if self._state == 'disconnected':
            return

        args = (sender_id,)

        async with self.send_exception(sender_id):
            future = self._loop.run(method, args=args, kwargs=kwargs)

            if future is None:
                return

            result = await future

            # Only successful calls produce a reply; failures are forwarded
            # to the sender as raise_exception by send_exception instead.
            if reply is not False:
                await self.send_async(sender_id,
                                      method='reply',
                                      uid=reply, result=result)

    async def send_async(self, send_uid, *args, **kwargs):
        """
        Send message to ``sender_id`` with given arguments and keyword arguments.

        Parameters
        ----------
        send_uid : str
            UID of the remote runtime.
        args : tuple, optional
            Any arguments for the message.
        kwargs : optional
            Keyword arguments for the method.

        Returns
        -------
        Reply or None
            Depending on whether a reply is expected or not.

        """
        # Self-addressed messages short-circuit through the circular
        # connection without touching the network.
        if send_uid == self._runtime.uid:
            return await self._circ_socket.send(*args, **kwargs)

        if send_uid not in self._send_socket.keys():
            raise KeyError('Endpoint %s is not connected' % send_uid)

        if self._state == 'disconnected':
            return

        return await self._send_socket[send_uid].send(*args, **kwargs)

    async def cmd_async(self, *args, **kwargs):
        """
        Send command with given arguments and keyword arguments.

        Parameters
        ----------
        args : tuple, optional
            Any arguments for the message.
        kwargs : optional
            Keyword arguments for the method.

        Returns
        -------
        Reply or None
            Depending on whether a reply is expected or not.

        """
        # The command description is popped out of kwargs; the remaining
        # keywords travel as message keywords.
        cmd = {
            'type': kwargs.pop('type'),
            'uid': kwargs.pop('uid'),
            'method': kwargs.pop('method'),
            'args': kwargs.pop('args', ()),
            'kwargs': kwargs.pop('kwargs', {}),
        }

        return await self.send_async(*args, method='cmd', cmd=cmd, **kwargs)

    async def recv_async(self):
        """
        Wait for received message from the inbound socket.

        Returns
        -------
        str
            Sender UID.
        Message
            Received message.

        """
        if self._state == 'disconnected':
            return None, None

        sender_id, msg = await self._recv_socket.recv()

        return sender_id, msg

    async def send_recv_async(self, send_uid, *args, **kwargs):
        """
        Send message to ``sender_id`` with given arguments and keyword arguments,
        and then wait for the reply.

        Parameters
        ----------
        send_uid : str
            UID of the remote runtime.
        args : tuple, optional
            Any arguments for the message.
        kwargs : optional
            Keyword arguments for the method.

        Returns
        -------
        object
            Result of the reply

        """
        if self._state == 'disconnected':
            return

        if send_uid == self._runtime.uid:
            future = await self._circ_socket.send(*args, reply=True, **kwargs)

        else:
            if send_uid not in self._send_socket.keys():
                raise KeyError('Endpoint %s is not connected' % send_uid)

            future = await self._send_socket[send_uid].send(*args, reply=True, **kwargs)

        return await future

    async def cmd_recv_async(self, *args, **kwargs):
        """
        Send command with given arguments and keyword arguments,
        and then wait for the reply.

        Parameters
        ----------
        args : tuple, optional
            Any arguments for the message.
        kwargs : optional
            Keyword arguments for the method.

        Returns
        -------
        object
            Result of the reply

        """
        cmd = {
            'type': kwargs.pop('type'),
            'uid': kwargs.pop('uid'),
            'method': kwargs.pop('method'),
            'args': kwargs.pop('args', ()),
            'kwargs': kwargs.pop('kwargs', {}),
        }

        future = await self.send_recv_async(*args, method='cmd', cmd=cmd, **kwargs)

        return future
+ + Returns + ------- + + """ + try: + yield + + except Exception: + et, ev, tb = sys.exc_info() + tb = tblib.Traceback(tb) + + await self.send_async(uid, + method='raise_exception', + exc=(et, ev, tb)) + + finally: + pass + + async def connect(self, sender_id, uid, address, port, notify=False): + """ + Create and connect outbound connection for a remote runtime, + with a given address and port. + + Parameters + ---------- + uid : str + Remote UID. + address : str + Remote address. + port : int + Remote port. + notify : bool, optional + Whether or not to notify others of a new connection, defaults to False. + + Returns + ------- + + """ + if self._state == 'disconnected': + return + + self.connect_send(uid, address, port) + + if notify is True: + for connected_id, connection in self._send_socket.items(): + await self.send_async(connected_id, + method='connect', + uid=uid, address=address, port=port) + + async def wait_for(self, uid): + """ + Wait until remote endpoint has connected. + + Parameters + ---------- + uid : str + Remote UID. + + Returns + ------- + + """ + if self._state == 'disconnected': + return + + while uid not in self._send_socket.keys() and uid != self._runtime.uid: + await asyncio.sleep(0.1) + + async def disconnect(self, sender_id, uid, notify=False): + """ + Disconnect a remote endpoint. + + Parameters + ---------- + sender_id : str + Sender UID. + uid : str + Remote UID to disconnect. + notify : bool, optional + Whether or not to notify others of the disconnection, defaults to False. + + Returns + ------- + + """ + if self._state == 'disconnected': + return + + if uid in self._send_socket.keys(): + self._send_socket[uid].disconnect() + + if notify is True: + for connected_id, connection in self._send_socket.items(): + await self.send_async(connected_id, + method='disconnect', + uid=uid) + + async def handshake(self, uid, address, port): + """ + Start handshake with remote ``uid``, located at a certain ``address`` and ``port``. 
+ + Parameters + ---------- + uid : str + Remote UID. + address : str + Remote address. + port : int + Remote port. + + Returns + ------- + + """ + if self._state == 'disconnected': + return + + validate_address(address, port) + + self.connect_send(uid, address, port) + self._runtime.connect(uid, uid, address, port) + + await self.send_async(uid, + method='hand', + address=self._recv_socket.address, port=self._recv_socket.port) + + while True: + sender_id, response = await self.recv_async() + + if uid == sender_id and response.method == 'shake': + break + + await self.shake(sender_id, **response.kwargs) + await self._loop.run(self._runtime.shake, args=(sender_id,), kwargs=response.kwargs) + + self._send_socket[uid].shake() + + async def hand(self, sender_id, address, port): + """ + Handle incoming handshake. + + Parameters + ---------- + sender_id : str + Remote UID. + address : str + Remote address. + port : int + Remote port. + + Returns + ------- + + """ + if self._state == 'disconnected': + return + + for connected_id, connection in self._send_socket.items(): + await self.send_async(connected_id, + method='connect', + uid=sender_id, address=address, port=port) + + self.connect_send(sender_id, address, port) + + network = {} + for connected_id, connection in self._send_socket.items(): + network[connected_id] = (connection.address, connection.port) + + await self.send_async(sender_id, + method='shake', + network=network) + + async def shake(self, sender_id, network): + """ + Handle confirmation of complete handshake. + + Parameters + ---------- + sender_id : str + Remote UID. + network : dict + Existing topology of connected sockets. + + Returns + ------- + + """ + if self._state == 'disconnected': + return + + for uid, address in network.items(): + self.connect_send(uid, *address) + + if uid in self._send_socket: + self._send_socket[uid].shake() + + async def heart(self, sender_id): + """ + Received ``heart`` message, respond with ``beat``. 
+
+        Parameters
+        ----------
+        sender_id : str
+            Remote UID.
+
+        Returns
+        -------
+
+        """
+        if self._state == 'disconnected':
+            return
+
+        await self.send_async(sender_id,
+                              method='beat')
+
+    async def beat(self, sender_id):
+        """
+        Received ``beat`` message, the remote endpoint is alive.
+
+        Parameters
+        ----------
+        sender_id : str
+            Remote UID.
+
+        Returns
+        -------
+
+        """
+        if self._state == 'disconnected':
+            return
+
+        if sender_id not in self._send_socket.keys():
+            return
+
+        await self._send_socket[sender_id].beat()
+
+    async def stop(self, sender_id):
+        """
+        Stop the CommsManager.
+
+        Parameters
+        ----------
+        sender_id : str
+            Remote UID.
+
+        Returns
+        -------
+
+        """
+        if self._state == 'disconnected':
+            return
+
+        self._listen_future.cancel()
+
+        self.disconnect_send()
+        self.disconnect_recv()
+        self._zmq_context.term()
+
+        self._state = 'disconnected'
diff --git a/mosaic/comms/compression.py b/mosaic/comms/compression.py
new file mode 100644
index 00000000..f9c1b989
--- /dev/null
+++ b/mosaic/comms/compression.py
@@ -0,0 +1,266 @@
+
+import random
+import numpy as np
+import functools
+import contextlib
+import pickle5
+
+
+__all__ = ['maybe_compress', 'decompress']
+
+
+try:
+    import blosc
+
+    n = blosc.set_nthreads(6)
+    # NOTE(review): was `hasattr('blosc', 'releasegil')`, which checks the
+    # *string* 'blosc' and is therefore always False — the GIL release was
+    # never enabled. Check the module for the attribute actually called.
+    if hasattr(blosc, 'set_releasegil'):
+        blosc.set_releasegil(True)
+except ImportError:
+    blosc = False
+
+
+def identity(data):
+    return data
+
+
+compression_methods = {None: {'compress': identity, 'decompress': identity}}
+compression_methods[False] = compression_methods[None]  # alias
+
+default_compression = None
+
+
+with contextlib.suppress(ImportError):
+    import zlib
+
+    compression_methods['zlib'] = {'compress': zlib.compress, 'decompress': zlib.decompress}
+
+with contextlib.suppress(ImportError):
+    import snappy
+
+    def _fixed_snappy_decompress(data):
+        # snappy.decompress() doesn't accept memoryviews
+        if isinstance(data, (memoryview, bytearray)):
+            data = bytes(data)
+        return snappy.decompress(data)
+
+
compression_methods['snappy'] = { + 'compress': snappy.compress, + 'decompress': _fixed_snappy_decompress, + } + default_compression = 'snappy' + +with contextlib.suppress(ImportError): + import lz4 + + try: + # try using the new lz4 API + import lz4.block + + lz4_compress = lz4.block.compress + lz4_decompress = lz4.block.decompress + except ImportError: + # fall back to old one + lz4_compress = lz4.LZ4_compress + lz4_decompress = lz4.LZ4_uncompress + + # helper to bypass missing memoryview support in current lz4 + # (fixed in later versions) + + def _fixed_lz4_compress(data): + try: + return lz4_compress(data) + except TypeError: + if isinstance(data, (memoryview, bytearray)): + return lz4_compress(bytes(data)) + else: + raise + + def _fixed_lz4_decompress(data): + try: + return lz4_decompress(data) + except (ValueError, TypeError): + if isinstance(data, (memoryview, bytearray)): + return lz4_decompress(bytes(data)) + else: + raise + + compression_methods['lz4'] = { + 'compress': _fixed_lz4_compress, + 'decompress': _fixed_lz4_decompress, + } + default_compression = 'lz4' + + +with contextlib.suppress(ImportError): + import zstandard + + zstd_compressor = zstandard.ZstdCompressor( + level=22, + threads=6, + ) + + zstd_decompressor = zstandard.ZstdDecompressor() + + def zstd_compress(data): + return zstd_compressor.compress(data) + + def zstd_decompress(data): + return zstd_decompressor.decompress(data) + + compression_methods['zstd'] = { + 'compress': zstd_compress, + 'decompress': zstd_decompress + } + default_compression = 'zstd' + + +with contextlib.suppress(ImportError): + import blosc + + compression_methods['blosc'] = { + 'compress': functools.partial(blosc.compress, clevel=5, cname='lz4'), + 'decompress': blosc.decompress, + } + default_compression = 'blosc' + + +user_compression = 'auto' +if user_compression != 'auto': + if user_compression in compression_methods: + default_compression = user_compression + else: + raise ValueError( + 'Default compression 
"%s" not found.\n' + 'Choices include auto, %s' + % (user_compression, ', '.join(sorted(map(str, compression_methods)))) + ) + + +def ensure_bytes(s): + """ + Attempt to turn `s` into bytes. + + Parameters + ---------- + s : Any + The object to be converted. Will correctly handled + * str + * bytes + * objects implementing the buffer protocol (memoryview, ndarray, etc.) + + Returns + ------- + b : bytes + + Raises + ------ + TypeError + When `s` cannot be converted + + Examples + -------- + >>> ensure_bytes('123') + b'123' + >>> ensure_bytes(b'123') + b'123' + """ + if isinstance(s, bytes): + return s + elif hasattr(s, 'encode'): + return s.encode() + else: + try: + return bytes(s) + except Exception as e: + raise TypeError('Object %s is neither a bytes object nor has an encode method' % s) from e + + +def byte_sample(b, size, n): + """ + Sample a bytestring from many locations + + Parameters + ---------- + b : bytes or memoryview + size : int + size of each sample to collect + n : int + number of samples to collect + """ + + if type(b) is memoryview: + b = memoryview(np.asarray(b).ravel()) + + starts = [random.randint(0, len(b) - size) for j in range(n)] + ends = [] + for i, start in enumerate(starts[:-1]): + ends.append(min(start + size, starts[i + 1])) + ends.append(starts[-1] + size) + + parts = [b[start:end] for start, end in zip(starts, ends)] + return b''.join(map(ensure_bytes, parts)) + + +def maybe_compress(payload, min_size=1e4, sample_size=1e4, nsamples=5): + """ + Maybe compress payload: + + 1. We don't compress small messages + 2. We sample the payload in a few spots, compress that, and if it doesn't + do any good we return the original + 3. We then compress the full original, it it doesn't compress well then we + return the original + 4. 
We return the compressed result + + """ + + if isinstance(payload, pickle5.PickleBuffer): + payload = memoryview(payload) + + if type(payload) is memoryview: + nbytes = payload.nbytes + else: + nbytes = len(payload) + + if not default_compression: + return None, payload + if nbytes < min_size: + return None, payload + if nbytes > 2 ** 31: # Too large, compression libraries often fail + return None, payload + + min_size = int(min_size) + sample_size = int(sample_size) + + compression = default_compression + compress = compression_methods[default_compression]['compress'] + + # Compress a sample, return original if not very compressed, but not for memoryviews + if type(payload) is not memoryview: + sample = byte_sample(payload, sample_size, nsamples) + if len(compress(sample)) > 0.9 * len(sample): # sample not very compressible + return None, payload + + if default_compression and blosc and type(payload) is memoryview: + # Blosc does itemsize-aware shuffling, resulting in better compression + compressed = blosc.compress(payload, + typesize=payload.itemsize, + cname='lz4', + clevel=5) + compression = 'blosc' + else: + compressed = compress(ensure_bytes(payload)) + + if len(compressed) > 0.9 * nbytes: # full data not very compressible + return None, payload + else: + return compression, compressed + + +def decompress(compression, payload): + """ + Decompress payload according to information in the header + + """ + + return compression_methods[compression]['decompress'](payload) diff --git a/mosaic/comms/serialisation.py b/mosaic/comms/serialisation.py new file mode 100644 index 00000000..606d3e88 --- /dev/null +++ b/mosaic/comms/serialisation.py @@ -0,0 +1,59 @@ + +import pickle5 +import cloudpickle + + +__all__ = ['serialise', 'deserialise'] + + +def pickle5_dumps(data): + out_band = [] + in_band = pickle5.dumps(data, protocol=5, buffer_callback=out_band.append) + return in_band, out_band + + +def pickle5_loads(in_band, out_band): + out_band = [bytearray(each) for each 
in out_band] + return pickle5.loads(in_band, buffers=out_band) + + +def serialise(data): + """ + Serialise ``data`` using Pickle protocol 5 as a default and, failing that, + resort to cloudpickle. + + Parameters + ---------- + data : object + + Returns + ------- + bytes + Pickled object, in-band. + list + List of zero-copy buffers, out-of-band. + + """ + try: + return pickle5_dumps(data) + except pickle5.PicklingError: + return cloudpickle.dumps(data), [] + + +def deserialise(in_band, out_band): + """ + Deserialise using Pickle protocol 5. + + Parameters + ---------- + in_band : bytes + Pickled object. + out_band : list + List of buffers. + + Returns + ------- + deserialised object + + """ + return pickle5_loads(in_band, out_band) diff --git a/mosaic/core/__init__.py b/mosaic/core/__init__.py new file mode 100644 index 00000000..747e3b8c --- /dev/null +++ b/mosaic/core/__init__.py @@ -0,0 +1,3 @@ + +from .task import * +from .tessera import * diff --git a/mosaic/core/base.py b/mosaic/core/base.py new file mode 100644 index 00000000..cf240dca --- /dev/null +++ b/mosaic/core/base.py @@ -0,0 +1,495 @@ + +import datetime + +import mosaic + + +__all__ = ['RemoteBase', 'ProxyBase', 'MonitoredBase'] + + +class Base: + + @property + def runtime(self): + return mosaic.runtime() + + @property + def comms(self): + return mosaic.get_comms() + + @property + def zmq_context(self): + return mosaic.get_zmq_context() + + @property + def loop(self): + return mosaic.get_event_loop() + + @property + def head(self): + return mosaic.get_head() + + @property + def monitor(self): + return mosaic.get_monitor() + + @property + def node(self): + return mosaic.get_node() + + @property + def worker(self): + return mosaic.get_worker() + + @property + def logger(self): + return self.runtime.logger + + +class CMDBase(Base): + """ + Base class for objects that accept remote commands, such as tesserae and tasks, and their proxies. 
+ + """ + + type = 'none' + is_proxy = False + is_remote = False + + def __init__(self, *args, **kwargs): + super().__init__() + + self._uid = None + self._state = '' + + # CMD specific config + self.retries = 0 + self.max_retries = None + + def __repr__(self): + NotImplementedError('Unimplemented Base method __repr__') + + @property + def uid(self): + """ + Object UID. + + """ + return self._uid + + @property + def state(self): + """ + Object state. + + """ + return self._state + + @property + def remote_runtime(self): + """ + Proxy to runtime where remote counterpart(s) is(are). + + """ + raise NotImplementedError('Unimplemented Base property remote_runtime') + + @classmethod + def remote_type(cls): + """ + Type of the remote. + + """ + NotImplementedError('Unimplemented Base method remote_type') + + def _fill_config(self, **kwargs): + self.max_retries = kwargs.pop('max_retries', 0) + + return kwargs + + def _remotes(self): + NotImplementedError('Unimplemented Base method _remotes') + + def proxy(self, uid): + """ + Generate proxy for specific UID. + + Parameters + ---------- + uid : str + + Returns + ------- + ProxyBase + + """ + return self.runtime.proxy(uid) + + def _prepare_cmd(self, method, *args, **kwargs): + obj_type = self.remote_type() + remotes = self._remotes() + + cmd = { + 'type': obj_type, + 'uid': self._uid, + 'method': method, + 'args': args, + 'kwargs': kwargs, + } + + return remotes, cmd + + def cmd(self, method, *args, **kwargs): + """ + Send command to remote counterparts. + + Parameters + ---------- + method : str + Method of the command. + args : tuple, optional + Arguments for the command. + kwargs : optional + Keyword arguments for the command. 
+ + Returns + ------- + concurrent.futures.Future + + """ + wait = kwargs.pop('wait', False) + + remotes, cmd = self._prepare_cmd(method, *args, **kwargs) + + result = [] + for remote in remotes: + result.append(remote.cmd(**cmd, wait=wait, as_async=False)) + + if len(result) == 1: + result = result[0] + + return result + + def cmd_recv(self, method, *args, **kwargs): + """ + Send command to remote counterparts and await reply. + + Parameters + ---------- + method : str + Method of the command. + args : tuple, optional + Arguments for the command. + kwargs : optional + Keyword arguments for the command. + + Returns + ------- + reply + + """ + wait = kwargs.pop('wait', False) + + remotes, cmd = self._prepare_cmd(method, *args, **kwargs) + + result = [] + for remote in remotes: + result.append(remote.cmd(**cmd, wait=wait, reply=True, as_async=False)) + + if len(result) == 1: + result = result[0] + + return result + + async def cmd_async(self, method, *args, **kwargs): + """ + Send async command to remote counterparts. + + Parameters + ---------- + method : str + Method of the command. + args : tuple, optional + Arguments for the command. + kwargs : optional + Keyword arguments for the command. + + Returns + ------- + asyncio.Future + + """ + remotes, cmd = self._prepare_cmd(method, *args, **kwargs) + + result = [] + for remote in remotes: + result.append(await remote.cmd(**cmd)) + + if len(result) == 1: + result = result[0] + + return result + + async def cmd_recv_async(self, method, *args, **kwargs): + """ + Send async command to remote counterparts and await reply. + + Parameters + ---------- + method : str + Method of the command. + args : tuple, optional + Arguments for the command. + kwargs : optional + Keyword arguments for the command. 
+
+        Returns
+        -------
+        asyncio.Future
+
+        """
+        remotes, cmd = self._prepare_cmd(method, *args, **kwargs)
+
+        result = []
+        for remote in remotes:
+            result.append(await remote.cmd(**cmd, reply=True))
+
+        if len(result) == 1:
+            result = result[0]
+
+        return result
+
+    _serialisation_attrs = ['_uid', '_state']
+
+    def _serialisation_helper(self):
+        state = {}
+
+        for attr in self._serialisation_attrs:
+            state[attr] = getattr(self, attr)
+
+        return state
+
+    @classmethod
+    def _deserialisation_helper(cls, state):
+        instance = cls.__new__(cls)
+
+        for attr, value in state.items():
+            setattr(instance, attr, value)
+
+        return instance
+
+    def __reduce__(self):
+        state = self._serialisation_helper()
+        return self._deserialisation_helper, (state,)
+
+    def __copy__(self):
+        return self
+
+    def __deepcopy__(self, memo):
+        return self
+
+
+class RemoteBase(CMDBase):
+    """
+    Base class for CMD objects that live in a remote runtime (e.g. tesserae and tasks).
+
+    """
+
+    is_proxy = False
+    is_remote = True
+
+    def __init__(self, uid, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self._uid = uid
+        self._ref_count = 1
+        self._proxies = set()
+
+    def __repr__(self):
+        runtime_id = self.runtime.uid
+
+        return "<%s object at %s, uid=%s, runtime=%s, state=%s>" % \
+               (self.__class__.__name__, id(self),
+                self.uid, runtime_id, self._state)
+
+    @property
+    def proxies(self):
+        """
+        Set of proxies that keep references to this remote.
+
+        """
+        # NOTE(review): was `raise self._proxies`, which raised a TypeError
+        # (a set is not an exception) on every access; the accessor clearly
+        # means to return the set.
+        return self._proxies
+
+    @property
+    def remote_runtime(self):
+        raise NotImplementedError('Unimplemented RemoteBase property remote_runtime')
+
+    @classmethod
+    def remote_type(cls):
+        return cls.type + '_proxy'
+
+    def _remotes(self):
+        return list(self.remote_runtime)
+
+    def register_proxy(self, uid):
+        """
+        Register a new proxy pointing to this remote.
+
+        Parameters
+        ----------
+        uid : str
+
+        Returns
+        -------
+
+        """
+        self._proxies.add(uid)
+
+    def deregister_proxy(self, uid):
+        """
+        Deregister proxy pointing to this remote.
+ + Parameters + ---------- + uid : str + + Returns + ------- + + """ + self._proxies.remove(uid) + + def inc_ref(self): + """ + Increase reference count. + + Returns + ------- + + """ + self._ref_count += 1 + + def dec_ref(self): + """ + Decrease reference count and deregister from runtime if needed. + + Returns + ------- + + """ + self._ref_count -= 1 + + if self._ref_count < 1: + self.runtime.deregister(self) + + +class ProxyBase(CMDBase): + """ + Base class for CMD objects that represent proxies to remote objects (e.g. tessera proxies and task proxies). + + """ + + is_proxy = True + is_remote = False + + def __repr__(self): + runtime_id = self.runtime_id + + return "<%s object at %s, uid=%s, runtime=%s, state=%s>" % \ + (self.__class__.__name__, id(self), + self.uid, runtime_id, self._state) + + @property + def runtime_id(self): + raise NotImplementedError('Unimplemented ProxyBase property runtime_id') + + @property + def remote_runtime(self): + raise NotImplementedError('Unimplemented ProxyBase property remote_runtime') + + @classmethod + def remote_type(cls): + return cls.type.split('_')[0] + + def _remotes(self): + return [self.remote_runtime] + + _serialisation_attrs = CMDBase._serialisation_attrs + [] + + @classmethod + def _deserialisation_helper(cls, state): + instance = super()._deserialisation_helper(state) + + obj_type = cls.remote_type() + + instance = instance.runtime.register(instance) + instance.remote_runtime.inc_ref(uid=instance.uid, type=obj_type, as_async=False) + + return instance + + def __del__(self): + self.remote_runtime.dec_ref(uid=self.uid, type=self.remote_type(), as_async=False) + self.runtime.deregister(self) + + +class MonitoredBase: + """ + Base class for those that keep track of the state of a remote object, + + """ + + def __init__(self, uid, runtime_id): + self.uid = uid + self.state = 'init' + self.runtime_id = runtime_id + + self.time = -1 + self.history = [] + + def update(self, **update): + """ + Update internal state. 
+ + Parameters + ---------- + update + + Returns + ------- + + """ + self.time = str(datetime.datetime.now()) + + for key, value in update.items(): + setattr(self, key, value) + + def update_history(self, **update): + """ + Update internal state and add the update to the history. + + Parameters + ---------- + update + + Returns + ------- + + """ + self.update(**update) + + update['time'] = self.time + self.history.append(update) + + def get_update(self): + """ + Get latest update. + + Returns + ------- + dict + + """ + update = dict( + state=self.state, + ) + + return update diff --git a/mosaic/core/task.py b/mosaic/core/task.py new file mode 100644 index 00000000..4d1ea42f --- /dev/null +++ b/mosaic/core/task.py @@ -0,0 +1,745 @@ + +import uuid +import time +import weakref +import operator +from cached_property import cached_property + +from .base import Base, RemoteBase, ProxyBase, MonitoredBase +from ..utils import Future + + +__all__ = ['Task', 'TaskProxy', 'TaskOutputGenerator', 'TaskOutput', 'TaskDone', 'MonitoredTask'] + + +class Task(RemoteBase): + """ + When we call a method on a remote tessera, two things will happen: + + - a Task will be generated on the remote tessera and queued to be executed by it; + - and a TaskProxy is generated on the calling side as a pointer to that remote task. + + We can use the task proxy to wait for the completion of the task (``await task_proxy``), + as an argument to other tessera method calls, or to retrieve the result of the + task (``await task_proxy.result()``). + + It is also possible to access references to the individual outputs of the task by + using ``task_proxy.outputs``. Outputs can be accessed through their position: ``task_proxy.outputs[0]`` + will reference the first output of the task. + + A reference to the termination of the task is also available through ``task_proxy.outputs.done``, + which can be used to create explicit dependencies between tasks, thus controlling the order + of execution. 
+ + Tasks on a particular tessera are guaranteed to be executed in the order in which they were called, + but no such guarantees exist for tasks on different tesserae. + + A completed task is kept in memory at the worker for as long as there are proxy references to + it. If none exist, it will be made available for garbage collection. + + Objects of class Task should not be instantiated directly by the user. + + Parameters + ---------- + uid : str + UID of the task. + sender_id : str + UID of the caller. + tessera : Tessera + Tessera on which the task is to be executed. + method : callable + Method associated with the task. + args : tuple, optional + Arguments to pass to the method. + kwargs : optional + Keyword arguments to pass to the method. + + """ + + type = 'task' + is_remote = True + + def __init__(self, uid, sender_id, tessera, method, *args, **kwargs): + super().__init__(uid, *args, **kwargs) + + self._sender_id = sender_id + self._tessera = weakref.proxy(tessera) + + kwargs = self._fill_config(**kwargs) + + self.method = method + self.args = args + self.kwargs = kwargs + + self._tic = None + self._elapsed = None + + self._args_pending = weakref.WeakSet() + self._kwargs_pending = weakref.WeakSet() + + self._args_value = dict() + self._kwargs_value = dict() + + self._args_state = dict() + self._kwargs_state = dict() + + self._ready_future = Future() + self._result = None + self._exception = None + + self._state = 'init' + self.runtime.register(self) + self.register_proxy(self._sender_id) + + @property + def sender_id(self): + """ + Caller UID. + + """ + return self._sender_id + + @cached_property + def remote_runtime(self): + """ + Proxies that have references to this task. 
+ + """ + return {self.proxy(each) for each in list(self._proxies)} + + def _fill_config(self, **kwargs): + kwargs['max_retries'] = kwargs.get('max_retries', self._tessera.max_retries) + + return super()._fill_config(**kwargs) + + def args_value(self): + """ + Processed value of the args of the task. + + Returns + ------- + tuple + + """ + args = [value for key, value in sorted(self._args_value.items(), key=operator.itemgetter(1))] + + return tuple(args) + + def kwargs_value(self): + """ + Processed value of the args of the task. + + Returns + ------- + dict + + """ + return self._kwargs_value + + def set_result(self, result): + """ + Set task result. + + Parameters + ---------- + result + + Returns + ------- + + """ + self._result = result + + def get_result(self, key=None): + """ + Get task result. + + Parameters + ---------- + key : optional + Access particular item within the result, defaults to None. + + Returns + ------- + + """ + if self._state == 'failed': + raise Exception('Tried to get the result on failed task %s' % self._uid) + + if self._state != 'done': + raise Exception('Tried to get result of task not done, this should never happen!') + + if key is None: + return self._result + + else: + result = self._result + + if not isinstance(result, tuple) and not isinstance(result, dict): + result = (result,) + + return result[key] + + def check_result(self): + """ + Check if the result is present. + + Returns + ------- + str + State of the task. + Exception or None + Exception if task has failed, None otherwise. 
+ + """ + if self._state == 'failed': + return 'failed', self._exception + + else: + return self._state, None + + def _cleanup(self): + self.args = None + self.kwargs = None + + self._args_pending = weakref.WeakSet() + self._kwargs_pending = weakref.WeakSet() + + self._args_value = dict() + self._kwargs_value = dict() + + self._args_state = dict() + self._kwargs_state = dict() + + # TODO Await all of the remote results together using gather + async def prepare_args(self): + """ + Prepare the arguments of the task for execution. + + Returns + ------- + Future + + """ + waitable_types = [TaskProxy, TaskOutput, TaskDone] + + for index in range(len(self.args)): + arg = self.args[index] + + if type(arg) in waitable_types: + self._args_state[index] = arg.state + + if arg.state != 'done': + if not isinstance(arg, TaskDone): + self._args_value[index] = None + self._args_pending.add(arg) + + def callback(fut): + self.loop.run(self._set_arg_done, args=(index, arg)) + + arg.add_done_callback(callback) + + else: + result = await arg.result() + if not isinstance(arg, TaskDone): + self._args_value[index] = result + + else: + self._args_state[index] = 'ready' + self._args_value[index] = arg + + for key, value in self.kwargs.items(): + if type(value) in waitable_types: + self._kwargs_state[key] = value.state + + if value.state != 'done': + if not isinstance(value, TaskDone): + self._kwargs_value[key] = None + self._kwargs_pending.add(value) + + def callback(fut): + self.loop.run(self._set_kwarg_done, args=(key, value)) + + value.add_done_callback(callback) + + else: + result = await value.result() + if not isinstance(value, TaskDone): + self._kwargs_value[key] = result + + else: + self._kwargs_state[key] = 'ready' + self._kwargs_value[key] = value + + await self._check_ready() + + return self._ready_future + + async def set_exception(self, exc): + """ + Set task exception + + Parameters + ---------- + exc : Exception + + Returns + ------- + + """ + await 
self.state_changed('failed') + self._exception = exc + + await self.cmd_async(method='set_exception', exc=exc) + + # Once done release local copy of the arguments + self._cleanup() + + async def set_done(self): + """ + Set task as done. + + Returns + ------- + + """ + await self.state_changed('done') + + await self.cmd_async(method='set_done') + + # Once done release local copy of the arguments + self._cleanup() + + async def _set_arg_done(self, index, arg): + result = await arg.result() + + self._args_state[index] = 'ready' + if not isinstance(arg, TaskDone): + self._args_value[index] = result + + self._args_pending.remove(arg) + await self._check_ready() + + async def _set_kwarg_done(self, index, arg): + result = await arg.result() + + self._kwargs_state[index] = 'ready' + if not isinstance(arg, TaskDone): + self._kwargs_value[index] = result + + self._kwargs_pending.remove(arg) + await self._check_ready() + + async def _check_ready(self): + if not len(self._args_pending) and not len(self._kwargs_pending): + await self.state_changed('ready') + self._ready_future.set_result(True) + + async def state_changed(self, state): + """ + Signal change in task state. + + Parameters + ---------- + state : str + New state of the task. + + Returns + ------- + + """ + self._state = state + + if state == 'running': + self._tic = time.time() + + elapsed = None + if state == 'done' or state == 'failed': + self._elapsed = elapsed = time.time() - self._tic + + await self.runtime.task_state_changed(self, elapsed=elapsed) + + def __del__(self): + self.logger.debug('Garbage collected object %s' % self) + self.loop.run(self.state_changed, args=('collected',)) + + +class TaskProxy(ProxyBase): + """ + Proxy pointing to a remote task that has been or will be executed. 
+ + """ + + type = 'task_proxy' + + def __init__(self, proxy, method, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._uid = '%s-%s-%s' % ('task', + method, + uuid.uuid4().hex) + self._tessera_proxy = proxy + + self._fill_config(**kwargs) + + self.method = method + self.args = args + self.kwargs = kwargs + self.outputs = TaskOutputGenerator(self) + + self._state = 'pending' + self._result = None + self._done_future = Future() + + async def init(self): + """ + Asynchronous correlate of ``__init__``. + + Returns + ------- + + """ + await self.monitor.init_task(uid=self._uid, + tessera_id=self._tessera_proxy.uid, + runtime_id=self.runtime_id) + + self.runtime.register(self) + + task = { + 'tessera_id': self._tessera_proxy.uid, + 'method': self.method, + 'args': self.args, + 'kwargs': self.kwargs, + } + + await self.remote_runtime.init_task(task=task, uid=self._uid, + reply=True) + + self._state = 'queued' + + @property + def runtime_id(self): + """ + UID of the runtime where the task lives. + + """ + return self._tessera_proxy.runtime_id + + @cached_property + def remote_runtime(self): + """ + Proxy to the runtime where the task lives. + + """ + return self._tessera_proxy.remote_runtime + + @property + def done_future(self): + """ + Future that will be completed when the remote task is done. + + """ + return self._done_future + + def set_done(self): + """ + Set task as done. + + Returns + ------- + + """ + self._state = 'done' + + self._done_future.set_result(True) + + # Once done release local copy of the arguments + self._cleanup() + + def set_exception(self, exc): + """ + Set exception during task execution. + + Parameters + ---------- + exc : Exception description + + Returns + ------- + + """ + self._state = 'failed' + + exc = exc[1].with_traceback(exc[2].as_traceback()) + self._done_future.set_exception(exc) + + # Once done release local copy of the arguments + self._cleanup() + + def wait(self): + """ + Wait on the task to be completed. 
+ + Returns + ------- + + """ + return self._done_future.result() + + def add_done_callback(self, fun): + """ + Add done callback. + + Parameters + ---------- + fun : callable + + Returns + ------- + + """ + self._done_future.add_done_callback(fun) + + def _cleanup(self): + self.args = None + self.kwargs = None + # Release the strong reference to the tessera proxy once the task is complete + # so that it can be garbage collected if necessary + self._tessera_proxy = weakref.proxy(self._tessera_proxy) + + async def result(self): + """ + Gather remote result from the task. + + Returns + ------- + Task result + + """ + await self + + if self._result is not None: + return self._result + + self._result = await self.cmd_recv_async(method='get_result') + + return self._result + + async def check_result(self): + """ + Check the remote result. + + Returns + ------- + + """ + if self._state != 'done' and self._state != 'failed': + state, exc = await self.cmd_recv_async(method='check_result') + + if state == 'done': + self.set_done() + + elif state == 'failed': + self.set_exception(exc) + + def __await__(self): + return (yield from self._done_future.__await__()) + + _serialisation_attrs = ProxyBase._serialisation_attrs + ['_tessera_proxy', 'method'] + + @classmethod + def _deserialisation_helper(cls, state): + instance = super()._deserialisation_helper(state) + + if not hasattr(instance, 'args'): + instance.args = None + instance.kwargs = None + + if not hasattr(instance, '_result'): + instance._result = None + instance._done_future = Future() + if instance.state == 'done': + instance.set_done() + + # TODO Unsure about the need for this + # Synchronise the task state, in case something has happened between + # the moment when it was pickled until it has been re-registered on + # this side + # instance.check_result() + + return instance + + +class TaskOutputGenerator: + """ + Class that generates pointers to specific outputs of a remote task, + + """ + + def __init__(self, 
task_proxy): + self._task_proxy = weakref.ref(task_proxy) + + self._generated_outputs = weakref.WeakValueDictionary() + + def __repr__(self): + runtime_id = self._task_proxy().runtime_id + + return "<%s object at %s, uid=%s, runtime=%s, state=%s>" % \ + (self.__class__.__name__, id(self), + self._task_proxy().uid, runtime_id, self._task_proxy().state) + + def __getattribute__(self, item): + try: + return super().__getattribute__(item) + + except AttributeError: + if item not in self._generated_outputs: + if item == 'done': + generated_output = TaskDone(self._task_proxy()) + else: + generated_output = TaskOutput(item, self._task_proxy()) + + self._generated_outputs[item] = generated_output + + return self._generated_outputs[item] + + def __getitem__(self, item): + if item not in self._generated_outputs: + if item == 'done': + generated_output = TaskDone(self._task_proxy()) + else: + generated_output = TaskOutput(item, self._task_proxy()) + + self._generated_outputs[item] = generated_output + + return self._generated_outputs[item] + + +class TaskOutputBase(Base): + """ + Base class for outputs of a task. + + """ + + def __init__(self, task_proxy): + self._task_proxy = task_proxy + self._result = None + + @property + def uid(self): + return self._task_proxy.uid + + @property + def state(self): + return self._task_proxy.state + + @property + def runtime_id(self): + return self._task_proxy.runtime_id + + @cached_property + def remote_runtime(self): + return self._task_proxy.remote_runtime + + @property + def done_future(self): + return self._task_proxy.done_future + + def wait(self): + return self._task_proxy.wait() + + async def result(self): + pass + + def add_done_callback(self, fun): + self._task_proxy.add_done_callback(fun) + + def __await__(self): + return (yield from self._task_proxy.__await__()) + + +class TaskOutput(TaskOutputBase): + """ + Pointer to specific remote output of a class. 
+ + """ + + def __init__(self, key, task_proxy): + super().__init__(task_proxy) + + self._key = key + + def __repr__(self): + runtime_id = self.runtime_id + + return "<%s object [%s] at %s, uid=%s, runtime=%s, state=%s>" % \ + (self.__class__.__name__, self._key, id(self), + self.uid, runtime_id, self.state) + + def _select_result(self, result): + if not isinstance(result, tuple) and not isinstance(result, dict): + result = (result,) + + return result[self._key] + + async def result(self): + """ + Gather output from the remote task. + + Returns + ------- + Output + + """ + await self + + if self._result is None and self._task_proxy._result is not None: + self._result = self._select_result(self._task_proxy._result) + + if self._result is not None: + return self._result + + self._result = await self._task_proxy.cmd_recv_async(method='get_result', key=self._key) + + return self._result + + +class TaskDone(TaskOutputBase): + """ + Reference to the termination of a remote task. + + """ + + def __repr__(self): + runtime_id = self.runtime_id + + return "<%s object at %s, uid=%s, runtime=%s, state=%s>" % \ + (self.__class__.__name__, id(self), + self.uid, runtime_id, self.state) + + async def result(self): + """ + Wait for task termination. + + Returns + ------- + + """ + await self + + self._result = True + + return self._result + + +class MonitoredTask(MonitoredBase): + """ + Information container on the state of a task. 
+ + """ + + def __init__(self, uid, tessera_id, runtime_id): + super().__init__(uid, runtime_id) + + self.tessera_id = tessera_id + self.elapsed = None diff --git a/mosaic/core/tessera.py b/mosaic/core/tessera.py new file mode 100644 index 00000000..3b68552c --- /dev/null +++ b/mosaic/core/tessera.py @@ -0,0 +1,681 @@ + +import sys +import uuid +import tblib +import asyncio +import weakref +import contextlib +import cloudpickle +from cached_property import cached_property + +from .task import TaskProxy +from .base import Base, CMDBase, RemoteBase, ProxyBase, MonitoredBase + + +__all__ = ['Tessera', 'TesseraProxy', 'ArrayProxy', 'MonitoredTessera', 'tessera'] + + +class Tessera(RemoteBase): + """ + A tessera is an actor in the mosaic parallelism model. + + A tessera represents an object that is instantiated in a remote portion of + the network, and which we reference through a proxy. This proxy + allows us to execute methods on that remote object simply by calling the + method. + + A tessera is kept in memory at the worker for as long as there are proxy references to + it. If none exist, it will be made available for garbage collection. + + Objects of class Tessera should not be instantiated directly by the user + and the ``@mosaic.tessera`` decorator should be used instead. + + Parameters + ---------- + cls : type + Class of the remote object. + uid : str + UID assigned to the tessera. + + """ + + type = 'tessera' + + def __init__(self, cls, uid, *args, **kwargs): + super().__init__(uid, *args, **kwargs) + kwargs = self._fill_config(**kwargs) + + self._state = 'init' + self._task_queue = asyncio.Queue() + self._task_lock = asyncio.Lock() + + self._cls = cls + self._init_cls(*args, **kwargs) + + self.runtime.register(self) + self.listen(wait=False) + + @cached_property + def remote_runtime(self): + """ + Proxies that have references to this tessera. 
+ + """ + return {self.proxy(each) for each in list(self._proxies)} + + def _init_cls(self, *args, **kwargs): + try: + self._obj = self._cls(*args, **kwargs) + + except Exception: + self.retries += 1 + + if self.retries > self.max_retries: + raise + + else: + self.logger.error('Tessera %s failed, attempting ' + 'retry %d out of %s' % (self.uid, + self.retries, self.max_retries)) + + return self._init_cls(*args, **kwargs) + + def queue_task(self, task): + """ + Add a task to the queue of the tessera. + + Parameters + ---------- + task : Task + + Returns + ------- + + """ + self._task_queue.put_nowait(task) + + def listen(self, wait=False): + """ + Start the listening loop that consumes tasks. + + Parameters + ---------- + wait : bool, optional + Whether or not to wait for the loop end, defaults to False. + + Returns + ------- + + """ + if self._state != 'init': + return + + self.loop.run(self.listen_async, wait=wait) + + async def listen_async(self): + """ + Listening loop that consumes tasks from the tessera queue. 
+ + Returns + ------- + + """ + if self._state != 'init': + return + + while True: + await self.state_changed('listening') + + sender_id, task = await self._task_queue.get() + + if type(task) is str and task == 'stop': + break + + await asyncio.sleep(0) + future = await task.prepare_args() + await future + + method = getattr(self._obj, task.method, False) + + async with self.send_exception(task=task): + if method is False: + raise AttributeError('Class %s does not have method %s' % (self._obj.__class__.__name__, + task.method)) + + if not callable(method): + raise ValueError('Method %s of class %s is not callable' % (task.method, + self._obj.__class__.__name__)) + + await asyncio.sleep(0) + await self.state_changed('running') + await task.state_changed('running') + await self.call_safe(sender_id, method, task) + + # Make sure that the loop does not keep implicit references to the task until the + # next task arrives in the queue + self._task_queue.task_done() + del task + + async def call_safe(self, sender_id, method, task): + """ + Call a method while handling exceptions, which will be sent back to the + sender if they arise. + + Parameters + ---------- + sender_id : str + UID of the original caller. + method : callable + Method to execute. + task : Task + Task that has asked for the execution of the method. + + Returns + ------- + + """ + async with self._task_lock: + async with self.send_exception(sender_id, method, task): + future = self.loop.run_in_executor(method, + args=task.args_value(), + kwargs=task.kwargs_value()) + result = await future + # TODO Dodgy + await asyncio.sleep(0.1) + + task.set_result(result) + await task.set_done() + + @contextlib.asynccontextmanager + async def send_exception(self, sender_id=None, method=None, task=None): + """ + Context manager that handles exceptions by sending them + back to the ``uid``. + + Parameters + ---------- + sender_id : str + Remote UID. + method : callable + Method being executed. 
+ task : Task + Task that has asked for the execution of the method. + + Returns + ------- + + """ + try: + yield + + except Exception: + task.retries += 1 + + if task.retries > task.max_retries or sender_id is None: + et, ev, tb = sys.exc_info() + tb = tblib.Traceback(tb) + + await task.set_exception((et, ev, tb)) + + else: + self.logger.error('Task %s at %s failed, attempting ' + 'retry %d out of %s' % (task.uid, self.uid, + task.retries, task.max_retries)) + await self.call_safe(sender_id, method, task) + + finally: + pass + + async def state_changed(self, state): + """ + Signal state changed. + + Parameters + ---------- + state : str + New state. + + Returns + ------- + + """ + self._state = state + await self.runtime.tessera_state_changed(self) + + def __del__(self): + self.logger.debug('Garbage collected object %s' % self) + self.loop.run(self.state_changed, args=('collected',)) + + +class TesseraProxy(ProxyBase): + """ + Objects of this class represent connections to remote tessera, allowing us to + call methods on them. + + Objects of class TesseraProxy should not be instantiated directly by the user + and the ``@mosaic.tessera`` decorator should be used instead. + + Parameters + ---------- + cls : type + Class of the remote object. + args : tuple, optional + Arguments for the instantiation of the remote tessera. + kwargs : optional + Keyword arguments for the instantiation of the remote tessera. + + """ + + type = 'tessera_proxy' + + def __init__(self, cls, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._cls = PickleClass(cls) + self._runtime_id = None + + self._uid = '%s-%s-%s' % ('tess', + self._cls.__name__.lower(), + uuid.uuid4().hex) + + async def init(self, *args, **kwargs): + """ + Asynchronous correlate of ``__init__``. 
+ + """ + kwargs = self._fill_config(**kwargs) + + self._runtime_id = await self.monitor.select_worker(reply=True) + + self._state = 'pending' + await self.monitor.init_tessera(uid=self._uid, + runtime_id=self._runtime_id) + + self.runtime.register(self) + await self.remote_runtime.init_tessera(cls=self._cls, uid=self._uid, args=args, + reply=True, **kwargs) + self._state = 'listening' + + @property + def runtime_id(self): + """ + UID of the runtime where the tessera lives. + + """ + return self._runtime_id + + @cached_property + def remote_runtime(self): + """ + Proxy to the runtime where the tessera lives. + + """ + return self.proxy(self._runtime_id) + + def __getattribute__(self, item): + try: + return super().__getattribute__(item) + + except AttributeError: + + if not hasattr(self._cls, item): + raise AttributeError('Class %s does not have method %s' % (self._cls.__name__, item)) + + if not callable(getattr(self._cls, item)): + raise ValueError('Method %s of class %s is not callable' % (item, self._cls.__name__)) + + async def remote_method(*args, **kwargs): + task_proxy = TaskProxy(self, item, *args, **kwargs) + await task_proxy.init() + + return task_proxy + + return remote_method + + def __getitem__(self, item): + return self.__getattribute__(item) + + _serialisation_attrs = ProxyBase._serialisation_attrs + ['_cls', '_runtime_id'] + + +class ArrayProxy(CMDBase): + """ + Objects of this class represent more a set of remote tesserae that may live on one or + more remote runtimes. An array proxy allows us to reference all of them together + through a common interface, as well as map calls to them. + + Objects of class ArrayProxy should not be instantiated directly by the user + and the ``@mosaic.tessera`` decorator should be used instead. + + Parameters + ---------- + cls : type + Class of the remote object. + args : tuple, optional + Arguments for the instantiation of the remote tessera. + len : int, optional + Length of the array, defaults to 1. 
+ kwargs : optional + Keyword arguments for the instantiation of the remote tessera. + + """ + + type = 'tessera_proxy_array' + + def __init__(self, cls, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._cls = PickleClass(cls) + self._len = kwargs.pop('len', 1) + + self._state = 'pending' + + self._proxies = [] + self._runtime_id = [] + for _ in range(self._len): + proxy = TesseraProxy(cls, *args, **kwargs) + self._proxies.append(proxy) + + self._uid = '%s-%s-%s' % ('array', + self._cls.__name__.lower(), + uuid.uuid4().hex) + + async def init(self, *args, **kwargs): + """ + Asynchronous correlate of ``__init__``. + + """ + for proxy in self._proxies: + await proxy.init(*args, **kwargs) + self._runtime_id.append(proxy.runtime_id) + + self.runtime.register(self) + + self._state = 'listening' + + @property + def runtime_id(self): + """ + UID of the runtime where the tessera lives. + + """ + return self._runtime_id + + @cached_property + def remote_runtime(self): + """ + Proxy to the runtime where the tessera lives. + + """ + return [self.proxy(each) for each in self._runtime_id] + + @classmethod + def remote_type(cls): + """ + Type of mosaic object. + + """ + return cls.type.split('_')[0] + + def _remotes(self): + return self.remote_runtime + + async def _map_tasks(self, fun, elements, *args, **kwargs): + proxy_queue = asyncio.Queue() + for proxy in self._proxies: + await proxy_queue.put(proxy) + + async def call(_element): + async with self._proxy(proxy_queue) as _proxy: + res = await fun(_element, _proxy, *args, **kwargs) + + return res + + tasks = [call(element) for element in elements] + + return tasks + + @contextlib.asynccontextmanager + async def _proxy(self, proxy_queue): + proxy = await proxy_queue.get() + + yield proxy + + await proxy_queue.put(proxy) + + async def map(self, fun, elements, *args, **kwargs): + """ + Map a function to an iterable, distributed across the proxies + of the proxy array. 
+ + The function is given control over a certain proxy for as long as + it takes to be executed. Once all mappings have completed, the + results are returned together + + Parameters + ---------- + fun : callable + Function to execute + elements : iterable + Iterable to map. + args : tuple, optional + Arguments to the function. + kwargs : optional + Keyword arguments to the function. + + Returns + ------- + list + Results of the mapping. + + """ + tasks = await self._map_tasks(fun, elements, *args, **kwargs) + + return await asyncio.gather(*tasks) + + async def map_as_completed(self, fun, elements, *args, **kwargs): + """ + Generator which maps a function to an iterable, + distributed across the proxies of the proxy array. + + The function is given control over a certain proxy for as long as + it takes to be executed. Once a function is completed, the result + to that function is yielded immediately. + + Parameters + ---------- + fun : callable + Function to execute + elements : iterable + Iterable to map. + args : tuple, optional + Arguments to the function. + kwargs : optional + Keyword arguments to the function. + + Returns + ------- + object + Result of each execution as they are completed. 
+ + """ + tasks = await self._map_tasks(fun, elements, *args, **kwargs) + + for task in asyncio.as_completed(tasks): + res = await task + yield res + + def __getattribute__(self, item): + try: + return super().__getattribute__(item) + + except AttributeError: + + if not hasattr(self._cls, item): + raise AttributeError('Class %s does not have method %s' % (self._cls.__name__, item)) + + if not callable(getattr(self._cls, item)): + raise ValueError('Method %s of class %s is not callable' % (item, self._cls.__name__)) + + async def remote_method(*args, **kwargs): + tasks = [] + for proxy in self._proxies: + tasks.append(proxy[item](*args, **kwargs)) + + return await asyncio.gather(*tasks) + + return remote_method + + def __getitem__(self, item): + return self._proxies[item] + + def __repr__(self): + runtime_id = ', '.join([str(each) for each in self.runtime_id]) + + return "<%s object at %s, uid=%s, runtime=(%s), state=%s>" % \ + (self.__class__.__name__, id(self), + self.uid, runtime_id, self._state) + + _serialisation_attrs = CMDBase._serialisation_attrs + [] + + +class MonitoredTessera(MonitoredBase): + """ + Information container on the state of a tessera. + + """ + pass + + +class PickleClass: + """ + A wrapper for a class that can be pickled safely. + + Parameters + ---------- + cls : type + Class to wrap. 
+ + """ + + def __init__(self, cls): + self.cls = cls + + def __call__(self, *args, **kwargs): + return self.cls(*args, **kwargs) + + def __getattribute__(self, item): + try: + return super().__getattribute__(item) + + except AttributeError: + return getattr(self.cls, item) + + def _serialisation_helper(self): + state = { + 'cls': cloudpickle.dumps(self.cls) + } + + return state + + @classmethod + def _deserialisation_helper(cls, state): + instance = cls.__new__(cls) + instance.cls = cloudpickle.loads(state['cls']) + + return instance + + def __reduce__(self): + state = self._serialisation_helper() + return self._deserialisation_helper, (state,) + + +def tessera(*args, **cmd_config): + """ + Decorator that transforms a standard class into a tessera-capable class. + + The resulting class can still be instantiated as usual ``Klass(...)``, which + will generate a standard local instance, or onto the mosaic runtime ``await Klass.remote(...)``, + which will instantiate the class in a remote endpoint and return a proxy to the user. + + TODO - Better explanations and more examples. + + Parameters + ---------- + + Returns + ------- + Enriched class + + Examples + -------- + + >>> @tessera + >>> class Klass: + >>> def __init__(self, value): + >>> self.value = value + >>> + >>> def add(self, other): + >>> self.value += other + >>> return self.value + >>> + >>> # We can still generate a standard local instance + >>> local_instance = Klass(10) + >>> + >>> # but also a remote instance by invoking remote. + >>> remote_proxy = await Klass.remote(10) + >>> + >>> # The resulting proxy can be used to call the instance methods, + >>> task = await remote_proxy.add(5) + >>> # which will return immediately. 
+    >>>
+    >>> # We can do some work while the remote method is executed
+    >>> # and then wait for it to end
+    >>> await task
+    >>>
+    >>> # We can retrieve the result of the task invoking result on the task
+    >>> await task.result()
+    15
+
+    """
+
+    def tessera_wrapper(cls):
+
+        @classmethod
+        async def remote(_, *args, **kwargs):
+            kwargs.update(cmd_config)
+
+            array_len = kwargs.pop('len', None)
+
+            if array_len is None:
+                proxy = TesseraProxy(cls, *args, **kwargs)
+                await proxy.init(*args, **kwargs)
+
+            else:
+                proxy = ArrayProxy(cls, *args, len=array_len, **kwargs)
+                await proxy.init(*args, **kwargs)
+
+            return proxy
+
+        @classmethod
+        def tessera(_, *args, **kwargs):
+            kwargs.update(cmd_config)
+            return Tessera(cls, *args, **kwargs)
+
+        cls.remote = remote
+        cls.tessera = tessera
+
+        method_list = [func for func in dir(Base) if not func.startswith("__")]
+        for method in method_list:
+            setattr(cls, method, getattr(Base, method))
+
+        return cls
+
+    if len(args) == 1 and len(cmd_config) == 0 and callable(args[0]):
+        return tessera_wrapper(args[0])
+
+    if len(args) != 0 or len(cmd_config) < 1:
+        raise ValueError('@tessera should be applied to a class without brackets '
+                         'or with configuration options within brackets.')
+
+    return tessera_wrapper
diff --git a/mosaic/file_manipulation/__init__.py b/mosaic/file_manipulation/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/mosaic/file_manipulation/h5.py b/mosaic/file_manipulation/h5.py
new file mode 100644
index 00000000..f0546a3d
--- /dev/null
+++ b/mosaic/file_manipulation/h5.py
@@ -0,0 +1,330 @@
+
+import os
+import h5py
+import numpy as np
+from datetime import datetime
+
+from mosaic.utils.change_case import camel_case
+from mosaic.types import Struct
+
+
+__all__ = ['HDF5', 'file_exists']
+
+
+_protocol_version = '0.1'
+
+
+def _abs_filename(filename, path=None):
+    if not os.path.isabs(filename):
+        filename = os.path.join(path, filename)
+
+    return filename
+
+
+def _decode_list(str_list):
+    
for index in range(len(str_list)): + if isinstance(str_list[index], list): + str_list[index] = _decode_list(str_list[index]) + + else: + str_list[index] = str_list[index].decode('utf-8') + + return str_list + + +def write(name, obj, group): + if isinstance(obj, dict): + if name != '/': + sub_group = group.create_group(name) + else: + sub_group = group + sub_group.attrs['protocol'] = _protocol_version + sub_group.attrs['datetime'] = str(datetime.now()) + sub_group.attrs['is_array'] = False + + for key, value in obj.items(): + write(key, value, sub_group) + + elif isinstance(obj, list) and len(obj) > 0 and isinstance(obj[0], dict): + sub_group = group.create_group(name) + sub_group.attrs['is_array'] = True + + for index in range(len(obj)): + sub_group_name = '%s_%08d' % (name, index) + write(sub_group_name, obj[index], sub_group) + + else: + _write_dataset(name, obj, group) + + +def append(name, obj, group): + if isinstance(obj, dict): + if name != '/': + if name not in group: + sub_group = group.create_group(name) + + else: + sub_group = group[name] + else: + sub_group = group + sub_group.attrs['protocol'] = _protocol_version + sub_group.attrs['datetime'] = str(datetime.now()) + sub_group.attrs['is_array'] = False + + for key, value in obj.items(): + append(key, value, sub_group) + + elif isinstance(obj, list) and len(obj) > 0 and isinstance(obj[0], dict): + if name not in group: + sub_group = group.create_group(name) + sub_group.attrs['is_array'] = True + + else: + sub_group = group[name] + + for index in range(len(obj)): + sub_group_name = '%s_%08d' % (name, index) + append(sub_group_name, obj[index], sub_group) + + else: + _write_dataset(name, obj, group) + + +def _write_dataset(name, obj, group): + if name in group: + return group[name] + + dataset = group.create_dataset(name, data=obj) + dataset.attrs['is_ndarray'] = isinstance(obj, np.ndarray) + dataset.attrs['is_list'] = isinstance(obj, list) + dataset.attrs['is_tuple'] = isinstance(obj, tuple) + 
dataset.attrs['is_str'] = isinstance(obj, str) + + if isinstance(obj, list) and len(obj): + flat_obj = np.asarray(obj).flatten().tolist() + dataset.attrs['is_str'] = isinstance(flat_obj[0], str) + + return dataset + + +def read(obj, lazy=True): + if isinstance(obj, h5py.Group): + if obj.attrs['is_array']: + data = [] + for key in obj.keys(): + data.append(read(obj[key], lazy=lazy)) + + else: + data = dict() + for key in obj.keys(): + data[key] = read(obj[key], lazy=lazy) + + return data + + elif isinstance(obj, h5py.Dataset): + return _read_dataset(obj, lazy=lazy) + + +def _read_dataset(obj, lazy=True): + if obj.attrs['is_ndarray']: + + def load(): + return obj[()] + + setattr(obj, 'load', load) + + if lazy is True: + return obj + + else: + return obj[()] + + else: + data = obj[()] + + if obj.attrs['is_str'] and not obj.attrs['is_list']: + data = data.decode('utf-8') + + elif obj.attrs['is_tuple']: + data = tuple(data) + + elif obj.attrs['is_list']: + data = list(data.tolist()) + + if obj.attrs['is_str']: + _decode_list(data) + + else: + data = data.item() + + return data + + +class HDF5: + """ + This class provides an interface to read and write HDF5 files. It can be used by instantiating the + class on its own, + + >>> file = HDF5(...) + >>> file.write(...) + >>> file.close() + + or as a context manager, + + >>> with HDF5(...) as file: + >>> file.write(...) + + If a particular version is given, the filename will be generated without checks. If no version is given, + the ``path`` will be checked for the latest available version of the file. + + The file will have the form ``-`` for version 0 and + ``--`` for higher versions. + + Parameters + ---------- + filename : str + Full path to a file, instead of a file being formed with version. + path : str + Location of the file in the filesystem, defaults to the current working directory. + project_name : str + Name of the project, the prefix that all files of the project will have. 
+ parameter : str + Parameter that determines which specific type of file to look for. + version : int, optional + Integer version of the file, starting at 0. If not given, the last available version will be found. + extension : str, optional + File extension, defaults to ``.h5``. + mode : str + Mode in which the file will be opened. + + """ + + def __init__(self, *args, **kwargs): + self._mode = kwargs.pop('mode') + + if len(args) > 0: + filename = args[0] + else: + filename = kwargs.pop('filename', None) + + path = kwargs.pop('path', None) or os.getcwd() + + if filename is None: + project_name = kwargs.pop('project_name', None) + parameter = kwargs.pop('parameter', None) + + if project_name is None or parameter is None: + raise RuntimeError('Either filename or project_name and parameter are needed to generate a filename') + + file_parameter = camel_case(parameter) + version = kwargs.pop('version', None) + extension = kwargs.pop('extension', '.h5') + + if version is None or version < 0: + version = 0 + filename = _abs_filename('%s-%s%s' % (project_name, file_parameter, extension), path) + while os.path.exists(filename): + version += 1 + filename = _abs_filename('%s-%s-%05d%s' % (project_name, file_parameter, version, extension), path) + + if self._mode.startswith('r'): + version -= 1 + + if version > 0: + filename = _abs_filename('%s-%s-%05d%s' % (project_name, file_parameter, version, extension), path) + + else: + filename = _abs_filename('%s-%s%s' % (project_name, file_parameter, extension), path) + + self._filename = _abs_filename(filename, path) + self._file = h5py.File(self._filename, self._mode) + + @property + def mode(self): + return self._mode + + @property + def filename(self): + return self._filename + + @property + def file(self): + return self._file + + def close(self): + self._file.close() + + def load(self, lazy=True): + group = self._file['/'] + description = read(group, lazy=lazy) + + return Struct(description) + + def dump(self, description): + 
write('/', description, self._file)
+
+    def append(self, description):
+        append('/', description, self._file)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+
+
+def file_exists(*args, **kwargs):
+    """
+    Check whether a certain file exists.
+
+    The file will have the form
+    ``<project_name>-<parameter><extension>`` for version 0 and
+    ``<project_name>-<parameter>-<version><extension>`` for higher versions.
+
+    Parameters
+    ----------
+    project_name : str
+        Name of the project, the prefix that all files of the project will have.
+    parameter : str
+        Parameter that determines which specific type of file to look for.
+    version : int
+        Integer version of the file, starting at 0.
+    extension : str, optional
+        File extension, defaults to ``.h5``.
+    folder : str, optional
+        Location of the file in the filesystem, defaults to the current folder.
+
+    Returns
+    -------
+    bool
+        Whether or not a file of the specified version exists.
+
+    """
+
+    if len(args) > 0:
+        filename = args[0]
+    else:
+        filename = kwargs.pop('filename', None)
+
+    path = kwargs.pop('path', None) or os.getcwd()
+
+    if filename is None:
+        project_name = kwargs.pop('project_name', None)
+        parameter = kwargs.pop('parameter', None)
+
+        if project_name is None or parameter is None:
+            raise RuntimeError('Either filename or project_name and parameter are needed to generate a filename')
+
+        file_parameter = camel_case(parameter)
+        version = kwargs.pop('version', None)
+        extension = kwargs.pop('extension', '.h5')
+
+        if version is not None and version > 0:
+            filename = _abs_filename('%s-%s-%05d%s' % (project_name, file_parameter, version, extension), path)
+
+        else:
+            filename = _abs_filename('%s-%s%s' % (project_name, file_parameter, extension), path)
+
+    filename = _abs_filename(filename, path)
+
+    return os.path.exists(filename)
diff --git a/mosaic/file_manipulation/yaml.py b/mosaic/file_manipulation/yaml.py
new file mode 100644
index 00000000..3fdfd7c2
--- /dev/null
+++ b/mosaic/file_manipulation/yaml.py
@@ -0,0 +1,240 @@
+
+import os
+import re
+import 
yaml +from yaml import Loader +from collections import OrderedDict + +from ..types import Path, Struct, ImportedFunction + + +__all__ = ['parse_yaml', 'parse_python'] + + +# Add a constructor to avoid !tags from being parsed +def default_constructor(loader, tag_suffix, node): + return tag_suffix + ' ' + node.value + + +yaml.add_multi_constructor('', default_constructor) + +# Make sure scientific notation without dot or exponent sign is understood +Loader.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(u'''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |\\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* + |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + + +def _parse_dict(config, work_dir): + for key, value in config.items(): + if isinstance(value, str): + if value.startswith('!func'): + value = value[5:].split(',') + + if len(value) > 1: + function_name = value[0].strip() + module_name = value[1].strip() + + else: + function_name = value[0].strip() + module_name = value[0].strip() + + if not module_name.startswith('/'): + module_name = os.path.join(work_dir, module_name) + module_name = os.path.abspath(module_name) + + function_path = module_name if module_name.endswith('.py') else '%s.py' % module_name + function_dir = os.path.dirname(function_path) + module_name = os.path.basename(module_name) + + value = ImportedFunction(function_name, module_name, (function_dir, function_path,)) + + elif value.startswith('!eval'): + value = eval(value[5:]) + + elif value.startswith('!path'): + value = Path(os.path.abspath(os.path.join(work_dir, value[5:].strip()))) + + elif isinstance(value, dict): + value = _parse_dict(value, work_dir) + + config[key] = value + + return config + + +def parse_yaml(file_path): + """ + This utility function uses the default YAML constructors to parse most of the content of the files. 
+ + It also includes constructors to handle our custom YAML tags ``!func``, ``!eval`` and ``!path``. + + Parameters + ---------- + file_path : str + Absolute path to the YAML file to be loaded + + Returns + ------- + OrderedDict + Ordered nested dictionary with the parsed contents of the YAML file + + Examples + -------- + The custom tags that are have been implemented allow for the definition of new YAML types. The ``!func`` + declares that a certain field is a callable, importable function by using the syntax: + + .. code-block:: python + + field_name: !func function_name + + This syntax will automatically look for a function called ``function_name`` inside a file named + ``function_name.py``. The paths of the files are automatically resolved with respect to the path of the YAML file. + This syntax can be further expanded by specifying a file where the function is located: + + .. code-block:: python + + field_name: !func function_name, path_to_file/file_name + + This syntax will result in the import of a function called ``function_name`` inside a file named + ``path_to_file/file_name.py``. + The ``!eval`` tag allows for the evaluation of valid one-line python expressions: + + .. code-block:: python + + field_name: !eval 2*4 + + will be evaluated to the integer 8. More complex expressions could also be created: + + .. code-block:: python + + field_name: !eval [2, 5, 7, 8] + + This syntax should also be used to assign tuple values, that YAML does not handle otherwise: + + .. code-block:: python + + field_name: !eval (2, 3) + + The ``!path`` tag should be used to specify fields that are meant to represent paths in the filesystem. Whether + absolute or relative, they will be automatically resolved with respect to the location of the YAML file: + + .. code-block:: python + + field_name: !path ./folder + + will be evaluated to ``/path/of/yaml/file/folder``. 
def parse_python(obj, folder=None, comments=None, level=0):
    """
    This function generates a YAML-like string out of a Python object, usually a dictionary or a Struct.

    It takes care of handling the format of the files so they do not look cluttered, it handles our custom tags
    ``!func``, ``!eval`` and ``!path``, and allows for the inclusion of comments in the parsed string.

    Parameters
    ----------
    obj : any Python literal, Path or Struct
        Python object to parse into a string, usually a dictionary
    folder : str, optional
        Folder with respect to which the paths will be resolved
    comments : dict, optional
        A dictionary mimicking the structure of the input dictionary that contains comments to be added to the
        parsed lines
    level : int, optional
        Nesting level of the object being parsed, usually 0

    Returns
    -------
    str
        String containing a parsed version of the Python input

    """
    if comments is None:
        comments = {}

    if callable(obj):
        # Serialise functions as "!func name, relative/module/path".
        rel_path, ext = os.path.splitext(os.path.relpath(obj.__globals__['__file__'], folder))
        if ext == '.py':
            parsed = '!func %s, %s' % (obj.__name__, rel_path)
        else:
            parsed = '!func %s, %s' % (obj.__name__, rel_path + ext)

    elif isinstance(obj, tuple):
        # YAML has no tuple literal, so emit an evaluable Python expression.
        parsed = '!eval ('
        for each in obj:
            if isinstance(each, str):
                parsed += '\'%s\', ' % each
            else:
                parsed += '%s, ' % parse_python(each, folder=folder)

        parsed += ')'

    elif isinstance(obj, list):
        parsed = '!eval ['
        for each in obj:
            if isinstance(each, str):
                parsed += '\'%s\', ' % each
            else:
                parsed += '%s, ' % parse_python(each, folder=folder)

        parsed += ']'

    elif isinstance(obj, bool):
        # bool is a subclass of int: without this branch True/False would be
        # serialised by the int branch as 1/0 and lose their type when the
        # file is read back. "True"/"False" parse back as YAML booleans.
        parsed = str(obj)

    elif isinstance(obj, Path):
        rel_path, ext = os.path.splitext(os.path.relpath(obj, folder))
        if ext == '.py':
            parsed = '!path %s' % rel_path
        else:
            parsed = '!path %s' % rel_path + ext

    elif isinstance(obj, float):
        # Large or tiny magnitudes use trimmed scientific notation; the rest
        # use the compact %g form.
        if obj/1e4 > 1 or obj < 1e-2:
            parsed = '%e' % obj

            parsed = parsed.split('e')
            parsed[0] = parsed[0].rstrip('0').rstrip('.')
            parsed = 'e'.join(parsed)
        else:
            parsed = '%g' % obj

    elif isinstance(obj, int):
        parsed = '%d' % obj

    elif isinstance(obj, (dict, Struct)):
        # Render nested mappings recursively, one "key : value" per line,
        # indented by the nesting level and optionally annotated with comments.
        _obj = OrderedDict()
        for key, value in obj.items():
            _obj[key] = parse_python(value, folder=folder, comments=comments.get(key, {}), level=level+1)

        parsed = '''\
    '''

        for key, value in _obj.items():
            if isinstance(obj[key], (dict, Struct)):
                parsed += '\n'

            if isinstance(comments.get(key, ''), str):
                _comment = comments.get(key, '')
            else:
                _comment = ''

            parsed += f'''\
\n{' '*level}{key} : {value} {_comment}'''

    else:
        # Strings and anything unrecognised pass through unchanged.
        parsed = obj

    return parsed
+ + Parameters + ---------- + kwargs + + Returns + ------- + + """ + monitor_address = kwargs.get('monitor_address', None) + if not self.is_monitor and monitor_address is None: + path = os.path.join(os.getcwd(), 'mosaic-workspace') + if not os.path.exists(path): + os.makedirs(path) + + filename = os.path.join(path, 'monitor.key') + + if os.path.exists(filename): + with open(filename, 'r') as file: + file.readline() + + _ = file.readline().split('=')[1].strip() + parent_address = file.readline().split('=')[1].strip() + parent_port = file.readline().split('=')[1].strip() + + kwargs['monitor_address'] = parent_address + kwargs['monitor_port'] = int(parent_port) + + await super().init(**kwargs) + + # if self.mode == 'local': + # available_cpus = list(range(psutil.cpu_count())) + # psutil.Process().cpu_affinity([available_cpus[0]]) + + # Start monitor if necessary and handshake in reverse + monitor_address = kwargs.get('monitor_address', None) + if not self.is_monitor and monitor_address is None: + await self.init_monitor(**kwargs) + + async def init_monitor(self, **kwargs): + """ + Init monitor runtime. + + Parameters + ---------- + kwargs + + Returns + ------- + + """ + def start_monitor(*args, **extra_kwargs): + kwargs.update(extra_kwargs) + mosaic.init('monitor', *args, **kwargs, wait=True) + + monitor_proxy = RuntimeProxy(name='monitor') + monitor_subprocess = subprocess(start_monitor)(name=monitor_proxy.uid, + daemon=False) + monitor_subprocess.start_process() + monitor_proxy.subprocess = monitor_subprocess + + self._monitor = monitor_proxy + await self._comms.wait_for(monitor_proxy.uid) + + async def stop(self, sender_id=None): + """ + Stop runtime. + + Parameters + ---------- + sender_id : str + + Returns + ------- + + """ + if self._monitor.subprocess is not None: + await self._monitor.stop() + self._monitor.subprocess.join_process() + + super().stop(sender_id) + # os._exit(0) + + def set_logger(self): + """ + Set up logging. 
+ + Returns + ------- + + """ + self.logger = LoggerManager() + self.logger.set_local() diff --git a/mosaic/runtime/monitor.py b/mosaic/runtime/monitor.py new file mode 100644 index 00000000..a5b21e71 --- /dev/null +++ b/mosaic/runtime/monitor.py @@ -0,0 +1,308 @@ + +import os +import pprint +import asyncio +import subprocess as cmd_subprocess + +import mosaic +from .runtime import Runtime, RuntimeProxy +from .node import MonitoredNode +from .strategies import RoundRobin +from ..core.tessera import MonitoredTessera +from ..core.task import MonitoredTask +from ..utils import subprocess +from ..utils.logger import LoggerManager, _stdout, _stderr + + +__all__ = ['Monitor', 'monitor_strategies'] + + +monitor_strategies = { + 'round-robin': RoundRobin +} + + +class Monitor(Runtime): + """ + The monitor takes care of keeping track of the state of the network + and collects statistics about it. + + It also handles the allocation of tesserae to certain workers. + + """ + + is_monitor = True + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + self.strategy_name = kwargs.get('monitor_strategy', 'round-robin') + self._monitor_strategy = monitor_strategies[self.strategy_name](self) + + self._monitored_nodes = dict() + self._monitored_tessera = dict() + self._monitored_tasks = dict() + + async def init(self, **kwargs): + """ + Asynchronous counterpart of ``__init__``. + + Parameters + ---------- + kwargs + + Returns + ------- + + """ + await super().init(**kwargs) + + # if self.mode == 'local': + # available_cpus = list(range(psutil.cpu_count())) + # psutil.Process().cpu_affinity([available_cpus[0]]) + + # Start local cluster + if self.mode in ['local', 'interactive']: + await self.init_local(**kwargs) + + else: + await self.init_cluster(**kwargs) + + async def init_local(self, **kwargs): + """ + Init nodes in local mode. 
+ + Parameters + ---------- + kwargs + + Returns + ------- + + """ + def start_node(*args, **extra_kwargs): + kwargs.update(extra_kwargs) + kwargs['runtime_indices'] = 0 + + mosaic.init('node', *args, **kwargs, wait=True) + + node_proxy = RuntimeProxy(name='node', indices=0) + node_subprocess = subprocess(start_node)(name=node_proxy.uid, daemon=False) + node_subprocess.start_process() + node_proxy.subprocess = node_subprocess + + self._nodes[node_proxy.uid] = node_proxy + await self._comms.wait_for(node_proxy.uid) + + async def init_cluster(self, **kwargs): + """ + Init nodes in cluster mode. + + Parameters + ---------- + kwargs + + Returns + ------- + + """ + node_list = kwargs.get('node_list', None) + + if node_list is None: + raise ValueError('No node_list was provided to initialise mosaic in cluster mode') + + num_nodes = len(node_list) + num_workers = kwargs.get('num_workers', 1) + num_threads = kwargs.get('num_threads', None) + log_level = kwargs.get('log_level', 'info') + runtime_address = self.address + runtime_port = self.port + + for node_index, node_address in zip(range(num_nodes), node_list): + node_proxy = RuntimeProxy(name='node', indices=node_index) + + cmd = (f'ssh {node_address} ' + f'"mrun --node -i {node_index} ' + f'--monitor-address {runtime_address} --monitor-port {runtime_port} ' + f'-n {num_nodes} -nw {num_workers} -nth {num_threads} ' + f'--cluster --{log_level}"') + + cmd_subprocess.Popen(cmd, + shell=True, + stdout=_stdout, + stderr=_stderr) + + self._nodes[node_proxy.uid] = node_proxy + await self._comms.wait_for(node_proxy.uid) + + self.logger.info('Started node %d at %s' % (node_index, node_address)) + + def set_logger(self): + """ + Set up logging. + + Returns + ------- + + """ + self.logger = LoggerManager() + + if self.mode == 'interactive': + self.logger.set_remote(runtime_id='head') + else: + self.logger.set_local() + + def update_monitored_node(self, sender_id, monitored_node): + """ + Update inner record of node state. 
+ + Parameters + ---------- + sender_id : str + monitored_node : dict + + Returns + ------- + + """ + if sender_id not in self._monitored_nodes: + self._monitored_nodes[sender_id] = MonitoredNode(sender_id) + + self._monitored_nodes[sender_id].update_history(**monitored_node) + + if os.environ.get('MOSAIC_DUMP_NODES', None) == 'yes': + update = self._monitored_nodes[sender_id].get_update() + update_str = pprint.pformat(update) + + with open('nodes.log', 'a') as file: + file.write('============\n') + file.write(sender_id + '\n') + file.write(update['time'] + '\n') + file.write(update_str) + file.write('\n') + + self._monitor_strategy.update_node(self._monitored_nodes[sender_id]) + + def init_tessera(self, sender_id, uid, runtime_id): + """ + Start monitoring given tessera. + + Parameters + ---------- + sender_id : str + uid : str + runtime_id : str + + Returns + ------- + + """ + monitored = MonitoredTessera(uid, runtime_id) + self._monitored_tessera[uid] = monitored + + self._monitor_strategy.update_tessera(self._monitored_tessera[uid]) + + def init_task(self, sender_id, uid, tessera_id, runtime_id): + """ + Start monitoring given task. + + Parameters + ---------- + sender_id : str + uid : str + tessera_id : str + runtime_id : str + + Returns + ------- + + """ + monitored = MonitoredTask(uid, tessera_id, runtime_id) + self._monitored_tasks[uid] = monitored + + self._monitor_strategy.update_task(self._monitored_tasks[uid]) + + def tessera_state_changed(self, sender_id, uid, state): + """ + Update monitored tessera state. + + Parameters + ---------- + sender_id : str + uid : str + state : str + + Returns + ------- + + """ + if uid not in self._monitored_tasks: + return + + self._monitored_tasks[uid].update_history(state=state) + + self._monitor_strategy.update_tessera(self._monitored_tessera[uid]) + + def task_state_changed(self, sender_id, uid, state, elapsed=None): + """ + Update monitored task state. 
    async def select_worker(self, sender_id):
        """
        Select appropriate worker to allocate a tessera.

        Blocks (yielding to the event loop) until at least one node has
        reported monitoring information, then delegates the actual choice
        to the configured monitoring strategy.

        Parameters
        ----------
        sender_id : str
            UID of the runtime requesting the allocation.

        Returns
        -------
        str
            UID of selected worker.

        """
        # Poll until the first node status update arrives; with no monitored
        # nodes the strategy would have nothing to choose from.
        while not len(self._monitored_nodes.keys()):
            await asyncio.sleep(0.1)

        return self._monitor_strategy.select_worker(sender_id)
+ + """ + + is_node = True + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + num_workers = kwargs.pop('num_workers', None) + num_workers = num_workers or 1 + num_threads = kwargs.pop('num_threads', None) + num_threads = num_threads or psutil.cpu_count() // num_workers + + self._own_workers = dict() + self._num_workers = num_workers + self._num_threads = num_threads + self._memory_limit = memory_limit() + + self._monitored_node = MonitoredNode(self.uid) + + async def init(self, **kwargs): + """ + Asynchronous counterpart of ``__init__``. + + Parameters + ---------- + kwargs + + Returns + ------- + + """ + await super().init(**kwargs) + + # Start local workers + await self.init_workers(**kwargs) + + async def init_workers(self, **kwargs): + """ + Init workers in the node. + + Parameters + ---------- + kwargs + + Returns + ------- + + """ + num_workers = self._num_workers + num_threads = self._num_threads + + # available_cpus = list(range(psutil.cpu_count())) + # + # if self.mode == 'local': + # psutil.Process().cpu_affinity([available_cpus[2]]) + # available_cpus = available_cpus[3:] + # else: + # psutil.Process().cpu_affinity([available_cpus[0]]) + # available_cpus = available_cpus[1:] + # + # cpus_per_worker = len(available_cpus) // self._num_workers + + for worker_index in range(self._num_workers): + indices = self.indices + (worker_index,) + + # if worker_index < num_workers - 1: + # cpu_affinity = available_cpus[worker_index*cpus_per_worker:(worker_index+1)*cpus_per_worker] + # + # else: + # cpu_affinity = available_cpus[worker_index*cpus_per_worker:] + + def start_worker(*args, **extra_kwargs): + kwargs.update(extra_kwargs) + kwargs['runtime_indices'] = indices + kwargs['num_workers'] = num_workers + kwargs['num_threads'] = num_threads + + mosaic.init('worker', *args, **kwargs, wait=True) + + worker_proxy = RuntimeProxy(name='worker', indices=indices) + worker_subprocess = subprocess(start_worker)(name=worker_proxy.uid, + daemon=False) + 
worker_subprocess.start_process() + worker_proxy.subprocess = worker_subprocess + + self._workers[worker_proxy.uid] = worker_proxy + self._own_workers[worker_proxy.uid] = worker_proxy + await self._comms.wait_for(worker_proxy.uid) + + self.resource_monitor() + + await self.update_monitored_node() + + self._loop.interval(self.resource_monitor, interval=0.1) + self._loop.interval(self.update_monitored_node, interval=0.1) + + def set_logger(self): + """ + Set up logging. + + Returns + ------- + + """ + self.logger = LoggerManager() + + if self.mode == 'local': + self.logger.set_local() + else: + runtime_id = 'head' if self.mode == 'interactive' else 'monitor' + self.logger.set_remote(runtime_id=runtime_id) + + def resource_monitor(self): + """ + Monitor reseources available for workers, and worker state. + + Returns + ------- + + """ + try: + import GPUtil + gpus = GPUtil.getGPUs() + except ImportError: + gpus = [] + + cpu_load = 0. + memory_fraction = 0. + + for gpu in gpus: + if gpu.id not in self._monitored_node.gpu_info: + self._monitored_node.gpu_info[gpu.id] = MonitoredGPU(gpu.id) + + self._monitored_node.gpu_info[gpu.id].update(gpu_load=gpu.load, + memory_limit=gpu.memoryTotal*1024**2, + memory_fraction=gpu.memoryUtil) + + for worker_id, worker in self._own_workers.items(): + if worker_id not in self._monitored_node.worker_info: + self._monitored_node.worker_info[worker_id] = MonitoredWorker(worker_id) + + worker_cpu_load = worker.subprocess.cpu_load() + worker_memory_fraction = worker.subprocess.memory() / self._memory_limit + + cpu_load += worker_cpu_load + memory_fraction += worker_memory_fraction + + self._monitored_node.worker_info[worker_id].update(state=worker.subprocess.state, + cpu_load=worker_cpu_load, + memory_fraction=worker_memory_fraction) + + if memory_fraction > 0.95: + self._monitored_node.sort_workers(desc=True) + + for worker_id, worker in self._monitored_node.worker_info.items(): + if self._own_workers[worker_id].subprocess.paused(): + 
continue + + self._own_workers[worker_id].subprocess.pause_process() + self._monitored_node.worker_info[worker_id].state = self._own_workers[worker_id].subprocess.state + break + + else: + self._monitored_node.sort_workers(desc=False) + + for worker_id, worker in self._monitored_node.worker_info.items(): + if self._own_workers[worker_id].subprocess.running(): + continue + + self._own_workers[worker_id].subprocess.start_process() + self._monitored_node.worker_info[worker_id].state = self._own_workers[worker_id].subprocess.state + break + + # TODO Dynamic constraints and shared resources + + self._monitored_node.update(num_cpus=psutil.cpu_count(), + num_gpus=len(gpus), + num_workers=self._num_workers, + num_threads=self._num_threads, + cpu_load=cpu_load, + memory_limit=self._memory_limit, + memory_fraction=memory_fraction) + + async def stop(self, sender_id=None): + """ + Stop runtime. + + Parameters + ---------- + sender_id : str + + Returns + ------- + + """ + for worker_id, worker in self._own_workers.items(): + await worker.stop() + worker.subprocess.join_process() + + super().stop(sender_id) + + async def update_monitored_node(self): + """ + Send status update to monitor. + + Returns + ------- + + """ + await self._comms.send_async('monitor', + method='update_monitored_node', + monitored_node=self._monitored_node.get_update()) + + +class MonitoredGPU: + """ + Container to keep track of monitored GPU resources. 
class MonitoredWorker:
    """
    Container to keep track of monitored worker.

    """

    def __init__(self, uid):
        # The UID doubles as the display name of the worker.
        self.uid = self.name = uid
        self.state = 'running'
        self.time = -1

        self.cpu_load = -1
        self.memory_fraction = -1

        # Chronological record of time-stamped updates.
        self.history = []

    def update(self, **update):
        """Apply the given attributes in place and stamp the update time."""
        self.time = str(datetime.datetime.now())

        for attr, val in update.items():
            setattr(self, attr, val)

    def update_history(self, **update):
        """Apply the given attributes and append them, time-stamped, to the history."""
        self.update(**update)

        update['time'] = self.time
        self.history.append(update)

    def get_update(self):
        """Return the current worker state as a plain dictionary."""
        return {
            'state': self.state,
            'cpu_load': self.cpu_load,
            'memory_fraction': self.memory_fraction,
        }
+ + """ + + def __init__(self, uid): + self.uid = self.name = uid + self.state = 'running' + self.time = -1 + + self.num_cpus = -1 + self.num_gpus = -1 + self.num_workers = -1 + self.num_threads = -1 + self.memory_limit = -1 + self.cpu_load = -1 + self.memory_fraction = -1 + + self.gpu_info = OrderedDict() + self.worker_info = OrderedDict() + + self.history = [] + + def update(self, **update): + if 'gpu_info' in update: + for gpu_id, gpu in update.pop('gpu_info').items(): + if gpu_id not in self.gpu_info: + self.gpu_info[gpu_id] = MonitoredGPU(gpu_id) + + self.gpu_info[gpu_id].update(**gpu) + + if 'worker_info' in update: + for worker_id, worker in update.pop('worker_info').items(): + if worker_id not in self.gpu_info: + self.worker_info[worker_id] = MonitoredWorker(worker_id) + + self.worker_info[worker_id].update(**worker) + + self.time = str(datetime.datetime.now()) + + for key, value in update.items(): + setattr(self, key, value) + + def update_history(self, **update): + if 'gpu_info' in update: + for gpu_id, gpu in update.pop('gpu_info').items(): + if gpu_id not in self.gpu_info: + self.gpu_info[gpu_id] = MonitoredGPU(gpu_id) + + self.gpu_info[gpu_id].update_history(**gpu) + + if 'worker_info' in update: + for worker_id, worker in update.pop('worker_info').items(): + if worker_id not in self.gpu_info: + self.worker_info[worker_id] = MonitoredWorker(worker_id) + + self.worker_info[worker_id].update_history(**worker) + + self.update(**update) + + update['time'] = self.time + self.history.append(update) + + def get_update(self): + update = dict( + time=self.time, + state=self.state, + num_cpus=self.num_cpus, + num_gpus=self.num_gpus, + num_workers=self.num_workers, + num_threads=self.num_threads, + memory_limit=self.memory_limit, + cpu_load=self.cpu_load, + memory_fraction=self.memory_fraction, + ) + + update['gpu_info'] = dict() + update['worker_info'] = dict() + + for gpu_id, gpu in self.gpu_info.items(): + update['gpu_info'][gpu_id] = gpu.get_update() + + for 
worker_id, worker in self.worker_info.items(): + update['worker_info'][worker_id] = worker.get_update() + + return update + + def sort_workers(self, desc=False): + self.worker_info = OrderedDict(sorted(self.worker_info.items(), + key=lambda x: x[1].memory_fraction, + reverse=desc)) + + def sort_gpus(self, desc=False): + self.gpu_info = OrderedDict(sorted(self.gpu_info.items(), + key=lambda x: x[1].memory_fraction, + reverse=desc)) diff --git a/mosaic/runtime/runtime.py b/mosaic/runtime/runtime.py new file mode 100644 index 00000000..27c2fada --- /dev/null +++ b/mosaic/runtime/runtime.py @@ -0,0 +1,769 @@ + +import gc +import zmq +import zmq.asyncio +import weakref + +import mosaic +from ..utils.event_loop import EventLoop +from ..comms import CommsManager + + +__all__ = ['Runtime', 'RuntimeProxy'] + + +class BaseRPC: + """ + Base class representing either a mosaic runtime or a proxy to that runtime. + + Runtimes represent network endpoints, and proxies represent references to those + endpoints. Using proxies, runtimes can be addressed transparently through + remote procedural calls. That is, calling a method on the proxy will execute it + in the remote runtime. + + Runtimes also keep a series of resident mosaic objects (such as tessera or tasks), + and can direct remote commands to those objects. + + A runtime has a name and an (optional) set of indices, which together produce + a unique ID associated with that endpoint in the network. The runtime UID + is used to direct messages across the network. + + A name ``runtime`` and indices ``(0, 0)`` will result in a UID ``runtime:0:0``, + while the same name with no indices will result in a UID ``runtime``. + + Parameters + ---------- + name : str, optional + Name of the runtime, defaults to None. If no name is provided, the UID has + to be given. + indices : tuple or int, optional + Indices associated with the runtime, defaults to none. + uid : str + UID from which to find the name and indices, defaults to None. 
+ + """ + + def __init__(self, name=None, indices=(), uid=None): + if uid is not None: + uid = uid.split(':') + name = uid[0] + + if len(uid) > 1: + indices = tuple([int(each) for each in uid[1:]]) + else: + indices = () + + elif name is None: + raise ValueError('Either name and indices or UID are required to instantiate the RPC') + + indices = () if indices is None else indices + + if type(indices) is not tuple: + indices = (indices,) + + self._name = name + self._indices = indices + + @property + def name(self): + """ + Runtime name. + + """ + return self._name + + @property + def indices(self): + """ + Runtime indices. + + """ + return self._indices + + @property + def uid(self): + """ + Runtime UID. + + """ + if len(self.indices): + indices = ':'.join([str(each) for each in self.indices]) + return '%s:%s' % (self.name, indices) + + else: + return self.name + + @property + def address(self): + """ + Runtime IP address. + + """ + return None + + @property + def port(self): + """ + Runtime port. + + """ + return None + + +class Runtime(BaseRPC): + """ + Class representing a local runtime of any possible type. + + The runtime handles the mosaic life cycle: + + - it handles the comms manager, the event loop, the logger and keeps proxies to existing remote runtimes; + - it keeps track of resident mosaic objects (tessera, task) and proxies to those; + - it routes remote commands to these resident mosaic objects. + + For referece on accepted parameters, check `mosaic.init`. 
+ + """ + + is_head = False + is_monitor = False + is_node = False + is_worker = False + + def __init__(self, **kwargs): + runtime_indices = kwargs.pop('runtime_indices', ()) + super().__init__(name=self.__class__.__name__.lower(), indices=runtime_indices) + + self.mode = kwargs.get('mode', 'local') + + self._comms = None + self._head = None + self._monitor = None + self._node = None + self._nodes = dict() + self._worker = None + self._workers = dict() + self._zmq_context = None + self._loop = None + + self.logger = None + + self._tessera = dict() + self._tessera_proxy = weakref.WeakValueDictionary() + self._tessera_proxy_array = weakref.WeakValueDictionary() + self._task = dict() + self._task_proxy = weakref.WeakValueDictionary() + + async def init(self, **kwargs): + """ + Asynchronous counterpart of ``__init__``. + + Parameters + ---------- + kwargs + + Returns + ------- + + """ + # Start comms + address = kwargs.pop('address', None) + port = kwargs.pop('port', None) + self.set_comms(address, port) + + # Start logger + self.set_logger() + + # Connect to parent if necessary + parent_id = kwargs.pop('parent_id', None) + parent_address = kwargs.pop('parent_address', None) + parent_port = kwargs.pop('parent_port', None) + if parent_id is not None and parent_address is not None and parent_port is not None: + await self._comms.handshake(parent_id, parent_address, parent_port) + + # Connect to monitor if necessary + monitor_address = kwargs.get('monitor_address', None) + monitor_port = kwargs.get('monitor_port', None) + if not self.is_monitor and monitor_address is not None and monitor_port is not None: + await self._comms.handshake('monitor', monitor_address, monitor_port) + + # Start listening + self._comms.listen() + + def wait(self, wait=False): + """ + Wait on the comms loop until done. + + Parameters + ---------- + wait : bool + Whether or not to wait, defaults to False. 
+ + Returns + ------- + + """ + if wait is True: + self._comms.wait() + + @property + def address(self): + """ + IP address of the runtime. + + """ + return self._comms.address + + @property + def port(self): + """ + Port of the runtime. + + """ + return self._comms.port + + @property + def num_nodes(self): + """ + Number of nodes on the network. + + """ + return len(self._nodes.keys()) + + @property + def num_workers(self): + """ + Number of workers on the network. + + """ + return len(self._workers.keys()) + + # Interfaces to global objects + + def set_logger(self): + """ + Set up logging. + + Returns + ------- + + """ + pass + + def set_comms(self, address=None, port=None): + """ + Set up comms manager. + + Parameters + ---------- + address : str, optional + Address to use, defaults to None. If None, the comms will try to + guess the address. + port : int, optional + Port to use, defaults to None. If None, the comms will test ports + until one becomes available. + + Returns + ------- + + """ + if self._comms is None: + self._comms = CommsManager(runtime=self, address=address, port=port) + self._comms.connect_recv() + + def get_comms(self): + """ + Access comms. + + Returns + ------- + + """ + return self._comms + + def get_zmq_context(self): + """ + Access ZMQ socket context. + + Returns + ------- + + """ + if self._zmq_context is None: + self._zmq_context = zmq.asyncio.Context() + + return self._zmq_context + + def get_event_loop(self, asyncio_loop=None): + """ + Access event loop. + + Parameters + ---------- + asyncio_loop: object, optional + Async loop to use in our mosaic event loop, defaults to new loop. + + Returns + ------- + + """ + if self._loop is None: + self._loop = EventLoop(loop=asyncio_loop) + + return self._loop + + def get_head(self): + """ + Access head runtime. + + Returns + ------- + + """ + return self._head + + def get_monitor(self): + """ + Access monitor runtime. 
+ + Returns + ------- + + """ + return self._monitor + + def get_node(self, uid=None): + """ + Access specific node runtime. + + Parameters + ---------- + uid : str + + Returns + ------- + + """ + if uid is not None: + return self._nodes[uid] + + else: + return self._node + + def get_worker(self, uid=None): + """ + Access specific worker runtime. + + Parameters + ---------- + uid : str + + Returns + ------- + + """ + if uid is not None: + return self._workers[uid] + + else: + return self._worker + + def proxy_from_uid(self, uid, proxy=None): + """ + Generate a proxy from a UID. + + Parameters + ---------- + uid : str + proxy : BaseProxy + + Returns + ------- + BaseProxy + + """ + proxy = proxy or self.proxy(uid=uid) + + found_proxy = None + if hasattr(self, '_' + proxy.name + 's'): + found_proxy = getattr(self, '_' + proxy.name + 's').get(uid, None) + + elif hasattr(self, '_' + proxy.name): + found_proxy = getattr(self, '_' + proxy.name) + + if found_proxy is None: + if hasattr(self, '_' + proxy.name + 's'): + getattr(self, '_' + proxy.name + 's')[uid] = proxy + + elif hasattr(self, '_' + proxy.name): + setattr(self, '_' + proxy.name, proxy) + + return proxy + + else: + return found_proxy + + @staticmethod + def proxy(name=None, indices=(), uid=None): + """ + Generate proxy from name, indices or UID. + + Parameters + ---------- + name : str, optional + indices : tuple, optional + uid : str, optional + + Returns + ------- + + """ + return RuntimeProxy(name=name, indices=indices, uid=uid) + + # Network management methods + + def log_info(self, sender_id, buf): + """ + Log remote message from ``sender_id`` on info stream. + + Parameters + ---------- + sender_id : str + buf : str + + Returns + ------- + + """ + if self.logger is None: + return + + self.logger.info(buf, uid=sender_id) + + def log_debug(self, sender_id, buf): + """ + Log remote message from ``sender_id`` on debug stream. 
+ + Parameters + ---------- + sender_id : str + buf : str + + Returns + ------- + + """ + if self.logger is None: + return + + self.logger.debug(buf, uid=sender_id) + + def log_error(self, sender_id, buf): + """ + Log remote message from ``sender_id`` on error stream. + + Parameters + ---------- + sender_id : str + buf : str + + Returns + ------- + + """ + if self.logger is None: + return + + self.logger.error(buf, uid=sender_id) + + def log_warning(self, sender_id, buf): + """ + Log remote message from ``sender_id`` on warning stream. + + Parameters + ---------- + sender_id : str + buf : str + + Returns + ------- + + """ + if self.logger is None: + return + + self.logger.warning(buf, uid=sender_id) + + def raise_exception(self, sender_id, exc): + """ + Raise remote exception that ocurred on ``sender_id``. + + Parameters + ---------- + sender_id : str + exc : Exception description + + Returns + ------- + + """ + self.log_error(sender_id, 'Endpoint raised exception "%s"' % str(exc[1])) + raise exc[1].with_traceback(exc[2].as_traceback()) + + def hand(self, sender_id, address, port): + """ + Handle incoming handshake petition. + + Parameters + ---------- + sender_id : str + address : str + port : int + + Returns + ------- + + """ + self.proxy_from_uid(sender_id) + + def shake(self, sender_id, network): + """ + Handle handshake response. + + Parameters + ---------- + sender_id : str + network : dict + + Returns + ------- + + """ + for uid, address in network.items(): + self.connect(sender_id, uid, *address) + + def connect(self, sender_id, uid, address, port): + """ + Connect to a specific remote runtime. + + Parameters + ---------- + sender_id : str + uid : str + address : str + port : int + + Returns + ------- + + """ + self.hand(uid, address, port) + + def disconnect(self, sender_id, uid): + """ + Disconnect specific remote runtime. 
+ + Parameters + ---------- + sender_id : str + uid : str + + Returns + ------- + + """ + pass + + def stop(self, sender_id=None): + """ + Stop runtime. + + Parameters + ---------- + sender_id : str + + Returns + ------- + + """ + if self._comms is not None: + self._loop.run(self._comms.stop, args=(sender_id,)) + + if self._loop is not None: + self._loop._stop.set() + + # Command and task management methods + + def register(self, obj): + """ + Register CMD object with runtime. + + Parameters + ---------- + obj : BaseCMD + + Returns + ------- + + """ + obj_type = obj.type + obj_uid = obj.uid + obj_store = getattr(self, '_' + obj_type) + + if obj_uid not in obj_store.keys(): + obj_store[obj_uid] = obj + + return obj_store[obj_uid] + + def deregister(self, obj): + """ + Deregister CMD object from runtime. + + Parameters + ---------- + obj : BaseCMD + + Returns + ------- + + """ + obj_type = obj.type + obj_uid = obj.uid + obj_store = getattr(self, '_' + obj_type) + + obj = obj_store.pop(obj_uid, None) + + if hasattr(obj, 'queue_task'): + obj.queue_task((None, 'stop')) + + gc.collect() + + def cmd(self, sender_id, cmd): + """ + Process incoming command address to one of the resident objects. + + Parameters + ---------- + sender_id : str + cmd : CMD + + Returns + ------- + + """ + obj_type = cmd.type + obj_uid = cmd.uid + obj_store = getattr(self, '_' + obj_type) + + if obj_uid not in obj_store.keys(): + self.logger.warning('Runtime %s does not own object %s of type %s' % (self.uid, obj_uid, obj_type)) + return + + obj = obj_store[obj_uid] + + method = getattr(obj, cmd.method) + result = method(*cmd.args, **cmd.kwargs) + + return result + + +class RuntimeProxy(BaseRPC): + """ + This class represents a proxy to a remote running runtime. + + This proxy can be used to execute methods and commands on the remote runtime simply by + calling methods on it. + + The proxy uses the comms to direct messages to the correct endpoint using its UID. 
+ + Parameters + ---------- + name : str, optional + Name of the runtime, defaults to None. If no name is provided, the UID has + to be given. + indices : tuple or int, optional + Indices associated with the runtime, defaults to none. + uid : str + UID from which to find the name and indices, defaults to None. + comms : CommsManager + Comms instance to use, defaults to global comms. + + """ + + def __init__(self, name=None, indices=(), uid=None, comms=None): + super().__init__(name=name, indices=indices, uid=uid) + + self._comms = comms or mosaic.get_comms() + self._subprocess = None + + @property + def address(self): + """ + Remote runtime IP address. + + """ + return self._comms.uid_address(self.uid) + + @property + def port(self): + """ + Remote runtime port. + + """ + return self._comms.uid_port(self.uid) + + @property + def subprocess(self): + """ + Subprocess on which remote runtime lives, if any. + + """ + return self._subprocess + + @subprocess.setter + def subprocess(self, subprocess): + """ + Set remote runtime subprocess. 
+ + Parameters + ---------- + subprocess : Subprocess + + Returns + ------- + + """ + self._subprocess = subprocess + + def __getattribute__(self, item): + try: + return super().__getattribute__(item) + + except AttributeError: + def remote_method(**kwargs): + wait = kwargs.pop('wait', False) + reply = kwargs.pop('reply', False) + as_async = kwargs.pop('as_async', True) + + if item.startswith('cmd'): + send_method = 'cmd' + + else: + send_method = 'send' + kwargs['method'] = item + + if reply is True: + send_method += '_recv' + + if as_async is True: + send_method += '_async' + + else: + kwargs['wait'] = wait + + send_method = getattr(self._comms, send_method) + return send_method(self.uid, **kwargs) + + return remote_method + + def __getitem__(self, item): + return self.__getattribute__(item) diff --git a/mosaic/runtime/strategies.py b/mosaic/runtime/strategies.py new file mode 100644 index 00000000..915f4a5e --- /dev/null +++ b/mosaic/runtime/strategies.py @@ -0,0 +1,92 @@ + + +class MonitorStrategy: + """ + Base class for the strategies used to allocate tesserae to + workers. + + """ + + def __init__(self, monitor): + self._monitor = monitor + + def update_node(self, updated): + """ + Update inner record of node state. + + Parameters + ---------- + updated : MonitoredNode + + Returns + ------- + + """ + pass + + def update_tessera(self, updated): + """ + Update inner record of tesserae state. + + Parameters + ---------- + updated : MonitoredTessera + + Returns + ------- + + """ + pass + + def update_task(self, updated): + """ + Update inner record of task state. + + Parameters + ---------- + updated : MonitoredTask + + Returns + ------- + + """ + pass + + def select_worker(self, sender_id): + """ + Select an appropriate worker. + + Parameters + ---------- + sender_id : str + + Returns + ------- + + """ + pass + + +class RoundRobin(MonitorStrategy): + """ + Round robin strategy for allocating tesserae. 
+ + """ + + def __init__(self, monitor): + super().__init__(monitor) + + self._worker_list = set() + self._num_workers = -1 + self._last_worker = -1 + + def update_node(self, updated): + for worker_id in updated.worker_info.keys(): + self._worker_list.add(worker_id) + + self._num_workers = len(self._worker_list) + + def select_worker(self, sender_id): + self._last_worker = (self._last_worker + 1) % self._num_workers + + return list(self._worker_list)[self._last_worker] diff --git a/mosaic/runtime/worker.py b/mosaic/runtime/worker.py new file mode 100644 index 00000000..78296953 --- /dev/null +++ b/mosaic/runtime/worker.py @@ -0,0 +1,198 @@ + +import os +import psutil + +from .runtime import Runtime +from ..core import Task +from ..utils import LoggerManager + + +__all__ = ['Worker'] + + +class Worker(Runtime): + """ + Workers are the runtimes where tesserae live, and where tasks are executed on them. + + Workers are initialised and managed by the node runtimes. + + """ + + is_worker = True + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + num_threads = kwargs.pop('num_threads', None) + num_threads = num_threads or psutil.cpu_count() + + if num_threads is None: + num_threads = psutil.cpu_count() + self._num_threads = num_threads + + os.environ['OMP_NUM_THREADS'] = str(self._num_threads) + + def set_logger(self): + """ + Set up logging. + + Returns + ------- + + """ + self.logger = LoggerManager() + + if self.mode == 'local': + self.logger.set_local() + else: + runtime_id = 'head' if self.mode == 'interactive' else 'monitor' + self.logger.set_remote(runtime_id=runtime_id) + + # Command and task management methods + + async def init_tessera(self, sender_id, cls, uid, args, **kwargs): + """ + Create tessera in this worker. + + Parameters + ---------- + sender_id : str + Caller UID. + cls : type + Class of the tessera. + uid : str + UID of the new tessera. + args : tuple, optional + Arguments for the initialisation of the tessera. 
+ kwargs : optional + Keyword arguments for the initialisation of the tessera. + + Returns + ------- + + """ + tessera = cls.tessera(uid, *args, **kwargs) + tessera.register_proxy(sender_id) + + async def init_task(self, sender_id, task, uid): + """ + Create new task for a tessera in this worker. + + Parameters + ---------- + sender_id : str + Caller UID. + task : dict + Task configuration. + uid : str + UID of the new task. + + Returns + ------- + + """ + obj_uid = task['tessera_id'] + obj_store = self._tessera + tessera = obj_store[obj_uid] + + task = Task(uid, sender_id, tessera, + task['method'], *task['args'], **task['kwargs']) + + tessera.queue_task((sender_id, task)) + await task.state_changed('pending') + + async def tessera_state_changed(self, tessera): + """ + Notify change in tessera state. + + Parameters + ---------- + tessera : Tessera + + Returns + ------- + + """ + monitor = self.get_monitor() + await monitor.tessera_state_changed(uid=tessera.uid, + state=tessera.state) + + async def task_state_changed(self, task, elapsed=None): + """ + Notify change in task state. + + Parameters + ---------- + task : Task + elapsed : float, optional + + Returns + ------- + + """ + monitor = self.get_monitor() + await monitor.task_state_changed(uid=task.uid, + state=task.state, + elapsed=elapsed) + + def inc_ref(self, sender_id, uid, type): + """ + Increase reference count for a resident object. + + Parameters + ---------- + sender_id : str + Caller UID. + uid : str + UID of the object being referenced. + type : str + Type of the object being referenced. 
+ + Returns + ------- + + """ + self.logger.debug('Increased ref count for object %s' % uid) + + obj_type = type + obj_uid = uid + obj_store = getattr(self, '_' + obj_type) + + if obj_uid not in obj_store.keys(): + raise KeyError('Runtime %s does not own object %s of type %s' % (self.uid, obj_uid, obj_type)) + + obj = obj_store[obj_uid] + obj.inc_ref() + obj.register_proxy(uid=sender_id) + + def dec_ref(self, sender_id, uid, type): + """ + Decrease reference count for a resident object. + + If reference count decreases below 1, deregister the object. + + Parameters + ---------- + sender_id : str + Caller UID. + uid : str + UID of the object being referenced. + type : str + Type of the object being referenced. + + Returns + ------- + + """ + self.logger.debug('Decreased ref count for object %s' % uid) + + obj_type = type + obj_uid = uid + obj_store = getattr(self, '_' + obj_type) + + if obj_uid not in obj_store.keys(): + raise KeyError('Runtime %s does not own object %s of type %s' % (self.uid, obj_uid, obj_type)) + + obj = obj_store[obj_uid] + obj.dec_ref() + obj.deregister_proxy(uid=sender_id) diff --git a/mosaic/types/__init__.py b/mosaic/types/__init__.py new file mode 100644 index 00000000..0c9063ad --- /dev/null +++ b/mosaic/types/__init__.py @@ -0,0 +1,6 @@ + +from .struct import * +from .config import * +from .extensible import * +from .path import * +from .imported_function import * diff --git a/mosaic/types/config.py b/mosaic/types/config.py new file mode 100644 index 00000000..a5205473 --- /dev/null +++ b/mosaic/types/config.py @@ -0,0 +1,61 @@ + +import copy +from collections import OrderedDict + +from .struct import Struct + + +__all__ = ['Config'] + + +class Config(Struct): + """ + A Config is a specific type of (``mutable`` and not ``extensible``) Struct that populates itself + with some provided default values. This allows to have some default configuration that can then be + superseded by the user. 
+ + Parameters + ---------- + content : dict-like, optional + Dict-like object to initialise the Struct, defaults to empty. + defaults : dict-like, optional + Dict-like object that provides the default values of the Config object. + + """ + + _allowed_attributes = ['_content', '_extensible', '_mutable', '_defaults'] + + def __init__(self, content=None, defaults=None): + super(Config, self).__init__(mutable=True, extensible=False) + + if defaults is None: + defaults = OrderedDict() + + if content is None: + content = OrderedDict() + + self._defaults = defaults + + defaulted_content = copy.deepcopy(defaults) + defaulted_content.update(content) + self._content.update(defaulted_content) + + @property + def defaults(self): + """ + Access the default values of the Config object. + + """ + return self._defaults + + def copy(self): + """ + Returns a deepcopy of the Config. + + Returns + ------- + Config + Copied Config + + """ + return Config(content=copy.deepcopy(self._content), defaults=copy.deepcopy(self._defaults)) diff --git a/mosaic/types/extensible.py b/mosaic/types/extensible.py new file mode 100644 index 00000000..d4c74e6f --- /dev/null +++ b/mosaic/types/extensible.py @@ -0,0 +1,149 @@ + +import sys + +from ..utils import camel_case + + +__all__ = ['ExtensibleObject', 'extensible_module'] + + +class ExtensibleObject: + """ + Similarly to the ``extensible`` option in Struct, a class that inherits from ExtensibleObject + provides a means for accessing variants of its attributes and methods without having to use an ``if-else`` + construct. 
+ + Examples + -------- + >>> class Klass(ExtensibleObject): + >>> class KlassVariant1: + >>> def __init__(self): + >>> print('Instantiated Variant1') + >>> + >>> class KlassVariant2: + >>> def __init__(self): + >>> print('Instantiated Variant2') + >>> + >>> def function_variant_1(self): + >>> print('Executed variant_1') + >>> + >>> def function_variant_2(self): + >>> print('Executed variant_2') + >>> + >>> klass = Klass() + >>> klass.function(use='variant_1') + Executed variant_1 + >>> klass.Klass('Variant2')() + Instantiated Variant2 + >>> klass.Klass('variant_1')() + Instantiated Variant1 + """ + + def __getattribute__(self, item): + try: + return super(ExtensibleObject, self).__getattribute__(item) + + except AttributeError: + exists = False + __class__ = super(ExtensibleObject, self).__getattribute__('__class__') + + for key in dir(self): + if item in key and key.startswith(item): + exists = True + break + + if exists: + def dynamic_method_wrapper(*args, **options): + use = options.pop('use', False) + if not use: + if len(args): + key = args[0] + args = args[1:] + else: + raise ValueError('When calling automatic interface "%s" of an ExtensibleObject the ' + 'variant has to be specified' % item) + + else: + key = use + + if key is not None: + method_name = '%s_%s' % (item, key) + if method_name in dir(self): + method = super(ExtensibleObject, self).__getattribute__(method_name) + + if callable(method): + return method(*args, **options) + + raise AttributeError('No valid interface was found for method "%s" with variant "%s"' % (item, key)) + + return dynamic_method_wrapper + + else: + raise + + +def extensible_module(name): + """ + This function extends a module to provide it with extensibility, as defined for + Struct and extensible object. + + TODO - Document this feature. + + Parameters + ---------- + name : str + Name of the module. 
+ + Returns + ------- + callable + + """ + + def get_attribute(item): + module = sys.modules[name] + exists = False + for key in dir(module): + if item in key and key.startswith(item): + exists = True + break + + if exists: + def dynamic_method_wrapper(*args, **options): + use = options.pop('use', False) + if not use: + if len(args): + key = args[0] + args = args[1:] + else: + raise ValueError('When calling automatic interface "%s" of an extensible module the ' + 'variant has to be specified' % item) + + else: + key = use + + if key is not None: + # Check first whether it exists with function syntax + content_name = '%s_%s' % (item, key) + if content_name in dir(module): + content = getattr(module, content_name) + + if callable(content): + return content(*args, **options) + + # Otherwise, check for class syntax + content_name = '%s%s' % (item, camel_case(key)) if isinstance(key, str) else '%s%s' % (item, key) + if content_name in dir(module): + content = getattr(module, content_name) + + if callable(content): + return content + + raise AttributeError('No valid interface was found for method "%s" with variant "%s"' % (item, key)) + + return dynamic_method_wrapper + + else: + raise AttributeError('module %s has no attribute %s' % (name, item)) + + return get_attribute diff --git a/mosaic/types/immutable.py b/mosaic/types/immutable.py new file mode 100644 index 00000000..12d5647a --- /dev/null +++ b/mosaic/types/immutable.py @@ -0,0 +1,48 @@ + +import inspect + + +__all__ = ['ImmutableObject'] + + +class ImmutableObject: + """ + A class that inherits from ImmutableObject produces the same effect as a class with all attributes + private in C++ programming, i.e. the attributes and methods of the instances of this class cannot be + changed by code external to the class. 
+ + Examples + -------- + >>> class Klass(ImmutableObject): + >>> def __init__(self): + >>> self.attribute = 10 + >>> + >>> def change_attr(self): + >>> self.attribute = 20 + >>> + >>> klass = Klass() + >>> klass.attribute + 10 + >>> klass.attribute = 30 + AttributeError('Objects of class "Klass" are immutable') + >>> klass.change_attr() + >>> klass.attribute + 20 + + """ + + def __setattr__(self, key, value): + calling_frame = inspect.stack()[1][0] + args, _, _, value_dict = inspect.getargvalues(calling_frame) + + calling_class = None + if len(args) and args[0] == 'self': + instance = value_dict.get('self', None) + if instance is not None: + calling_class = getattr(instance, '__class__', None) + + if calling_class is not None and calling_class == self.__class__: + super(ImmutableObject, self).__setattr__(key, value) + + else: + raise AttributeError('Objects of class "%s" are immutable' % self.__class__) diff --git a/mosaic/types/imported_function.py b/mosaic/types/imported_function.py new file mode 100644 index 00000000..e798a30b --- /dev/null +++ b/mosaic/types/imported_function.py @@ -0,0 +1,61 @@ + +from ..utils.mimport import mimport + + +__all__ = ['ImportedFunction'] + + +class ImportedFunction: + """ + A function that is imported dynamically and contains information on how to serialise + and deserialise it. + + Parameters + ---------- + name : str + Name of the function. + module : str + Name of the module to which it belongs. + path : tuple + List of paths to look for the function. 
+ + """ + + def __init__(self, name, module, path): + self._name = name + self._module = module + self._path = path + + self._import() + + def __call__(self, *args, **kwargs): + self._fun(*args, **kwargs) + + def _import(self): + value = mimport(self._module, fromlist=self._path) + self._fun = getattr(value, self._name) + + _serialisation_attrs = ['_name', '_module', '_path'] + + def _serialisation_helper(self): + state = {} + + for attr in self._serialisation_attrs: + state[attr] = getattr(self, attr) + + return state + + @classmethod + def _deserialisation_helper(cls, state): + instance = cls.__new__(cls) + + for attr, value in state.items(): + setattr(instance, attr, value) + + instance._import() + + return instance + + def __reduce__(self): + state = self._serialisation_helper() + return self._deserialisation_helper, (state,) diff --git a/mosaic/types/path.py b/mosaic/types/path.py new file mode 100644 index 00000000..2c31f882 --- /dev/null +++ b/mosaic/types/path.py @@ -0,0 +1,15 @@ + + +__all__ = ['Path'] + + +class Path(str): + """ + A Path is a ``str`` that contains meta-information about the filesystem path it represents. + + It does not have much functionality at the moment, and might be dropped at some point. + """ + + def __init__(self, *args, **kwargs): + str.__init__(*args, **kwargs) + self.is_path = True diff --git a/mosaic/types/struct.py b/mosaic/types/struct.py new file mode 100644 index 00000000..9540386d --- /dev/null +++ b/mosaic/types/struct.py @@ -0,0 +1,381 @@ + +import copy +import pprint +from collections import OrderedDict + +from .immutable import ImmutableObject +from ..utils import camel_case + + +__all__ = ['Struct'] + + +class Struct(ImmutableObject): + """ + Structs represent dictionary-like objects that provide some extra features over Python's ``OrderedDict``. + + Their internal representation is an ``OrderedDict``, a dictionary that maintains the order in which members + were added to it. 
Unlike traditional Python dicts, however, both square-bracket and dot notation are allowed + to access the members of the Struct. + + Structs can be ``extensible`` and ``mutable``, which is determined when they are instantiated and remains + unchanged for the life of the object. + + When a Struct is not ``mutable``, its members can only be assigned once, any further attempts at modifying + the value of a member will result in an ``AttributeError``. For ``mutable`` Structs the values of its + members can be changed as many times as needed. + + Whether or not a Struct is ``extensible`` affects how its members are accessed. When trying to find a member + of the Struct, a naive search is performed first, assuming that the item exists + within its internal dictionary. + + If the search fails, and the Struct is not ``extensible`` an ``AttributeError`` is raised. Otherwise, + a new search starts for members in the dictionary with a similar signature to the requested item. + + If this search fails, an ``AttributeError`` is raised. If if does not fail and a match is found, the match + is returned wrapped in a function that will evaluate whether the variant exists and is callable. + + Parameters + ---------- + content : dict-like, optional + Dict-like object to initialise the Struct, defaults to empty. + extensible : bool, optional + Whether or not the Struct should be extensible, defaults to ``False``. + mutable : bool, optional + Whether or not the Struct should be mutable, defaults to ``True``. 
+ + Examples + -------- + Let's create an empty, immutable Struct: + + >>> struct = Struct(mutable=False) + >>> struct.member = 10 + >>> struct.member + 10 + >>> struct['member'] + 10 + >>> struct.member = 20 + AttributeError('The attribute member already exists in the container, this container is not mutable') + + If the container was ``mutable`` instead: + + >>> struct = Struct() + >>> struct.member = 10 + >>> struct.member + 10 + >>> struct.member = 20 + >>> struct.member + 20 + + The same way mutability affects member assignment, extensibility affects how the members of the Struct are + accessed: + + >>> def function_variant_1(): + >>> print('Executed variant_1') + >>> + >>> def function_variant_2(): + >>> print('Executed variant_2') + >>> + >>> class KlassVariant1: + >>> def __init__(self): + >>> print('Instantiated Variant1') + >>> + >>> class KlassVariant2: + >>> def __init__(self): + >>> print('Instantiated Variant2') + >>> + >>> struct = Struct(extensible=True) + >>> struct.function_variant_1 = function_variant_1 + >>> struct.function_variant_2 = function_variant_2 + >>> struct.KlassVariant1 = KlassVariant1 + >>> struct.KlassVariant2 = KlassVariant2 + >>> + >>> struct.function(use='variant_1') + Executed variant_1 + >>> struct.Klass('Variant2')() + Instantiated Variant2 + >>> struct.Klass('variant_1')() + Instantiated Variant1 + + """ + + _allowed_attributes = ['_content', '_extensible', '_mutable'] + + def __init__(self, content=None, extensible=False, mutable=True): + super(Struct, self).__init__() + + if content is None: + content = OrderedDict() + + self._content = self._prepare_content(content, extensible, mutable) + self._extensible = extensible + self._mutable = mutable + + @staticmethod + def _prepare_content(content, extensible, mutable): + _content = OrderedDict() + for key, value in content.items(): + if isinstance(value, (dict, OrderedDict)): + value = Struct(value, extensible=extensible, mutable=mutable) + + elif isinstance(value, list) and 
len(value) and isinstance(value[0], (dict, OrderedDict)): + for index in range(len(value)): + value[index] = Struct(value[index], extensible=extensible, mutable=mutable) + + if isinstance(key, str): + _key = '_'.join(key.split(' ')) + + else: + _key = key + + _content[_key] = value + + return _content + + def _get(self, item): + if item in super(ImmutableObject, self).__getattribute__('_content').keys(): + return super(ImmutableObject, self).__getattribute__('_content')[item] + + else: + if super(ImmutableObject, self).__getattribute__('_extensible') is False: + raise AttributeError('The attribute "%s" does not exist in the container' % item) + + else: + exists = False + + for key in super(ImmutableObject, self).__getattribute__('_content').keys(): + if item in key and key.startswith(item): + exists = True + break + + if exists: + def dynamic_content_wrapper(*args, **options): + use = options.pop('use', None) + if use is None: + if len(args): + key = args[0] + args = args[1:] + else: + raise ValueError('When calling automatic interface "%s" of an ExtensibleObject the ' + 'variant has to be specified' % item) + + else: + key = use + + if key is not None: + # Check first whether it exists with function syntax + content_name = '%s_%s' % (item, key) + if content_name in super(ImmutableObject, self).__getattribute__('_content').keys(): + content = super(ImmutableObject, self).__getattribute__('_content')[content_name] + + if callable(content): + return content(*args, **options) + + # Otherwise, check for class syntax + content_name = '%s%s' % (item, camel_case(key)) if isinstance(key, str) \ + else '%s%s' % (item, key) + if content_name in super(ImmutableObject, self).__getattribute__('_content').keys(): + content = super(ImmutableObject, self).__getattribute__('_content')[content_name] + + if callable(content): + return content + + raise AttributeError( + 'No valid interface was found for content "%s" with variant "%s"' % (item, key)) + + return dynamic_content_wrapper 
+ + else: + raise AttributeError('The attribute "%s" does not exist in the container' % item) + + def __contains__(self, item): + if item in super(ImmutableObject, self).__getattribute__('_content').keys(): + return True + + else: + if super(ImmutableObject, self).__getattribute__('_extensible') is False: + return False + + else: + exists = False + + for key in super(ImmutableObject, self).__getattribute__('_content').keys(): + if item in key and key.startswith(item): + exists = True + break + + return exists + + def __getattr__(self, item): + return self._get(item) + + def __getitem__(self, item): + return self._get(item) + + def get(self, item, default=None): + """ + Returns an item from the Struct or a default value if it is not found. + + Parameters + ---------- + item : str + Name of the item to find + default : object, optional + Default value to be returned in case the item is not found, defaults to ``None`` + + Returns + ------- + + """ + return self._content.get(item, default) + + def pop(self, item, default=None): + """ + Returns an item from the Struct and deletes it, or returns a default value if it is not found. + + Parameters + ---------- + item : str + Name of the item to find + default : object, optional + Default value to be returned in case the item is not found + + Returns + ------- + + """ + return self._content.pop(item, default) + + def _set(self, item, value): + if item in self._allowed_attributes: + return super(Struct, self).__setattr__(item, value) + + if item in self._content.keys() and not self._mutable: + raise AttributeError('The attribute "%s" already exists in the container, ' + 'this container is not mutable' % item) + + else: + self._content[item] = value + return value + + def __setattr__(self, item, value): + self._set(item, value) + + def __setitem__(self, item, value): + self._set(item, value) + + def delete(self, item): + """ + Delete an item from the container using its key. 
+ + Parameters + ---------- + item : str + Name of the item to delete. + + Returns + ------- + + """ + if item in self._content.keys() and not self._mutable: + raise AttributeError('The attribute "%s" cannot be deleted from the container, ' + 'this container is not mutable' % item) + + else: + del self._content[item] + + def items(self): + """ + Returns the list of keys and values in the Struct. + + Returns + ------- + odict_items + List of keys and values in the Struct. + + """ + return self._content.items() + + def keys(self): + """ + Returns the list of keys in the Struct. + + Returns + ------- + odict_keys + List of keys in the Struct. + + """ + return self._content.keys() + + def values(self): + """ + Returns the list of values in the Struct. + + Returns + ------- + odict_values + List of values in the Struct. + + """ + return self._content.values() + + def copy(self): + """ + Returns a deepcopy of the Struct. + + Returns + ------- + Struct + Copied Struct + + """ + return Struct(content=copy.deepcopy(self._content), extensible=self._extensible, mutable=self._mutable) + + def update(self, content): + """ + Updates the Struct with the contents of a dict-like object. 
+ + Parameters + ---------- + content : dict-like + Content with which to update the Struct + + Returns + ------- + + """ + for key, value in content.items(): + if key not in self._content.keys(): + if isinstance(value, (dict, OrderedDict)): + value = Struct(value, extensible=self._extensible, mutable=self._mutable) + + elif isinstance(value, list) and len(value) and isinstance(value[0], (dict, OrderedDict)): + for index in range(len(value)): + value[index] = Struct(value[index], extensible=self._extensible, mutable=self._mutable) + + if isinstance(key, str): + _key = '_'.join(key.split(' ')) + + else: + _key = key + + self._content[_key] = value + + else: + if isinstance(self._content[key], Struct): + self._content[key].update(value) + + elif isinstance(self._content[key], list) \ + and len(self._content[key]) \ + and isinstance(self._content[key][0], Struct): + for index in range(len(value)): + self._content[key][index].update(value[index]) + + else: + self._content[key] = value + + def __str__(self, printer=None): + return pprint.pformat(self._content) + + __repr__ = __str__ diff --git a/mosaic/utils/__init__.py b/mosaic/utils/__init__.py new file mode 100644 index 00000000..0626cce2 --- /dev/null +++ b/mosaic/utils/__init__.py @@ -0,0 +1,7 @@ + + +from .event_loop import * +from .logger import * +from .subprocess import * +from .utils import * +from .change_case import * diff --git a/mosaic/utils/change_case.py b/mosaic/utils/change_case.py new file mode 100644 index 00000000..1f36cb46 --- /dev/null +++ b/mosaic/utils/change_case.py @@ -0,0 +1,55 @@ + +import re + + +__all__ = ['snake_case', 'camel_case'] + + +def snake_case(name): + """ + Change case to snake case. 
def camel_case(name):
    """
    Change case to camel case.

    Parameters
    ----------
    name : str
        String in snake case format to convert into camelcase

    Returns
    -------
    str
        String in camelcase

    """
    text = re.sub(r"^[\-_\.]", '', str(name))
    if not text:
        return text

    def _raise_group(matched):
        return uppercase(matched.group(1))

    head = uppercase(text[0])
    tail = re.sub(r"[\-_\.\s]([a-z0-9])", _raise_group, text[1:])
    return head + tail


def lowercase(name):
    # Normalise to ``str`` first so non-string input is accepted.
    return str(name).lower()


def uppercase(name):
    # Normalise to ``str`` first so non-string input is accepted.
    return str(name).upper()
+ + """ + if self._future.cancelled(): + return 'cancelled' + + elif self._future.done(): + return 'done' + + else: + return 'pending' + + @property + def future(self): + """ + The wrapped future + + """ + return self._future + + def __repr__(self): + return "<%s object at %s, uid=%s, state=%s>" % \ + (self.__class__.__name__, id(self), self.uid, self.state) + + def __await__(self): + return (yield from self._future.__await__()) + + def result(self): + """ + Get the future result. + + Returns + ------- + + """ + return self._future.result() + + def exception(self): + """ + Get the future exception. + + Returns + ------- + + """ + return self._future.exception() + + def set_result(self, result): + """ + Set the future result. + + Parameters + ---------- + result : object + + Returns + ------- + + """ + self._future.set_result(result) + + def set_exception(self, exc): + """ + Set the future exception. + + Parameters + ---------- + exc : Exception + + Returns + ------- + + """ + self._future.set_exception(exc) + + def done(self): + """ + Check whether the future is done. + + Returns + ------- + + """ + return self._future.done() + + def cancelled(self): + """ + Check whether the future is cancelled. + + Returns + ------- + + """ + return self._future.cancelled() + + def add_done_callback(self, fun): + """ + Add done callback. + + Parameters + ---------- + fun : callable + + Returns + ------- + + """ + self._future.add_done_callback(fun) + + +class EventLoop: + """ + The event loop encapsulates the asyncio (or equivalent) event loop, which + will run in a separate thread. + + It provides helper functions to run things within the loop, in an executor, + and to call functions after a period of time or every fixed amount of time. + + Parameters + ---------- + loop : asyncio loop, optional + Asyncio event loop to use internally, defaults to new loop. 
+ + """ + + def __init__(self, loop=None): + self._loop = loop or asyncio.new_event_loop() + asyncio.set_event_loop(self._loop) + + # TODO Figure out the best way to set this + num_workers = int(os.environ.get('OMP_NUM_THREADS', 2)) + self._executor = concurrent.futures.ThreadPoolExecutor(1) + + self._stop = asyncio.Event() + + self._recurring_tasks = weakref.WeakSet() + + def get_event_loop(self): + """ + Access the internal loop. + + Returns + ------- + asyncio loop + + """ + return self._loop + + def run_forever(self): + """ + Run event loop forever. + + Returns + ------- + + """ + async def main(): + await self._stop.wait() + + return self._loop.run_until_complete(main()) + + def stop(self): + """ + Stop the event loop. + + Returns + ------- + + """ + try: + if self._stop.is_set(): + return + + self._stop.set() + + for task in list(self._recurring_tasks): + if not task.done(): + task.cancel() + + tasks = asyncio.Task.all_tasks() + pending = [task for task in tasks if not task.done()] + + for task in pending: + task.cancel() + + while len(pending): + self._loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True)) + + pending = [task for task in tasks if not task.done()] + + self._loop.stop() + self._loop.close() + self._executor.shutdown() + self._executor.shutdown(wait=True) + + asyncio.set_event_loop(None) + + except RuntimeError: + pass + + def __del__(self): + self.stop() + + def run(self, coro, args=(), kwargs=None, wait=False): + """ + Schedule a function in the event loop from synchronous code. + + The call can be waited or returned immediately. + + Parameters + ---------- + coro : callable + Function to execute in the loop. + args : tuple, optional + Set of arguments for the function. + kwargs : optional + Set of keyword arguments for the function. + wait : bool, optional + Whether or not to wait for the call to end, defaults to False. 
+ + Returns + ------- + Return value from call or concurrent.futures.Future, depending on whether it is waited or not. + + """ + if self._stop.is_set(): + return + + kwargs = kwargs or {} + + if not inspect.iscoroutine(coro) and not inspect.iscoroutinefunction(coro): + coro = asyncio.coroutine(coro) + + if not self._loop.is_running(): + return self._loop.run_until_complete(coro(*args, **kwargs)) + + future = self._loop.create_task(coro(*args, **kwargs)) + return future + + def run_in_executor(self, callback, args=(), kwargs=None): + """ + Run function in a thread executor. + + Parameters + ---------- + callback : callable + Function to execute. + args : tuple, optional + Set of arguments for the function. + kwargs : optional + Set of keyword arguments for the function. + + Returns + ------- + asyncio.Future + + """ + if self._stop.is_set(): + return + + callback = functools.partial(callback, *args, **kwargs) + future = self._loop.run_in_executor(self._executor, callback) + + return future + + def wrap_future(self, future): + """ + Wrap a concurrent.futures.Future to be compatible + with asyncio. + + Parameters + ---------- + future : concurrent.futures.Future + + Returns + ------- + asyncio.Future + + """ + return asyncio.wrap_future(future, loop=self._loop) + + def timeout(self, coro, timeout, args=(), kwargs=None): + """ + Run function after a certain ``timeout`` in seconds. + + Parameters + ---------- + coro : callable + Function to execute in the loop. + timeout : float + Time to wait before execution in seconds. + args : tuple, optional + Set of arguments for the function. + kwargs : optional + Set of keyword arguments for the function. 
+ + Returns + ------- + concurrent.futures.Future + + """ + kwargs = kwargs or {} + + async def _timeout(): + await asyncio.sleep(timeout) + await self.run(coro, args=args, kwargs=kwargs) + + future = asyncio.run_coroutine_threadsafe(_timeout(), self._loop) + self._recurring_tasks.add(future) + + return future + + def interval(self, coro, interval, args=(), kwargs=None): + """ + Run function every ``interval`` in seconds, starting after ``interval`` seconds. + + Parameters + ---------- + coro : callable + Function to execute in the loop. + interval : float + Time to wait between executions in seconds. + args : tuple, optional + Set of arguments for the function. + kwargs : optional + Set of keyword arguments for the function. + + Returns + ------- + concurrent.futures.Future + + """ + kwargs = kwargs or {} + + async def _interval(): + while not self._stop.is_set(): + await asyncio.sleep(interval) + await self.run(coro, args=args, kwargs=kwargs) + + future = asyncio.run_coroutine_threadsafe(_interval(), self._loop) + self._recurring_tasks.add(future) + + return future + + def set_main_thread(self): + """ + Set loop thread as main thread. + + Returns + ------- + + """ + self._loop.call_soon_threadsafe(set_main_thread) + + +def gather(tasks): + """ + Wait for the termination of a group of tasks concurrently. + + Parameters + ---------- + tasks : list + Set of tasks to wait. + + Returns + ------- + list + Set of results from the task list. 
+ + """ + if not isinstance(tasks, list): + return tasks + + else: + return asyncio.gather(*tasks) diff --git a/mosaic/utils/logger.py b/mosaic/utils/logger.py new file mode 100644 index 00000000..1517da0c --- /dev/null +++ b/mosaic/utils/logger.py @@ -0,0 +1,396 @@ + +import sys +import logging +from cached_property import cached_property + +import mosaic + + +__all__ = ['LoggerManager', 'clear_logger'] + + +log_level = 'info' + + +_stdout = sys.stdout +_stderr = sys.stderr + + +_local_log_levels = { + 'info': logging.INFO, + 'debug': logging.DEBUG, + 'error': logging.ERROR, + 'warning': logging.WARNING, +} + + +_remote_log_levels = { + 'info': 'log_info', + 'debug': 'log_debug', + 'error': 'log_error', + 'warning': 'log_warning', +} + + +class LoggerBase: + + @property + def runtime(self): + return mosaic.runtime() + + @property + def comms(self): + return mosaic.get_comms() + + @property + def loop(self): + return mosaic.get_event_loop() + + def isatty(self): + return False + + +class LocalLogger(LoggerBase): + def __init__(self, logger, log_level=logging.INFO): + self._logger = logger + self._log_level = log_level + self._linebuf = '' + + def write(self, buf, uid=None): + if uid is None: + uid = self.runtime.uid + uid = uid.upper() + + temp_linebuf = self._linebuf + buf + self._linebuf = '' + for line in temp_linebuf.splitlines(True): + # From the io.TextIOWrapper docs: + # On output, if newline is None, any '\n' characters written + # are translated to the system default line separator. + # By default sys.stdout.write() expects '\n' newlines and then + # translates them so this is still cross platform. 
+ if line[-1] == '\n': + self._logger.log(self._log_level, line.rstrip(), extra={'runtime_id': uid}) + else: + self._linebuf += line + + def flush(self, uid=None): + if uid is None: + if self.runtime is not None: + uid = self.runtime.uid + else: + uid = '' + uid = uid.upper() + + if self._linebuf != '': + self._logger.log(self._log_level, self._linebuf.rstrip(), extra={'runtime_id': uid}) + self._linebuf = '' + + def log(self, msg, uid=None): + if uid is None: + if self.runtime is not None: + uid = self.runtime.uid + else: + uid = '' + uid = uid.upper() + + self._logger.log(self._log_level, msg, extra={'runtime_id': uid}) + + +class RemoteLogger(LoggerBase): + def __init__(self, runtime_id, log_level='log_info'): + self._runtime_id = runtime_id + self._log_level = log_level + self._linebuf = '' + + @cached_property + def remote_runtime(self): + return self.runtime.proxy(self._runtime_id) + + def write(self, buf, uid=None): + if buf == '\n': + return + + temp_linebuf = self._linebuf + buf + self._linebuf = '' + for line in temp_linebuf.splitlines(True): + # From the io.TextIOWrapper docs: + # On output, if newline is None, any '\n' characters written + # are translated to the system default line separator. + # By default sys.stdout.write() expects '\n' newlines and then + # translates them so this is still cross platform. + if line[-1] == '\n': + continue + + elif line.rstrip() == '': + continue + + else: + self._linebuf += line + self._linebuf += '\n' + + self.send(self._linebuf) + self._linebuf = '' + + def flush(self): + if self._linebuf != '': + self.send(self._linebuf) + + self._linebuf = '' + + def send(self, buf): + if not self.comms.shaken(self._runtime_id): + _stdout.write(buf) + _stdout.flush() + + else: + self.remote_runtime[self._log_level](buf=buf, as_async=False) + + def log(self, buf, uid=None): + self.send(buf) + + +class LoggerManager: + """ + Class that manages the creation loggers and the interface with them. 
It creates + local or remote loggers and handles the communication with loggers at different + levels ``info``, ``debug``, ``error`` and ``warning``. + + """ + + def __init__(self): + self._info_logger = None + self._debug_logger = None + self._error_logger = None + self._warn_logger = None + + self._stdout = _stdout + self._stderr = _stderr + + self._log_level = 'info' + self._log_location = None + + def set_local(self): + """ + Set up local loggers. + + Returns + ------- + + """ + self._log_location = 'local' + + sys.stdout = self._stdout + sys.stderr = self._stderr + + handler = logging.StreamHandler(self._stdout) + handler.setFormatter(CustomFormatter('%(asctime)s - %(levelname)-10s %(runtime_id)-15s %(message)s')) + + logger = logging.getLogger('mosaic') + logger.propagate = False + if logger.hasHandlers(): + logger.handlers.clear() + + logger.addHandler(handler) + + self._info_logger = LocalLogger(logger, log_level=_local_log_levels['info']) + self._debug_logger = LocalLogger(logger, log_level=_local_log_levels['debug']) + self._error_logger = LocalLogger(logger, log_level=_local_log_levels['error']) + self._warn_logger = LocalLogger(logger, log_level=_local_log_levels['warning']) + + sys.stdout.flush() + # sys.stdout = self._info_logger + # sys.stderr = self._error_logger + + logging.basicConfig( + # stream=self._info_logger, + level=_local_log_levels[log_level], + format='%(message)s', + ) + + def set_remote(self, runtime_id='monitor'): + """ + Set up remote loggers. + + Parameters + ---------- + runtime_id : str, optional + Runtime to which logging will be directed, defaults to ``monitor``. 
+ + Returns + ------- + + """ + self._log_location = 'remote' + + sys.stdout = self._stdout + sys.stderr = self._stderr + + self._info_logger = RemoteLogger(runtime_id=runtime_id, log_level=_remote_log_levels['info']) + self._debug_logger = RemoteLogger(runtime_id=runtime_id, log_level=_remote_log_levels['debug']) + self._error_logger = RemoteLogger(runtime_id=runtime_id, log_level=_remote_log_levels['error']) + self._warn_logger = RemoteLogger(runtime_id=runtime_id, log_level=_remote_log_levels['warning']) + + sys.stdout.flush() + sys.stdout = self._info_logger + sys.stderr = self._error_logger + + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(CustomFormatter('%(asctime)s - %(levelname)-10s %(runtime_id)-15s %(message)s')) + + logger = logging.getLogger('mosaic') + logger.propagate = False + if logger.hasHandlers(): + logger.handlers.clear() + + logger.addHandler(handler) + + logging.basicConfig( + stream=sys.stdout, + level=_local_log_levels[log_level], + format='%(message)s', + ) + + @staticmethod + def set_level(level): + """ + Set log level from options ``info``, ``debug``, ``error`` and ``warning``. + + Parameters + ---------- + level : str + Log level + + Returns + ------- + + """ + global log_level + log_level = level + + logger = logging.getLogger('mosaic') + logger.setLevel(_local_log_levels[level]) + + def info(self, buf, uid=None): + """ + Log message with level ``info``. + + Parameters + ---------- + buf : str + Message to log. + uid : str, optional + UID of the runtime from which the message originates, defaults to + current runtime. + + Returns + ------- + + """ + if self._info_logger is None: + return + + if log_level in ['error']: + return + + self._info_logger.log(buf, uid=uid) + + def debug(self, buf, uid=None): + """ + Log message with level ``debug``. + + Parameters + ---------- + buf : str + Message to log. + uid : str, optional + UID of the runtime from which the message originates, defaults to + current runtime. 
+ + Returns + ------- + + """ + if self._debug_logger is None: + return + + if log_level in ['info', 'error']: + return + + self._debug_logger.log(buf, uid=uid) + + def error(self, buf, uid=None): + """ + Log message with level ``error``. + + Parameters + ---------- + buf : str + Message to log. + uid : str, optional + UID of the runtime from which the message originates, defaults to + current runtime. + + Returns + ------- + + """ + if self._error_logger is None: + return + + self._error_logger.log(buf, uid=uid) + + def warning(self, buf, uid=None): + """ + Log message with level ``warning``. + + Parameters + ---------- + buf : str + Message to log. + uid : str, optional + UID of the runtime from which the message originates, defaults to + current runtime. + + Returns + ------- + + """ + if self._warn_logger is None: + return + + self._warn_logger.log(buf, uid=uid) + + def warn(self, buf, uid=None): + """ + Log message with level ``warning``. + + Parameters + ---------- + buf : str + Message to log. + uid : str, optional + UID of the runtime from which the message originates, defaults to + current runtime. + + Returns + ------- + + """ + self.warning(buf, uid=uid) + + +class CustomFormatter(logging.Formatter): + def format(self, record): + if not hasattr(record, 'runtime_id'): + record.runtime_id = '' + + return super().format(record) + + +def clear_logger(): + sys.stdout.flush() + sys.stderr.flush() + + sys.stdout = _stdout + sys.stderr = _stderr diff --git a/mosaic/utils/mimport.py b/mosaic/utils/mimport.py new file mode 100644 index 00000000..7a658090 --- /dev/null +++ b/mosaic/utils/mimport.py @@ -0,0 +1,35 @@ + +import sys +import importlib + + +__all__ = ['mimport'] + + +def mimport(name, fromlist=()): + """ + ``mosaic_import`` encapsulates importlib, allowing for dynamically importing a module by name, given a series of + filesystem locations in which to look. 
+ + Unlike usual Python imports, the cache of imported modules is purposefully ignored and modules will always + be re-imported even if they have been imported previously. + + Parameters + ---------- + name : str + Name of the module to be imported + fromlist : tuple, optional + List of paths in which to look for the module + + Returns + ------- + Python module + Imported Python module + + """ + _saved_path = sys.path + sys.path = list(fromlist) + imported_module = importlib.reload(importlib.__import__(name, fromlist=fromlist)) + sys.path = _saved_path + + return imported_module diff --git a/mosaic/utils/subprocess.py b/mosaic/utils/subprocess.py new file mode 100644 index 00000000..74f023d1 --- /dev/null +++ b/mosaic/utils/subprocess.py @@ -0,0 +1,339 @@ + +import os +import sys +import atexit +import psutil +import signal +import daemon +import weakref +import functools +import threading +import multiprocessing + +import mosaic + + +__all__ = ['subprocess'] + + +_open_processes = weakref.WeakSet() + + +class Subprocess: + """ + Class to manage a subprocess that executes a target function. + + It manages the creation and destruction of the process, and can be used + to collect statistics about it. + + Parameters + ---------- + name : str, optional + Name to give to the subprocess. + target : callable + Target function to be executed in the subprocess. + cpu_affinity : list, optional + List of CPUs to set the affinity of the process, defaults to None. + daemon : bool, optional + Whether to start the subprocess as a daemon, defaults to False. 
+ + """ + + def __init__(self, *args, **kwargs): + name = kwargs.pop('name', None) + target = kwargs.pop('target', None) + cpu_affinity = kwargs.pop('cpu_affinity', None) + daemon = kwargs.pop('daemon', False) + + if target is None or not callable(target): + raise ValueError('A subprocess needs to be provided a target function.') + + # _keep_child_alive is the write side of a pipe, which, when it is + # closed, causes the read side of the pipe to unblock for reading. Note + # that it is never closed directly. The write side is closed by the + # kernel when our process exits, or possibly by the garbage collector + # closing the file descriptor when the last reference to + # _keep_child_alive goes away. We can take advantage of this fact to + # monitor from the child and exit when the parent goes away unexpectedly + # (for example due to SIGKILL). This variable is otherwise unused except + # for the assignment here. + parent_alive_pipe, self._keep_child_alive = multiprocessing.Pipe(duplex=False) + + # _parent_start_pipe will be used to signal that the child process is indeed alive + # after we start it before we keep going forward. + self._parent_start_pipe, child_start_pipe = multiprocessing.Pipe() + + self._parent_runtime = mosaic.runtime() + if self._parent_runtime is not None: + parent_args = (self._parent_runtime.uid, + self._parent_runtime.address, + self._parent_runtime.port,) + + else: + parent_args = (None, None, None) + + self._mp_process = multiprocessing.Process(target=self._start_process, + name=name, + args=(target, + daemon, + child_start_pipe, + parent_alive_pipe, + self._keep_child_alive, + cpu_affinity, + *parent_args, + args, kwargs)) + self._ps_process = None + self._target = target + self._obj = None + + self._state = 'pending' + + def __call__(self, *args, **kwargs): + pass + + def __repr__(self): + return "" % (self._target, self._state) + + @property + def state(self): + """ + Current state of the process. 
+ + It could be ``pending``, ``running``, ``paused`` or ``stopped``. + + """ + return self._state + + def running(self): + """ + Whether or not the process is running. + + Returns + ------- + bool + + """ + return self._state == 'running' + + def paused(self): + """ + Whether or not the process is paused. + + Returns + ------- + bool + + """ + return self._state == 'paused' + + def stopped(self): + """ + Whether or not the process is stopped. + + Returns + ------- + bool + + """ + return self._state == 'stopped' + + def pause_process(self): + """ + Pause the subprocess. + + Returns + ------- + + """ + if self._ps_process is not None: + self._ps_process.suspend() + self._state = 'paused' + + def start_process(self): + """ + Start or resume the subprocess. + + Returns + ------- + + """ + if self._ps_process is not None: + self._ps_process.resume() + + else: + self._mp_process.start() + self._ps_process = psutil.Process(self._mp_process.pid) + self.cpu_load() + + _open_processes.add(self) + + self._parent_start_pipe.recv() + self._parent_start_pipe.close() + + self._state = 'running' + + def _start_process(self, target, + is_daemon, + child_start_pipe, + parent_alive_pipe, keep_child_alive, + cpu_affinity, + parent_id, parent_address, parent_port, args, kwargs): + self._state = 'running' + + child_start_pipe.send(True) + child_start_pipe.close() + + if sys.platform == 'linux' and cpu_affinity is not None: + psutil.Process().cpu_affinity(cpu_affinity) + + keep_child_alive.close() + if not daemon: + self._immediate_exit_when_closed(parent_alive_pipe) + + mosaic.clear_runtime() + + try: + if is_daemon: + from .logger import _stdout, _stderr + daemon_context = daemon.DaemonContext(detach_process=True, + stdout=_stdout, + stderr=_stderr) + daemon_context.open() + + self._target = target + self._obj = self._target(*args, **kwargs, + parent_id=parent_id, + parent_address=parent_address, + parent_port=parent_port) + + if hasattr(self._obj, 'run') and callable(self._obj.run): 
+ self._obj.run() + + finally: + if is_daemon: + daemon_context.close() + + @staticmethod + def _immediate_exit_when_closed(parent_alive_pipe): + def monitor_parent(): + try: + # The parent_alive_pipe should be held open as long as the + # parent is alive and wants us to stay alive. Nothing writes to + # it, so the read will block indefinitely. + parent_alive_pipe.recv() + except EOFError: + # Parent process went away unexpectedly. Exit immediately. Could + # consider other exiting approaches here. My initial preference + # is to unconditionally and immediately exit. If we're in this + # state it is possible that a "clean" process exit won't work + # anyway - if, for example, the system is getting bogged down + # due to the running out of memory, exiting sooner rather than + # later might be needed to restore normal system function. + # If this is in appropriate for your use case, please file a + # bug. + os._exit(-1) + + thread = threading.Thread(target=monitor_parent) + thread.daemon = True + thread.start() + + def stop_process(self): + """ + Stop the subprocess. + + Returns + ------- + + """ + if self._ps_process is not None: + try: + self._ps_process.terminate() + + if self in _open_processes: + _open_processes.remove(self) + + except (psutil.NoSuchProcess, OSError, RuntimeError): + pass + + self._state = 'stopped' + + def join_process(self, timeout=0.1): + """ + Join the subprocess. + + Parameters + ---------- + timeout : float, optional + Time to wait to join, defaults to 0.1. + + Returns + ------- + + """ + self._mp_process.join(timeout) + + def memory(self): + """ + Amount of RSS memory being consumed by the process. + + Returns + ------- + float + RSS memory. + + """ + if self._ps_process is not None: + return self._ps_process.memory_info().rss + + return 0 + + def cpu_load(self): + """ + CPU load as a percentage. + + Returns + ------- + float + CPU load. 
+ + """ + if self._ps_process is not None: + return self._ps_process.cpu_percent(interval=None) + + return 0 + + +def subprocess(target): + """ + A decorator that will execute a target function in a subprocess. The generated subprocess + will be encapsulated in a class that has methods to manage the subprocess. + + Parameters + ---------- + target : callable + Target function to be executed in the subprocess + + Returns + ------- + Subprocess + Instance of class Subprocess. + + """ + + return functools.partial(Subprocess, target=target) + + +def _close_processes(): + for process in list(_open_processes): + process.stop_process() + + +def _close_processes_atsignal(signum, frame): + _close_processes() + + os._exit(-1) + + +atexit.register(_close_processes) +signal.signal(signal.SIGINT, _close_processes_atsignal) +signal.signal(signal.SIGTERM, _close_processes_atsignal) diff --git a/mosaic/utils/utils.py b/mosaic/utils/utils.py new file mode 100644 index 00000000..f605d227 --- /dev/null +++ b/mosaic/utils/utils.py @@ -0,0 +1,101 @@ + +import sys +import psutil +import threading +import numpy as np + +__all__ = ['sizeof', 'set_main_thread', 'memory_limit'] + + +def sizeof(obj, seen=None): + """ + Recursively finds size of objects. + + Parameters + ---------- + obj : object + Object to check size. + seen + + Returns + ------- + float + Size in bytes. 
+ + """ + if isinstance(obj, np.ndarray): + size = obj.nbytes + else: + size = sys.getsizeof(obj) + if seen is None: + seen = set() + obj_id = id(obj) + if obj_id in seen: + return 0 + # Important mark as seen *before* entering recursion to gracefully handle + # self-referential objects + seen.add(obj_id) + if isinstance(obj, dict): + size += sum([sizeof(v, seen) for v in obj.values()]) + size += sum([sizeof(k, seen) for k in obj.keys()]) + elif hasattr(obj, '__dict__'): + size += sizeof(obj.__dict__, seen) + elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)): + size += sum([sizeof(i, seen) for i in obj]) + + return size + + +def set_main_thread(): + """ + Set current thread as main thread. + + Returns + ------- + + """ + threading.current_thread().name = 'MainThread' + threading.current_thread().__class__ = threading._MainThread + + +def memory_limit(): + """ + Get the memory limit (in bytes) for this system. + + Takes the minimum value from the following locations: + - Total system host memory + - Cgroups limit (if set) + - RSS rlimit (if set) + + Returns + ------- + float + Memory limit. 
+ + """ + limit = psutil.virtual_memory().total + + # Check cgroups if available + if sys.platform == "linux": + try: + with open("/sys/fs/cgroup/memory/memory.limit_in_bytes") as f: + cgroups_limit = int(f.read()) + + if cgroups_limit > 0: + limit = min(limit, cgroups_limit) + + except Exception: + pass + + # Check rlimit if available + try: + import resource + + hard_limit = resource.getrlimit(resource.RLIMIT_RSS)[1] + if hard_limit > 0: + limit = min(limit, hard_limit) + + except (ImportError, OSError): + pass + + return limit diff --git a/requirements-optional.txt b/requirements-optional.txt new file mode 100644 index 00000000..4f1a8e41 --- /dev/null +++ b/requirements-optional.txt @@ -0,0 +1,4 @@ +matplotlib==3.1 +wxPython +traitsui +mayavi diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..631086b4 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,24 @@ +blosc +cached-property +click +cloudpickle>=1.6 +cython +devito>=4.2.3 +flake8 +gputil +h5py>=3.1 +numpy>=1.19 +pickle5 +pre_commit +psutil +pyflakes +pytest +pytest-cov +pyyaml>=5.1 +pyzmq>=20.0 +python-daemon +scikit-image +scipy>=1.6 +sphinx +sphinx_rtd_theme +pytest-runner diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..ea487a3e --- /dev/null +++ b/setup.cfg @@ -0,0 +1,25 @@ + +[flake8] +# References: +# https://flake8.readthedocs.io/en/latest/user/configuration.html +# https://flake8.readthedocs.io/en/latest/user/error-codes.html + +# Note: there cannot be spaces after comma's here +exclude = __init__.py,.git,docs,__pycache__,legacy +ignore = + E4, # Import formatting + E121, # continuation line under-indented for hanging indent + E126, # continuation line over-indented for hanging indent + E127, # continuation line over-indented for visual indent + E128, # E128 continuation line under-indented for visual indent + W503, # line break before binary operator + E129, # visually indented line with same indent as next logical line + E116, # unexpected 
indentation + F841, # local variable is assigned to but never used + W504, # line break after binary operator + E226, # missing whitespace around arithmetic operator + E722, # do not use bare 'except' + F405, # name may be undefined, or defined from star imports + F403, # 'from module import *' + +max-line-length = 130 diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..f29952fb --- /dev/null +++ b/setup.py @@ -0,0 +1,53 @@ + +from setuptools import setup, find_packages + +import stride + +with open('requirements.txt') as f: + required = f.read().splitlines() + +with open('requirements-optional.txt') as f: + optionals = f.read().splitlines() + +requirements = [] +links = [] +for requirement in required: + if requirement[0:3] == 'git': + links += [requirement + '#egg=' + requirement.split('/')[-1] + '-0'] + requirements += [requirement.split('/')[-1]] + else: + requirements += [requirement] + +optional_requirements = [] +optional_links = [] +for requirement in optionals: + if requirement[0:3] == 'git': + optional_links += [requirement + '#egg=' + requirement.split('/')[-1] + '-0'] + optional_requirements += [requirement.split('/')[-1]] + else: + optional_requirements += [requirement] + +setup( + name='stride', + version=stride.__version__, + description='A (somewhat) general optimisation framework for ultrasound medical imaging', + long_description='A (somewhat) general optimisation framework for ultrasound medical imaging', + url='https://github.com/trustimaging/stride', + author='TRUST', + author_email='c.cueto@imperial.ac.uk', + license='', + python_requires=">=3.7", + packages=find_packages(exclude=['docs', 'tests', 'examples']), + package_data={}, + include_package_data=True, + install_requires=requirements, + extras_require={'extras': optional_requirements}, + dependency_links=links, + entry_points=''' + [console_scripts] + mrun=mosaic.cli.mrun:go + mscript=mosaic.cli.mscript:go + ''', + zip_safe=False, + test_suite='tests' +) diff --git 
a/stride/__init__.py b/stride/__init__.py new file mode 100644 index 00000000..177ae588 --- /dev/null +++ b/stride/__init__.py @@ -0,0 +1,9 @@ + + +__version__ = '1.0' + + +from .runner import Runner +from .problem_definition import * +from .problem_types import * +from .optimisation import * diff --git a/stride/optimisation/__init__.py b/stride/optimisation/__init__.py new file mode 100644 index 00000000..cca8aa24 --- /dev/null +++ b/stride/optimisation/__init__.py @@ -0,0 +1,5 @@ + +from . import functionals +from .optimisation import * +from .variables import * +from .optimisers import * diff --git a/stride/optimisation/functionals/__init__.py b/stride/optimisation/functionals/__init__.py new file mode 100644 index 00000000..1e007746 --- /dev/null +++ b/stride/optimisation/functionals/__init__.py @@ -0,0 +1,3 @@ + +from .functional import * +from . import l2_norm_difference diff --git a/stride/optimisation/functionals/functional.py b/stride/optimisation/functionals/functional.py new file mode 100644 index 00000000..fc60f321 --- /dev/null +++ b/stride/optimisation/functionals/functional.py @@ -0,0 +1,76 @@ + +from abc import ABC, abstractmethod + + +__all__ = ['FunctionalBase', 'FunctionalValue'] + + +class FunctionalBase(ABC): + """ + Base class for the implementation of functionals or loss functions. A functional calculates + a scalar value given some modelled and some observed data, as well as the residual and the adjoint source. + + """ + + @abstractmethod + def apply(self, shot, modelled, observed, **kwargs): + """ + Calculate the functional. + + Parameters + ---------- + shot : Shot + Shot for which the functional is calculated. + modelled : Data + Data of the modelled. + observed : Data + Data of the observed. + + Returns + ------- + FunctionalValue + Value of the functional and the residual. + Data + Adjoint source. 
+ + """ + pass + + def get_grad(self, variables, **kwargs): + """ + The functional might contain components of the gradient that need to be calculated. + + Parameters + ---------- + variables : VariableList + Updated list of variables. + + Returns + ------- + + """ + return variables + + +class FunctionalValue: + """ + Container class for the calculated functional value and the residuals. + + Parameters + ---------- + shot_id : int + ID of the shot for which the value has been calculated. + fun_value : float + Scalar value of the functional. + residuals : Data + Calculated residuals. + + """ + + def __init__(self, shot_id, fun_value, residuals): + self.shot_id = shot_id + self.fun_value = fun_value + self.residuals = residuals + + def __repr__(self): + return 'loss %e for shot %d' % (self.fun_value, self.shot_id) diff --git a/stride/optimisation/functionals/l2_norm_difference.py b/stride/optimisation/functionals/l2_norm_difference.py new file mode 100644 index 00000000..bc2c05b3 --- /dev/null +++ b/stride/optimisation/functionals/l2_norm_difference.py @@ -0,0 +1,51 @@ + +import numpy as np + +from .functional import FunctionalBase, FunctionalValue + + +__all__ = ['L2NormDifference'] + + +class L2NormDifference(FunctionalBase): + """ + L2-Norm of the difference between observed and modelled data: + + f = ||modelled - observed||^2 + + """ + + def apply(self, shot, modelled, observed, **kwargs): + """ + Calculate the functional. + + Parameters + ---------- + shot : Shot + Shot for which the functional is calculated. + modelled : Data + Data of the modelled. + observed : Data + Data of the observed. + + Returns + ------- + FunctionalValue + Value of the functional and the residual. + Data + Adjoint source. 
+ + """ + + residual_data = adjoint_source_data = modelled.data-observed.data + + residual = modelled.alike('residual') + adjoint_source = modelled.alike('adjoint_source') + + residual.data[:] = residual_data + adjoint_source.data[:] = adjoint_source_data + + fun = np.sum(residual.data**2) + fun = FunctionalValue(shot.id, fun, residual) + + return fun, adjoint_source diff --git a/stride/optimisation/optimisation.py b/stride/optimisation/optimisation.py new file mode 100644 index 00000000..41daee12 --- /dev/null +++ b/stride/optimisation/optimisation.py @@ -0,0 +1,649 @@ + +import numpy as np +from collections import OrderedDict + +import mosaic +from mosaic.types import Struct +from mosaic.utils import camel_case + +from .. import optimisation +from .. import Runner +from .pipelines import default_pipelines +from stride.problem_definition.base import Saved + + +__all__ = ['Iteration', 'Block', 'Optimisation', 'VariableList', 'CallableList'] + + +_magic_ops = [ + '__add__', + '__sub__', + '__mul__', + '__pow__', + '__truediv__', + '__floordiv__', + '__iadd__', + '__isub__', + '__imul__', + '__ipow__', + '__itruediv__', + '__ifloordiv__', + '__radd__', + '__rsub__', + '__rmul__', + '__rtruediv__', + '__rfloordiv__', +] + + +class CallableList: + """ + Class representing a series of objects that contain common interfaces. + + For example, let's say that we create a certain class a a callable list using it: + + >>> class Klass: + >>> def __init__(self, value): + >>> self.value = value + >>> + >>> def print(self): + >>> print('Value: ', self.value) + >>> + >>> callable_list = CallableList([Klass(1), Klass(2)]) + + then we can call the common method in all elements of the list by doing: + + >>> callable_list.print() + Value: 1 + Value: 2 + + but also that we can set attributes accordingly: + + >>> callable_list.value = [3, 4] + >>> callable_list.print() + Value: 3 + Value: 4 + + A callable list can be iterated and ``len(callable_list)`` is also valid. 
+ + The result of an attribute access on a callable list is another callable list, + ensuring composability of operations. + + + Parameters + ---------- + items : iterable + Items in the callable list. + + """ + + def __init__(self, items=None): + self.items = items + + def __contains__(self, item): + return item in self.items + + def __len__(self): + return len(self.items) + + def __iter__(self): + return iter(self.items) + + def __getitem__(self, item): + return self.items[item] + + def __getattribute__(self, item): + try: + if item in _magic_ops: + raise AttributeError('Magic method') + + return super().__getattribute__(item) + + except AttributeError: + + first_variable = next(self.__iter__()) + + if not hasattr(first_variable, item): + raise AttributeError('Class %s does not have method %s' % + (first_variable.__class__.__name__, item)) + + if not callable(getattr(first_variable, item)): + result_list = [] + for variable in self: + result_list.append(getattr(variable, item)) + + return CallableList(result_list) + + else: + def list_method(*args, **kwargs): + arg_list = [[] for _ in range(len(self))] + kwarg_list = [{} for _ in range(len(self))] + + for arg in args: + if isinstance(arg, (list, CallableList)) and len(arg) == len(self): + for index in range(len(self)): + arg_list[index].append(arg[index]) + else: + for index in range(len(self)): + arg_list[index].append(arg) + + for key, arg in kwargs.items(): + if isinstance(arg, (list, CallableList)) and len(arg) == len(self): + for index in range(len(self)): + kwarg_list[index][key] = arg[index] + else: + for index in range(len(self)): + kwarg_list[index][key] = arg + + result_list = [] + for index, elem in zip(range(len(self)), self): + method = getattr(elem, item) + result_list.append(method(*arg_list[index], **kwarg_list[index])) + + return result_list + + return list_method + + def __setattr__(self, key, value): + if key == 'items': + super().__setattr__(key, value) + + else: + value_list = [] + + if isinstance(value, (list,
CallableList)) and len(value) == len(self): + for index in range(len(self)): + value_list.append(value[index]) + + else: + for index in range(len(self)): + value_list.append(value) + + for index, elem in zip(range(len(self)), self): + setattr(elem, key, value_list[index]) + + def __setstate__(self, state): + self.items = state['items'] + + @staticmethod + def magic_op(item): + def magic_wrap(self, *args, **kwargs): + return self.__getattribute__(item)(*args, **kwargs) + + return magic_wrap + + +for op in _magic_ops: + setattr(CallableList, op, CallableList.magic_op(op)) + + +class VariableList(CallableList): + """ + A variable list is a specific type of callable list in which the items are a Struct instead of + a Python list. + + """ + + def __init__(self): + super().__init__() + + self.items = Struct() + + def __len__(self): + return len(self.items.keys()) + + def __iter__(self): + return iter(list(self.items.values())) + + def __getitem__(self, item): + try: + return self.items[item] + + except AttributeError: + return list(self.items.values())[item] + + +class Iteration: + """ + Objects of this class contain information about the iteration, such as the value of the functional. + + Parameters + ---------- + id : int + Numerical ID of the iteration. + + """ + + def __init__(self, id, **kwargs): + self.id = id + + self._fun = OrderedDict() + + @property + def fun_value(self): + """ + Functional value for this iteration across all shots. + + """ + return sum([each.fun_value for each in self._fun.values()]) + + def add_fun(self, fun): + """ + Add a functional value for a particular shot to the iteration. + + Parameters + ---------- + fun : FunctionalValue + + Returns + ------- + + """ + self._fun[fun.shot_id] = fun + + +class Block: + """ + A block determines a set of conditions that is maintained over a number of iterations, + such as the frequency band used or the step size applied. 
+ + These can be given to the block through the default ``Block.config``, which will take care + of creating and configuring the ``pipeline``s that implement these conditions. + + Pipelines can be accessed through ``Block.pipelines`` using dot notation. + + The iteration loop of the block can be started using the generator ``Block.iterations`` as: + + >>> for iteration in block.iterations(): + >>> pass + + Parameters + ---------- + id : int + Numerical ID of the block. + functional : Functional + Functional class to be used in the inversion. + + """ + + def __init__(self, id, **kwargs): + self.id = id + + self.functional = kwargs.pop('functional', None) + self.pipelines = Struct() + self.select_shots = dict() + + self._num_iterations = None + self._iterations = OrderedDict() + + @property + def num_iterations(self): + """ + Number of iterations in the block. + + """ + return self._num_iterations + + def iterations(self): + """ + Generator of iterations. + + Returns + ------- + iterable + Iteration iterables. + + """ + for index in range(self._num_iterations): + iteration = Iteration(index) + self._iterations[index] = iteration + + yield iteration + + def config(self, **kwargs): + """ + Configure the block appropriately. + + Parameters + ---------- + num_iterations : int, optional + Number of iterations in the block, defaults to 1. + select_shots : dict, optional + Rules to select shots in each iteration, defaults to selecting all. + wavelets : callable or dict + Pipeline class to process the wavelets or dictionary with configuration for the default pipeline. + wavefield : callable or dict + Pipeline class to process the wavefield or dictionary with configuration for the default pipeline. + traces : callable or dict + Pipeline class to process the traces or dictionary with configuration for the default pipeline. + adjoint_source : callable or dict + Pipeline class to process the adjoint source or dictionary with configuration for the default pipeline. 
+ local_gradient : callable or dict + Pipeline class to process the local gradient or dictionary with configuration for the default pipeline. + global_gradient : callable or dict + Pipeline class to process the global gradient or dictionary with configuration for the default pipeline. + model_iteration : callable or dict + Pipeline class to process the model after each iteration or dictionary with configuration for the default pipeline. + model_block : callable or dict + Pipeline class to process the model after each block or dictionary with configuration for the default pipeline. + + Returns + ------- + + """ + self._num_iterations = kwargs.pop('num_iterations', 1) + self.select_shots = kwargs.pop('select_shots', {}) + + # Process wavelets + wavelets = kwargs.pop('wavelets', {}) + if isinstance(wavelets, dict): + self.pipelines.wavelets = default_pipelines.ProcessWavelets(**kwargs, **wavelets) + else: + self.pipelines.wavelets = wavelets + + # Process wavefield + wavefield = kwargs.pop('wavefield', {}) + if isinstance(wavefield, dict): + self.pipelines.wavefield = default_pipelines.ProcessWavefield(**kwargs, **wavefield) + else: + self.pipelines.wavefield = wavefield + + # Process traces + traces = kwargs.pop('traces', {}) + if isinstance(traces, dict): + self.pipelines.traces = default_pipelines.ProcessTraces(**kwargs, **traces) + else: + self.pipelines.traces = traces + + # Process adjoint source + adjoint_source = kwargs.pop('adjoint_source', {}) + if isinstance(adjoint_source, dict): + self.pipelines.adjoint_source = default_pipelines.ProcessAdjointSource(**kwargs, **adjoint_source) + else: + self.pipelines.adjoint_source = adjoint_source + + # Process local gradient + local_gradient = kwargs.pop('local_gradient', {}) + if isinstance(local_gradient, dict): + self.pipelines.local_gradient = default_pipelines.ProcessLocalGradient(**kwargs, **local_gradient) + else: + self.pipelines.local_gradient = local_gradient + + # Process global gradient + global_gradient = 
kwargs.pop('global_gradient', {}) + if isinstance(global_gradient, dict): + self.pipelines.global_gradient = default_pipelines.ProcessGlobalGradient(**kwargs, **global_gradient) + else: + self.pipelines.global_gradient = global_gradient + + # Process model iteration + model_iteration = kwargs.pop('model_iteration', {}) + if isinstance(model_iteration, dict): + self.pipelines.model_iteration = default_pipelines.ProcessModelIteration(**kwargs, **model_iteration) + else: + self.pipelines.model_iteration = model_iteration + + # Process model block + model_block = kwargs.pop('model_block', {}) + if isinstance(model_block, dict): + self.pipelines.model_block = default_pipelines.ProcessModelBlock(**kwargs, **model_block) + else: + self.pipelines.model_block = model_block + + +class Optimisation(Saved): + """ + Objects of this class act as managers of a local optimisation process. + + Optimisations are performed with respect of a given ``functional`` + (e.g. L2-norm of the difference between observed and modelled data), over + one or more optimisation variables (such as longitudinal speed of sound or attenuation). + Variables are updated according to an associated local optimiser (such as gradient descent or momentum). + + Variables can be added to the optimisation through ``Optimisation.add(variable, optimiser)``. + + The general convention is to divide the optimisation process in blocks and iterations, + although that doesn't have to be the case. A block determines a set of conditions + that is maintained over a number of iterations, such as the frequency band used or the + step size applied. + + Blocks are generated through ``Optimisation.blocks``: + + >>> for block in optimisation.blocks(num_blocks): + >>> block.config(...) + >>> + + The default running behaviour of the optimisation is obtained when calling ``Optimisation.run(block, problem)``: + + >>> for block in optimisation.blocks(num_blocks): + >>> block.config(...) 
+ >>> await optimisation.run(block, problem) + + but iterations can also be run manually: + + >>> for block in optimisation.blocks(num_blocks): + >>> block.config(...) + >>> + >>> for iteration in block.iterations(): + >>> pass + + Parameters + ---------- + name : str, optional + Optional name for the optimisation object. + functional : str or object, optional + Name of the functional to be used, or object defining that functional, defaults to ``l2_norm_difference``. + + """ + + def __init__(self, name='optimisation', **kwargs): + super().__init__(name, **kwargs) + + functional = kwargs.pop('functional', 'l2_norm_difference') + + if isinstance(functional, str): + functional_module = getattr(optimisation.functionals, functional) + self.functional = getattr(functional_module, camel_case(functional))() + + else: + self.functional = functional + + self._num_blocks = None + self._variables = VariableList() + self._optimisers = OrderedDict() + self._blocks = OrderedDict() + + @property + def num_blocks(self): + """ + Get number of blocks. + + """ + return self._num_blocks + + @property + def variables(self): + """ + Access the variables. + + """ + return self._variables + + @variables.setter + def variables(self, value): + """ + Set the variables + + """ + if isinstance(value, list): + for variable_value in value: + self._variables.items[variable_value.name] = variable_value + + else: + self._variables = value + + def add(self, variable, optimiser): + """ + Add a variable to the optimisation. + + Parameters + ---------- + variable : Variable + Variable to add to the optimisation. + optimiser : LocalOptimiser + Optimiser associated with the given variable. + + Returns + ------- + + """ + if variable.name not in self._variables: + self._variables.items[variable.name] = variable + self._optimisers[variable.name] = optimiser + + def blocks(self, num): + """ + Generator for the blocks of the optimisation. + + Parameters + ---------- + num : int + Number of blocks to generate. 
+ + Returns + ------- + iterable + Blocks iterable. + + """ + self._num_blocks = num + + for index in range(num): + block = Block(index, functional=self.functional) + self._blocks[index] = block + + yield block + + def apply_optimiser(self, updated_variable, block=None, iteration=None, **kwargs): + """ + Apply an optimiser to its associated variable. + + Parameters + ---------- + updated_variable : Variable + Container for the gradient of the variable. + block : Block + Block in which the optimisation is at the moment. + iteration : Iteration + Iteration in which the optimisation is at the moment. + kwargs + Additional arguments for the optimiser. + + Returns + ------- + Variable + Updated variable. + + """ + runtime = mosaic.runtime() + + variable = self._variables[updated_variable.name] + optimiser = self._optimisers[updated_variable.name] + + grad = updated_variable.get_grad() + grad = block.pipelines.global_gradient.apply(grad) + + min_grad = np.min(grad.extended_data) + max_grad = np.max(grad.extended_data) + + min_var = np.min(variable.extended_data) + max_var = np.max(variable.extended_data) + + runtime.logger.info('Updating variable %s, gradient in range [%e, %e]' % + (variable.name, min_grad, max_grad)) + runtime.logger.info('\t variable range before update [%e, %e]' % + (min_var, max_var)) + + variable = optimiser.apply(grad, iteration=iteration, block=block, **kwargs) + self._variables.items[updated_variable.name] = variable + + block.pipelines.model_iteration.apply(variable) + + min_var = np.min(variable.extended_data) + max_var = np.max(variable.extended_data) + + runtime.logger.info('\t variable range after update [%e, %e]' % + (min_var, max_var)) + + return variable + + def dump_variable(self, updated_variable, problem): + """ + Dump the updated value of a variable to disk. + + Parameters + ---------- + updated_variable : Variable + Container for the gradient of the variable. + problem : Problem + Problem being executed. 
+ + Returns + ------- + + """ + variable = self._variables[updated_variable.name] + variable.dump(path=problem.output_folder, + project_name=problem.name) + + async def run(self, block, problem, dump=True): + """ + Run the default a block with default settings. + + Parameters + ---------- + block : Block + Block to run. + problem : Problem + Problem being run. + dump : bool, optional + Whether or not to dump the updated variable at each iteration, defaults to True. + + Returns + ------- + + """ + runtime = mosaic.runtime() + + # Create an array of runners + runners = await Runner.remote(len=runtime.num_workers) + + tasks = await runners.set_block(block) + await mosaic.gather(tasks) + + for iteration in block.iterations(): + runtime.logger.info('Starting iteration %d (out of %d), ' + 'block %d (out of %d)' % + (iteration.id, block.num_iterations, + block.id, self.num_blocks)) + + fun, updated_variables = await problem.inverse(runners, self.variables, + needs_grad=True, + block=block, iteration=iteration) + + for updated_variable in updated_variables: + self.apply_optimiser(updated_variable, + block=block, iteration=iteration) + + if dump is True: + self.dump_variable(updated_variable, + problem=problem) + + runtime.logger.info('Done iteration %d (out of %d), ' + 'block %d (out of %d) - Total loss %e' % + (iteration.id, block.num_iterations, block.id, + self.num_blocks, iteration.fun_value)) + runtime.logger.info('====================================================================') + + for variable in self.variables: + block.pipelines.model_block.apply(variable) diff --git a/stride/optimisation/optimisers/__init__.py b/stride/optimisation/optimisers/__init__.py new file mode 100644 index 00000000..cc03aacc --- /dev/null +++ b/stride/optimisation/optimisers/__init__.py @@ -0,0 +1,4 @@ + + +from .optimiser import * +from .gradient_descent import * diff --git a/stride/optimisation/optimisers/gradient_descent.py b/stride/optimisation/optimisers/gradient_descent.py new 
file mode 100644 index 00000000..eede8254 --- /dev/null +++ b/stride/optimisation/optimisers/gradient_descent.py @@ -0,0 +1,52 @@ + + +from .optimiser import LocalOptimiser + + +__all__ = ['GradientDescent'] + + +class GradientDescent(LocalOptimiser): + """ + Implementation of a gradient descent update. + + Parameters + ---------- + variable : Variable + Variable to which the optimiser refers. + step : float, optional + Step size for the update, defaults to 1. + kwargs + Extra parameters to be used by the class. + + """ + + def __init__(self, variable, **kwargs): + super().__init__(variable, **kwargs) + + self.step = kwargs.pop('step', 1.) + + def apply(self, grad, **kwargs): + """ + Apply the optimiser. + + Parameters + ---------- + grad : Data + Gradient to apply. + step : float, optional + Step size to use for this application, defaults to instance step. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + Variable + Updated variable. + + """ + step = kwargs.get('step', self.step) + + self.variable -= step*grad + + return self.variable diff --git a/stride/optimisation/optimisers/optimiser.py b/stride/optimisation/optimisers/optimiser.py new file mode 100644 index 00000000..e031544f --- /dev/null +++ b/stride/optimisation/optimisers/optimiser.py @@ -0,0 +1,43 @@ + +from abc import ABC, abstractmethod + + +__all__ = ['LocalOptimiser'] + + +class LocalOptimiser(ABC): + """ + Base class for a local optimiser. It takes the value of the gradient and applies + it to the variable. + + Parameters + ---------- + variable : Variable + Variable to which the optimiser refers. + kwargs + Extra parameters to be used by the class. + + """ + + def __init__(self, variable, **kwargs): + self.variable = variable + + @abstractmethod + def apply(self, grad, **kwargs): + """ + Apply the optimiser. + + Parameters + ---------- + grad : Data + Gradient to apply. + kwargs + Extra parameters to be used by the method. 
+ + Returns + ------- + Variable + Updated variable. + + """ + pass diff --git a/stride/optimisation/pipelines/__init__.py b/stride/optimisation/pipelines/__init__.py new file mode 100644 index 00000000..1e4f8d11 --- /dev/null +++ b/stride/optimisation/pipelines/__init__.py @@ -0,0 +1,5 @@ + + +from . import steps +from .pipeline import * +from .default_pipelines import * diff --git a/stride/optimisation/pipelines/default_pipelines.py b/stride/optimisation/pipelines/default_pipelines.py new file mode 100644 index 00000000..95b1b6dc --- /dev/null +++ b/stride/optimisation/pipelines/default_pipelines.py @@ -0,0 +1,132 @@ + +from .pipeline import Pipeline + + +__all__ = ['ProcessWavelets', 'ProcessWavefield', 'ProcessTraces', + 'ProcessAdjointSource', 'ProcessLocalGradient', 'ProcessGlobalGradient', + 'ProcessModelIteration', 'ProcessModelBlock'] + +# TODO Some of these pipelines should be different for variables +# TODO A more flexible and intuitive way of configuring pipelines is needed +# TODO Default configuration of pipelines should be better defined + + +class ProcessWavelets(Pipeline): + """ + Default pipeline to process wavelets before running the forward problem. + + **Default steps:** + + - ``filter_wavelets`` + + """ + + def __init__(self, steps=None, **kwargs): + steps = steps or [] + steps.append('filter_wavelets') + + super().__init__(steps, **kwargs) + + +class ProcessWavefield(Pipeline): + """ + Default pipeline to process the wavefield after running the forward problem. + + **Default steps:** + + """ + pass + + +class ProcessTraces(Pipeline): + """ + Default pipeline to process modelled and observed before running the functional. 
+ + **Default steps:** + + - ``filter_traces`` + - ``norm_per_shot`` + + """ + + def __init__(self, steps=None, **kwargs): + steps = steps or [] + steps.append('filter_traces') + steps.append('norm_per_shot') + + super().__init__(steps, **kwargs) + + +class ProcessAdjointSource(Pipeline): + """ + Default pipeline to process adjoint source before running the adjoint problem. + + **Default steps:** + + - ``filter_traces`` + + """ + + def __init__(self, steps=None, **kwargs): + steps = steps or [] + steps.append('filter_traces') + + super().__init__(steps, **kwargs) + + +class ProcessLocalGradient(Pipeline): + """ + Default pipeline to process the gradient locally before returning it. + + **Default steps:** + + """ + pass + + +class ProcessGlobalGradient(Pipeline): + """ + Default pipeline to process the global gradient before updating the variable. + + **Default steps:** + + - ``mask`` + - ``smooth_field`` + - ``norm_field`` + + """ + + def __init__(self, steps=None, **kwargs): + steps = steps or [] + steps.append('mask') + steps.append('smooth_field') + steps.append('norm_field') + + super().__init__(steps, **kwargs) + + +class ProcessModelIteration(Pipeline): + """ + Default pipeline to process the model after each iteration. + + **Default steps:** + + - ``clip`` + + """ + + def __init__(self, steps=None, **kwargs): + steps = steps or [] + steps.append('clip') + + super().__init__(steps, **kwargs) + + +class ProcessModelBlock(Pipeline): + """ + Default pipeline to process the model after each block. + + **Default steps:** + + """ + pass diff --git a/stride/optimisation/pipelines/pipeline.py b/stride/optimisation/pipelines/pipeline.py new file mode 100644 index 00000000..97c071c8 --- /dev/null +++ b/stride/optimisation/pipelines/pipeline.py @@ -0,0 +1,70 @@ + +from abc import ABC, abstractmethod + +from mosaic.utils import camel_case + +from . 
import steps as steps_module + + +__all__ = ['Pipeline', 'PipelineStep'] + + +class PipelineStep(ABC): + """ + Base class for processing steps in pipelines. + + """ + + @abstractmethod + def apply(self, *args, **kwargs): + """ + Apply the processing step to the arguments. + + """ + pass + + +class Pipeline: + """ + A pipeline represents a series of processing steps that will be applied + in order to a series of inputs. Pipelines encode pre-processing or + post-processing steps such as filtering time traces or smoothing a gradient. + + Parameters + ---------- + steps : list, optional + List of steps that form the pipeline. Steps can be callable or strings pointing + to a default, pre-defined step. + + """ + + def __init__(self, steps=None, **kwargs): + steps = steps or [] + + self._steps = [] + for step in steps: + if isinstance(step, str): + step_module = getattr(steps_module, step) + step = getattr(step_module, camel_case(step)) + + self._steps.append(step(**kwargs)) + + else: + self._steps.append(step) + + def apply(self, *args, **kwargs): + """ + Apply all steps in the pipeline in order. + + """ + next_args = args + + for step in self._steps: + next_args = step.apply(*next_args, **kwargs) + next_args = (next_args,) if len(args) == 1 else next_args + + if len(args) == 1: + return next_args[0] + + else: + return next_args diff --git a/stride/optimisation/pipelines/steps/__init__.py b/stride/optimisation/pipelines/steps/__init__.py new file mode 100644 index 00000000..a902979b --- /dev/null +++ b/stride/optimisation/pipelines/steps/__init__.py @@ -0,0 +1,8 @@ + +from . import filter_wavelets +from . import filter_traces +from . import norm_per_shot +from . import norm_field +from . import smooth_field +from . import mask +from . 
import clip diff --git a/stride/optimisation/pipelines/steps/clip.py b/stride/optimisation/pipelines/steps/clip.py new file mode 100644 index 00000000..6ed24ccc --- /dev/null +++ b/stride/optimisation/pipelines/steps/clip.py @@ -0,0 +1,29 @@ + +import numpy as np + +from ..pipeline import PipelineStep + + +class Clip(PipelineStep): + """ + Clip data between two extreme values. + + Parameters + ---------- + min : float, optional + Lower value for the clipping, defaults to None (no lower clipping). + max : float, optional + Upper value for the clipping, defaults to None (no upper clipping). + + """ + + def __init__(self, **kwargs): + self.min = kwargs.pop('min', None) + self.max = kwargs.pop('max', None) + + def apply(self, field, **kwargs): + if self.min is not None or self.max is not None: + field.extended_data[:] = np.clip(field.extended_data, + self.min, self.max) + + return field diff --git a/stride/optimisation/pipelines/steps/filter_traces.py b/stride/optimisation/pipelines/steps/filter_traces.py new file mode 100644 index 00000000..667287c2 --- /dev/null +++ b/stride/optimisation/pipelines/steps/filter_traces.py @@ -0,0 +1,53 @@ + +from stride.utils import filters + +from ..pipeline import PipelineStep + + +class FilterTraces(PipelineStep): + """ + Filter a set of time traces. + + Parameters + ---------- + f_min : float, optional + Lower value for the frequency filter, defaults to None (no lower filtering). + f_max : float, optional + Upper value for the frequency filter, defaults to None (no upper filtering). 
+ + """ + + def __init__(self, **kwargs): + self.f_min = kwargs.pop('f_min', None) + self.f_max = kwargs.pop('f_max', None) + + def apply(self, *traces, **kwargs): + filtered = [] + for each in traces: + filtered.append(self._apply(each, **kwargs)) + + if len(traces) > 1: + return tuple(filtered) + + else: + return filtered[0] + + def _apply(self, traces, **kwargs): + time = traces.time + + f_min = self.f_min*time.step if self.f_min is not None else 0 + f_max = self.f_max*time.step if self.f_max is not None else 0 + + if self.f_min is None and self.f_max is not None: + filtered = filters.lowpass_filter_fir(traces.extended_data, f_max) + traces.extended_data[:] = filtered + + elif self.f_min is not None and self.f_max is None: + filtered = filters.highpass_filter_fir(traces.extended_data, f_min) + traces.extended_data[:] = filtered + + elif self.f_min is not None and self.f_max is not None: + filtered = filters.bandpass_filter_fir(traces.extended_data, f_min, f_max) + traces.extended_data[:] = filtered + + return traces diff --git a/stride/optimisation/pipelines/steps/filter_wavelets.py b/stride/optimisation/pipelines/steps/filter_wavelets.py new file mode 100644 index 00000000..35398402 --- /dev/null +++ b/stride/optimisation/pipelines/steps/filter_wavelets.py @@ -0,0 +1,42 @@ + +from stride.utils import filters + +from ..pipeline import PipelineStep + + +class FilterWavelets(PipelineStep): + """ + Filter wavelets to 3/4 of the set frequencies. + + Parameters + ---------- + f_min : float, optional + Lower value for the frequency filter, defaults to None (no lower filtering). + f_max : float, optional + Upper value for the frequency filter, defaults to None (no upper filtering). 
+ + """ + + def __init__(self, **kwargs): + self.f_min = kwargs.pop('f_min', None) + self.f_max = kwargs.pop('f_max', None) + + def apply(self, wavelets, **kwargs): + time = wavelets.time + + f_min = self.f_min*time.step / 0.750 if self.f_min is not None else 0 + f_max = self.f_max*time.step / 0.750 if self.f_max is not None else 0 + + if self.f_min is None and self.f_max is not None: + filtered = filters.lowpass_filter_fir(wavelets.extended_data, f_max) + wavelets.extended_data[:] = filtered + + elif self.f_min is not None and self.f_max is None: + filtered = filters.highpass_filter_fir(wavelets.extended_data, f_min) + wavelets.extended_data[:] = filtered + + elif self.f_min is not None and self.f_max is not None: + filtered = filters.bandpass_filter_fir(wavelets.extended_data, f_min, f_max) + wavelets.extended_data[:] = filtered + + return wavelets diff --git a/stride/optimisation/pipelines/steps/mask.py b/stride/optimisation/pipelines/steps/mask.py new file mode 100644 index 00000000..295f0a17 --- /dev/null +++ b/stride/optimisation/pipelines/steps/mask.py @@ -0,0 +1,25 @@ + +import numpy as np + +from ..pipeline import PipelineStep + + +class Mask(PipelineStep): + """ + Mask a StructuredData object to remove values outside inner domain. + + Parameters + ---------- + + """ + + def __init__(self, **kwargs): + pass + + def apply(self, field, **kwargs): + mask = np.zeros(field.extended_shape) + mask[field.inner] = 1 + + field *= mask + + return field diff --git a/stride/optimisation/pipelines/steps/norm_field.py b/stride/optimisation/pipelines/steps/norm_field.py new file mode 100644 index 00000000..13b820ac --- /dev/null +++ b/stride/optimisation/pipelines/steps/norm_field.py @@ -0,0 +1,23 @@ + +import numpy as np + +from ..pipeline import PipelineStep + + +class NormField(PipelineStep): + """ + Normalise a StructuredData object between -1 and +1. 
+ + Parameters + ---------- + + """ + + def __init__(self, **kwargs): + self.norm_value = None + + def apply(self, field, **kwargs): + self.norm_value = np.max(np.abs(field.extended_data)) + 1e-31 + field.extended_data[:] /= self.norm_value + + return field diff --git a/stride/optimisation/pipelines/steps/norm_per_shot.py b/stride/optimisation/pipelines/steps/norm_per_shot.py new file mode 100644 index 00000000..0ae81a26 --- /dev/null +++ b/stride/optimisation/pipelines/steps/norm_per_shot.py @@ -0,0 +1,34 @@ + +import numpy as np + +from ..pipeline import PipelineStep + + +class NormPerShot(PipelineStep): + """ + Normalised a series of time traces to the maximum value of the set. + + Parameters + ---------- + + """ + + def __init__(self, **kwargs): + pass + + def apply(self, modelled, observed, **kwargs): + modelled = self._apply(modelled, **kwargs) + observed = self._apply(observed, **kwargs) + + return modelled, observed + + def _apply(self, traces, **kwargs): + norm_value = 0. + + for index in range(traces.extended_shape[0]): + norm_value += np.sum(traces.extended_data[index]**2) + + norm_value = np.sqrt(norm_value/traces.extended_shape[0]) + 1e-31 + traces.extended_data[:] /= norm_value + + return traces diff --git a/stride/optimisation/pipelines/steps/smooth_field.py b/stride/optimisation/pipelines/steps/smooth_field.py new file mode 100644 index 00000000..51f31752 --- /dev/null +++ b/stride/optimisation/pipelines/steps/smooth_field.py @@ -0,0 +1,25 @@ + +import scipy.ndimage + +from ..pipeline import PipelineStep + + +class SmoothField(PipelineStep): + """ + Apply Gaussian smoothing to a StructuredData object. + + Parameters + ---------- + sigma : float, optional + Standard deviation of the Gaussian kernel, defaults to 0.25 (25% of a grid point). 
+ + """ + + def __init__(self, **kwargs): + self.sigma = kwargs.pop('sigma', 0.25) + + def apply(self, field, **kwargs): + field.extended_data[:] = scipy.ndimage.gaussian_filter(field.extended_data, + sigma=self.sigma, mode='nearest') + + return field diff --git a/stride/optimisation/variables/__init__.py b/stride/optimisation/variables/__init__.py new file mode 100644 index 00000000..f45a0784 --- /dev/null +++ b/stride/optimisation/variables/__init__.py @@ -0,0 +1,4 @@ + + +from .variable import * +from .vp import * diff --git a/stride/optimisation/variables/variable.py b/stride/optimisation/variables/variable.py new file mode 100644 index 00000000..53e844de --- /dev/null +++ b/stride/optimisation/variables/variable.py @@ -0,0 +1,14 @@ + + +__all__ = ['Variable'] + + +class Variable: + + def __new__(cls, variable): + instance = variable.copy() + + instance.grad = variable.alike(instance.name+'_grad') + instance.prec = variable.alike(instance.name+'_prec') + + return instance diff --git a/stride/optimisation/variables/vp.py b/stride/optimisation/variables/vp.py new file mode 100644 index 00000000..bdb31715 --- /dev/null +++ b/stride/optimisation/variables/vp.py @@ -0,0 +1,63 @@ + +import numpy as np + +from stride.problem_definition import ScalarField + + +__all__ = ['Vp'] + + +class Vp(ScalarField): + """ + Class representing longitudinal speed of sound. The scalar field is enriched with a gradient and + a preconditioner. + + For reference on the arguments see `~stride.problem_definition.data.ScalarField`. + + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.grad = ScalarField(self.name + '_grad', grid=self.grid) + self.prec = ScalarField(self.name + '_prec', grid=self.grid) + + self.grad.fill(0.) + self.prec.fill(1.) + + def update_problem(self, problem): + """ + Use the current value of the variable to update the problem. 
+ + Parameters + ---------- + problem : Problem or SubProblem + Problem to update + + Returns + ------- + Problem or SubProblem + Updated problem. + + """ + problem.medium[self.name].extended_data[:] = self.extended_data.copy() + + return problem + + def get_grad(self): + """ + Process the accumulated gradient and preconditioner to produce a final gradient. + + Returns + ------- + Data + Final gradient. + + """ + grad = self.grad + prec = self.prec + + prec += 1e-6 * np.max(prec.data) + grad /= prec + + return grad diff --git a/stride/plotting/__init__.py b/stride/plotting/__init__.py new file mode 100644 index 00000000..b05941f1 --- /dev/null +++ b/stride/plotting/__init__.py @@ -0,0 +1,11 @@ + +import os + +if os.environ.get('DISPLAY', None) is not None: + import matplotlib + matplotlib.use('TkAgg') + + from .plot_fields import * + from .plot_points import * + from .plot_traces import * + from .plot_show import * diff --git a/stride/plotting/plot_fields.py b/stride/plotting/plot_fields.py new file mode 100644 index 00000000..12aedbe4 --- /dev/null +++ b/stride/plotting/plot_fields.py @@ -0,0 +1,377 @@ + +import os +import functools +import warnings +try: + if os.environ.get('DISPLAY', None) is None: + raise ModuleNotFoundError + + os.environ['ETS_TOOLKIT'] = 'qt4' + from wx import wxPyDeprecationWarning + warnings.simplefilter(action='ignore', category=wxPyDeprecationWarning) + + from traits.api import HasTraits, Instance, Array, on_trait_change + from traitsui.api import View, Item, HGroup, Group + + from tvtk.api import tvtk + from tvtk.pyface.scene import Scene + + from mayavi import mlab + from mayavi.core.api import PipelineBase, Source + from mayavi.core.ui.api import SceneEditor, MayaviScene, MlabSceneModel + + ENABLED_3D_PLOTTING = True + +except (ModuleNotFoundError, RuntimeError): + ENABLED_3D_PLOTTING = False + +try: + import matplotlib.pyplot as plt + + ENABLED_2D_PLOTTING = True + +except ModuleNotFoundError: + ENABLED_2D_PLOTTING = False + + +if 
if ENABLED_3D_PLOTTING:
    class VolumeSlicer(HasTraits):
        """
        Traits UI window showing a 3D volume together with three orthogonal
        slice views (x, y, z). The slice positions are kept in sync between
        the 3D scene and the side views.

        """

        # The data to plot
        data = Array()

        # The 4 views displayed
        scene3d = Instance(MlabSceneModel, ())
        scene_x = Instance(MlabSceneModel, ())
        scene_y = Instance(MlabSceneModel, ())
        scene_z = Instance(MlabSceneModel, ())

        # The data source
        data_source = Instance(Source)

        # The image plane widgets of the 3D scene
        plane_widget_3d_x = Instance(PipelineBase)
        plane_widget_3d_y = Instance(PipelineBase)
        plane_widget_3d_z = Instance(PipelineBase)

        # Maps axis letter to its positional index in the data array.
        _axis_names = dict(x=0, y=1, z=2)

        def __init__(self, is_Vector, data_range, colourmap, **traits):
            # is_Vector selects vector_field vs scalar_field as pipeline source;
            # data_range is the (vmin, vmax) pair applied to every view.
            self.is_Vector = is_Vector
            self.data_range = data_range
            self.colourmap = colourmap

            super(VolumeSlicer, self).__init__(**traits)

            # Force the creation of the image_plane_widgets:
            # (trait defaults are lazy; touching them triggers *_default below)
            self.plane_widget_3d_x
            self.plane_widget_3d_y
            self.plane_widget_3d_z

        # Default values
        def _data_source_default(self):
            # Lazily build the mayavi pipeline source from self.data.
            if self.is_Vector:
                return mlab.pipeline.vector_field(self.data,
                                                  figure=self.scene3d.mayavi_scene,
                                                  colormap=self.colourmap,
                                                  vmin=self.data_range[0], vmax=self.data_range[1])

            else:
                return mlab.pipeline.scalar_field(self.data,
                                                  figure=self.scene3d.mayavi_scene,
                                                  colormap=self.colourmap,
                                                  vmin=self.data_range[0], vmax=self.data_range[1])

        def make_plane_widget_3d(self, axis_name):
            """Create the image plane widget for one axis of the 3D scene."""
            plane_widget = mlab.pipeline.image_plane_widget(self.data_source,
                                                            figure=self.scene3d.mayavi_scene,
                                                            colormap=self.colourmap,
                                                            plane_orientation='%s_axes' % axis_name)

            return plane_widget

        def _plane_widget_3d_x_default(self):
            return self.make_plane_widget_3d('x')

        def _plane_widget_3d_y_default(self):
            return self.make_plane_widget_3d('y')

        def _plane_widget_3d_z_default(self):
            return self.make_plane_widget_3d('z')

        # Scene activation callbaks
        @on_trait_change('scene3d.activated')
        def display_scene3d(self):
            """Configure the main 3D scene once it has been activated."""
            outline = mlab.pipeline.outline(self.data_source,
                                            figure=self.scene3d.mayavi_scene,
                                            colormap=self.colourmap,
                                            vmin=self.data_range[0], vmax=self.data_range[1])

            # The scene may not be fully initialised yet; bail out silently.
            try:
                self.scene3d.mlab.view(40, 50)
            except AttributeError:
                return

            # Interaction properties can only be changed after the scene
            # has been created, and thus the interactor exists
            for plane_widget in (self.plane_widget_3d_x, self.plane_widget_3d_y, self.plane_widget_3d_z):
                # Turn the interaction off
                plane_widget.ipw.interaction = 0

            self.scene3d.scene.background = (0, 0, 0)

            # Keep the view always pointing up
            self.scene3d.scene.interactor.interactor_style = tvtk.InteractorStyleTerrain()

        def make_side_view(self, axis_name):
            """Build and wire up the 2D side view for the given axis."""
            scene = getattr(self, 'scene_%s' % axis_name)

            # To avoid copying the data, we take a reference to the
            # raw VTK dataset, and pass it on to mlab. Mlab will create
            # a Mayavi source from the VTK without copying it.
            # We have to specify the figure so that the data gets
            # added on the figure we are interested in.
            outline = mlab.pipeline.outline(self.data_source.mlab_source.dataset,
                                            figure=scene.mayavi_scene,
                                            colormap=self.colourmap,
                                            vmin=self.data_range[0], vmax=self.data_range[1])

            plane_widget = mlab.pipeline.image_plane_widget(outline,
                                                            plane_orientation='%s_axes' % axis_name,
                                                            colormap=self.colourmap,
                                                            vmin=self.data_range[0], vmax=self.data_range[1])
            setattr(self, 'plane_widget_%s' % axis_name, plane_widget)

            # Synchronize positions between the corresponding image plane
            # widgets on different views.
            plane_widget.ipw.sync_trait('slice_position', getattr(self, 'plane_widget_3d_%s' % axis_name).ipw)

            # Make left-clicking create a crosshair
            plane_widget.ipw.left_button_action = 0

            # Add a callback on the image plane widget interaction to
            # move the others
            def move_view(obj, evt):
                # Propagate this view's cursor position to the 3D widgets
                # of the other two axes.
                position = obj.GetCurrentCursorPosition()
                for other_axis, axis_number in self._axis_names.items():
                    if other_axis == axis_name:
                        continue
                    ipw3d = getattr(self, 'plane_widget_3d_%s' % other_axis)
                    ipw3d.ipw.slice_position = position[axis_number]

            plane_widget.ipw.add_observer('InteractionEvent', move_view)
            plane_widget.ipw.add_observer('StartInteractionEvent', move_view)

            # Center the image plane widget
            plane_widget.ipw.slice_position = 0.5*self.data.shape[
                self._axis_names[axis_name]]

            # Position the view for the scene
            views = dict(x=(0, 90), y=(90, 90), z=(0, 0))
            scene.mlab.view(*views[axis_name])

            # 2D interaction: only pan and zoom
            scene.scene.interactor.interactor_style = tvtk.InteractorStyleImage()

            scene.scene.background = (0, 0, 0)

        @on_trait_change('scene_x.activated')
        def display_scene_x(self):
            return self.make_side_view('x')

        @on_trait_change('scene_y.activated')
        def display_scene_y(self):
            return self.make_side_view('y')

        @on_trait_change('scene_z.activated')
        def display_scene_z(self):
            return self.make_side_view('z')

        # The layout of the dialog created
        view = View(HGroup(Group(
                    Item('scene_y',
                         editor=SceneEditor(scene_class=Scene),
                         height=250, width=300),
                    Item('scene_z',
                         editor=SceneEditor(scene_class=Scene),
                         height=250, width=300),
                    show_labels=True), Group(
                    Item('scene_x',
                         editor=SceneEditor(scene_class=Scene),
                         height=250, width=300),
                    Item('scene3d',
                         editor=SceneEditor(scene_class=MayaviScene),
                         height=250, width=300),
                    show_labels=True)), resizable=True, title='VolumeSlicer')


__all__ = ['plot_scalar_field', 'plot_scalar_field_2d', 'plot_scalar_field_3d']
def prepare_plot_arguments(wrapped):
    """
    Decorator that fills in default ``origin``/``limit`` values and converts
    explicit ones from metres into millimetres before calling the wrapped
    plotting function.

    """
    @functools.wraps(wrapped)
    def _prepare_plot_arguments(field, data_range=(None, None), origin=None, limit=None,
                                axis=None, palette='viridis', title=None):

        space_scale = 1e-3

        # Default extent is the (transposed) field shape, i.e. grid points;
        # an explicit extent is interpreted as metres and rescaled to mm.
        if limit is None:
            limit = field.T.shape
        else:
            limit = tuple(each / space_scale for each in limit)

        # Default origin is zero along every dimension.
        if origin is None:
            origin = (0,) * len(limit)
        else:
            origin = tuple(each / space_scale for each in origin)

        return wrapped(field,
                       data_range=data_range, limit=limit, origin=origin,
                       axis=axis, palette=palette, title=title)

    return _prepare_plot_arguments


@prepare_plot_arguments
def plot_scalar_field_2d(field, data_range=(None, None), origin=None, limit=None,
                         axis=None, palette='viridis', title=None):
    """
    Utility function to plot a 2D scalar field using matplotlib.

    Parameters
    ----------
    field : ScalarFunction or VectorFunction
        Field to be plotted
    data_range : tuple, optional
        Range of the data, defaults to (min(field), max(field)).
    origin : tuple, optional
        Origin of the axes of the plot, defaults to zero.
    limit : tuple, optional
        Extent of the axes of the plot, defaults to the spatial extent.
    axis : matplotlib axis, optional
        Axis in which to make the plotting, defaults to new empty one.
    palette : str, optional
        Palette to use in the plotting, defaults to viridis.
    title : str, optional
        Figure title, defaults to empty title.

    Returns
    -------
    Axis
        Generated axis.

    """
    if not ENABLED_2D_PLOTTING:
        return None

    if axis is None:
        figure, axis = plt.subplots(1, 1)

    # Transposed so the first data axis maps onto the horizontal axis.
    extent = [origin[0], limit[0], origin[1], limit[1]]
    image = axis.imshow(field.T,
                        cmap=palette,
                        vmin=data_range[0], vmax=data_range[1],
                        aspect='equal',
                        origin='lower',
                        extent=extent,
                        interpolation='bicubic')

    # NOTE(review): the decorator always fills origin/limit in, so this
    # first branch appears unreachable and the axes are always labelled
    # in mm — confirm intent.
    if origin is None or limit is None:
        axis.set_xlabel('x')
        axis.set_ylabel('y')
    else:
        axis.set_xlabel('x (mm)')
        axis.set_ylabel('y (mm)')

    if title is not None:
        axis.set_title(title)

    plt.colorbar(image, ax=axis)

    return axis


@prepare_plot_arguments
def plot_scalar_field_3d(field, data_range=(None, None), origin=None, limit=None,
                         axis=None, palette='viridis', title=None):
    """
    Utility function to plot a 3D scalar field using MayaVi.

    Parameters
    ----------
    field : ScalarFunction or VectorFunction
        Field to be plotted
    data_range : tuple, optional
        Range of the data, defaults to (min(field), max(field)).
    origin : tuple, optional
        Origin of the axes of the plot, defaults to zero.
    limit : tuple, optional
        Extent of the axes of the plot, defaults to the spatial extent.
    axis : MayaVi axis, optional
        Axis in which to make the plotting, defaults to new empty one.
    palette : str, optional
        Palette to use in the plotting, defaults to viridis.
    title : str, optional
        Figure title, defaults to empty title.

    Returns
    -------
    MayaVi figure
        Generated MayaVi figure

    """
    if not ENABLED_3D_PLOTTING:
        return None

    scene = axis if axis is not None else MlabSceneModel()

    # The VolumeSlicer window owns the 3D scene plus the three slice views.
    return VolumeSlicer(data=field,
                        is_Vector=False,
                        colourmap=palette,
                        scene3d=scene,
                        data_range=data_range)


def plot_scalar_field(field, data_range=(None, None), origin=None, limit=None,
                      axis=None, palette='viridis', title=None):
    """
    Utility function to plot a scalar field using matplotib (2D) or MayaVi (3D).

    Parameters
    ----------
    field : ScalarFunction or VectorFunction
        Field to be plotted
    data_range : tuple, optional
        Range of the data, defaults to (min(field), max(field)).
    origin : tuple, optional
        Origin of the axes of the plot, defaults to zero.
    limit : tuple, optional
        Extent of the axes of the plot, defaults to the spatial extent.
    axis : matplotlib or MayaVi axis, optional
        Axis in which to make the plotting, defaults to new empty one.
    palette : str, optional
        Palette to use in the plotting, defaults to viridis.
    title : str, optional
        Figure title, defaults to empty title.

    Returns
    -------
    matplotlib or MayaVi figure
        Generated matplotlib or MayaVi figure

    """
    # Dispatch on dimensionality: 3D volumes go to MayaVi, planes to matplotlib.
    plotter = plot_scalar_field_3d if len(field.shape) > 2 else plot_scalar_field_2d

    return plotter(field,
                   data_range=data_range, limit=limit, origin=origin,
                   axis=axis, palette=palette, title=title)
+ + Parameters + ---------- + field : ScalarFunction or VectorFunction + Field to be plotted + data_range : tuple, optional + Range of the data, defaults to (min(field), max(field)). + origin : tuple, optional + Origin of the axes of the plot, defaults to zero. + limit : tuple, optional + Extent of the axes of the plot, defaults to the spatial extent. + axis : MayaVi axis, optional + Axis in which to make the plotting, defaults to new empty one. + palette : str, optional + Palette to use in the plotting, defaults to plasma. + title : str, optional + Figure title, defaults to empty title. + + Returns + ------- + matplotlib or MayaVi figure + Generated matplotlib or MayaVi figure + + """ + + if len(field.shape) > 2: + axis = plot_scalar_field_3d(field, + data_range=data_range, limit=limit, origin=origin, + axis=axis, palette=palette, title=title) + + else: + axis = plot_scalar_field_2d(field, + data_range=data_range, limit=limit, origin=origin, + axis=axis, palette=palette, title=title) + + return axis diff --git a/stride/plotting/plot_points.py b/stride/plotting/plot_points.py new file mode 100644 index 00000000..60acfc00 --- /dev/null +++ b/stride/plotting/plot_points.py @@ -0,0 +1,146 @@ + +import numpy as np + +try: + import matplotlib.pyplot as plt + + ENABLED_2D_PLOTTING = True +except ModuleNotFoundError: + ENABLED_2D_PLOTTING = False + +try: + from mayavi import mlab + from mayavi.core.ui.api import MlabSceneModel + + ENABLED_3D_PLOTTING = True + +except ModuleNotFoundError: + ENABLED_3D_PLOTTING = True + + +__all__ = ['plot_points', 'plot_points_2d', 'plot_points_3d'] + + +def plot_points_2d(coordinates, axis=None, colour='red', size=15, title=None): + """ + Utility function to plot 2D scattered points using matplotlib. + + Parameters + ---------- + coordinates : 2-dimensional array + Coordinates of the points to be plotted, shape should be (n_points, 2). + axis : matplotlib axis, optional + Axis in which to make the plotting, defaults to new empty one. 
+ colour : str + Colour to apply to the points, defaults to red. + size : float + Size of the plotted points, defaults to 15. + title : str, optional + Figure title, defaults to empty title. + + Returns + ------- + matplotlib axis + Generated matplotlib axis + + """ + if not ENABLED_2D_PLOTTING: + return None + + if axis is None: + figure, axis = plt.subplots(1, 1) + + if len(coordinates.shape) == 1: + coordinates = coordinates.reshape((1, coordinates.shape[0])) + + space_scale = 1e-3 + + im = axis.scatter(coordinates[:, 0]/space_scale, coordinates[:, 1]/space_scale, + s=size, c=colour) + + if title is not None: + axis.set_title(title) + + return axis + + +def plot_points_3d(coordinates, axis=None, colour='red', size=15, title=None): + """ + Utility function to plot 3D scattered points using MayaVi. + + Parameters + ---------- + coordinates : 2-dimensional array + Coordinates of the points to be plotted, shape should be (n_points, 3). + axis : MayaVi axis, optional + Axis in which to make the plotting, defaults to new empty one. + colour : str + Colour to apply to the points, defaults to red. + size : float + Size of the plotted points, defaults to 15. + title : str, optional + Figure title, defaults to empty title. 
+ + Returns + ------- + MayaVi figure + Generated MayaVi figure + + """ + if not ENABLED_3D_PLOTTING: + return None + + if axis is None: + axis = MlabSceneModel() + + colour_map = { + 'red': (1., 0., 0.), + 'green': (0., 1., 0.), + 'blue': (0., 0., 1.), + } + + scale_factor = 100 * size / np.max(coordinates) + + transducers = mlab.pipeline.scalar_scatter(coordinates[:, 0], + coordinates[:, 1], + coordinates[:, 2], + figure=axis.scene3d.mayavi_scene) + mlab.pipeline.glyph(transducers, + mode='sphere', color=colour_map[colour], scale_factor=scale_factor, + figure=axis.scene3d.mayavi_scene) + + return axis + + +def plot_points(coordinates, axis=None, colour='red', size=15, title=None): + """ + Utility function to plot scattered points using matplotlib (2D) or MayaVi (3D). + + Parameters + ---------- + coordinates : 2-dimensional array + Coordinates of the points to be plotted, shape should be (n_points, dimensions). + axis : axis, optional + Axis in which to make the plotting, defaults to new empty one. + colour : str + Colour to apply to the points, defaults to red. + size : float + Size of the plotted points, defaults to 15. + title : str, optional + Figure title, defaults to empty title. 
try:
    from mayavi import mlab
    from mayavi.core.ui.api import MlabSceneModel

    ENABLED_3D_PLOTTING = True

except ModuleNotFoundError:
    ENABLED_3D_PLOTTING = False

try:
    import matplotlib.pyplot as plt

    ENABLED_2D_PLOTTING = True

except ModuleNotFoundError:
    ENABLED_2D_PLOTTING = False


def show_2d(figure=None):
    """
    Utility function to show a matplotlib figure.

    Parameters
    ----------
    figure : object
        matplotlib figure to show (unused; ``plt.show`` displays all
        open figures).

    Returns
    -------

    """
    # No-op when matplotlib is unavailable.
    if ENABLED_2D_PLOTTING:
        plt.show()


def show_3d(figure):
    """
    Utility function to show a MayaVi figure.

    Parameters
    ----------
    figure : object
        MayaVi figure, or list of figures, to show.

    Returns
    -------

    """
    if not ENABLED_3D_PLOTTING:
        return None

    figures = figure if isinstance(figure, list) else [figure]

    for each in figures:
        # Traits-based windows (e.g. VolumeSlicer) bring up their own UI;
        # anything else is handed to the generic mlab event loop.
        if hasattr(each, 'scene3d'):
            each.configure_traits()
        else:
            mlab.show()


def show(figure=None):
    """
    Utility function to show a figure regardless of the library being used.

    Parameters
    ----------
    figure : object, optional
        matplotlib or MayaVi figure to show.

    Returns
    -------

    """
    # Inspect the first figure to decide which backend should display it.
    probe = figure[0] if isinstance(figure, list) else figure

    if ENABLED_3D_PLOTTING and (isinstance(probe, MlabSceneModel) or hasattr(probe, 'scene3d')):
        show_3d(figure)
    else:
        show_2d(figure)


def plot_trace(*args, axis=None, colour='black', line_style='solid', title=None, **kwargs):
    """
    Utility function to plot individual traces using matplotlib.

    Parameters
    ----------
    args : arrays
        Optional time grid and signal to be plotted.
    axis : matplotlib axis, optional
        Axis in which to make the plotting, defaults to new empty one.
    colour : str, optional
        Colour to apply to the line, defaults to black.
    line_style : str, optional
        Line style to be used.
    title : str, optional
        Figure title, defaults to empty title.

    Returns
    -------
    matplotlib axis
        Generated matplotlib axis

    """
    if not ENABLED_2D_PLOTTING:
        return None

    if axis is None:
        figure, axis = plt.subplots(1, 1)

    axis.plot(*args, c=colour, linestyle=line_style, **kwargs)

    if title is not None:
        axis.set_title(title)

    return axis
def plot_gather(*args, skip=1, time_range=None, norm=True, norm_trace=True,
                colour='black', line_style='solid', title=None, axis=None, **kwargs):
    """
    Utility function to plot a gather using matplotlib.

    Traces are laid out side by side along the horizontal axis (each offset
    by a constant shift) with time running down the vertical axis.

    Parameters
    ----------
    args : arrays
        Optional trace ID grid, optional time grid and signal to be plotted.
    skip : int, optional
        Traces to skip, defaults to 1.
    time_range : tuple, optional
        Range of time to plot, defaults to all time.
    norm : bool, optional
        Whether or not to normalise the gather, defaults to True.
    norm_trace : bool, optional
        Whether or not to normalise trace by trace, defaults to True.
    axis : matplotlib axis, optional
        Axis in which to make the plotting, defaults to new empty one.
    colour : str, optional
        Colour to apply to the lines, defaults to black.
    line_style : str, optional
        Line style to be used.
    title : str, optional
        Figure title, defaults to empty title.

    Returns
    -------
    tuple
        The per-trace horizontal shift offsets (ndarray of shape
        (num_plotted_traces, 1)) and the matplotlib axis plotted into.

    """

    if not ENABLED_2D_PLOTTING:
        return None

    # Positional arguments are, in order: [trace_axis,] [time_axis,] signal.
    if len(args) > 2:
        trace_axis = args[0]
        time_axis = args[1]
        signal = args[2]

    elif len(args) > 1:
        trace_axis = None
        time_axis = args[0]
        signal = args[1]

    else:
        trace_axis = None
        time_axis = None
        signal = args[0]

    if axis is None:
        figure, axis = plt.subplots(1, 1)

    if time_range is None:
        time_range = (0, signal.shape[-1])

    # Global normalisation to the gather's absolute maximum; the epsilon
    # keeps the division safe for an all-zero signal.
    if norm is True:
        signal = signal / (np.max(np.abs(signal))+1e-31)

    num_traces = signal.shape[0]

    # Per-trace normalisation so every trace spans a comparable range.
    if norm_trace is True:
        signal = signal / (np.max(np.abs(signal), axis=-1).reshape((num_traces, 1))+1e-31)

    # Subsample traces and restrict to the requested time window.
    signal_under = signal[0:num_traces:skip, time_range[0]:time_range[1]]
    num_under_traces = signal_under.shape[0]

    # Constant horizontal offset between neighbouring traces (1.10 leaves a
    # small gap between unit-normalised traces).
    shift = np.arange(0, num_under_traces) * 1.10
    shift = np.reshape(shift, (shift.shape[0], 1))

    # Transpose so each column is one shifted trace, plotted against time.
    signal_shifted = np.transpose(signal_under + shift)

    if time_axis is None:
        time_axis = np.linspace(0, time_range[1]-time_range[0]-1, time_range[1]-time_range[0], endpoint=False)

    time_axis = np.broadcast_to(np.reshape(time_axis, (time_axis.shape[0], 1)), signal_shifted.shape)

    axis.plot(signal_shifted, time_axis, c=colour, linestyle=line_style, **kwargs)
    # Inverted y-limits so time increases downwards, seismic-style.
    axis.set_ylim(time_axis[-1, 0], time_axis[0, 0])

    axis.set_xlabel('trace')
    axis.set_ylabel('time')

    if trace_axis is None:
        trace_axis = np.linspace(0, num_traces-1, num_traces, endpoint=False)

    trace_axis = [str(each) for each in trace_axis]

    # NOTE(review): trace_axis has num_traces labels but shift has
    # num_under_traces positions, so ticks and labels can disagree in
    # length when skip > 1 — confirm intended behaviour.
    axis.set_xticks(shift[::2])
    axis.set_xticklabels(trace_axis[::2])

    if title is not None:
        axis.set_title(title)

    return shift, axis
class Shot(ProblemBase):
    """
    A Shot is an event in which one or more transducers act as sources with a given wavelet and one or more
    transducers act as receivers and record some observed data.

    Therefore a shot object maintains data about the ids of the transducer locations that will act as sources,
    the ids of the transducer locations that will act as receivers, as well as the wavelets that will be fired and
    the observed data that is recorded.

    Parameters
    ----------
    id : int
        Identifier assigned to this shot.
    name : str
        Optional name for the shot.
    problem : Problem
        Problem to which the Shot belongs.
    geometry : Geometry
        Geometry referenced by the source/receiver transducer locations of the shot.
    sources : list
        Sources with which to initialise the shot, defaults to empty.
    receivers : list
        Receivers with which to initialise the shot, defaults to empty.
    grid : Grid or any of Space or Time
        Grid on which the Acquisitions is defined

    """

    def __init__(self, id, name=None, problem=None, **kwargs):
        # Derive a stable, zero-padded default name from the numeric ID.
        name = name or 'shot_%05d' % id
        super().__init__(name, problem, **kwargs)

        if id < 0:
            raise ValueError('The shot needs a positive ID')

        self.id = id

        # The geometry comes from the owning problem when available,
        # otherwise it must be passed in explicitly.
        if problem is not None:
            geometry = problem.geometry
        else:
            geometry = kwargs.pop('geometry', None)

        if geometry is None:
            raise ValueError('A Shot has be defined with respect to a Geometry')

        self._geometry = geometry
        # Back-reference set by Acquisitions.add() when the shot is registered.
        self._acquisitions = None

        # Ordered so that trace ordering matches insertion order of locations.
        self._sources = OrderedDict()
        self._receivers = OrderedDict()
        self.wavelets = None
        self.observed = None

        sources = kwargs.pop('sources', None)
        receivers = kwargs.pop('receivers', None)

        # Traces containers are only created when both endpoint sets are known.
        if sources is not None and receivers is not None:
            for source in sources:
                self._sources[source.id] = source

            for receiver in receivers:
                self._receivers[receiver.id] = receiver

            self.wavelets = Traces('wavelets', transducer_ids=self.source_ids, grid=self.grid)
            self.observed = Traces('observed', transducer_ids=self.receiver_ids, grid=self.grid)

    @property
    def geometry(self):
        # Geometry referenced by this shot's source/receiver locations.
        return self._geometry

    @property
    def source_ids(self):
        """
        Get ids of sources in this Shot in a list.

        """
        return list(self._sources.keys())

    @property
    def receiver_ids(self):
        """
        Get ids of receivers in this Shot in a list.

        """
        return list(self._receivers.keys())

    @property
    def sources(self):
        """
        Get sources in this Shot as a list.

        """
        return list(self._sources.values())

    @property
    def receivers(self):
        """
        Get receivers in this Shot as a list.

        """
        return list(self._receivers.values())

    @property
    def num_sources(self):
        """
        Get number of sources in the Shot.

        """
        return len(self.source_ids)

    @property
    def num_receivers(self):
        """
        Get number of receivers in the Shot.

        """
        return len(self.receiver_ids)

    @property
    def source_coordinates(self):
        """
        Get the coordinates of all sources packed in an array format.

        Coordinates are defined as a 2 or 3-dimensional array with shape (n_sources, n_dimensions).

        """
        coordinates = np.zeros((self.num_sources, self.space.dim), dtype=np.float32)
        source_index = 0
        for source in self.sources:
            coordinates[source_index, :] = source.coordinates
            source_index += 1

        return coordinates

    @property
    def receiver_coordinates(self):
        """
        Get the coordinates of all receivers packed in an array format.

        Coordinates are defined as a 2 or 3-dimensional array with shape (n_receivers, n_dimensions).

        """
        coordinates = np.zeros((self.num_receivers, self.space.dim), dtype=np.float32)
        receiver_index = 0
        for receiver in self.receivers:
            coordinates[receiver_index, :] = receiver.coordinates
            receiver_index += 1

        return coordinates

    def sub_problem(self, shot, sub_problem):
        """
        Create a subset object for a certain shot.

        A SubProblem contains everything that is needed to fully determine how to run a particular shot.
        This method takes care of generating a new Shot object that is linked to this new SubProblem.

        Parameters
        ----------
        shot : Shot
            Shot for which the SubProblem is being generated.
        sub_problem : SubProblem
            Container for the sub-problem being generated.

        Returns
        -------
        Shot
            Newly created Shot instance.

        """
        # NOTE(review): the `shot` parameter is immediately shadowed by the
        # new instance below — confirm the parameter is intentionally unused.
        shot = Shot(self.id,
                    name=self.name, problem=sub_problem,
                    grid=self.grid, geometry=sub_problem.geometry)

        # Re-resolve every location against the sub-problem's geometry so the
        # new shot references that geometry's objects, not this one's.
        for source_id in self.source_ids:
            location = sub_problem.geometry.get(source_id)
            shot._sources[location.id] = location

        for receiver_id in self.receiver_ids:
            location = sub_problem.geometry.get(receiver_id)
            shot._receivers[location.id] = location

        # Traces are shared by reference, not copied.
        if self.wavelets is not None:
            shot.wavelets = self.wavelets

        if self.observed is not None:
            shot.observed = self.observed

        return shot

    def plot(self, **kwargs):
        """
        Plot wavelets and observed for this shot if they are allocated.

        Parameters
        ----------
        kwargs
            Arguments for plotting.

        Returns
        -------
        axes
            Axes on which the plotting is done.

        """
        axes = []

        if self.wavelets is not None and self.wavelets.allocated:
            axes.append(self.wavelets.plot(**kwargs))

        if self.observed is not None and self.observed.allocated:
            axes.append(self.observed.plot(**kwargs))

        return axes

    def plot_wavelets(self, **kwargs):
        """
        Plot wavelets for this shot if they are allocated.

        Parameters
        ----------
        kwargs
            Arguments for plotting.

        Returns
        -------
        axes
            Axes on which the plotting is done.

        """
        if self.wavelets is not None and self.wavelets.allocated:
            return self.wavelets.plot(**kwargs)

    def plot_observed(self, **kwargs):
        """
        Plot observed for this shot if they are allocated.

        Parameters
        ----------
        kwargs
            Arguments for plotting.

        Returns
        -------
        axes
            Axes on which the plotting is done.

        """
        if self.observed is not None and self.observed.allocated:
            return self.observed.plot(**kwargs)

    def append_observed(self, *args, **kwargs):
        """
        Append the shot to the corresponding Acquisitions file.

        See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method.

        Returns
        -------

        """
        kwargs['parameter'] = 'acquisitions'
        kwargs['version'] = kwargs.get('version', 0)

        # Delegates to the owning Acquisitions container (set in add()).
        self._acquisitions.append(*args, **kwargs)

    def __get_desc__(self):
        # Serialisable description of the shot; trace payloads are only
        # included when they have been allocated.
        description = {
            'id': self.id,
            'num_sources': self.num_sources,
            'num_receivers': self.num_receivers,
            'source_ids': self.source_ids,
            'receiver_ids': self.receiver_ids,
        }

        if self.wavelets is not None and self.wavelets.allocated:
            description['wavelets'] = self.wavelets.__get_desc__()

        if self.observed is not None and self.observed.allocated:
            description['observed'] = self.observed.__get_desc__()

        return description

    def __set_desc__(self, description):
        # Rebuild the shot from a description produced by __get_desc__;
        # locations are resolved against the geometry when one is attached.
        self.id = description.id

        for source_id in description.source_ids:
            if self._geometry:
                source = self._geometry.get(source_id)
            else:
                source = None
            self._sources[source_id] = source

        for receiver_id in description.receiver_ids:
            if self._geometry:
                receiver = self._geometry.get(receiver_id)
            else:
                receiver = None
            self._receivers[receiver_id] = receiver

        if 'wavelets' in description:
            self.wavelets = Traces('wavelets', transducer_ids=self.source_ids, grid=self.grid)
            self.wavelets.__set_desc__(description.wavelets)

        if 'observed' in description:
            self.observed = Traces('observed', transducer_ids=self.receiver_ids, grid=self.grid)
            self.observed.__set_desc__(description.observed)
class Acquisitions(ProblemBase):
    """
    Acquisitions establish a series of shots that will be or have been fired to generate data.

    A shot is an event in which one or more transducer locations act as sources with a given
    wavelet and one or more transducer locations act as receivers and record some observed data.

    Shots are identified through a numerical ID, which is >= 0.

    Shots can be added through ``Acquisitions.add(shot)`` and can be accessed through
    ``Acquisitions.get(shot_id)``.

    The Acquisitions also provides utilities for loading and dumping these shots and their data.

    Parameters
    ----------
    name : str
        Alternative name to give to the medium.
    problem : Problem
        Problem to which the Acquisitions belongs.
    geometry : Geometry
        Geometry referenced by the source/receiver transducer locations of the shot.
    grid : Grid or any of Space or Time
        Grid on which the Acquisitions is defined

    """

    def __init__(self, name='acquisitions', problem=None, **kwargs):
        super().__init__(name, problem, **kwargs)

        if problem is not None:
            geometry = problem.geometry
        else:
            geometry = kwargs.pop('geometry', None)

        if geometry is None:
            raise ValueError('An Acquisitions has to be defined with respect to a Geometry')

        self._geometry = geometry

        # Shots are kept in insertion order, keyed by their integer ID.
        self._shots = OrderedDict()
        # Rolling pool consumed by select_shot_ids until all shots are used.
        self._shot_selection = []

    @property
    def shots(self):
        """
        Get all shots in the Acquisitions as a list.

        """
        return list(self._shots.values())

    @property
    def shot_ids(self):
        """
        Get all IDs of shots in the Acquisitions as a list.

        """
        return list(self._shots.keys())

    @property
    def num_shots(self):
        """
        Get number of shots in the Acquisitions.

        """
        return len(self.shot_ids)

    @property
    def num_sources_per_shot(self):
        """
        Get maximum number of sources in any shot.

        """
        # A generator expression (instead of max(*list)) also works when there
        # is exactly one shot, where max(*[x]) would degenerate to max(x) and
        # raise a TypeError.
        return max(each.num_sources for each in self._shots.values())

    @property
    def num_receivers_per_shot(self):
        """
        Get maximum number of receivers in any shot.

        """
        return max(each.num_receivers for each in self._shots.values())

    @property
    def remaining_shots(self):
        """
        Get dict of all shots that have no observed allocated.

        """
        shots = OrderedDict()
        for shot_id, shot in self._shots.items():
            if not shot.observed.allocated:
                shots[shot_id] = shot

        return shots

    @property
    def remaining_shot_ids(self):
        """
        Get list of all shot IDs that have no observed allocated.

        """
        shot_ids = []
        for shot_id, shot in self._shots.items():
            if not shot.observed.allocated:
                shot_ids.append(shot_id)

        return shot_ids

    def add(self, item):
        """
        Add a new shot to the Acquisitions.

        Parameters
        ----------
        item : Shot
            Shot to be added to the Acquisitions.

        Returns
        -------

        """
        if item.id in self._shots.keys():
            raise ValueError('Shot with ID "%d" already exists in the Acquisitions' % item.id)

        self._shots[item.id] = item
        item._acquisitions = self

    def get(self, id):
        """
        Get a shot from the Acquisitions with a known id.

        Parameters
        ----------
        id : int
            Identifier of the shot.

        Returns
        -------
        Shot
            Found Shot.

        """
        if isinstance(id, (np.int32, np.int64)):
            id = int(id)

        if not isinstance(id, int) or id < 0:
            raise ValueError('Shot IDs have to be positive integer numbers')

        return self._shots[id]

    def set(self, item):
        """
        Change an existing shot in the Acquisitions.

        Parameters
        ----------
        item : Shot
            Shot to be modified in the Acquisitions.

        Returns
        -------

        """
        if item.id not in self._shots.keys():
            raise ValueError('Shot with ID "%d" does not exist in the Acquisitions' % item.id)

        self._shots[item.id] = item

    def select_shot_ids(self, start=None, end=None, num=None, every=1, randomly=False):
        """
        Select a number of shots according to the rules given in the arguments to the method.

        For every call to this method a new group of shots will be selected according to
        those rules until all shots have been selected. At that point, the selection will
        start again.

        Parameters
        ----------
        start : int, optional
            Start of the slice, defaults to the first id.
        end : int, optional
            End of the slice, defaults to the last id.
        num : int, optional
            Number of shots to select every time the method is called,
            defaults to all shots in the slice.
        every : int, optional
            How many shots to skip in the selection, defaults to 1, which means taking all shots
            subsequently.
        randomly : bool, optional
            Whether to select the shots at random or in order, defaults to False.

        Returns
        -------
        list
            List with selected shot IDs.

        """
        if not len(self._shot_selection):
            ids_slice = slice(start or 0, end)
            shot_ids = self.shot_ids
            shot_ids.sort()
            shot_ids = shot_ids[ids_slice]

            # num is documented as optional: default to the whole slice so the
            # ceil division below cannot fail with None or divide by zero.
            if num is None:
                num = len(shot_ids) or 1

            if randomly is True:
                self._shot_selection = np.random.permutation(shot_ids).tolist()

            else:
                num_groups = int(np.ceil(len(shot_ids) / num))

                self._shot_selection = []

                for group_index in range(num_groups):

                    group = []
                    num_remaining = len(shot_ids)

                    start_index = group_index if every > 1 else 0
                    for index in range(start_index, num_remaining, every):
                        group.append(shot_ids[index])

                        if len(group) == num or not len(shot_ids):
                            break

                    self._shot_selection += group
                    # When skipping, the same ordered pool is reused; otherwise
                    # remove the already-selected IDs from the pool.
                    shot_ids = shot_ids if every > 1 else list(set(shot_ids) - set(group))

        next_slice = self._shot_selection[:num]
        self._shot_selection = self._shot_selection[num:]

        return next_slice

    def default(self):
        """
        Fill the container with the default configuration.

        In this case, that means that every location in the Geometry
        acts as a source once while every location acts as a receiver.

        This generates as many shots as there are locations available in the
        Geometry. Each Shot only has one source and as many receivers as locations
        are in the Geometry.

        Returns
        -------

        """
        for source in self._geometry.locations:
            receivers = self._geometry.locations

            self.add(Shot(source.id,
                          sources=[source], receivers=receivers,
                          geometry=self._geometry, problem=self.problem))

    def plot(self, **kwargs):
        """
        Plot wavelets and observed for all shots if they are allocated.

        Parameters
        ----------
        kwargs
            Arguments for plotting.

        Returns
        -------

        """
        self.plot_wavelets(**kwargs)
        self.plot_observed(**kwargs)

    def _plot(self, update):
        # Shared scaffolding for the interactive shot-gather viewers: a single
        # axis plus a slider that scrubs through shot IDs.
        if not ENABLED_2D_PLOTTING:
            return None

        figure, axis = plt.subplots(1, 1)
        plt.subplots_adjust(bottom=0.25)
        axis.margins(x=0)

        ax_shot = plt.axes([0.15, 0.1, 0.7, 0.03])
        slider = Slider(ax_shot, 'shot ID',
                        self.shot_ids[0], self.shot_ids[-1],
                        valinit=self.shot_ids[0], valstep=1)

        update = functools.partial(update, figure, axis)
        update(self.shot_ids[0])

        slider.on_changed(update)
        # Keep a reference so the slider is not garbage collected.
        axis.slider = slider

        return axis

    def plot_wavelets(self, **kwargs):
        """
        Plot wavelets for all shots if they are allocated.

        Parameters
        ----------
        kwargs
            Arguments for plotting.

        Returns
        -------

        """
        # Probe the first shot rather than assuming an ID of 0 exists, and
        # tolerate shots whose wavelets have not been created yet.
        if not self.num_shots:
            return None

        first_shot = next(iter(self._shots.values()))
        if first_shot.wavelets is None or not first_shot.wavelets.allocated:
            return None

        kwargs['plot'] = False

        def update(figure, axis, shot_id):
            axis.clear()

            self.get(int(shot_id)).plot_wavelets(axis=axis, **kwargs)
            axis.set_title(axis.get_title() + ' - shot %d' % shot_id)

            figure.canvas.draw_idle()

        return self._plot(update)

    def plot_observed(self, **kwargs):
        """
        Plot observed for all shots if they are allocated.

        Parameters
        ----------
        kwargs
            Arguments for plotting.

        Returns
        -------

        """
        if not self.num_shots:
            return None

        first_shot = next(iter(self._shots.values()))
        if first_shot.observed is None or not first_shot.observed.allocated:
            return None

        kwargs['plot'] = False

        def update(figure, axis, shot_id):
            axis.clear()

            self.get(int(shot_id)).plot_observed(axis=axis, **kwargs)
            axis.set_title(axis.get_title() + ' - shot %d' % shot_id)

            figure.canvas.draw_idle()

        return self._plot(update)

    def sub_problem(self, shot, sub_problem):
        """
        Create a subset object for a certain shot.

        A SubProblem contains everything that is needed to fully determine how to run a particular shot.
        This method takes care of selecting the portions of the Acquisitions that are needed
        for a given shot.

        Parameters
        ----------
        shot : Shot
            Shot for which the SubProblem is being generated.
        sub_problem : SubProblem
            Container for the sub-problem being generated.

        Returns
        -------
        Acquisitions
            Newly created Acquisitions instance.

        """
        sub_acquisitions = Acquisitions(name=self.name,
                                        geometry=sub_problem.geometry,
                                        problem=sub_problem, grid=self.grid)
        sub_acquisitions.add(shot)

        return sub_acquisitions

    def __get_desc__(self):
        description = {
            'num_shots': self.num_shots,
            'shots': [],
        }

        for shot in self.shots:
            description['shots'].append(shot.__get_desc__())

        return description

    def __set_desc__(self, description):
        for shot_desc in description.shots:
            if shot_desc.id not in self._shots:
                shot = Shot(shot_desc.id,
                            geometry=self._geometry,
                            problem=self.problem, grid=self.grid)
                self.add(shot)

            shot = self.get(shot_desc.id)
            shot.__set_desc__(shot_desc)
class Gridded:
    """
    Objects of this type are defined over a spatio-temporal grid.

    This grid can be provided either as a Grid object or as any of Space, Time or SlowTime
    objects that define a grid.

    Parameters
    ----------
    grid : Grid, optional
        Existing grid, if not provided one will be created.
    space : Space, optional
    time : Time, optional
    slow_time : SlowTime, optional

    """

    def __init__(self, grid=None, space=None, time=None, slow_time=None, **kwargs):
        if grid is None:
            grid = Grid(space, time, slow_time)

        else:
            # Re-wrap so this instance owns its own Grid object rather than
            # sharing the caller's.
            grid = Grid(grid.space, grid.time, grid.slow_time)

        self._grid = grid

    @property
    def grid(self):
        """
        Access the grid.

        """
        return self._grid

    @property
    def space(self):
        """
        Access the space grid.

        """
        return self._grid.space

    @property
    def time(self):
        """
        Access the time grid.

        """
        return self._grid.time

    @property
    def slow_time(self):
        """
        Access the slow time grid.

        """
        return self._grid.slow_time

    def resample(self, grid=None, space=None, time=None, slow_time=None):
        raise NotImplementedError('Resampling has not been implemented yet.')


class Saved:
    """
    Saved objects include helper functions to interact with the file system.

    Classes that inherit from Saved need to define ``__get_desc__`` and ``__set_desc__``
    to define how the object is described to be saved and how a loaded description is
    digested by the class respectively.

    ``__get_desc__`` expects a dict-like object with all the attributes that need to
    be stored to disk and is called when dumping the object.

    ``__set_desc__`` will take a dict-like object as a parameter, which it can then
    use to set the state of the object, and is called when loading it.

    Parameters
    ----------
    name : str
        Name of the saved object.

    """

    def __init__(self, name, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.name = name

    def dump(self, *args, **kwargs):
        """
        Dump the object according to the ``__get_desc__`` description.

        See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method.

        Returns
        -------

        """
        description = self.__get_desc__()

        kwargs['parameter'] = kwargs.get('parameter', self.name)
        with h5.HDF5(*args, **kwargs, mode='w') as file:
            file.dump(description)

    def append(self, *args, **kwargs):
        """
        Append the object to a file according to the ``__get_desc__`` description.

        See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method.

        Returns
        -------

        """
        # If the file does not exist yet, appending is just a first dump.
        if not h5.file_exists(*args, **kwargs):
            self.dump(*args, **kwargs)
            return

        description = self.__get_desc__()

        kwargs['parameter'] = kwargs.get('parameter', self.name)
        with h5.HDF5(*args, **kwargs, mode='a') as file:
            file.append(description)

    def load(self, *args, **kwargs):
        """
        Load the object using ``__set_desc__`` to digest the description.

        See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method.

        Returns
        -------

        """
        # Honour a caller-supplied parameter override, mirroring dump/append
        # (previously load unconditionally overwrote it with self.name).
        kwargs['parameter'] = kwargs.get('parameter', self.name)
        with h5.HDF5(*args, **kwargs, mode='r') as file:
            description = file.load()

        self.__set_desc__(description)

    def __get_desc__(self):
        return {}

    def __set_desc__(self, description):
        pass


class GriddedSaved(Saved, Gridded):
    """
    Objects of this type include utilities to dump and load the instance, taking into
    account that it is defined over a grid.

    """

    def dump(self, *args, **kwargs):
        """
        Dump the object according to the ``__get_desc__`` description.

        It will ensure that the grid of the instance is also dumped to disk.

        See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method.

        Returns
        -------

        """
        grid_description = self.grid_description()

        description = self.__get_desc__()
        grid_description.update(description)

        kwargs['parameter'] = kwargs.get('parameter', self.name)
        with h5.HDF5(*args, **kwargs, mode='w') as file:
            file.dump(grid_description)

    def append(self, *args, **kwargs):
        """
        Append the object to a file according to the ``__get_desc__`` description.

        It will ensure that the grid of the instance is also dumped to disk.

        See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method.

        Returns
        -------

        """
        if not h5.file_exists(*args, **kwargs):
            self.dump(*args, **kwargs)
            return

        grid_description = self.grid_description()

        description = self.__get_desc__()
        grid_description.update(description)

        kwargs['parameter'] = kwargs.get('parameter', self.name)
        with h5.HDF5(*args, **kwargs, mode='a') as file:
            file.append(grid_description)

    def load(self, *args, **kwargs):
        """
        Load the object using ``__set_desc__`` to digest the description.

        It will use the grid loaded from file to determine the grid of the instance.

        See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method.

        Returns
        -------

        """
        # Same parameter-override semantics as Saved.load.
        kwargs['parameter'] = kwargs.get('parameter', self.name)
        with h5.HDF5(*args, **kwargs, mode='r') as file:
            description = file.load()

        # TODO If there's already a grid and they don't match, resample instead of overwriting
        if 'space' in description:
            space = Space(shape=description.space.shape,
                          spacing=description.space.spacing,
                          extra=description.space.extra,
                          absorbing=description.space.absorbing)

            self._grid.space = space

        if 'time' in description:
            time = Time(start=description.time.start,
                        stop=description.time.stop,
                        step=description.time.step,
                        num=description.time.num)

            self._grid.time = time

        if 'slow_time' in description:
            pass

        self.__set_desc__(description)

    def grid_description(self):
        """
        Get a description of the grid of the object.

        Returns
        -------
        dict
            Description of the grid.

        """
        grid_description = dict()

        if self.space is not None:
            space = self.space
            grid_description['space'] = {
                'shape': space.shape,
                'spacing': space.spacing,
                'extra': space.extra,
                'absorbing': space.absorbing,
            }

        if self.time is not None:
            time = self.time
            grid_description['time'] = {
                'start': time.start,
                'stop': time.stop,
                'step': time.step,
                'num': time.num,
            }

        if self.slow_time is not None:
            pass

        return grid_description

    def resample(self, grid=None, space=None, time=None, slow_time=None):
        super().resample(grid=grid, space=space, time=time, slow_time=slow_time)


class ProblemBase(GriddedSaved):
    """
    Base class for the different components of the problem that need to have access to it and
    that also create sub-problems.

    Parameters
    ----------
    name : str
        Name of the object.
    problem : Problem
        Problem to which the object belongs.
    grid : Grid or any of Space or Time
        Grid on which the object is defined

    """

    def __init__(self, name, problem, *args, **kwargs):
        # Inherit the problem's grid components unless explicitly overridden.
        if problem is not None:
            kwargs['space'] = kwargs.get('space', problem.space)
            kwargs['time'] = kwargs.get('time', problem.time)
            kwargs['slow_time'] = kwargs.get('slow_time', problem.slow_time)

        super().__init__(name, *args, **kwargs)

        self._problem = problem

    @property
    def problem(self):
        """
        Access problem object.

        """
        return self._problem

    def sub_problem(self, shot, sub_problem):
        """
        Create a subset object for a certain shot.

        A SubProblem contains everything that is needed to fully determine how to run a particular shot.
        This method takes care of selecting the portions of the object that are needed
        for a given shot.

        By default, this has no effect.

        Parameters
        ----------
        shot : Shot
            Shot for which the SubProblem is being generated.
        sub_problem : SubProblem
            Container for the sub-problem being generated.

        Returns
        -------
        ProblemBase
            ProblemBase instance.

        """
        return self
class Data(GriddedSaved):
    """
    Objects of this type represent Data defined over a grid and on which mathematical
    operations might be performed. This data might or might not be structured.

    Subclasses are expected to override the arithmetic dunders; the base class
    raises NotImplementedError for every operator.

    """

    def __add__(self, other):
        raise NotImplementedError('Operator + has not been implemented for class %s' % self.__class__.__name__)

    def __sub__(self, other):
        raise NotImplementedError('Operator - has not been implemented for class %s' % self.__class__.__name__)

    def __mul__(self, other):
        raise NotImplementedError('Operator * has not been implemented for class %s' % self.__class__.__name__)

    def __pow__(self, power, modulo=None):
        raise NotImplementedError('Operator ** has not been implemented for class %s' % self.__class__.__name__)

    def __truediv__(self, other):
        raise NotImplementedError('Operator / has not been implemented for class %s' % self.__class__.__name__)

    def __floordiv__(self, other):
        raise NotImplementedError('Operator // has not been implemented for class %s' % self.__class__.__name__)

    def __iadd__(self, other):
        raise NotImplementedError('Operator + has not been implemented for class %s' % self.__class__.__name__)

    def __isub__(self, other):
        raise NotImplementedError('Operator - has not been implemented for class %s' % self.__class__.__name__)

    def __imul__(self, other):
        raise NotImplementedError('Operator * has not been implemented for class %s' % self.__class__.__name__)

    def __ipow__(self, power, modulo=None):
        raise NotImplementedError('Operator ** has not been implemented for class %s' % self.__class__.__name__)

    def __itruediv__(self, other):
        raise NotImplementedError('Operator / has not been implemented for class %s' % self.__class__.__name__)

    def __ifloordiv__(self, other):
        raise NotImplementedError('Operator // has not been implemented for class %s' % self.__class__.__name__)

    __radd__ = __add__
    __rsub__ = __sub__
    __rmul__ = __mul__
    __rtruediv__ = __truediv__
    __rfloordiv__ = __floordiv__


class StructuredData(Data):
    """
    Objects of this type represent data defined over a structured grid.

    The grid on which the data lives is fully defined by the ``shape`` parameter. Optionally,
    an ``extended_shape`` may be provided if the data is defined over an inner and extended domain.
    If an extended domain is defined, the ``inner`` parameter can be used to determine the position
    of the inner domain within the larger extended domain.

    Parameters
    ----------
    name : str
        Name of the data.
    shape : tuple
        Shape of the inner domain of the data.
    extended_shape : tuple, optional
        Shape of the extended domain of the data, defaults to the ``shape``.
    inner : tuple, optional
        Tuple of slices defining the location of the inner domain inside the
        extended domain, defaults to the inner domain being centred.
    dtype : data-type, optional
        Data type of the data, defaults to float32.
    data : ndarray, optional
        Data with which to initialise the internal buffer, defaults to a new array. By default,
        no copies of the buffer are made if provided.
    grid : Grid or any of Space or Time
        Grid on which the Problem is defined

    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._data = None

        shape = kwargs.pop('shape', None)
        extended_shape = kwargs.pop('extended_shape', None)
        inner = kwargs.pop('inner', None)
        dtype = kwargs.pop('dtype', np.float32)

        if shape is not None:
            extended_shape = extended_shape or shape

            if inner is None:
                # Centre the inner domain within the extended one, as documented
                # and as pad_data assumes: half of the difference on each side
                # (previously the slice started at the full difference, which
                # placed the inner domain flush against the far edge and read
                # a region that pad_data had not filled with the data).
                extra = [each_extended - each_shape for each_extended, each_shape in
                         zip(extended_shape, shape)]
                inner = tuple([slice(each_extra // 2, each_extra // 2 + each_shape)
                               for each_extra, each_shape in zip(extra, shape)])

            self._shape = shape
            self._extended_shape = extended_shape
            self._inner = inner
            self._dtype = dtype

        data = kwargs.pop('data', None)

        if data is not None:
            self._data = self.pad_data(data)

    def alike(self, *args, **kwargs):
        """
        Create a data object that shares its characteristics with this object.

        The same parameters as those given to ``__init__`` are valid here. Otherwise the
        new object will be configured to be like this one.

        Returns
        -------
        StructuredData
            Newly created StructuredData.

        """
        kwargs['shape'] = kwargs.pop('shape', self.shape)
        kwargs['extended_shape'] = kwargs.pop('extended_shape', self.extended_shape)
        kwargs['inner'] = kwargs.pop('inner', self.inner)
        kwargs['dtype'] = kwargs.pop('dtype', self.dtype)
        kwargs['grid'] = kwargs.pop('grid', self.grid)

        return self.__class__(*args, **kwargs)

    def copy(self):
        """
        Create a deep copy of the data object.

        Returns
        -------
        StructuredData
            Newly created StructuredData.

        """
        return copy.deepcopy(self)

    @property
    def data(self):
        """
        Data values inside the inner domain, as an ndarray.

        """
        if self._data is None:
            self.allocate()

        return self._data[self._inner]

    @property
    def extended_data(self):
        """
        Data values inside the extended domain, as an ndarray.

        """
        if self._data is None:
            self.allocate()

        return self._data

    @property
    def shape(self):
        """
        Shape of the inner domain, as a tuple.

        """
        return self._shape

    @property
    def extended_shape(self):
        """
        Shape of the extended domain, as a tuple.

        """
        return self._extended_shape

    @property
    def inner(self):
        """
        Slices that determine the location of the inner domain with respect to the extended domain,
        as a tuple of slices.

        """
        return self._inner

    @property
    def allocated(self):
        """
        Whether or not the data has been allocated.

        """
        return self._data is not None

    @property
    def dtype(self):
        """
        Data-type of the data.

        """
        return self._dtype

    def allocate(self):
        """
        Allocate the data if this has not been allocated yet.

        Returns
        -------

        """
        if self._data is None:
            # np.empty: contents are uninitialised until filled by the caller.
            self._data = np.empty(self._extended_shape, dtype=self._dtype)

    def deallocate(self):
        """
        Deallocate the data.

        Returns
        -------

        """
        if self._data is not None:
            del self._data
            self._data = None
            gc.collect()

    def fill(self, value):
        """
        Fill the data with a certain value

        Parameters
        ----------
        value : float
            Value with which to fill the data.

        Returns
        -------

        """
        if self._data is None:
            self.allocate()

        self._data.fill(value)

    def pad_data(self, data):
        """
        Pad input data to match the extended shape of the StructuredData.

        Parameters
        ----------
        data : ndarray
            Array to pad.

        Returns
        -------
        ndarray
            Padded array.

        """
        shape = data.shape
        pad_widths = [each_extended - each_shape for each_extended, each_shape in
                      zip(self._extended_shape, shape)]
        # Split the padding between both ends; the far end takes the remainder
        # so odd differences still yield exactly the extended shape
        # (previously [d//2, d//2] dropped one cell for odd d).
        pad_widths = [[each // 2, each - each // 2] for each in pad_widths]

        if np.asarray(pad_widths).sum() > 0:
            return np.pad(data, pad_widths, mode='edge')

        else:
            return data

    def _prepare_op(self, other):
        # Work on a deep copy so binary operators do not mutate self.
        res = self.copy()
        other_data = self._prepare_other(other)

        return res, other_data

    def _prepare_other(self, other):
        # Unstructured Data cannot be combined with StructuredData. The
        # original nested isinstance checks made this raise unreachable.
        if isinstance(other, Data) and not isinstance(other, StructuredData):
            raise ValueError('Data of type %s and %s cannot be operated together' %
                             (type(self), type(other)))

        if isinstance(other, StructuredData):
            return other.extended_data

        return other

    def __add__(self, other):
        res, other_data = self._prepare_op(other)
        res.extended_data[:] = res.extended_data.__add__(other_data)

        return res

    def __sub__(self, other):
        res, other_data = self._prepare_op(other)
        res.extended_data[:] = res.extended_data.__sub__(other_data)

        return res

    def __mul__(self, other):
        res, other_data = self._prepare_op(other)
        res.extended_data[:] = res.extended_data.__mul__(other_data)

        return res

    def __pow__(self, power, modulo=None):
        res = self.copy()
        res.extended_data[:] = res.extended_data.__pow__(power)

        return res

    def __truediv__(self, other):
        res, other_data = self._prepare_op(other)
        res.extended_data[:] = res.extended_data.__truediv__(other_data)

        return res

    def __floordiv__(self, other):
        res, other_data = self._prepare_op(other)
        res.extended_data[:] = res.extended_data.__floordiv__(other_data)

        return res

    def __iadd__(self, other):
        res = self
        other_data = self._prepare_other(other)
        res.extended_data.__iadd__(other_data)

        return res

    def __isub__(self, other):
        res = self
        other_data = self._prepare_other(other)
        res.extended_data.__isub__(other_data)

        return res

    def __imul__(self, other):
        res = self
        other_data = self._prepare_other(other)
        res.extended_data.__imul__(other_data)

        return res

    def __ipow__(self, power, modulo=None):
        res = self
        res.extended_data.__ipow__(power)

        return res

    def __itruediv__(self, other):
        res = self
        other_data = self._prepare_other(other)
        res.extended_data.__itruediv__(other_data)

        return res

    def __ifloordiv__(self, other):
        res = self
        other_data = self._prepare_other(other)
        res.extended_data.__ifloordiv__(other_data)

        return res

    __radd__ = __add__
    __rsub__ = __sub__
    __rmul__ = __mul__
    __rtruediv__ = __truediv__
    __rfloordiv__ = __floordiv__

    def __get_desc__(self):
        if self._data is None:
            self.allocate()

        # Slices are serialised as strings because HDF5 descriptions cannot
        # store slice objects directly.
        inner = []
        for each in self._inner:
            inner.append([
                str(each.start),
                str(each.stop),
                str(each.step),
            ])

        description = {
            'shape': self._shape,
            'extended_shape': self._extended_shape,
            'inner': inner,
            'dtype': str(np.dtype(self._dtype)),
            'data': self.data,
        }

        return description

    def __set_desc__(self, description):
        self._shape = description.shape
        self._extended_shape = description.extended_shape
        self._dtype = np.dtype(description.dtype)

        inner = []
        for each in description.inner:
            inner.append(slice(
                int(each[0]) if each[0] != 'None' else None,
                int(each[1]) if each[1] != 'None' else None,
                int(each[2]) if each[2] != 'None' else None,
            ))

        self._inner = tuple(inner)

        # Lazily-loaded datasets expose .load(); eager ones are plain arrays.
        if hasattr(description.data, 'load'):
            data = description.data.load()
        else:
            data = description.data

        self.extended_data[:] = self.pad_data(data)
class ScalarField(StructuredData):
    """
    Objects of this type describe a scalar field defined over the spatial grid. Scalar fields
    can also be time-dependent.

    By default, the domain over which the field is defined is determined by the grid
    provided. This can be overwritten by providing a defined ``shape`` instead.

    Parameters
    ----------
    name : str
        Name of the data.
    time_dependent : bool, optional
        Whether or not the field is time-dependent, defaults to False.
    shape : tuple, optional
        Shape of the inner domain of the data.
    extended_shape : tuple, optional
        Shape of the extended domain of the data, defaults to the ``shape``.
    inner : tuple, optional
        Tuple of slices defining the location of the inner domain inside the
        extended domain, defaults to the inner domain being centred.
    dtype : data-type, optional
        Data type of the data, defaults to float32.
    grid : Grid or any of Space or Time
        Grid on which the Problem is defined

    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        time_dependent = kwargs.pop('time_dependent', False)
        self._time_dependent = time_dependent

        # Derive shapes from the grid only when no explicit shape was given
        # (StructuredData leaves _shape as None in that case). Time, when
        # present, is always the leading axis.
        if self.space is not None and self._shape is None:
            shape = ()
            extended_shape = ()
            inner = ()
            if self._time_dependent:
                shape += (self.time.num,)
                extended_shape += (self.time.extended_num,)
                inner += (self.time.inner,)

            shape += self.space.shape
            extended_shape += self.space.extended_shape
            inner += self.space.inner

            self._shape = shape
            self._extended_shape = extended_shape
            self._inner = inner

    def alike(self, *args, **kwargs):
        """
        Create a data object that shares its characteristics with this object.

        The same parameters as those given to ``__init__`` are valid here. Otherwise the
        new object will be configured to be like this one.

        Returns
        -------
        ScalarField
            Newly created ScalarField.

        """
        kwargs['time_dependent'] = kwargs.pop('time_dependent', self.time_dependent)

        return super().alike(*args, **kwargs)

    @property
    def time_dependent(self):
        """
        Whether or not the field is time dependent.

        """
        return self._time_dependent

    def plot(self, **kwargs):
        """
        Plot the inner domain of the field.

        Parameters
        ----------
        kwargs
            Arguments for plotting.

        Returns
        -------
        axes
            Axes on which the plotting is done.

        """
        title = kwargs.pop('title', self.name)
        plot = kwargs.pop('plot', True)

        axis = plotting.plot_scalar_field(self.data, title=title,
                                          origin=self.space.origin, limit=self.space.limit,
                                          **kwargs)

        if plot is True:
            plotting.show(axis)

        return axis

    def extended_plot(self, **kwargs):
        """
        Plot the extended domain of the field.

        Parameters
        ----------
        kwargs
            Arguments for plotting.

        Returns
        -------
        axes
            Axes on which the plotting is done.

        """
        title = kwargs.pop('title', self.name)
        plot = kwargs.pop('plot', True)

        axis = plotting.plot_scalar_field(self.extended_data, title=title,
                                          origin=self.space.pml_origin, limit=self.space.extended_limit,
                                          **kwargs)

        if plot is True:
            plotting.show(axis)

        return axis

    def __get_desc__(self):
        description = super().__get_desc__()
        description['time_dependent'] = self._time_dependent

        return description

    def __set_desc__(self, description):
        super().__set_desc__(description)

        self._time_dependent = description.time_dependent


class VectorField(ScalarField):
    """
    Objects of this type describe a vector field defined over the spatial grid. Vector fields
    can also be time-dependent.

    By default, the domain over which the field is defined is determined by the grid
    provided. This can be overwritten by providing a defined ``shape`` instead.

    Parameters
    ----------
    name : str
        Name of the data.
    dim : int, optional
        Number of dimensions for the vector field, defaults to the spatial dimensions.
    time_dependent : bool, optional
        Whether or not the field is time-dependent, defaults to False.
    shape : tuple, optional
        Shape of the inner domain of the data.
    extended_shape : tuple, optional
        Shape of the extended domain of the data, defaults to the ``shape``.
    inner : tuple, optional
        Tuple of slices defining the location of the inner domain inside the
        extended domain, defaults to the inner domain being centred.
    dtype : data-type, optional
        Data type of the data, defaults to float32.
    grid : Grid or any of Space or Time
        Grid on which the Problem is defined

    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # NOTE(review): the "missing" sentinel for dim is False rather than
        # None; both are falsy so `dim or self.space.dim` behaves the same,
        # but a desc for a space-less field would store dim=False — confirm.
        dim = kwargs.pop('dim', False)
        self._dim = dim

        # By this point ScalarField.__init__ has already derived _shape from
        # the grid, so _shape is not None and the vector axis is prepended
        # as the leading dimension.
        if self.space is not None and self._shape is not None:
            self._dim = dim or self.space.dim
            self._shape = (self._dim,) + self._shape
            self._extended_shape = (self._dim,) + self._extended_shape
            self._inner = (slice(0, None),) + self._inner

    def alike(self, *args, **kwargs):
        """
        Create a data object that shares its characteristics with this object.

        The same parameters as those given to ``__init__`` are valid here. Otherwise the
        new object will be configured to be like this one.

        Returns
        -------
        VectorField
            Newly created VectorField.

        """
        kwargs['dim'] = kwargs.pop('dim', self.dim)

        return super().alike(*args, **kwargs)

    @property
    def dim(self):
        """
        Number of dimensions of the vector field.

        """
        return self._dim

    def __get_desc__(self):
        description = super().__get_desc__()
        description['dim'] = self._dim

        return description

    def __set_desc__(self, description):
        super().__set_desc__(description)

        self._dim = description.dim


class Traces(StructuredData):
    """
    Objects of this type describe a set of time traces defined over the time grid.

    By default, the domain over which the field is defined is determined by the time grid
    provided. This can be overwritten by providing a defined ``shape`` instead.

    Parameters
    ----------
    name : str
        Name of the data.
    transducer_ids : list
        List of IDs to which the time traces correspond.
    shape : tuple, optional
        Shape of the inner domain of the data.
    extended_shape : tuple, optional
        Shape of the extended domain of the data, defaults to the ``shape``.
    inner : tuple, optional
        Tuple of slices defining the location of the inner domain inside the
        extended domain, defaults to the inner domain being centred.
    dtype : data-type, optional
        Data type of the data, defaults to float32.
    grid : Grid or any of Space or Time
        Grid on which the Problem is defined

    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        transducer_ids = kwargs.pop('transducer_ids', None)
        self._transducer_ids = transducer_ids

        # Shape is (num_transducers, num_time_steps) unless explicitly given.
        if self._transducer_ids is not None and self._shape is None:
            shape = (len(self._transducer_ids), self.time.num)
            extended_shape = (len(self._transducer_ids), self.time.extended_num)
            inner = (slice(0, None), self.time.inner)

            self._shape = shape
            self._extended_shape = extended_shape
            self._inner = inner

    def alike(self, *args, **kwargs):
        """
        Create a data object that shares its characteristics with this object.

        The same parameters as those given to ``__init__`` are valid here. Otherwise the
        new object will be configured to be like this one.

        Returns
        -------
        Traces
            Newly created Traces.

        """
        kwargs['transducer_ids'] = kwargs.pop('transducer_ids', self.transducer_ids)

        return super().alike(*args, **kwargs)

    @property
    def transducer_ids(self):
        """
        List of transducer IDs associated with the traces.

        """
        return self._transducer_ids

    @property
    def num_transducers(self):
        """
        Number of transducers.

        """
        return len(self._transducer_ids)

    def get(self, id):
        """
        Get one trace based on a transducer ID, selecting the inner domain.

        Parameters
        ----------
        id : int
            Transducer ID.

        Returns
        -------
        1d-array
            Time trace.

        """
        if self._data is None:
            self.allocate()

        # Rows are ordered exactly as transducer_ids; raises ValueError for
        # an unknown ID.
        index = list(self._transducer_ids).index(id)
        return self.data[index, :]

    def get_extended(self, id):
        """
        Get one trace based on a transducer ID, selecting the extended domain.

        Parameters
        ----------
        id : int
            Transducer ID.

        Returns
        -------
        1d-array
            Time trace.

        """
        if self._data is None:
            self.allocate()

        index = list(self._transducer_ids).index(id)
        return self.extended_data[index, :]

    def plot(self, **kwargs):
        """
        Plot the inner domain of the traces as a shot gather.

        Parameters
        ----------
        kwargs
            Arguments for plotting.

        Returns
        -------
        axes
            Axes on which the plotting is done.

        """
        title = kwargs.pop('title', self.name)
        plot = kwargs.pop('plot', True)
        # NOTE(review): divides by 1e-6, i.e. displays in microseconds —
        # assumes time.grid is expressed in seconds; confirm.
        time_axis = self.time.grid / 1e-6

        axis = plotting.plot_gather(self.transducer_ids, time_axis, self.data,
                                    title=title, **kwargs)

        if plot is True:
            plotting.show(axis)

        return axis

    def plot_one(self, id, **kwargs):
        """
        Plot the inner domain of one of the traces.

        Parameters
        ----------
        id : int
            Transducer ID.
        kwargs
            Arguments for plotting.

        Returns
        -------
        axes
            Axes on which the plotting is done.

        """
        title = kwargs.pop('title', self.name)
        plot = kwargs.pop('plot', True)
        trace = self.get(id)
        time_axis = self.time.grid / 1e-6

        axis = plotting.plot_trace(time_axis, trace,
                                   title=title, **kwargs)

        if plot is True:
            plotting.show(axis)

        return axis

    def __get_desc__(self):
        description = super().__get_desc__()
        description['num_transducers'] = self.num_transducers
        description['transducer_ids'] = self._transducer_ids

        return description

    def __set_desc__(self, description):
        super().__set_desc__(description)

        self._transducer_ids = description.transducer_ids
+ + The spatial grid consists of an inner domain defined by ``shape`` and + an external padding defined by ``extra``. Within this extra region, a + further sub-region is defined as absorbing for boundary purposes as defined + by ``absorbing``. + + The ``spacing`` defines the axis-wise spacing of the grid. + + Parameters + ---------- + shape : tuple + Shape of the inner domain. + spacing : tuple + Axis-wise spacing of the grid, in metres. + extra : tuple + Amount of axis-wise extra space around the inner domain. + absorbing : tuple + Portion of the extra space that corresponds to absorbing boundaries. + + """ + + def __init__(self, shape=None, spacing=None, extra=None, absorbing=None): + self.dim = len(shape) + self.shape = tuple(shape) + self.spacing = tuple(spacing) + self.extra = tuple(extra) + self.absorbing = tuple(absorbing) + + origin = tuple([0] * self.dim) + pml_origin = tuple([each_origin - each_spacing * each_extra for each_origin, each_spacing, each_extra in + zip(origin, spacing, extra)]) + + extended_shape = tuple(np.array([dim + 2*added for dim, added in zip(shape, extra)])) + size = tuple(np.array(spacing) * (np.array(shape) - 1)) + extended_size = tuple([each_origin + each_spacing * (each_shape + each_extra - 1) + for each_origin, each_spacing, each_shape, each_extra in zip(origin, spacing, shape, extra)]) + + self.origin = origin + self.pml_origin = pml_origin + self.extended_shape = extended_shape + self.limit = size + self.extended_limit = extended_size + + def resample(self): + raise NotImplementedError('Resampling has not been implemented yet') + + @property + def inner(self): + """ + Slices defining the inner domain, as a tuple of slices. + + """ + return tuple([slice(extra, extra + shape) for shape, extra in zip(self.shape, self.extra)]) + + @property + def inner_mask(self): + """ + Tensor of the shape of the space grid with gridpoints wihtin inner domain set to 1 + and those outside set to 0, as an ndarray. 
+ + """ + mask = np.zeros(self.extended_shape, dtype=np.float32) + pml_slices = self.inner + + mask[pml_slices] = 1. + + return mask + + @property + def mesh_indices(self): + """ + Create the mesh of indices in the inner domain, as a tuple + of ndarray. + + """ + grid = [np.arange(0, shape) for shape in self.shape] + return np.meshgrid(*grid) + + @property + def extended_mesh_indices(self): + """ + Create the mesh of indices in the extended domain, as a tuple + of ndarray. + + """ + grid = [np.arange(0, extended_shape) for extended_shape in self.extended_shape] + return np.meshgrid(*grid) + + @property + def mesh(self): + """ + Create the mesh of spatial locations in the inner domain, as a tuple + of ndarray. + + """ + grid = self.grid + return np.meshgrid(*grid) + + @property + def extended_mesh(self): + """ + Create the mesh of spatial locations the full, extended domain, as a tuple + of ndarray. + + """ + grid = self.extended_grid + return np.meshgrid(*grid) + + @property + def indices(self): + """ + Indices corresponding to the grid of the inner domain, as a tuple of 1d-arrays. + + """ + axes = [np.arange(0, shape) for shape in self.shape] + return tuple(axes) + + @property + def extended_indices(self): + """ + Indices corresponding to the grid of the extended domain, as a tuple of 1d-arrays. + + """ + axes = [np.arange(0, extended_shape) for extended_shape in self.extended_shape] + return tuple(axes) + + @property + def grid(self): + """ + Spatial points corresponding to the grid of the inner domain, as a tuple of 1d-arrays. + + """ + axes = [np.linspace(self.origin[dim], self.limit[dim], self.shape[dim], + endpoint=True, dtype=np.float32) + for dim in range(self.dim)] + return tuple(axes) + + @property + def extended_grid(self): + """ + Spatial points corresponding to the grid of the extended domain, as a tuple of 1d-arrays. 
+ + + """ + axes = [np.linspace(self.pml_origin[dim], self.extended_limit[dim], self.extended_shape[dim], + endpoint=True, dtype=np.float32) + for dim in range(self.dim)] + return tuple(axes) + + +class Time: + """ + This defines the temporal grid over which the problem is defined + + A time grid is fully defined by three of its arguments: start, stop, step or num. + + The time grid can be extended with a certain amount of padding, generating an + inner domain and an extended domain, similar to that seen in the Space. + + Parameters + ---------- + start : float, optional + Point at which time starts, in seconds. + step : float, optional + Step between time points, in seconds. + num : int, optional + Number of time points in the grid. + stop : float, optional + Point at which time ends, in seconds. + + """ + + def __init__(self, start=None, step=None, num=None, stop=None): + try: + if start is None: + start = stop - step*(num - 1) + elif step is None: + step = (stop - start)/(num - 1) + elif num is None: + num = int(np.ceil((stop - start)/step + 1)) + stop = step*(num - 1) + start + elif stop is None: + stop = start + step*(num - 1) + + except: + raise ValueError('Three of args start, step, num and stop may be set') + + if not isinstance(num, int): + raise TypeError('"input" argument must be of type int') + + self.start = start + self.stop = stop + self.step = step + self.num = num + + self.extra = 0 + self.extended_start = start + self.extended_stop = stop + self.extended_num = num + + def extend(self, freq): + if not isinstance(freq, float): + self.extended_start = self.start + self.extended_stop = self.stop + self.extended_num = self.num + self.extra = 0 + + return + + extra = int((1/self.step)/freq * 0.75) + + self.extra = extra + self.extended_start = self.start - (self.extra - 1)*self.step + self.extended_stop = self.stop + (self.extra - 1)*self.step + self.extended_num = self.num + 2*self.extra + + def resample(self): + raise NotImplementedError('Resampling 
has not been implemented yet') + + @property + def inner(self): + """ + Slice defining the inner domain. + + """ + return slice(self.extra, self.extra + self.num) + + @property + def grid(self): + """ + Time points corresponding to the grid of the inner domain, as a 1d-array. + + """ + return np.linspace(self.start, self.stop, self.num, endpoint=True, dtype=np.float32) + + @property + def extended_grid(self): + """ + Time points corresponding to the grid of the extended domain, as a 1d-array. + + """ + return np.linspace(self.extended_start, self.extended_stop, self.extended_num, endpoint=True, dtype=np.float32) + + +class SlowTime: + pass + + +class Grid: + """ + The grid is a container for the spatial and temporal grids. + + Parameters + ---------- + space : Space + time : Time + slow_time : SlowTime + """ + + def __init__(self, space, time, slow_time): + self.space = space + self.time = time + self.slow_time = slow_time diff --git a/stride/problem_definition/geometry.py b/stride/problem_definition/geometry.py new file mode 100644 index 00000000..fb7bbf08 --- /dev/null +++ b/stride/problem_definition/geometry.py @@ -0,0 +1,419 @@ + +import numpy as np +from collections import OrderedDict + +from .base import GriddedSaved, ProblemBase +from .. import plotting +from ..utils import geometries + + +__all__ = ['Geometry'] + + +class TransducerLocation(GriddedSaved): + """ + This determines the spatial location of a specific transducer device within the + geometry. + + The location is determined by a numerical ID (>= 0), a transducer and the + coordinates of the location within the space grid. In some cases, the orientation + of the transducer might also be needed. + + Parameters + ---------- + id : int + Numerical ID of the location (>=0). + name : str + Optional name for the transducer location. + transducer : Transducer + Transducer device to which this location refers. + coordinates : ndarray + Coordinates of the transducer in the space grid. 
+ orientation : ndarray, optional + Orientation of the transducer with respect to its location. + + """ + + def __init__(self, id, transducer=None, coordinates=None, orientation=None, + name=None, *args, **kwargs): + name = name or 'transducer_instance_%05d' % id + super().__init__(name, *args, **kwargs) + + if id < 0: + raise ValueError('The transducer location needs a positive ID') + + self.id = id + self.transducer = transducer + + if coordinates is not None: + coordinates = transducer.coordinates + coordinates + self.coordinates = coordinates + + self.orientation = orientation + + def check_bounds(self): + pass + + def sub_problem(self, shot, sub_problem): + """ + Create a subset object for a certain shot. + + A SubProblem contains everything that is needed to fully determine how to run a particular shot. + This method takes care of creating a TransducerLocation object that links to that + new SubProblem. + + Parameters + ---------- + shot : Shot + Shot for which the SubProblem is being generated. + sub_problem : SubProblem + Container for the sub-problem being generated. + + Returns + ------- + TransducerLocation + Newly created TransducerLocation instance. 
+ + """ + sub_location = TransducerLocation(self.id, + name=self.name, grid=self.grid) + + transducer = sub_problem.transducers.get(self.transducer.id) + sub_location.transducer = transducer + + sub_location.coordinates = self.coordinates + sub_location.orientation = self.orientation + + return sub_location + + def __get_desc__(self): + description = { + 'id': self.id, + 'transducer_id': self.transducer.id, + 'coordinates': self.coordinates, + } + + if self.orientation is not None: + description['orientation'] = self.orientation + + return description + + def __set_desc__(self, description, transducers=None): + self.id = description.id + self.transducer = transducers.get(description.transducer_id) + self.coordinates = description.coordinates.load() + + if 'orientation' in description: + self.orientation = description.orientation.load() + + +class Geometry(ProblemBase): + """ + The Geometry represents a series of transducer locations that exist within the confines of the grid. + + Transducer locations are identified through a numerical ID, which is >= 0. + + Transducer locations can be added at a certain location through ``Geometry.add(id, transducer, coordinates, [orientation])`` + and can be accessed through ``Geometry.get(location_id)``. + + The Geometry also provides utilities for loading and dumping these transducers and for plotting them. + + Parameters + ---------- + name : str + Alternative name to give to the medium. + problem : Problem + Problem to which the Geometry belongs. + transducers : Transducers + Transducers object to which the Geometry refers. 
+ grid : Grid or any of Space or Time + Grid on which the Geometry is defined + + """ + + def __init__(self, name='geometry', problem=None, **kwargs): + super().__init__(name, problem, **kwargs) + + if problem is not None: + transducers = problem.transducers + else: + transducers = kwargs.pop('transducers', None) + + if transducers is None: + raise ValueError('A Geometry has be defined with respect to a set of Transducers') + + self._locations = OrderedDict() + self._transducers = transducers + + def add(self, id, transducer, coordinates, orientation=None): + """ + Add a new transducer location to the Geometry. + + Parameters + ---------- + id : int + ID of the instantiation of the transducer in the geometry. + transducer : Transducer + Transducer to be added to the Geometry. + coordinates : array + Coordinates of the transducer in the grid. + orientation : array, optional + Orientation vector of the transducer. + + Returns + ------- + + """ + if id in self._locations.keys(): + raise ValueError('Transducer location with ID "%d" already exists in the Geometry' % id) + + instance = TransducerLocation(id, transducer, coordinates, orientation, + grid=self.grid) + self._locations[id] = instance + + def add_location(self, item): + """ + Add an existing location to the Geometry. + + Parameters + ---------- + item : TransducerLocation + Transducer location instance to be added to the Geometry. + + Returns + ------- + + """ + if item.id in self._locations.keys(): + raise ValueError('Transducer location with ID "%d" already exists in the Geometry' % item.id) + + self._locations[item.id] = item + + def get(self, id): + """ + Get a transducer location from the Geometry with a known id. + + Parameters + ---------- + id : int + Identifier of the transducer. + + Returns + ------- + TransducerLocation + Found TransducerLocation. 
+ + """ + if isinstance(id, (np.int32, np.int64)): + id = int(id) + + if not isinstance(id, int) or id < 0: + raise ValueError('Transducer IDs have to be positive integer numbers') + + return self._locations[id] + + def get_slice(self, start=None, end=None, step=None): + """ + Get a slice of the indices of the locations using ``slice(start, stop, step)``. + + Parameters + ---------- + start : int, optional + Start of the slice, defaults to the first id. + end : int, optional + End of the slice, defaults to the last id. + step : int, optional + Steps in between transducers, defaults to 1. + + Returns + ------- + list + Found transducer locations in the slice. + + """ + section = OrderedDict() + if start is None: + _range = range(end) + elif step is None: + _range = range(start, end) + else: + _range = range(start, end, step) + + for index in _range: + section[list(self._locations.keys())[index]] = list(self._locations.values())[index] + + return section + + def default(self, geometry_type, *args, **kwargs): + """ + Fill the container with the default configuration. + + In this case, that means using one of the default geometry functions in ``stride.utils.geometries`` + and using the same transducer for all of them. + + Parameters + ---------- + geometry_type : str + Type of geometry to use. 
+ + Returns + ------- + + """ + + if geometry_type == 'elliptical': + default_radius = ((self.space.limit[0] - 15.e-3) / 2, + (self.space.limit[1] - 13.e-3) / 2) + default_centre = (self.space.limit[0] / 2, + self.space.limit[1] / 2) + + kwargs['radius'] = kwargs.get('radius', default_radius) + kwargs['centre'] = kwargs.get('centre', default_centre) + + elif geometry_type == 'ellipsoidal': + default_radius = ((self.space.limit[0] - 15.e-3) / 2, + (self.space.limit[1] - 15.e-3) / 2, + (self.space.limit[2] - 15.e-3) / 2) + default_centre = (self.space.limit[0] / 2, + self.space.limit[1] / 2, + self.space.limit[2] / 2) + + if len(args) < 2: + kwargs['radius'] = kwargs.get('radius', default_radius) + if len(args) < 3: + kwargs['centre'] = kwargs.get('centre', default_centre) + kwargs['threshold'] = kwargs.get('threshold', 0.3) + + geometry_fun = getattr(geometries, geometry_type) + coordinates = geometry_fun(*args, **kwargs) + + for index in range(coordinates.shape[0]): + self.add(index, self._transducers.get(0), coordinates[index, :]) + + @property + def transducers(self): + return self._transducers + + @property + def num_locations(self): + """ + Get number of locations in the Geometry. + + """ + return len(self._locations.keys()) + + @property + def locations(self): + """ + Get all locations in the Geometry as a list. + + """ + return list(self._locations.values()) + + @property + def location_ids(self): + """ + Get all location IDs in the Geometry as a list. + + """ + return list(self._locations.keys()) + + @property + def coordinates(self): + """ + Get the coordinates of all locations packed in an array format. + + Coordinates are defined as a 2 or 3-dimensional array with shape (n_transducers, n_dimensions). 
+ + """ + coordinates = np.zeros((self.num_locations, self.space.dim), dtype=np.float32) + index = 0 + for location in self._locations.values(): + coordinates[index, :] = location.coordinates + index += 1 + + return coordinates + + def plot(self, **kwargs): + """ + Plot the locations of the transducers as scattered points. + + Parameters + ---------- + kwargs + Arguments for plotting. + + Returns + ------- + axes + Axes on which the plotting is done. + + """ + title = kwargs.pop('title', self.name) + plot = kwargs.pop('plot', True) + + coordinates = self.coordinates + if self.space.dim > 2: + coordinates = coordinates / np.array(self.space.spacing) + + axis = plotting.plot_points(coordinates, title=title, **kwargs) + + if plot is True: + plotting.show(axis) + + return axis + + def sub_problem(self, shot, sub_problem): + """ + Create a subset object for a certain shot. + + A SubProblem contains everything that is needed to fully determine how to run a particular shot. + This method takes care of selecting the portions of the Geometry that are needed + for a given shot. + + Parameters + ---------- + shot : Shot + Shot for which the SubProblem is being generated. + sub_problem : SubProblem + Container for the sub-problem being generated. + + Returns + ------- + Geometry + Newly created Geometry instance. 
+ + """ + sub_geometry = Geometry(name=self.name, + transducers=sub_problem.transducers, + problem=sub_problem, grid=self.grid) + + source_ids = shot.source_ids + receiver_ids = shot.receiver_ids + + location_ids = list(set(source_ids) | set(receiver_ids)) + for location_id in location_ids: + location = self.get(location_id) + location = location.sub_problem(shot, sub_problem) + + sub_geometry.add_location(location) + + return sub_geometry + + def __get_desc__(self): + description = { + 'num_locations': self.num_locations, + 'locations': [], + } + + for location_id, location in self._locations.items(): + description['locations'].append(location.__get_desc__()) + + return description + + def __set_desc__(self, description): + for location_desc in description.locations: + instance = TransducerLocation(location_desc.id) + instance.__set_desc__(location_desc, self._transducers) + + self.add_location(instance) diff --git a/stride/problem_definition/medium.py b/stride/problem_definition/medium.py new file mode 100644 index 00000000..c0082667 --- /dev/null +++ b/stride/problem_definition/medium.py @@ -0,0 +1,196 @@ + +import numpy as np +from collections import OrderedDict + +from .base import ProblemBase + + +__all__ = ['Medium'] + + +class Medium(ProblemBase): + """ + A Medium contains the fields that define a physical medium, + such as density or longitudinal speed of sound. + + A Medium defines these properties by keeping track of a series of named fields, that can be added to + it through ``Medium.add``. + + A field with a name ``field_name`` can be accessed directly through ``medium.field_name`` or + ``medium['field_name']``. + + The Medium also provides utilities for loading and dumping these fields and for plotting them. + + Parameters + ---------- + name : str + Alternative name to give to the medium. + problem : Problem + Problem to which the Medium belongs. 
+ grid : Grid or any of Space or Time + Grid on which the Medium is defined + + """ + + def __init__(self, name='medium', problem=None, **kwargs): + super().__init__(name, problem, **kwargs) + + self._fields = OrderedDict() + + def _get(self, item): + if item in super().__getattribute__('_fields').keys(): + return self._fields[item] + + return super().__getattribute__(item) + + def __getattr__(self, item): + return self._get(item) + + def __getitem__(self, item): + return self._get(item) + + @property + def fields(self): + """ + Access fields dictionary. + + """ + return self._fields + + def items(self): + """ + Access all fields as (name, field) pairs. + + Returns + ------- + Fields + Iterable of (name, field) pairs. + + """ + return self._fields.items() + + def add(self, field): + """ + Add a named field to the Medium. + + Parameters + ---------- + field : Field object + Field to add to the Medium. + + Returns + ------- + + """ + self._fields[field.name] = field + + def damping(self, damping_coefficient=None, mask=False, damping_type='sine'): + """ + Create a damping field based on the dimensions of the grid. + + Parameters + ---------- + damping_coefficient : float + Value of the maximum damping of the field. + mask : bool, optional + Create the damping layer as a mask (interior filled with ones) or not (interior filled with zeros). + damping_type : str, optional + Expression to be used for the shape of the damping function, defaults to ``sine``. + + Returns + ------- + ndarray + Tensor containing the damping field. 
+ + """ + # Create a damping field that corresponds to the given field, only scalar for now + if mask: + damp = np.ones(self.space.extended_shape, dtype=np.float32) + else: + damp = np.zeros(self.space.extended_shape, dtype=np.float32) + + spacing = self.space.spacing + absorbing = self.space.absorbing + + if damping_coefficient is None: + if damping_type == 'sine': + damping_coefficient = 10 * 3/2 * np.log(1.0 / 0.001) + + elif damping_type == 'quadratic': + damping_coefficient = 10 * 3/2 * np.log(1.0 / 0.001) + + for dimension in range(self.space.dim): + + dimension_coefficient = damping_coefficient / absorbing[dimension] \ + if absorbing[dimension] > 15 else 0.67 + + for index in range(absorbing[dimension]): + # Damping coefficient + pos = np.abs((absorbing[dimension] - index + 1) / float(absorbing[dimension])) + if damping_type == 'sine': + val = dimension_coefficient * (pos - np.sin(2 * np.pi * pos) / (2 * np.pi)) + + elif damping_type == 'quadratic': + val = dimension_coefficient * pos**2 + + else: + raise ValueError('Allowed dumping type are (`sine`, `quadratic`)') + + # : slices + all_ind = [slice(0, d) for d in damp.shape] + # Left slice for dampening for dimension + all_ind[dimension] = slice(index, index + 1) + damp[tuple(all_ind)] += val * self.time.step / spacing[dimension] + # right slice for dampening for dimension + all_ind[dimension] = slice(damp.shape[dimension] - index, damp.shape[dimension] - index + 1) + damp[tuple(all_ind)] += val * self.time.step / spacing[dimension] + + return damp + + def load(self, *args, **kwargs): + """ + Load all fields in the Medium. + + See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method. + + Returns + ------- + + """ + for field_name, field in self._fields.items(): + field.load(*args, **kwargs) + + def dump(self, *args, **kwargs): + """ + Dump all fields in the Medium. 
+ + See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method. + + Returns + ------- + + """ + for field_name, field in self._fields.items(): + field.dump(*args, **kwargs) + + def plot(self, **kwargs): + """ + Plot all fields in the Medium. + + Parameters + ---------- + kwargs + Arguments for plotting the fields. + + Returns + ------- + axes + Axes on which the plotting is done. + + """ + axes = [] + for field in self._fields.values(): + axis = field.plot(**kwargs) + axes.append(axis) + + return axes diff --git a/stride/problem_definition/problem.py b/stride/problem_definition/problem.py new file mode 100644 index 00000000..aeb15305 --- /dev/null +++ b/stride/problem_definition/problem.py @@ -0,0 +1,458 @@ + +import os +import numpy as np + +import mosaic +from mosaic.utils import camel_case + +from .base import Gridded +from . import Medium, Transducers, Geometry, Acquisitions +from .. import Runner +from .. import problem_types +from .. import plotting + + +__all__ = ['Problem', 'SubProblem'] + + +class Problem(Gridded): + """ + The Problem is the object that fully defines the setting in which Stride works. + + The problem defines a medium with a set of fields (such as Vp or density), some + transducers (such as a series of scalar point transducers), a geometry where those + transducers are located in space, and the acquisitions that happen given that geometry. + + The problem also defines a problem type, which determines the physics of interest, such + as the second-order isotropic acoustic wave equation. And a numerical implementation + of those physics, such as through the finite-difference library Devito. + + Parameters + ---------- + name : str + Name of the problem. + grid : Grid or any of Space or Time + Grid on which the Problem is defined + input_folder : str, optional + Default folder from which files should be read, defaults to current working directory. 
+ output_folder : str, optional + Default folder to which files should be written, defaults to current working directory. + medium : Medium, optional + Predefined Medium of the problem. + transducers : Transducers, optional + Predefined Transducers of the problem. + geometry : Geometry, optional + Predefined Geometry of the problem. + acquisitions : Acquisitions, optional + Predefined Acquisitions of the problem. + problem_type : str or object, optional + Problem type that will be executed on this Problem, defaults to ``acoustic``. + problem_implementation : str or object, optional + Implementation of the problem type that will be executed on this Problem, defaults to ``devito``. + + """ + + def __init__(self, name, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.name = name + self.input_folder = kwargs.pop('input_folder', os.getcwd()) + self.output_folder = kwargs.pop('output_folder', os.getcwd()) + + self.problem_config = {} + + medium = kwargs.pop('medium', None) + if medium is None: + medium = Medium(problem=self) + + self.medium = medium + + transducers = kwargs.pop('transducers', None) + if transducers is None: + transducers = Transducers(problem=self) + + self.transducers = transducers + + geometry = kwargs.pop('geometry', None) + if geometry is None: + geometry = Geometry(transducers=transducers, problem=self) + + self.geometry = geometry + + acquisitions = kwargs.pop('acquisitions', None) + if acquisitions is None: + acquisitions = Acquisitions(geometry=geometry, problem=self) + + self.acquisitions = acquisitions + + problem_type = kwargs.pop('problem_type', 'acoustic') + problem_implementation = kwargs.pop('problem_implementation', 'devito') + + if isinstance(problem_type, str): + problem_module = getattr(problem_types, problem_type) + problem_module = getattr(problem_module, problem_implementation) + self.problem_type = getattr(problem_module, camel_case(problem_type + '_' + problem_implementation))() + + else: + self.problem_type = 
problem_type + + def load(self, *args, **kwargs): + """ + Load all elements in the Problem. + + See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method. + + Returns + ------- + + """ + self.medium.load(*args, **kwargs) + self.transducers.load(*args, **kwargs) + self.geometry.load(*args, **kwargs) + self.acquisitions.load(*args, **kwargs) + + def dump(self, *args, **kwargs): + """ + Dump all elements in the Problem. + + See :class:`~mosaic.file_manipulation.h5.HDF5` for more information on the parameters of this method. + + Returns + ------- + + """ + self.medium.dump(*args, **kwargs) + self.transducers.dump(*args, **kwargs) + self.geometry.dump(*args, **kwargs) + self.acquisitions.dump(*args, **kwargs) + + def plot(self, **kwargs): + """ + Plot all elements in the Problem. + + Parameters + ---------- + kwargs + Arguments for plotting the fields. + + Returns + ------- + + """ + kwargs['plot'] = False + + # Medium + medium_axes = self.medium.plot(**kwargs) + + # Geometry + geometry_axes = [] + for axis in medium_axes: + geometry_axes.append(self.geometry.plot(axis=axis, title=None, **kwargs)) + + plotting.show(geometry_axes) + + # Acquisitions + acquisitions_axes = self.acquisitions.plot() + plotting.show(acquisitions_axes) + + def sub_problem(self, shot_id): + """ + Create a subset object for a certain shot. + + A SubProblem contains everything that is needed to fully determine how to run a particular shot. + This method takes care of selecting creating a SubProblem instance and populating it + appropriately. + + Parameters + ---------- + shot_id : int + ID of the shot for which this sub-problem will be generated. + + Returns + ------- + SubProblem + Newly created SubProblem instance. 
+ + """ + + if isinstance(shot_id, (np.int32, np.int64)): + shot_id = int(shot_id) + + sub_problem = SubProblem(self.name, + input_folder=self.input_folder, + output_folder=self.output_folder, + problem_type=self.problem_type, + grid=self.grid) + + shot = self.acquisitions.get(shot_id) + + # Set up medium + sub_problem.medium = self.medium.sub_problem(shot, sub_problem) + + # Set up transducers + sub_problem.transducers = self.transducers.sub_problem(shot, sub_problem) + + # Set up geometry + sub_problem.geometry = self.geometry.sub_problem(shot, sub_problem) + + # Set up acquisitions + shot = shot.sub_problem(shot, sub_problem) + sub_problem.shot = shot + sub_problem.shot_id = shot.id + sub_problem.acquisitions = self.acquisitions.sub_problem(shot, sub_problem) + + return sub_problem + + async def forward(self, shot_ids=None, dump=True, deallocate=True, **kwargs): + """ + Run the problem forward with default parameters. + + This will generate a series of Runners, one per available worker, and + distribute all the available shots in the Acquisitions across those + Runners. + + Parameters + ---------- + shot_ids : list, optional + List of shot IDs to run forward, defaults to all of them. + dump : bool, optional + Whether or not the generated data should be dumped to disk, defaults to True. + deallocate : bool, optional + Whether or not to deallocate the generated data after each Shot is completed, + defaults to True. 
+ + Returns + ------- + + """ + runtime = mosaic.runtime() + + # Create an array of runners + runners = await Runner.remote(len=runtime.num_workers) + + # Prepare sub-problems + if dump is True: + try: + self.acquisitions.load(path=self.output_folder, + project_name=self.name, version=0) + except OSError: + pass + + if shot_ids is None: + shot_ids = self.acquisitions.remaining_shot_ids + if not len(shot_ids): + runtime.logger.warning('No need to run forward, observed already exists') + return + + if not isinstance(shot_ids, list): + shot_ids = [shot_ids] + + # Run sub-problems + await runners.map(self.run_forward, shot_ids, dump=dump, deallocate=deallocate, **kwargs) + + async def run_forward(self, shot_id, runner, dump=True, deallocate=False, **kwargs): + """ + Run a single shot forward in a given Runner using default parameters. + + This means that no wavefield will be generated and the resulting traces will be stored in the Shot. + + Parameters + ---------- + shot_id : int + ID of the shot to be run. + runner : Runner + Runner on which the shot will be run. + dump : bool, optional + Whether or not the generated data should be dumped to disk, defaults to True. + deallocate : bool, optional + Whether or not to deallocate the generated data after each Shot is completed, + defaults to False. 
+ + Returns + ------- + + """ + runtime = mosaic.runtime() + + sub_problem = self.sub_problem(shot_id) + runtime.logger.info('\n') + runtime.logger.info('Giving shot %d to %s' % (shot_id, runner.runtime_id)) + + await (await runner.set_problem(sub_problem, **kwargs)) + + task = await runner.run_state(save_wavefield=False, **kwargs) + traces, _ = await task.result() + + runtime.logger.info('Shot %d retrieved' % sub_problem.shot_id) + + shot = self.acquisitions.get(shot_id) + shot.observed.data[:] = traces.data + + if dump is True: + shot.append_observed(path=self.output_folder, + project_name=self.name) + + runtime.logger.info('Appended traces for shot %d to observed file' % sub_problem.shot_id) + + if deallocate is True: + shot.observed.deallocate() + + async def inverse(self, runners, variables, + block=None, iteration=None, **kwargs): + """ + Run the inverse problem with default parameters. + + This will generate a series of given Runners, one per available worker, + select some shots according to the Block configuration and then distribute + those across the Runners. + + As the Runners return the functional value and gradient, those are + accumulated in per-variable buffers before returning them. + + Parameters + ---------- + runners : ArrayProxy + Runners on which to distribute the shots. + variables : VariableList + Variables on which the inverse problem is running. + block : Block + Block instance that determines the configuration of the inverse problem. + iteration : Iteration + Iteration instance. + kwargs + Extra arguments for ``run_inverse``. + + Returns + ------- + Iteration + Iteration updated with the functional values returned for all shots. + VariableList + Variables of the inverse problem with the accumulated gradient of all shots. + + """ + variables.grad.fill(0.) + variables.prec.fill(0.) 
+ + shot_ids = self.acquisitions.select_shot_ids(**block.select_shots) + + # Run sub-problems + async for fun, vars in runners.map_as_completed(self.run_inverse, shot_ids, variables, + iteration=iteration, **kwargs): + iteration.add_fun(fun) + variables.grad += vars.grad + variables.prec += vars.prec + + return iteration, variables + + async def run_inverse(self, shot_id, runner, variables, + needs_grad=True, **kwargs): + """ + Run the inverse problem for a single shot with default parameters. + + This will generate a series of given Runners, one per available worker, + select some shots according to the Block configuration and then distribute + those across the Runners. + + As the Runners return the functional value and gradient, those are + accumulated in per-variable buffers before returning them. + + Parameters + ---------- + shot_id : int + ID of the shot to be run. + runner : Runner + Runner on which the shot will be run. + variables : VariableList + Variables on which the inverse problem is running. + needs_grad : bool + Whether or not the gradient is needed or only the functional value. + kwargs + Extra arguments for ``run_inverse``. + + Returns + ------- + FunctionalValue + Value of the functional and other information about the inverse execution. + VariableList + Variables of the inverse problem with the gradient of this shots. 
+ + """ + runtime = mosaic.runtime() + + sub_problem = self.sub_problem(shot_id) + runtime.logger.info('\n') + runtime.logger.info('Giving shot %d to %s' % (shot_id, runner.runtime_id)) + + variables.update_problem(sub_problem) + + await runner.set_problem(sub_problem, **kwargs) + + if needs_grad is True: + task = await runner.run_gradient(variables, **kwargs) + fun, vars = await task.result() + + else: + task_fwd = await runner.run_state(save_wavefield=False, **kwargs) + task_fun = await runner.run_functional(task_fwd.outputs[0], **kwargs) + fun = await task_fun.outputs[0].result() + vars = variables + + runtime.logger.info('Gradient and functional for shot %d retrieved' % sub_problem.shot_id) + + return fun, vars + + +class SubProblem(Gridded): + """ + The SubProblem is the object that fully defines how a specific Shot is to be run. The SubProblem + resembles the Problem from which ir originates, but takes from it only those parts that + are relevant for this particular Shot. + + The SubProblem defines a medium with a set of fields (such as Vp or density), some + transducers (such as a series of scalar point transducers), a geometry where those + transducers are located in space, and the acquisitions that happen given that geometry. + + The SubProblem also defines a problem type, which determines the physics of interest, such + as the second-order isotropic acoustic wave equation. And a numerical implementation + of those physics, such as through the finite-difference library Devito. + + Parameters + ---------- + name : str + Name of the problem. + grid : Grid or any of Space or Time + Grid on which the Problem is defined + input_folder : str, optional + Default folder from which files should be read, defaults to current working directory. + output_folder : str, optional + Default folder to which files should be written, defaults to current working directory. + medium : Medium, optional + Predefined Medium of the problem. 
+ transducers : Transducers, optional + Predefined Transducers of the problem. + geometry : Geometry, optional + Predefined Geometry of the problem. + acquisitions : Acquisitions, optional + Predefined Acquisitions of the problem. + problem_type : object, optional + Problem type that will be executed on this SubProblem, defaults to ``acoustic``. + + """ + + def __init__(self, name, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.shot = None + self.shot_id = -1 + + self.name = name + self.input_folder = kwargs.pop('input_folder', os.getcwd()) + self.output_folder = kwargs.pop('output_folder', os.getcwd()) + + self.problem_config = {} + self.medium = kwargs.pop('medium', None) + self.transducers = kwargs.pop('transducers', None) + self.geometry = kwargs.pop('geometry', None) + self.acquisitions = kwargs.pop('acquisitions', None) + + self.problem_type = kwargs.pop('problem_type', None) diff --git a/stride/problem_definition/transducer_types/__init__.py b/stride/problem_definition/transducer_types/__init__.py new file mode 100644 index 00000000..a09f9151 --- /dev/null +++ b/stride/problem_definition/transducer_types/__init__.py @@ -0,0 +1,3 @@ + +from .transducer import * +from .point_transducer import * diff --git a/stride/problem_definition/transducer_types/point_transducer.py b/stride/problem_definition/transducer_types/point_transducer.py new file mode 100644 index 00000000..ef822916 --- /dev/null +++ b/stride/problem_definition/transducer_types/point_transducer.py @@ -0,0 +1,37 @@ + + +import numpy as np + +from .transducer import Transducer + + +__all__ = ['PointTransducer'] + + +class PointTransducer(Transducer): + """ + This class describes a point transducers, in which a single point represents the + effect of the device. + + """ + + type = 'point_transducer' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def coordinates(self): + """ + Coordinates of points in the transducer, relative to its centre. 
+ + Returns + ------- + ndarray + Coordinate array. + + """ + if self._coordinates is None: + self._coordinates = np.zeros((self.space.dim,)) + + return self._coordinates diff --git a/stride/problem_definition/transducer_types/transducer.py b/stride/problem_definition/transducer_types/transducer.py new file mode 100644 index 00000000..7c03ce4a --- /dev/null +++ b/stride/problem_definition/transducer_types/transducer.py @@ -0,0 +1,86 @@ + +from abc import ABC + +from ..base import GriddedSaved + + +__all__ = ['Transducer'] + + +class Transducer(ABC, GriddedSaved): + """ + The transducer holds information about its location in space, its type, as well as + other things such as IRs. + + Parameters + ---------- + id : int + Identifier assigned to this transducer. + name : str + Optional name for the shot. + grid : Grid or any of Space or Time + Grid on which the Transducer is defined + + """ + + type = '' + """Type of transducer, e.g. point_transducer""" + + def __init__(self, id, name=None, *args, **kwargs): + name = name or 'transducer_%05d' % id + super().__init__(name, *args, **kwargs) + + if id < 0: + raise ValueError('The transducer needs a positive ID') + + self.id = id + self.transmit_ir = None + self.receive_ir = None + + self._coordinates = None + + @property + def coordinates(self): + """ + Coordinates of points in the transducer, relative to its centre. + + Returns + ------- + ndarray + Coordinate array. + + """ + return self._coordinates + + def sub_problem(self, shot, sub_problem): + """ + Create a subset object for a certain shot. + + A SubProblem contains everything that is needed to fully determine how to run a particular shot. + This method has no effect for this particular case. + + Parameters + ---------- + shot : Shot + Shot for which the SubProblem is being generated. + sub_problem : SubProblem + Container for the sub-problem being generated. + + Returns + ------- + Transducer + Transducer instance. 
+ + """ + return self + + def __get_desc__(self): + description = { + 'id': self.id, + 'type': self.type, + } + + return description + + def __set_desc__(self, description): + self.id = description.id diff --git a/stride/problem_definition/transducers.py b/stride/problem_definition/transducers.py new file mode 100644 index 00000000..c5746ed3 --- /dev/null +++ b/stride/problem_definition/transducers.py @@ -0,0 +1,216 @@ + +from collections import OrderedDict + +from mosaic.utils import camel_case + +from .base import ProblemBase +from ..problem_definition import transducer_types + + +__all__ = ['Transducers'] + + +class Transducers(ProblemBase): + """ + The Transducers stores a reference to all the transducer devices that are + used in a problem. + + Transducers are identified through a numerical ID, which is >= 0. + + A transducer can be added through ``Transducers.add`` and accessed through + ``Transducers.get``. + + Parameters + ---------- + name : str + Alternative name to give to the Transducers. + problem : Problem + Problem to which the Transducers belongs. + grid : Grid or any of Space or Time + Grid on which the Transducers are defined + + """ + + def __init__(self, name='transducers', problem=None, **kwargs): + super().__init__(name, problem, **kwargs) + + self._transducers = OrderedDict() + + def add(self, item): + """ + Add a new transducer to the Transducers. + + Parameters + ---------- + item : Transducer + Transducer to be added to the Transducers. + + Returns + ------- + + """ + if item.id in self._transducers.keys(): + raise ValueError('Transducer with ID "%d" already exists in the Transducers' % item.id) + + self._transducers[item.id] = item + + def get(self, id): + """ + Get a transducer from the Transducers with a known id. + + Parameters + ---------- + id : int + Identifier of the transducer. + + Returns + ------- + Transducer + Found Transducer. 
+ + """ + if not isinstance(id, int) or id < 0: + raise ValueError('Transducer IDs have to be positive integer numbers') + + return self._transducers[id] + + def get_slice(self, start=None, end=None, step=None): + """ + Get a slice of the indices of the transducer using ``slice(start, stop, step)``. + + Parameters + ---------- + start : int, optional + Start of the slice, defaults to the first id. + end : int, optional + End of the slice, defaults to the last id. + step : int, optional + Steps in between transducers, defaults to 1. + + Returns + ------- + list + Found transducers in the slice. + + """ + section = OrderedDict() + if start is None: + _range = range(end) + elif step is None: + _range = range(start, end) + else: + _range = range(start, end, step) + + for index in _range: + section[list(self._transducers.keys())[index]] = list(self._transducers.values())[index] + + return section + + def items(self): + """ + Access all transducers as iterable of (ID, transducer) pairs. + + """ + return self._transducers.items() + + @property + def num_transducers(self): + """ + Get number of transducers in the Transducers. + + """ + return len(self._transducers.keys()) + + @property + def transducers(self): + """ + Get all transducers as a list. + + """ + return list(self._transducers.values()) + + @property + def transducer_ids(self): + """ + Get all transducer IDs in the Transducers. + + Returns + ------- + list + IDs of the transducers. + + """ + return list(self._transducers.keys()) + + def default(self): + """ + Fill the container with the default configuration. + + In this case, a single PointTransducer will be generated. + + Returns + ------- + + """ + transducer = transducer_types.PointTransducer(0, grid=self.grid) + self.add(transducer) + + def sub_problem(self, shot, sub_problem): + """ + Create a subset object for a certain shot. + + A SubProblem contains everything that is needed to fully determine how to run a particular shot. 
+ This method takes care of selecting the portions of the Transducers that are needed + for a given shot. + + Parameters + ---------- + shot : Shot + Shot for which the SubProblem is being generated. + sub_problem : SubProblem + Container for the sub-problem being generated. + + Returns + ------- + Transducers + Newly created Transducers instance. + + """ + sub_transducers = Transducers(name=self.name, + problem=sub_problem, grid=self.grid) + + source_ids = shot.source_ids + receiver_ids = shot.receiver_ids + + location_ids = list(set(source_ids) | set(receiver_ids)) + geometry = self.problem.geometry + for location_id in location_ids: + location = geometry.get(location_id) + + if location.transducer.id in sub_transducers.transducer_ids: + continue + + transducer = location.transducer.sub_problem(shot, sub_problem) + sub_transducers.add(transducer) + + return sub_transducers + + def __get_desc__(self): + description = { + 'num_transducers': self.num_transducers, + 'transducers': [], + } + + for transducer_id, transducer in self._transducers.items(): + description['transducers'].append(transducer.__get_desc__()) + + return description + + def __set_desc__(self, description): + for transducer_desc in description.transducers: + transducer_type = getattr(transducer_types, camel_case(transducer_desc.type)) + transducer = transducer_type(transducer_desc.id, grid=self.grid) + + transducer.__set_desc__(transducer_desc) + + self.add(transducer) diff --git a/stride/problem_types/__init__.py b/stride/problem_types/__init__.py new file mode 100644 index 00000000..8f665075 --- /dev/null +++ b/stride/problem_types/__init__.py @@ -0,0 +1,3 @@ + +from . import acoustic +from . import operators diff --git a/stride/problem_types/acoustic/__init__.py b/stride/problem_types/acoustic/__init__.py new file mode 100644 index 00000000..1b57d735 --- /dev/null +++ b/stride/problem_types/acoustic/__init__.py @@ -0,0 +1,7 @@ + +from . 
import devito + +try: + from stride_private.problem_types.acoustic import devito +except ImportError: + pass diff --git a/stride/problem_types/acoustic/devito.py b/stride/problem_types/acoustic/devito.py new file mode 100644 index 00000000..acdc33cb --- /dev/null +++ b/stride/problem_types/acoustic/devito.py @@ -0,0 +1,618 @@ + +import devito +import numpy as np + +import mosaic + +from stride.utils import fft +from stride.problem_definition import ScalarField +from stride.problem_types.operators.devito import GridDevito, OperatorDevito +from stride.problem_types.problem_type import ProblemTypeBase + + +__all__ = ['AcousticDevito'] + + +class AcousticDevito(ProblemTypeBase): + """ + This class represents the second-order isotropic acoustic wave equation, + implemented using Devito. + + """ + + space_order = 10 + time_order = 2 + + def __init__(self): + super().__init__() + + self.kernel = 'OT4' + self.drp = False + self.undersampling_factor = 4 + + self._grad = None + + self._max_wavelet = 0. + self._src_scale = 0. + + self._grid = None + self._state_operator = None + self._adjoint_operator = None + + def set_problem(self, problem, **kwargs): + """ + Set up the problem or sub-problem that needs to be run. 
+ + Parameters + ---------- + problem : SubProblem or Problem + Problem on which the physics will be executed + + Returns + ------- + + """ + super().set_problem(problem) + + if self._grid is None: + self._grid = GridDevito(self.space_order, self.time_order) + + if self._state_operator is None: + self._state_operator = OperatorDevito(self.space_order, self.time_order, grid=self._grid) + + if self._adjoint_operator is None: + self._adjoint_operator = OperatorDevito(self.space_order, self.time_order, grid=self._grid) + + self._grid.set_problem(problem) + self._state_operator.set_problem(problem) + self._adjoint_operator.set_problem(problem) + + self.drp = kwargs.get('drp', False) + self.check_conditions() + + def check_conditions(self): + """ + Check CFL and dispersion conditions, and select appropriate OT method. + + Returns + ------- + + """ + time = self._problem.time + space = self._problem.space + shot = self._problem.shot + + runtime = mosaic.runtime() + + # Get speed of sound bounds + medium = self._problem.medium + + vp_min = np.min(medium.vp.extended_data) + vp_max = np.max(medium.vp.extended_data) + + # Figure out propagated bandwidth + wavelets = shot.wavelets.data + f_min, f_max = fft.bandwidth(wavelets, time.step, cutoff=-10) + + runtime.logger.info('Estimated bandwidth for the propagated ' + 'wavelet %.3f-%.3f MHz' % (f_min / 1e6, f_max / 1e6)) + + # Check for dispersion + h = max(*space.spacing) + h_max = vp_min / (5 * f_max) + + if h > h_max: + runtime.logger.warn('Spatial grid spacing (%.3f mm) is ' + 'higher than dispersion limit (%.3f mm)' % (h / 1e-3, h_max / 1e-3)) + + # Check for instability + dt = time.step + + dt_max_OT2 = self._dt_max(2.0 / np.pi, h, vp_max) + dt_max_OT4 = self._dt_max(3.6 * np.pi, h, vp_max) + + recompile = False + if dt <= dt_max_OT2: + runtime.logger.info('Time grid spacing (%.3f \u03BCs) is ' + 'below OT2 limit (%.3f \u03BCs)' % (dt / 1e-6, dt_max_OT2 / 1e-6)) + + if self.kernel != 'OT2': + recompile = True + + 
self.kernel = 'OT2' + + elif dt <= dt_max_OT4: + runtime.logger.info('Time grid spacing (%.3f \u03BCs) is ' + 'above OT2 limit (%.3f \u03BCs), ' + 'switching to OT4' % (dt / 1e-6, dt_max_OT2 / 1e-6)) + + if self.kernel != 'OT4': + recompile = True + + self.kernel = 'OT4' + + else: + runtime.logger.warn('Time grid spacing (%.3f \u03BCs) is ' + 'above OT4 limit (%.3f \u03BCs)' % (dt / 1e-6, dt_max_OT4 / 1e-6)) + + if self.kernel != 'OT4': + recompile = True + + self.kernel = 'OT4' + + # Select undersampling level + f_max *= 4 + dt_max = 1 / f_max + + undersampling = int(dt_max / dt) + + if self.undersampling_factor != undersampling: + recompile = True + + self.undersampling_factor = undersampling + + # Maybe recompile + if recompile: + self._state_operator.operator = None + self._adjoint_operator.operator = None + + def before_state(self, save_wavefield=False, **kwargs): + """ + Prepare the problem type to run the state or forward problem. + + Parameters + ---------- + save_wavefield : bool, optional + Whether or not the wavefield needs to be stored, defaults to False. 
+ + Returns + ------- + + """ + time = self._problem.time + space = self._problem.space + shot = self._problem.shot + medium = self._problem.medium + + num_sources = shot.num_sources + num_receivers = shot.num_receivers + + # If there's no previous operator, generate one + if self._state_operator.operator is None: + # Define variables + src = self._grid.sparse_time_function('src', num=num_sources) + rec = self._grid.sparse_time_function('rec', num=num_receivers) + + p = self._grid.time_function('p', coefficients='symbolic' if self.drp else 'standard') + m = self._grid.function('m', coefficients='symbolic' if self.drp else 'standard') + inv_m = self._grid.function('inv_m', coefficients='symbolic' if self.drp else 'standard') + + # Create damping layer + if np.max(space.extra) > 0: + damp = self._grid.function('damp') + damp.data[:] = self._problem.medium.damping()*medium.vp.extended_data + + else: + damp = devito.Constant('damp') + damp.data = 0. + + # Create stencil + stencil = self._iso_stencil(self._grid.grid, p, m, inv_m, damp, + direction='forward') + + # Define the source injection function to generate the corresponding code + src_term = src.inject(field=p.forward, expr=src * time.step**2 / m) + rec_term = rec.interpolate(expr=p) + + op_kwargs = { + 'dt': time.step, + 'p': p, + 'm': m, + 'inv_m': inv_m, + 'damp': damp, + 'src': src, + 'rec': rec, + } + + # Define the saving of the wavefield + if save_wavefield: + p_saved = self._grid.undersampled_time_function('p_saved', + factor=self.undersampling_factor) + + update_saved = [devito.Eq(p_saved, self._saved(p, m))] + kwargs['p_saved'] = p_saved + + else: + update_saved = [] + + # Compile the operator + self._state_operator.set_operator(stencil + src_term + rec_term + update_saved, + name='acoustic_iso_state', + **kwargs.get('devito_config', {})) + self._state_operator.compile() + + # Prepare arguments + self._state_operator.arguments(**{**op_kwargs, **kwargs.get('devito_args', {})}) + + else: + # If the 
source/receiver size has changed, then create new functions for them + # and generate the arguments again + changed_args = False + + if num_sources != self._grid.vars.src.npoint: + changed_args = True + self._grid.sparse_time_function('src', num=num_sources, cached=False) + + if num_receivers != self._grid.vars.rec.npoint: + changed_args = True + self._grid.sparse_time_function('rec', num=num_receivers, cached=False) + + if changed_args: + self._state_operator.arguments(src=self._grid.vars.src, rec=self._grid.vars.rec) + + # Clear all buffers + self._grid.vars.src.data_with_halo.fill(0.) + self._grid.vars.rec.data_with_halo.fill(0.) + self._grid.vars.p.data_with_halo.fill(0.) + + if save_wavefield: + self._grid.vars.p_saved.data_with_halo.fill(0.) + + # Set medium parameters + self._grid.vars.m.data_with_halo[:] = 1 / self._grid.with_halo(medium.vp.extended_data)**2 + self._grid.vars.inv_m.data_with_halo[:] = self._grid.with_halo(medium.vp.extended_data)**2 + + # Set geometry + self._src_scale = 1000. / (np.max(medium.vp.extended_data)**2 * time.step**2) + self._max_wavelet = np.max(np.abs(shot.wavelets.data)) + self._grid.vars.src.data[:] = shot.wavelets.data.T * self._src_scale / self._max_wavelet + + self._grid.vars.src.coordinates.data[:] = shot.source_coordinates + self._grid.vars.rec.coordinates.data[:] = shot.receiver_coordinates + + def state(self, **kwargs): + """ + Run the state or forward problem. + + Returns + ------- + + """ + self._state_operator.run() + + def after_state(self, save_wavefield=False, **kwargs): + """ + Clean up after the state run and retrieve the time traces. + + If requested, also provide a saved wavefield. + + Parameters + ---------- + save_wavefield : bool, optional + Whether or not the wavefield needs to be stored, defaults to False. + + Returns + ------- + Traces + Time traces produced by the state run. + Data or None + Wavefield produced by the state run, if any. 
+ + """ + if save_wavefield: + wavefield_data = np.asarray(self._grid.vars.p_saved.data_with_halo, dtype=np.float32) + wavefield_data *= self._max_wavelet / self._src_scale + + wavefield = ScalarField('p_dt2', + data=wavefield_data, + shape=wavefield_data.shape) + + self._grid.deallocate('p_saved') + + else: + wavefield = None + + traces_data = np.asarray(self._grid.vars.rec.data, dtype=np.float32).T + traces_data *= self._max_wavelet / self._src_scale + traces = self._problem.shot.observed.alike('modelled', data=traces_data) + + self._grid.deallocate('p') + self._grid.deallocate('m') + self._grid.deallocate('inv_m') + + return traces, wavefield + + def before_adjoint(self, wrt, adjoint_source, wavefield, **kwargs): + """ + Prepare the problem type to run the adjoint problem. + + Parameters + ---------- + wrt : VariableList + List of variables for which the inverse problem is being solved. + adjoint_source : Traces + Adjoint source to use in the adjoint propagation. + wavefield : Data + Stored wavefield from the forward run, to use as needed. 
+ + Returns + ------- + + """ + time = self._problem.time + space = self._problem.space + shot = self._problem.shot + medium = self._problem.medium + + num_sources = shot.num_sources + num_receivers = shot.num_receivers + + # If there's no previous operator, generate one + if self._adjoint_operator.operator is None: + # Define variables + rec = self._grid.sparse_time_function('rec', num=num_receivers) + + p_a = self._grid.time_function('p_a', coefficients='symbolic' if self.drp else 'standard') + p_saved = self._grid.undersampled_time_function('p_saved', + factor=self.undersampling_factor) + m = self._grid.function('m', coefficients='symbolic' if self.drp else 'standard') + inv_m = self._grid.function('inv_m', coefficients='symbolic' if self.drp else 'standard') + + # Properly create damping layer + if np.max(space.extra) > 0: + damp = self._grid.function('damp') + damp.data[:] = self._problem.medium.damping()*medium.vp.extended_data + + else: + damp = devito.Constant('damp') + damp.data = 0. 
+ + # Create stencil + stencil = self._iso_stencil(self._grid.grid, p_a, m, inv_m, damp, + direction='backward') + + # Define the source injection function to generate the corresponding code + rec_term = rec.inject(field=p_a.backward, expr=-rec * time.step ** 2 / m) + + op_kwargs = { + 'dt': time.step, + 'p_a': p_a, + 'p_saved': p_saved, + 'm': m, + 'inv_m': inv_m, + 'damp': damp, + 'rec': rec, + } + + # Define gradient + gradient_update = self.set_grad(wrt) + + # Compile the operator + self._adjoint_operator.set_operator(stencil + rec_term + gradient_update, + name='acoustic_iso_adjoint', + **kwargs.get('devito_config', {})) + self._adjoint_operator.compile() + + # Prepare arguments + self._adjoint_operator.arguments(**{**op_kwargs, **kwargs.get('devito_args', {})}) + + else: + # If the source/receiver size has changed, then create new functions for them + # and generate the arguments again + changed_args = False + + if num_sources != self._grid.vars.src.npoint: + changed_args = True + self._grid.sparse_time_function('src', num=num_sources, cached=False) + + if num_receivers != self._grid.vars.rec.npoint: + changed_args = True + self._grid.sparse_time_function('rec', num=num_receivers, cached=False) + + if changed_args: + self._adjoint_operator.arguments(src=self._grid.vars.src, rec=self._grid.vars.rec) + + # Clear all buffers + self._grid.vars.src.data_with_halo.fill(0.) + self._grid.vars.rec.data_with_halo.fill(0.) + self._grid.vars.p_a.data_with_halo.fill(0.) + + for variable in wrt: + self._grid.vars['grad_'+variable.name].data_with_halo.fill(0.) 
+ + # Set prior wavefield + # Currently, we need to use this trick to ensure no copy is made + # of the wavefield + class Allocator: + @staticmethod + def alloc(*args, **kwargs): + return wavefield.data, None + + wavefield_data = devito.data.Data(self._grid.vars.p_saved.shape_allocated, + self._grid.vars.p_saved.dtype, + modulo=self._grid.vars.p_saved._mask_modulo, + allocator=Allocator) + + self._grid.vars.p_saved._data = wavefield_data + + # Set medium parameters + self._grid.vars.m.data_with_halo[:] = 1 / self._grid.with_halo(medium.vp.extended_data)**2 + self._grid.vars.inv_m.data_with_halo[:] = self._grid.with_halo(medium.vp.extended_data)**2 + + # Set geometry + self._grid.vars.rec.data[:] = adjoint_source.data.T + + self._grid.vars.src.coordinates.data[:] = shot.source_coordinates + self._grid.vars.rec.coordinates.data[:] = shot.receiver_coordinates + + def adjoint(self, wrt, adjoint_source, wavefield, **kwargs): + """ + Run the adjoint problem. + + Parameters + ---------- + wrt : VariableList + List of variables for which the inverse problem is being solved. + adjoint_source : Traces + Adjoint source to use in the adjoint propagation. + wavefield : Data + Stored wavefield from the forward run, to use as needed. + + Returns + ------- + + """ + self._adjoint_operator.run() + + def after_adjoint(self, wrt, adjoint_source, wavefield, **kwargs): + """ + Clean up after the adjoint run and retrieve the time gradients (if needed). + + Parameters + ---------- + wrt : VariableList + List of variables for which the inverse problem is being solved. + adjoint_source : Traces + Adjoint source to use in the adjoint propagation. + wavefield : Data + Stored wavefield from the forward run, to use as needed. + + Returns + ------- + VariableList + Updated variable list with gradients added to them. 
+ + """ + wavefield.deallocate() + self._grid.deallocate('p_saved') + self._grid.deallocate('p_a') + self._grid.deallocate('m') + self._grid.deallocate('inv_m') + + return self.get_grad(wrt, **kwargs) + + def set_grad_vp(self, vp, **kwargs): + """ + Prepare the problem type to calculate the gradients wrt Vp. + + Parameters + ---------- + vp : Vp + Vp variable to calculate the gradient. + + Returns + ------- + tuple + Tuple of gradient and preconditioner updates. + + """ + p = self._grid.vars.p_saved + p_a = self._grid.vars.p_a + + grad = self._grid.function('grad_vp') + grad_update = devito.Inc(grad, -p * p_a) + + prec = self._grid.function('prec_vp') + prec_update = devito.Inc(prec, +p * p) + + return grad_update, prec_update + + def get_grad_vp(self, vp, **kwargs): + """ + Retrieve the gradients calculated wrt to the input. + + The variable is updated inplace. + + Parameters + ---------- + vp : Vp + Vp variable to calculate the gradient. + + Returns + ------- + + """ + variable_grad = self._grid.vars.grad_vp + variable_grad = np.asarray(variable_grad.data, dtype=np.float32) + + variable_prec = self._grid.vars.prec_vp + variable_prec = np.asarray(variable_prec.data, dtype=np.float32) + + variable_grad *= 2 / vp.extended_data**3 + variable_prec *= 4 / vp.extended_data**6 + + vp.grad += variable_grad + vp.prec += variable_prec + + self._grid.deallocate('grad_vp') + self._grid.deallocate('prec_vp') + + def _symbolic_coefficients(self, grid, field, laplacian, m, inv_m): + raise NotImplementedError('DRP weights are not implemented in this version of stride') + + def _weights(self): + raise NotImplementedError('DRP weights are not implemented in this version of stride') + + def _laplacian(self, field, laplacian, m, inv_m): + if self.kernel not in ['OT2', 'OT4']: + raise ValueError("Unrecognized kernel") + + time = self._problem.time + + if self.kernel == 'OT2': + bi_harmonic = 0 + + else: + bi_harmonic = time.step**2/12 * inv_m*field.laplace + + laplacian_subs = field + 
bi_harmonic + + return laplacian_subs + + def _saved(self, field, m, inv_m): + return field.dt2 + + def _iso_stencil(self, grid, field, m, inv_m, damp, direction='forward'): + # Forward or backward + forward = direction == 'forward' + + # Define time step to be updated + u_next = field.forward if forward else field.backward + u_dt = field.dt if forward else field.dt.T + + # Get the spatial FD + laplacian = self._grid.function('laplacian', coefficients='symbolic') + laplacian_expr = self._laplacian(field, laplacian, m, inv_m) + + # Define PDE and update rule + eq_time = devito.solve(m*field.dt2 - laplacian.laplace + 2*damp*u_dt + damp**2*field, u_next) + + # Define coefficients + if self.drp: + subs = self._symbolic_coefficients(grid, field, laplacian, m, inv_m) + + # Time-stepping stencil + laplacian_term = devito.Eq(laplacian, laplacian_expr, + subdomain=grid.subdomains['physical_domain'], + coefficients=subs) + + stencil = devito.Eq(u_next, eq_time, + subdomain=grid.subdomains['physical_domain'], + coefficients=subs) + + return [laplacian_term, stencil] + + else: + # Time-stepping stencil + laplacian_term = devito.Eq(laplacian, laplacian_expr, + subdomain=grid.subdomains['physical_domain']) + + stencil = devito.Eq(u_next, eq_time, + subdomain=grid.subdomains['physical_domain']) + + return [laplacian_term, stencil] + + def _dt_max(self, k, h, vp_max): + space = self._problem.space + + return k * h / vp_max * 1 / np.sqrt(space.dim) diff --git a/stride/problem_types/operators/__init__.py b/stride/problem_types/operators/__init__.py new file mode 100644 index 00000000..185ddbcf --- /dev/null +++ b/stride/problem_types/operators/__init__.py @@ -0,0 +1,3 @@ + + +from .devito import * diff --git a/stride/problem_types/operators/devito.py b/stride/problem_types/operators/devito.py new file mode 100644 index 00000000..05cf0b5c --- /dev/null +++ b/stride/problem_types/operators/devito.py @@ -0,0 +1,463 @@ + +import os +import devito +import logging +import functools 
+import numpy as np + +import mosaic +from mosaic.types import Struct + + +__all__ = ['OperatorDevito', 'GridDevito'] + + +class PhysicalDomain(devito.SubDomain): + + name = 'physical_domain' + + def __init__(self, space_order, extra): + super(PhysicalDomain, self).__init__() + self.space_order = space_order + self.extra = extra + + def define(self, dimensions): + return {dimension: dimension for dimension in dimensions} + + +def _cached(func): + + @functools.wraps(func) + def cached_wrapper(self, *args, **kwargs): + name = args[0] + cached = kwargs.pop('cached', True) + + if cached is True: + fun = self.vars.get(name, None) + if fun is not None: + return fun + + fun = func(self, *args, **kwargs) + + self.vars[name] = fun + + return fun + + return cached_wrapper + + +class GridDevito: + """ + Instances of this class encapsulate the Devito grid, and interact with it by + generating appropriate functions on demand. + + Instances will also keep a cache of created Devito functions under the ``vars`` + attribute, which can be accessed by name using dot notation. + + Parameters + ---------- + space_order : int + Default space order of the discretisation for functions of the grid. + time_order : int + Default time order of the discretisation for functions of the grid. + grid : devito.Grid, optional + Predefined Devito grid. A new one will be created unless specified. + + """ + + def __init__(self, space_order, time_order, grid=None): + self._problem = None + + self.vars = Struct() + + self.space_order = space_order + self.time_order = time_order + + self.grid = grid + + # TODO The grid needs to be re-created if the space or time extent has changed + def set_problem(self, problem): + """ + Set up the problem or sub-problem that will be run on this grid. 
+ + Parameters + ---------- + problem : SubProblem or Problem + Problem on which the physics will be executed + + Returns + ------- + + """ + self._problem = problem + + if self.grid is None: + space = problem.space + + extended_extent = tuple(np.array(space.spacing) * (np.array(space.extended_shape) - 1)) + physical_domain = PhysicalDomain(self.space_order, space.extra) + self.grid = devito.Grid(extent=extended_extent, + shape=space.extended_shape, + origin=space.pml_origin, + subdomains=physical_domain, + dtype=np.float32) + + @_cached + def sparse_time_function(self, name, num=1, space_order=None, time_order=None, **kwargs): + """ + Create a Devito SparseTimeFunction with parameters provided. + + Parameters + ---------- + name : str + Name of the function. + num : int, optional + Number of points in the function, defaults to 1. + space_order : int, optional + Space order of the discretisation, defaults to the grid space order. + time_order : int, optional + Time order of the discretisation, defaults to the grid time order. + kwargs + Additional arguments for the Devito constructor. + + Returns + ------- + devito.SparseTimeFunction + Generated function. + + """ + time = self._problem.time + + space_order = space_order or self.space_order + time_order = time_order or self.time_order + + # Define variables + p_dim = devito.Dimension(name='p_%s' % name) + fun = devito.SparseTimeFunction(name=name, + grid=self.grid, + dimensions=(self.grid.time_dim, p_dim), + npoint=num, + nt=time.extended_num, + space_order=space_order, + time_order=time_order, + dtype=np.float32, + **kwargs) + + return fun + + @_cached + def function(self, name, space_order=None, **kwargs): + """ + Create a Devito Function with parameters provided. + + Parameters + ---------- + name : str + Name of the function. + space_order : int, optional + Space order of the discretisation, defaults to the grid space order. + kwargs + Additional arguments for the Devito constructor. 
+ + Returns + ------- + devito.Function + Generated function. + + """ + space_order = space_order or self.space_order + + fun = devito.Function(name=name, + grid=self.grid, + space_order=space_order, + **kwargs) + + return fun + + @_cached + def time_function(self, name, space_order=None, time_order=None, **kwargs): + """ + Create a Devito TimeFunction with parameters provided. + + Parameters + ---------- + name : str + Name of the function. + space_order : int, optional + Space order of the discretisation, defaults to the grid space order. + time_order : int, optional + Time order of the discretisation, defaults to the grid time order. + kwargs + Additional arguments for the Devito constructor. + + Returns + ------- + devito.TimeFunction + Generated function. + + """ + space_order = space_order or self.space_order + time_order = time_order or self.time_order + + fun = devito.TimeFunction(name=name, + grid=self.grid, + time_order=time_order, + space_order=space_order, + **kwargs) + + return fun + + @_cached + def undersampled_time_function(self, name, factor, space_order=None, time_order=None, **kwargs): + """ + Create an undersampled version of a Devito function with parameters provided. + + Parameters + ---------- + name : str + Name of the function. + factor : int,= + Undersampling factor. + space_order : int, optional + Space order of the discretisation, defaults to the grid space order. + time_order : int, optional + Time order of the discretisation, defaults to the grid time order. + kwargs + Additional arguments for the Devito constructor. + + Returns + ------- + devito.Function + Generated function. 
+ + """ + time = self._problem.time + + time_under = devito.ConditionalDimension('time_under', + parent=self.grid.time_dim, + factor=factor) + + buffer_size = (time.extended_num + factor - 1) // factor + + return self.time_function(name, + space_order=space_order, + time_order=time_order, + time_dim=time_under, + save=buffer_size, + **kwargs) + + def deallocate(self, name): + """ + Remove internal references to data buffers, if ``name`` is cached. + + Parameters + ---------- + name : str + Name of the function. + + Returns + ------- + + """ + if name in self.vars: + del self.vars[name]._data + self.vars[name]._data = None + + def with_halo(self, data): + """ + Pad ndarray with appropriate halo given the grid space order. + + Parameters + ---------- + data : ndarray + Array to pad + + Returns + ------- + ndarray + Padded array. + + """ + pad_widths = [[self.space_order, self.space_order] + for _ in self._problem.space.shape] + + return np.pad(data, pad_widths, mode='edge') + + +class OperatorDevito: + """ + Instances of this class encapsulate Devito operators, how to configure them and how to run them. + + + Parameters + ---------- + space_order : int + Default space order of the discretisation for functions of the grid. + time_order : int + Default time order of the discretisation for functions of the grid. + grid : GridDevito, optional + Predefined GridDevito. A new one will be created unless specified. 
+ """ + + def __init__(self, space_order, time_order, grid=None): + self._problem = None + + self.operator = None + self.kwargs = {} + + self.space_order = space_order + self.time_order = time_order + + if grid is None: + self.grid = GridDevito(space_order, time_order) + else: + self.grid = grid + + devito_logger = logging.getLogger('devito') + devito.logger.logger = devito_logger + + class RerouteFilter(logging.Filter): + + def __init__(self): + super().__init__() + + def filter(self, record): + _runtime = mosaic.runtime() + + if record.levelno == devito.logger.PERF: + _runtime.logger.info(record.msg) + + elif record.levelno == logging.ERROR: + _runtime.logger.error(record.msg) + + elif record.levelno == logging.WARNING: + _runtime.logger.warning(record.msg) + + elif record.levelno == logging.DEBUG: + _runtime.logger.debug(record.msg) + + else: + _runtime.logger.info(record.msg) + + return False + + devito_logger.addFilter(RerouteFilter()) + + runtime = mosaic.runtime() + if runtime.mode == 'local': + devito_logger.propagate = False + + def set_problem(self, problem): + """ + Set up the problem or sub-problem that will be run with this operator. + + Parameters + ---------- + problem : SubProblem or Problem + Problem on which the physics will be executed + + Returns + ------- + + """ + self._problem = problem + + def set_operator(self, op, name='kernel', **kwargs): + """ + Set up a Devito operator from a list of operations. + + Parameters + ---------- + op : list + List of operations to be given to the devito.Operator instance. + name : str + Name to give to the operator, defaults to ``kernel``. + kwargs : optional + Configuration parameters to set for Devito overriding defaults. 
+ + Returns + ------- + + """ + default_config = { + 'autotuning': ['aggressive', 'runtime'], + 'develop-mode': False, + 'mpi': False, + 'log-level': 'DEBUG', + } + + for key, value in default_config.items(): + if key in kwargs: + value = kwargs[key] + default_config[key] = value + del kwargs[key] + + devito.parameters.configuration[key] = value + + default_kwargs = { + 'name': name, + 'subs': self.grid.grid.spacing_map, + 'opt': 'advanced', + 'platform': os.getenv('DEVITO_PLATFORM', None), + 'language': os.getenv('DEVITO_LANGUAGE', 'openmp'), + 'compiler': os.getenv('DEVITO_COMPILER', None), + } + + default_kwargs.update(kwargs) + + runtime = mosaic.runtime() + runtime.logger.info('Operator `%s` configuration:' % name) + + for key, value in default_config.items(): + runtime.logger.info('\t * %s=%s' % (key, value)) + + for key, value in default_kwargs.items(): + if key == 'name': + continue + + runtime.logger.info('\t * %s=%s' % (key, value)) + + self.operator = devito.Operator(op, **default_kwargs) + + def compile(self): + """ + Compile the operator. + + Returns + ------- + + """ + # compiler_flags = os.getenv('DEVITO_COMP_FLAGS', '').split(',') + # compiler_flags = [each.strip() for each in compiler_flags] + # self.operator._compiler.cflags += compiler_flags + self.operator.cfunction + + def arguments(self, **kwargs): + """ + Prepare Devito arguments. + + Parameters + ---------- + kwargs : optional + Arguments to pass to Devito. + + Returns + ------- + + """ + time = self._problem.time + + kwargs['time_m'] = kwargs.get('time_m', 0) + kwargs['time_M'] = kwargs.get('time_M', time.extended_num - 1) + + self.kwargs.update(kwargs) + + def run(self): + """ + Run the operator. 
+ + Returns + ------- + + """ + self.operator.apply(**self.kwargs) diff --git a/stride/problem_types/problem_type.py b/stride/problem_types/problem_type.py new file mode 100644 index 00000000..df1c2008 --- /dev/null +++ b/stride/problem_types/problem_type.py @@ -0,0 +1,254 @@ + +from abc import ABC, abstractmethod + + +__all__ = ['ProblemTypeBase'] + + +class ProblemTypeBase(ABC): + """ + Problem types encode the physics of the forward and inverse problems that we want + to solve using Stride. In most cases, these physics will correspond to state and adjoint PDEs + describing problems of interest. + + A problem type could have multiple implementations, depending on how the physics are solved + or the techniques that are used to solve them. + + For example, the ``acoustic`` problem corresponds to the second-order isotropic acoustic + wave equation, which currently has one implementation using the Devito library. This implementation + is contained within the ``acoustic/devito`` folder. + + Problem types inherit from this base class, and have to comply with a certain interface by + defining, at least, a series of methods. + + To solve the state or forward problem: + + - ``before_state`` + - ``state`` + - ``after_state`` + + and to solve the adjoint problem: + + - ``before_adjoint`` + - ``adjoint`` + - ``after_adjoint`` + + If the problem type has to provide the gradient for a certain optimisation variable, the + class will also have to define a pair of methods per variable: + + - ``set_grad_[variable_name]`` will be called before the adjoint run to prepare the calculation of the gradient. + - ``get_grad_[variable_name]`` will be called after the adjoint run to fill in the calculated gradients. + + in order for the gradients to be calculated. 
+ + """ + + space_order = -1 + time_order = -1 + + def __init__(self): + self._problem = None + + self._state_operator = None + self._adjoint_operator = None + + def set_problem(self, problem, **kwargs): + """ + Set up the problem or sub-problem that needs to be run. + + Parameters + ---------- + problem : SubProblem or Problem + Problem on which the physics will be executed + kwargs + Extra parameters to be used by the method. + + Returns + ------- + + """ + self._problem = problem + + @abstractmethod + def before_state(self, save_wavefield=False, **kwargs): + """ + Prepare the problem type to run the state or forward problem. + + Parameters + ---------- + save_wavefield : bool, optional + Whether or not the wavefield needs to be stored, defaults to False. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + + """ + pass + + @abstractmethod + def state(self, **kwargs): + """ + Run the state or forward problem. + + Parameters + ---------- + kwargs + Extra parameters to be used by the method. + + Returns + ------- + + """ + pass + + @abstractmethod + def after_state(self, save_wavefield=False, **kwargs): + """ + Clean up after the state run and retrieve the time traces. + + If requested, also provide a saved wavefield. + + Parameters + ---------- + save_wavefield : bool, optional + Whether or not the wavefield needs to be stored, defaults to False. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + Traces + Time traces produced by the state run. + Data or None + Wavefield produced by the state run, if any. + + """ + pass + + @abstractmethod + def before_adjoint(self, wrt, adjoint_source, wavefield, **kwargs): + """ + Prepare the problem type to run the adjoint problem. + + Parameters + ---------- + wrt : VariableList + List of variables for which the inverse problem is being solved. + adjoint_source : Traces + Adjoint source to use in the adjoint propagation. 
+ wavefield : Data + Stored wavefield from the forward run, to use as needed. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + + """ + pass + + @abstractmethod + def adjoint(self, wrt, adjoint_source, wavefield, **kwargs): + """ + Run the adjoint problem. + + Parameters + ---------- + wrt : VariableList + List of variables for which the inverse problem is being solved. + adjoint_source : Traces + Adjoint source to use in the adjoint propagation. + wavefield : Data + Stored wavefield from the forward run, to use as needed. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + + """ + pass + + @abstractmethod + def after_adjoint(self, wrt, adjoint_source, wavefield, **kwargs): + """ + Clean up after the adjoint run and retrieve the time gradients (if needed). + + Parameters + ---------- + wrt : VariableList + List of variables for which the inverse problem is being solved. + adjoint_source : Traces + Adjoint source to use in the adjoint propagation. + wavefield : Data + Stored wavefield from the forward run, to use as needed. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + VariableList + Updated variable list with gradients added to them. + + """ + pass + + def set_grad(self, wrt, **kwargs): + """ + Prepare the problem type to calculate the gradients wrt the inputs. + + Parameters + ---------- + wrt : VariableList + List of variable with respect to which the inversion is running. + kwargs + Extra parameters to be used by the method. 
+ + Returns + ------- + list + List of update rules (if any) for the gradients of the problem type + with respect to the inputs + + """ + gradient_update = [] + + for variable in wrt: + method = getattr(self, 'set_grad_' + variable.name, None) + + if method is None: + raise ValueError('Variable %s not implemented' % variable.name) + + update = method(variable, **kwargs) + gradient_update += update + + return gradient_update + + def get_grad(self, wrt, **kwargs): + """ + Retrieve the gradients calculated wrt to the inputs. + + Parameters + ---------- + wrt : VariableList + List of variable with respect to which the inversion is running. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + VariableList + Updated variable list with gradients added to them. + + """ + for variable in wrt: + method = getattr(self, 'get_grad_' + variable.name, None) + + if method is None: + raise ValueError('Variable %s not implemented' % variable.name) + + method(variable, **kwargs) + + return wrt diff --git a/stride/runner.py b/stride/runner.py new file mode 100644 index 00000000..35dff040 --- /dev/null +++ b/stride/runner.py @@ -0,0 +1,230 @@ + + +from mosaic import tessera + + +__all__ = ['Runner'] + + +@tessera +class Runner: + """ + The Runner acts as a manager of the forward and inverse runs in Stride. The Runner takes care + of instantiating a problem type and, when needed, a functional; it takes care of setting + up the sub-problem to run and the optimisation block; and it acts as an interface to + execute forward, adjoint and gradient runs on these. + + The Runner is also responsible for all necessary processing of wavelets, observed and modelled + data, as well as local-level actions on the gradients. + + """ + + def __init__(self): + self.problem = None + self.block = None + + self.problem_type = None + self.functional = None + + def set_problem(self, problem, **kwargs): + """ + Set up the problem or sub-problem that needs to be run. 
+ + Parameters + ---------- + problem : SubProblem or Problem + Problem on which the physics will be executed + kwargs + Extra parameters to be used by the method. + + Returns + ------- + + """ + self.logger.info('(ShotID %d) Preparing to run shot' % problem.shot_id) + + self.problem = problem + + if self.problem_type is None or self.problem_type.__class__ != problem.problem_type.__class__: + self.problem_type = problem.problem_type + + if self.block is not None: + wavelets = self.problem.shot.wavelets + wavelets = self.block.pipelines.wavelets.apply(wavelets) + self.problem.shot.wavelets = wavelets + + observed = self.problem.shot.observed + observed = self.block.pipelines.wavelets.apply(observed) + self.problem.shot.observed = observed + + self.problem_type.set_problem(problem, **kwargs) + + def set_block(self, block, **kwargs): + """ + Set up the optimisation block for the inversion. + + Parameters + ---------- + block : Block + Relevant optimisation block. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + + """ + self.logger.info('Preparing to run block %d' % block.id) + + self.block = block + + if self.functional is None or self.functional.__class__ != block.functional.__class__: + self.functional = block.functional + + def run_state(self, save_wavefield=False, **kwargs): + """ + Run all the necessary hooks on the problem type to execute the state or forward. + + Parameters + ---------- + save_wavefield : bool, optional + Whether or not the wavefield needs to be stored, defaults to False. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + Traces + Time traces produced by the state run. + Data or None + Wavefield produced by the state run, if any. 
+ + """ + self.problem_type.before_state(save_wavefield=save_wavefield, **kwargs) + + self.logger.info('(ShotID %d) Running state equation for shot' % self.problem.shot_id) + self.problem_type.state(**kwargs) + + self.logger.info('(ShotID %d) Completed state equation run for shot' % self.problem.shot_id) + traces, wavefield = self.problem_type.after_state(save_wavefield=save_wavefield, **kwargs) + + if save_wavefield is True and self.block is not None: + wavefield = self.block.pipelines.wavefield.apply(wavefield) + + return traces, wavefield + + def run_functional(self, modelled, **kwargs): + """ + Use some ``modelled`` data to calculate a functional value for the + present SubProblem. + + Parameters + ---------- + modelled : Traces + Time traces to compare with the observed data in the shot. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + FunctionalValue + Object containing information about the shot, the value of the functional + and the residuals. + Traces + Generated adjoint source. + + """ + if self.functional is None: + raise ValueError('No functional was given to the runner instance') + + observed = self.problem.shot.observed + if self.block is not None: + modelled, observed = self.block.pipelines.traces.apply(modelled, observed) + + fun, adjoint_source = self.functional.apply(self.problem.shot, modelled, observed, **kwargs) + + if self.block is not None: + adjoint_source = self.block.pipelines.adjoint_source.apply(adjoint_source) + + self.logger.info('(ShotID %d) Functional value: %s' % (self.problem.shot_id, fun)) + + return fun, adjoint_source + + def run_adjoint(self, wrt, adjoint_source, wavefield, **kwargs): + """ + Run all the necessary hooks on the problem type to execute the adjoint problem. + + Parameters + ---------- + wrt : VariableList + List of variables for which the inverse problem is being solved. + adjoint_source : Traces + Adjoint source to use in the adjoint propagation. 
+ wavefield : Data + Stored wavefield from the forward run, to use as needed. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + VariableList + Updated variable list with gradients added to them, if any. + + """ + wrt.grad.fill(0.) + wrt.prec.fill(0.) + + self.problem_type.before_adjoint(wrt, adjoint_source, wavefield, **kwargs) + + self.logger.info('(ShotID %d) Running adjoint equation for shot' % self.problem.shot_id) + self.problem_type.adjoint(wrt, adjoint_source, wavefield, **kwargs) + + self.logger.info('(ShotID %d) Completed adjoint equation run shot' % self.problem.shot_id) + + wrt = self.problem_type.after_adjoint(wrt, adjoint_source, wavefield, **kwargs) + wrt = self.functional.get_grad(wrt, **kwargs) + + for variable in wrt: + variable.grad, variable.prec = self.block.pipelines.\ + local_gradient.apply(variable.grad, variable.prec) + + return wrt + + def run_gradient(self, wrt, **kwargs): + """ + Execute the state, functional and adjoint in order to calculate + the gradients for a series of variables. + + Parameters + ---------- + wrt : VariableList + List of variables for which the inverse problem is being solved. + kwargs + Extra parameters to be used by the method. + + Returns + ------- + FunctionalValue + Object containing information about the shot, the value of the functional + and the residuals. + VariableList + Updated variable list with gradients added to them, if any. 
def fetch(origin, dest, token=None):
    """
    Fetch an asset from a GitHub repository and store it at ``dest``.

    The asset is only fetched once: if ``dest`` already exists, the function
    returns immediately without contacting the network.

    Parameters
    ----------
    origin : str
        Name of a known asset (a key of ``known_assets``) or a direct URL.
    dest : str
        Path to save file.
    token : str, optional
        Access token to fetch the asset if it lives in a private repo.

    Returns
    -------

    Raises
    ------
    RuntimeError
        If the download command exits with a non-zero status.

    """
    if os.path.exists(dest):
        return

    if origin in known_assets:
        origin = known_assets[origin]

    # Build the command as an argument list and run it without a shell, so
    # the token, URL and destination cannot be abused for shell injection.
    cmd = ['curl', '-LJ#']

    if token is not None:
        cmd += ['-H', 'Authorization: token %s' % token]

    cmd += ['-H', 'Accept: application/octet-stream', origin, '--output', dest]

    process = subprocess.run(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

    # curl signals failure through a positive exit code (e.g. 22 for HTTP
    # errors); the previous `< 0` check only caught death-by-signal and let
    # failed downloads pass silently.
    if process.returncode != 0:
        raise RuntimeError('Fetching with cmd "%s" failed' % ' '.join(cmd))
+ + Returns + ------- + 1-dimensional array + Frequencies of the spectrum + ndarray + Magnitude spectrum of the signal or signals. + + """ + freqs, signal_fft = magnitude_spectrum(signal, dt, db=True) + + if len(signal_fft.shape) > 1: + signal_fft = np.mean(signal_fft, axis=0) + + num_freqs = signal_fft.shape[-1] + + f_min = 0 + for f in range(num_freqs): + if signal_fft[f] > cutoff: + f_min = freqs[f] + break + + f_max = num_freqs + for f in reversed(range(num_freqs)): + if signal_fft[f] > cutoff: + f_max = freqs[f] + break + + return f_min, f_max diff --git a/stride/utils/filters.py b/stride/utils/filters.py new file mode 100644 index 00000000..b109dc69 --- /dev/null +++ b/stride/utils/filters.py @@ -0,0 +1,223 @@ + +import numpy as np +import scipy.signal + + +__all__ = ['bandpass_filter_butterworth', 'lowpass_filter_butterworth', 'highpass_filter_butterworth', + 'bandpass_filter_fir', 'lowpass_filter_fir', 'highpass_filter_fir'] + + +# TODO Implement more efficient threaded filters + + +def bandpass_filter_butterworth(data, f_min, f_max, padding=0, order=8, axis=-1): + """ + Apply a zero-phase Butterworth bandpass filter using cascaded second-order sections. + + Parameters + ---------- + data : 2-dimensional array + Data to apply the filter to, with shape (number_of_traces, number_of_timesteps) + f_min : float + Minimum frequency of the filter, dimensionless + f_max : float + Maximum frequency of the filter, dimensionless + padding : int, optional + Padding to apply before AND after the traces to compensate for the filtering, defaults to 0. + order : int, optional + Order of the filter, defaults to 8. 
+ axis : int, optional + Axis on which to perform the filtering, defaults to -1 + + Returns + ------- + 2-dimensional array + Data after filtering, with shape (number_of_traces, number_of_timesteps+2*padding) + + """ + f_min = f_min / 0.5 + f_max = f_max / 0.5 + + if padding > 0: + data = np.pad(data, ((0, 0), (padding, padding)), mode='constant', constant_values=0.) + sos = scipy.signal.butter(order, [f_min, f_max], analog=False, btype='band', output='sos') + + return scipy.signal.sosfiltfilt(sos, data, axis=axis) + + +def lowpass_filter_butterworth(data, f_max, padding=0, order=8, axis=-1): + """ + Apply a zero-phase Butterworth lowpass filter using cascaded second-order sections. + + Parameters + ---------- + data : 2-dimensional array + Data to apply the filter to, with shape (number_of_traces, number_of_timesteps) + f_max : float + Maximum frequency of the filter, dimensionless + padding : int, optional + Padding to apply before AND after the traces to compensate for the filtering, defaults to 0. + order : int, optional + Order of the filter, defaults to 8. + axis : int, optional + Axis on which to perform the filtering, defaults to -1 + + Returns + ------- + 2-dimensional array + Data after filtering, with shape (number_of_traces, number_of_timesteps+2*padding) + + """ + f_max = f_max / 0.5 + + if padding > 0: + data = np.pad(data, ((0, 0), (padding, padding)), mode='constant', constant_values=0.) + sos = scipy.signal.butter(order, f_max, analog=False, btype='lowpass', output='sos') + + return scipy.signal.sosfiltfilt(sos, data, axis=axis) + + +def highpass_filter_butterworth(data, f_min, padding=0, order=8, axis=-1): + """ + Apply a zero-phase Butterworth highpass filter using cascaded second-order sections. 
+ + Parameters + ---------- + data : 2-dimensional array + Data to apply the filter to, with shape (number_of_traces, number_of_timesteps) + f_min : float + Minimum frequency of the filter, dimensionless + padding : int, optional + Padding to apply before AND after the traces to compensate for the filtering, defaults to 0. + order : int, optional + Order of the filter, defaults to 8. + axis : int, optional + Axis on which to perform the filtering, defaults to -1 + + Returns + ------- + 2-dimensional array + Data after filtering, with shape (number_of_traces, number_of_timesteps+2*padding) + + """ + f_min = f_min / 0.5 + + if padding > 0: + data = np.pad(data, ((0, 0), (padding, padding)), mode='constant', constant_values=0.) + sos = scipy.signal.butter(order, f_min, analog=False, btype='highpass', output='sos') + + return scipy.signal.sosfiltfilt(sos, data, axis=axis) + + +def bandpass_filter_fir(data, f_min, f_max, padding=0, attenuation=50, axis=-1): + """ + Apply a zero-phase FIR bandpass filter using cascaded second-order sections. + + Parameters + ---------- + data : 2-dimensional array + Data to apply the filter to, with shape (number_of_traces, number_of_timesteps) + f_min : float + Minimum frequency of the filter, dimensionless + f_max : float + Minimum frequency of the filter, dimensionless + padding : int, optional + Padding to apply before AND after the traces to compensate for the filtering, defaults to 0. + attenuation : float, optional + Attenuation of the reject band in dB, defaults to 50. + axis : int, optional + Axis on which to perform the filtering, defaults to -1 + + Returns + ------- + 2-dimensional array + Data after filtering, with shape (number_of_traces, number_of_timesteps+2*padding) + + """ + f_min = f_min / 0.5 + f_max = f_max / 0.5 + + if padding > 0: + data = np.pad(data, ((0, 0), (padding, padding)), mode='constant', constant_values=0.) 
+ + transition_width = 0.050 + order, beta = scipy.signal.kaiserord(attenuation, transition_width) + order = order // 2 * 2 + 1 + + filt = scipy.signal.firwin(order, [f_min, f_max], pass_zero='bandpass', window=('kaiser', beta), scale=True) + + return scipy.signal.filtfilt(filt, 1., data, axis=axis) + + +def lowpass_filter_fir(data, f_max, padding=0, attenuation=50, axis=-1): + """ + Apply a zero-phase FIR lowpass filter using cascaded second-order sections. + + Parameters + ---------- + data : 2-dimensional array + Data to apply the filter to, with shape (number_of_traces, number_of_timesteps) + f_max : float + Maximum frequency of the filter, dimensionless + padding : int, optional + Padding to apply before AND after the traces to compensate for the filtering, defaults to 0. + attenuation : float, optional + Attenuation of the reject band in dB, defaults to 50. + axis : int, optional + Axis on which to perform the filtering, defaults to -1 + + Returns + ------- + 2-dimensional array + Data after filtering, with shape (number_of_traces, number_of_timesteps+2*padding) + + """ + f_max = f_max / 0.5 + + if padding > 0: + data = np.pad(data, ((0, 0), (padding, padding)), mode='constant', constant_values=0.) + + transition_width = 0.050 + order, beta = scipy.signal.kaiserord(attenuation, transition_width) + order = order // 2 * 2 + 1 + + filt = scipy.signal.firwin(order, f_max, pass_zero='lowpass', window=('kaiser', beta), scale=True) + + return scipy.signal.filtfilt(filt, 1., data, axis=axis) + + +def highpass_filter_fir(data, f_min, padding=0, attenuation=50, axis=-1): + """ + Apply a zero-phase FIR highpass filter using cascaded second-order sections. 
+ + Parameters + ---------- + data : 2-dimensional array + Data to apply the filter to, with shape (number_of_traces, number_of_timesteps) + f_min : float + Minimum frequency of the filter, dimensionless + padding : int, optional + Padding to apply before AND after the traces to compensate for the filtering, defaults to 0. + attenuation : float, optional + Attenuation of the reject band in dB, defaults to 50. + axis : int, optional + Axis on which to perform the filtering, defaults to -1 + + Returns + ------- + 2-dimensional array + Data after filtering, with shape (number_of_traces, number_of_timesteps+2*padding) + + """ + f_min = f_min / 0.5 + + if padding > 0: + data = np.pad(data, ((0, 0), (padding, padding)), mode='constant', constant_values=0.) + + transition_width = 0.050 + order, beta = scipy.signal.kaiserord(attenuation, transition_width) + order = order // 2 * 2 + 1 + + filt = scipy.signal.firwin(order, f_min, pass_zero='highpass', window=('kaiser', beta), scale=True) + + return scipy.signal.filtfilt(filt, 1., data, axis=axis) diff --git a/stride/utils/geometries.py b/stride/utils/geometries.py new file mode 100644 index 00000000..48034281 --- /dev/null +++ b/stride/utils/geometries.py @@ -0,0 +1,104 @@ + +import numpy as np +import scipy.linalg + + +__all__ = ['elliptical', 'ellipsoidal'] + + +def _rot_matrix(axis, theta): + return scipy.linalg.expm(np.cross(np.eye(3), axis / np.linalg.norm(axis) * theta)) + + +def elliptical(num, radius, centre): + """ + Generate a 2D elliptical geometry for a number of points ``num``, centred + on ``centre`` and with radius ``radius``. + + Parameters + ---------- + num : int + Number of points on the geometry. + radius : array-like + List or array with each of the two radii of the ellipsis. + centre : array-like + List or array with the coordinates of the centre of the ellipsis. + + Returns + ------- + 2d-array + Array containing the coordinates of points in the geometry, with shape (num, 2). 
+ + """ + angles = np.linspace(0, 2*np.pi, num, endpoint=False) + + geometry = np.zeros((num, 2)) + for index, angle in zip(range(num), angles): + geometry[index, 0] = radius[0] * np.cos(angle) + centre[0] + geometry[index, 1] = radius[1] * np.sin(angle) + centre[1] + + return geometry + + +def ellipsoidal(num, radius, centre, theta=0., axis=None, threshold=0.): + """ + Generate a 3D ellipsoidal geometry for a number of points ``num``, centred + on ``centre`` and with radius ``radius``. The geometry can be rotated by + an amount ``theta``, and thresholded by eliminating ``threshold`` percent of it. + + Parameters + ---------- + num : int + Number of points on the geometry. + radius : array-like + List or array with each of the two radii of the ellipsis. + centre : array-like + List or array with the coordinates of the centre of the ellipsis. + theta + axis + threshold + + Returns + ------- + 3d-array + Array containing the coordinates of points in the geometry, with shape (num, 3). + + """ + num = int(np.round(num / (1 - threshold))) + + offset = 2. / num + increment = np.pi * (3. 
                          - np.sqrt(5.))
    # Default rotation axis is the x-axis.
    axis = axis or [1, 0, 0]

    geometry = np.zeros((num, 3))

    index = 0
    for sample in range(num):
        # z sweeps from -1 to +1 in ``num`` even steps; r is the radius of the
        # unit-sphere cross-section at that height.
        z = ((sample * offset) - 1) + (offset / 2)
        r = np.sqrt(1 - z ** 2)

        # Azimuthal angle: pi*(3 - sqrt(5)) is the golden angle, so this looks
        # like a Fibonacci-spiral point distribution. Note ``sample % num`` is
        # a no-op since sample < num.
        phi = (sample % num) * increment

        y = np.cos(phi) * r
        x = np.sin(phi) * r

        # Discard the bottom fraction of the sphere requested via ``threshold``
        # (z + 1 spans [0, 2), hence the factor of 2).
        if z + 1 < threshold*2:
            continue

        # Stretch the unit sphere into the requested ellipsoid.
        x *= radius[0]
        y *= radius[1]
        z *= radius[2]

        # Rotate about ``axis`` by ``theta``, then translate to ``centre``.
        [x, y, z] = np.dot(_rot_matrix(axis, theta), [x, y, z])

        point = np.array([x, y, z])
        point[0] += centre[0]
        point[1] += centre[1]
        point[2] += centre[2]

        geometry[index, :] = point
        index += 1

    # Trim the rows left unused by the thresholding.
    geometry = geometry[:index, :]

    return geometry


# --- new file: stride/utils/wavelets.py ---

import numpy as np
import scipy.signal


def tone_burst(centre_freq, n_cycles, n_samples, dt, envelope='gaussian', offset=0):
    """
    Generate a tone burst wavelet.

    Parameters
    ----------
    centre_freq : float
        Centre frequency of the signal.
    n_cycles : float
        Number of cycles for the signal.
    n_samples : int
        Length of the wavelet.
    dt : float
        Discretisation step for the time axis.
    envelope : str, optional
        Type of envelope to be applied to the signal, ``gaussian`` (default) or ``rectangular``.
    offset : int, optional
        Offset in timesteps to the start of the wavelet, defaults to 0.

    Returns
    -------
    1-dimensional array
        Generated wavelet.
    """
    # Duration of the burst: n_cycles periods of the centre frequency.
    tone_length = n_cycles / centre_freq

    # NOTE(review): ``tone_length//dt`` floors before the +1, so the actual
    # sample spacing is tone_length/n rather than exactly dt unless dt divides
    # tone_length — confirm this is intended.
    time_array = np.linspace(0, tone_length, int(tone_length//dt + 1), endpoint=False)
    signal = np.sin(2 * np.pi * centre_freq * time_array)
    n_tone = signal.shape[0]

    if envelope == 'gaussian':
        # Gaussian envelope truncated at +/- 3 standard deviations.
        limit = 3
        window_x = np.linspace(-limit, limit, n_tone)
        window = np.exp(-window_x ** 2 / 2)
    elif envelope == 'rectangular':
        window = np.ones((signal.shape[0],))
    else:
        raise Exception('Envelope type not implemented')

    signal = np.multiply(signal, window)

    # Tukey taper over 5% of the samples to force the edges smoothly to zero.
    window = scipy.signal.get_window(('tukey', 0.05), n_tone, False)
    signal = np.multiply(signal, window)

    # Place the burst at ``offset`` within an n_samples-long trace; assumes
    # offset + n_tone <= n_samples, otherwise np.pad raises on a negative width.
    signal = np.pad(signal, ((offset, n_samples - offset - n_tone),), mode='constant', constant_values=0.)

    return signal


def ricker(centre_freq, n_samples, dt, offset=0):
    """
    Generate a ricker wavelet.

    Parameters
    ----------
    centre_freq : float
        Centre frequency of the signal.
    n_samples : int
        Length of the wavelet.
    dt : float
        Discretisation step for the time axis.
    offset : int, optional
        Offset in timesteps to the start of the wavelet, defaults to 0.

    Returns
    -------
    1-dimensional array
        Generated wavelet.

    """
    # Support of the wavelet, chosen so it has decayed to ~zero at the edges.
    tone_length = 3 * np.sqrt(6) / (centre_freq * np.pi)
    time_array = np.linspace(-tone_length/2, tone_length/2, int(tone_length//dt + 1), endpoint=False)

    # Standard Ricker (Mexican-hat) formula:
    # (1 - 2*pi^2*f^2*t^2) * exp(-pi^2*f^2*t^2).
    signal = (1 - 2 * np.pi**2 * centre_freq**2 * time_array**2) * np.exp(-np.pi**2 * centre_freq**2 * time_array**2)
    n_tone = signal.shape[0]

    # Tukey taper over 5% of the samples to suppress residual edge values.
    window = scipy.signal.get_window(('tukey', 0.05), n_tone, False)
    signal = np.multiply(signal, window)

    # Place the wavelet at ``offset``; assumes offset + n_tone <= n_samples.
    signal = np.pad(signal, ((offset, n_samples - offset - n_tone),), mode='constant', constant_values=0.)

    return signal