diff --git a/.github/workflows/get-externals/action.yml b/.github/workflows/get-externals/action.yml
index 6ff5d2dc..34c0aea0 100644
--- a/.github/workflows/get-externals/action.yml
+++ b/.github/workflows/get-externals/action.yml
@@ -12,43 +12,21 @@ runs:
if: runner.os == 'Linux'
run: |
curl -L https://github.com/disorderedmaterials/Gudrun/releases/download/${{ env.gudrunTag }}/binaries-${{ env.gudrunTag }}-linux.zip > gudrun-binaries.zip
- curl -L https://github.com/disorderedmaterials/ModEx/releases/download/${{ env.modexTag }}/binaries-${{ env.modexTag }}-linux.zip > modex-binaries.zip
- - name: Build Externals (OSX)
- shell: bash
- if: runner.os == 'MacOS'
- run: |
- # Install Ninja
- brew update-reset
- brew install ninja
- # Retrieve Gudrun source
- curl -L https://github.com/disorderedmaterials/Gudrun/archive/refs/tags/${{ env.gudrunTag }}.tar.gz > gudrun-source.tar.gz
- tar -zxvf gudrun-source.tar.gz
- cd Gudrun-${{ env.gudrunTag }}
- # Retrieve pre-built HDF5 libs
- curl -L https://github.com/disorderedmaterials/HDF5/releases/download/${{ env.hdf5tag }}/${{ env.hdf5tag }}-osx.zip > hdf5-osx.zip
- unzip hdf5-osx.zip
- # Build Gudrun binaries
- mkdir build
- cd build
- cmake ../ -G Ninja -DCMAKE_Fortran_COMPILER:string="gfortran-11" -DLOCAL_STATIC_HDF5:bool=True -DHDF5_DIR:path=$(pwd)/../${{ env.hdf5tag }} -DCMAKE_Fortran_FLAGS:string="-cpp" -DGUDPY_COMPATIBILITY=1
- ninja
- ninja install
- cd ../
- # Create binaries zip
- mkdir binaries-${{ env.gudrunTag }}-linux
- mv bin/* binaries-${{ env.gudrunTag }}-linux
- zip -9rv ../gudrun-binaries.zip binaries-${{ env.gudrunTag }}-linux/
+ unzip gudrun-binaries
- name: Retrieve Externals (OSX)
shell: bash
if: runner.os == 'MacOS'
run: |
- curl -L https://github.com/disorderedmaterials/ModEx/releases/download/${{ env.modexTag }}/binaries-${{ env.modexTag }}-osx.zip > modex-binaries.zip
+ brew install zlib hdf5 gcc@11
+ curl -L https://github.com/disorderedmaterials/Gudrun/releases/download/${{ env.gudrunTag }}/binaries-${{ env.gudrunTag }}-osx-fat.zip > gudrun-binaries.zip
+ unzip gudrun-binaries
+ chmod +x binaries-${{ env.gudrunTag }}-osx-fat/*
- name: Retrieve Externals (Windows)
shell: bash
if: runner.os == 'Windows'
run: |
curl -L https://github.com/disorderedmaterials/Gudrun/releases/download/${{ env.gudrunTag }}/binaries-${{ env.gudrunTag }}-windows.zip > gudrun-binaries.zip
- curl -L https://github.com/disorderedmaterials/ModEx/releases/download/${{ env.modexTag }}/binaries-${{ env.modexTag }}-windows.zip > modex-binaries.zip
+ unzip gudrun-binaries
- name: Retrieve StartupFiles
shell: bash
run: |
@@ -57,11 +35,7 @@ runs:
shell: bash
run: |
if [ ! -e ${{ inputs.targetDir }} ]; then mkdir ${{ inputs.targetDir }}; fi
- unzip gudrun-binaries.zip
mv binaries-${{ env.gudrunTag }}-*/* ${{ inputs.targetDir }}
- unzip modex-binaries.zip
- mv binaries-${{ env.modexTag }}-*/* ${{ inputs.targetDir }}
-
unzip startupFiles.zip
mv StartupFiles ${{ inputs.targetDir }}
diff --git a/.github/workflows/package/action.yml b/.github/workflows/package/action.yml
index 89bbd1bf..9f1152cf 100644
--- a/.github/workflows/package/action.yml
+++ b/.github/workflows/package/action.yml
@@ -14,13 +14,14 @@ runs:
- name: Upload Artifacts (OSX)
if: runner.os == 'MacOS'
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
- name: Packages
+ name: GudPy-OSX
path: |
dist/GudPy-*.dmg
dist/GudPy-*-OSX
+
- name: Create Zip (Windows)
if: runner.os == 'Windows'
shell: bash
@@ -30,9 +31,9 @@ runs:
- name: Upload Artifacts (Windows)
if: runner.os == 'Windows'
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
- name: Packages
+ name: GudPy-Windows
path: |
dist/GudPy-*-Windows.zip
@@ -50,9 +51,9 @@ runs:
- name: Upload Artifacts (Linux)
if: runner.os == 'Linux'
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
- name: Packages
+ name: GudPy-Linux
path: |
dist/GudPy-*.sif
dist/GudPy-*-Linux
diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml
index ecf00822..10ad186e 100644
--- a/.github/workflows/pr.yml
+++ b/.github/workflows/pr.yml
@@ -18,7 +18,6 @@ jobs:
uses: "./.github/workflows/lint"
Test:
- needs: Lint
strategy:
fail-fast: false
matrix:
diff --git a/.github/workflows/publish/action.yml b/.github/workflows/publish/action.yml
index 75d0e19a..48aaaad6 100644
--- a/.github/workflows/publish/action.yml
+++ b/.github/workflows/publish/action.yml
@@ -18,9 +18,9 @@ runs:
singularity-version: 3.8.3
- name: Download Artifacts
- uses: actions/download-artifact@v3
+ uses: actions/download-artifact@v4
with:
- name: Packages
+ pattern: GudPy-*
path: ${{ github.workspace }}/packages
- name: Download Prerequisites
@@ -36,7 +36,9 @@ runs:
echo "Release tag will be: ${{ env.gudPyVersion }}"
echo "Release name will be: ${{ env.gudPyVersion }}"
export GITHUB_TOKEN=${{ github.token }}
- ./update-release -r disorderedmaterials/gudpy -t ${{ env.gudPyVersion }} -e -n "${{ env.gudPyVersion }}" -f ReleaseNotes.md packages/*
+ FILES=( $(find ./packages -mindepth 2 -maxdepth 2 -type f -name "GudPy-${{ env.gudPyVersion }}*") )
+ echo ${FILES[@]}
+ ./update-release -r disorderedmaterials/gudpy -t ${{ env.gudPyVersion }} -e -n "${{ env.gudPyVersion }}" -f ReleaseNotes.md ${FILES[@]}
- name: Publish on GitHub (Continuous)
if: ${{ inputs.publish == 'true' && inputs.isRelease == 'false' }}
@@ -45,7 +47,9 @@ runs:
echo "Release tag will be: continuous"
echo "Release name will be: 'Continuous (${{ env.gudPyVersion }})'"
export GITHUB_TOKEN=${{ github.token }}
- ./update-release -r disorderedmaterials/gudpy -t continuous -p -e -u -n "Continuous (${{ env.gudPyVersion }})" -b "Continuous release from \`main\` branch. Built $(date)." packages/*
+ FILES=( $(find ./packages -mindepth 2 -maxdepth 2 -type f -name "GudPy-${{ env.gudPyVersion }}*") )
+ echo ${FILES[@]}
+ ./update-release -r disorderedmaterials/gudpy -t continuous -p -e -u -n "Continuous (${{ env.gudPyVersion }})" -b "Continuous release from \`main\` branch. Built $(date)." ${FILES[@]}
- name: Publish on Harbor (Release)
if: ${{ inputs.publish == 'true' && inputs.isRelease == 'true' }}
@@ -53,7 +57,7 @@ runs:
run: |
echo "Release tag will be: latest"
singularity remote login --username ${HARBOR_USER} --password ${HARBOR_SECRET} docker://harbor.stfc.ac.uk
- ${SINGULARITY_ROOT}/bin/singularity push packages/GudPy-${{ env.gudPyVersion }}.sif oras://harbor.stfc.ac.uk/isis_disordered_materials/gudpy:latest
+ ${SINGULARITY_ROOT}/bin/singularity push packages/GudPy-Linux/GudPy-${{ env.gudPyVersion }}.sif oras://harbor.stfc.ac.uk/isis_disordered_materials/gudpy:latest
- name: Publish on Harbor (Continuous)
if: ${{ inputs.publish == 'true' && inputs.isRelease == 'false' }}
@@ -61,4 +65,4 @@ runs:
run: |
echo "Release tag will be: continuous"
singularity remote login --username ${HARBOR_USER} --password ${HARBOR_SECRET} docker://harbor.stfc.ac.uk
- ${SINGULARITY_ROOT}/bin/singularity push packages/GudPy-${{ env.gudPyVersion }}.sif oras://harbor.stfc.ac.uk/isis_disordered_materials/gudpy:continuous
+ ${SINGULARITY_ROOT}/bin/singularity push packages/GudPy-Linux/GudPy-${{ env.gudPyVersion }}.sif oras://harbor.stfc.ac.uk/isis_disordered_materials/gudpy:continuous
diff --git a/.github/workflows/setup/action.yml b/.github/workflows/setup/action.yml
index 82704f3e..e8c0b11f 100644
--- a/.github/workflows/setup/action.yml
+++ b/.github/workflows/setup/action.yml
@@ -6,7 +6,7 @@ inputs:
default: 3.9
gudrunTag:
type: string
- default: 2023.1
+ default: 2024.1
modexTag:
type: string
default: 0.1.5
diff --git a/.github/workflows/test/action.yml b/.github/workflows/test/action.yml
index 2d4d51c0..8a880088 100644
--- a/.github/workflows/test/action.yml
+++ b/.github/workflows/test/action.yml
@@ -38,9 +38,9 @@ runs:
- name: Upload Test Results
if: always()
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
- name: testResults
+ name: testResults-${{ runner.os }}
path: results
- name: Publish Code Coverage
diff --git a/.github/workflows/testresults/action.yml b/.github/workflows/testresults/action.yml
index e0ed642d..77bb6a5f 100644
--- a/.github/workflows/testresults/action.yml
+++ b/.github/workflows/testresults/action.yml
@@ -5,9 +5,10 @@ runs:
steps:
- name: Download Test Results
- uses: actions/download-artifact@v3
+ uses: actions/download-artifact@v4
with:
- name: testResults
+ pattern: testResults-*
+ merge-multiple: true
path: results
- name: Publish Test Results
diff --git a/ci/singularity/ubuntu22.04.def b/ci/singularity/ubuntu22.04.def
index 67a13a0b..d58cdb54 100644
--- a/ci/singularity/ubuntu22.04.def
+++ b/ci/singularity/ubuntu22.04.def
@@ -8,7 +8,7 @@ From: ubuntu:22.04
%post
export DEBIAN_FRONTEND=noninteractive
apt-get update
- apt-get install python3.10 python3-distutils curl build-essential libx11-xcb-dev libglu1-mesa-dev libxkbcommon0 libglx0 libfontconfig libglib2.0-0 libdbus-1-3 libxcb-xinerama0 libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-render-util0 libxcb-randr0 libxcb-shape0 libgfortran5 libegl-dev libegl-mesa0 -y && rm -rf /var/lib/apt/lists/*
+ apt-get install python3.10 python3-distutils curl build-essential libx11-xcb-dev libglu1-mesa-dev libxkbcommon0 libglx0 libfontconfig libglib2.0-0 libdbus-1-3 libxcb-xinerama0 libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-render-util0 libxcb-randr0 libxcb-shape0 libxcb-cursor0 libgfortran5 libegl-dev libegl-mesa0 -y && rm -rf /var/lib/apt/lists/*
curl https://bootstrap.pypa.io/get-pip.py | python3
python3 -m pip install -r requirements.txt
rm requirements.txt
@@ -39,10 +39,10 @@ From: ubuntu:22.04
%runscript
# If the container is executed, this line will be run.
- python3.10 /opt/GudPy/gudpy
+ python3.10 /opt/GudPy/gudpy/gudpy_gui.py
%apphelp GudPy
GudPy GUI version.
%apprun GudPy
- python3 /opt/GudPy/gudpy
+ python3.10 /opt/GudPy/gudpy/gudpy_gui.py
diff --git a/gudpy/core/config.py b/gudpy/core/config.py
index 5a881bc9..7d6a9e06 100644
--- a/gudpy/core/config.py
+++ b/gudpy/core/config.py
@@ -12,7 +12,7 @@
USE_USER_DEFINED_COMPONENTS = False
NORMALISE_COMPOSITIONS = False
-__rootdir__ = os.path.dirname(os.path.abspath(sys.argv[0]))
+__rootdir__ = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
__root__ = (
os.path.join(sys._MEIPASS, "bin", "configs", "containers")
diff --git a/gudpy/core/container.py b/gudpy/core/container.py
index e401a42b..e56a8860 100644
--- a/gudpy/core/container.py
+++ b/gudpy/core/container.py
@@ -71,7 +71,7 @@ def __init__(self, config=None):
"""
self.name = ""
self.periodNumber = 1
- self.dataFiles = DataFiles([], "CONTAINER")
+ self.dataFiles = DataFiles([], "CONTAINER", True)
self.composition = Composition("CONTAINER")
self.geometry = Geometry.SameAsBeam
self.upstreamThickness = 0.1
@@ -97,6 +97,7 @@ def __init__(self, config=None):
self.grBroadening = 0.
self.powerForBroadening = 0.0
self.stepSize = 0.0
+ self.outputFolder = ""
self.yamlignore = {
"runAsSample",
@@ -113,6 +114,10 @@ def __init__(self, config=None):
if config:
self.parseFromConfig(config)
+ @property
+ def gudFile(self):
+ return None
+
def __str__(self):
"""
Returns the string representation of the Container object.
diff --git a/gudpy/core/data.py b/gudpy/core/data.py
new file mode 100644
index 00000000..637b2ebf
--- /dev/null
+++ b/gudpy/core/data.py
@@ -0,0 +1,82 @@
+import numpy
+
+
+class Point():
+ def __init__(self, x, y, err=0.0):
+ self.x = x
+ self.y = y
+ self.err = err
+
+
+class DataSet():
+ # mint01 / mdcs01 / mdor01 / mgor01 / dcs
+ def __init__(self, path, exists, lim=None):
+ if not exists:
+ self.dataSet = None
+ else:
+ self.dataSet = self.constructDataSet(path, lim)
+
+ def constructDataSet(self, path, lim=None):
+ dataSet = []
+ with open(path, "r", encoding="utf-8") as fp:
+ for dataLine in fp.readlines():
+
+                # Ignore blank and commented lines; a blank line would
+                # otherwise crash the tuple unpacking below.
+                if not dataLine.strip() or dataLine[0] == "#":
+                    continue
+
+ splitLine = [float(n) for n in dataLine.split()]
+ if len(splitLine) > 2:
+ x, y, err, *__ = splitLine
+ if lim and x > lim:
+ return dataSet
+ dataSet.append(Point(x, y, err))
+ else:
+ x, y = splitLine
+ if lim and x > lim:
+ return dataSet
+ dataSet.append(Point(x, y))
+
+ return dataSet
+
+
+class NpDataSet():
+ # mint01 / mdcs01 / mdor01 / mgor01 / dcs
+ def __init__(self, path, lim=None):
+ self.LIMIT = lim
+ self.x = []
+ self.y = []
+
+ with open(path, "r", encoding="utf-8") as fp:
+ for dataLine in fp.readlines():
+
+            # Ignore blank and commented lines; a blank line would
+            # otherwise crash the tuple unpacking below.
+            if not dataLine.strip() or dataLine[0] == "#":
+                continue
+
+ splitLine = [float(n) for n in dataLine.split()]
+ x, y, *__ = splitLine
+ if lim and x > lim:
+ break
+ self.x.append(x)
+ self.y.append(y)
+
+ if not self.LIMIT:
+ self.LIMIT = self.x[-1]
+
+ self.x = numpy.array(self.x)
+ self.y = numpy.array(self.y)
+
+ self.interpolate()
+
+ def interpolate(self):
+ xnew = numpy.round(numpy.arange(0, self.LIMIT, 0.015), 4)
+ ynew = numpy.interp(xnew, self.x, self.y)
+ self.x = xnew
+ self.y = ynew
+
+
+def meanSquaredError(d1: NpDataSet, d2: NpDataSet) -> float:
+ return sum((y1 - y2)**2
+ for y1, y2 in zip(d1.y, d2.y)
+ ) / len(d1.y)
diff --git a/gudpy/core/data_files.py b/gudpy/core/data_files.py
index f8a9b1ff..8062b843 100644
--- a/gudpy/core/data_files.py
+++ b/gudpy/core/data_files.py
@@ -1,4 +1,86 @@
+import os
+
from core import config
+from core import utils
+from core.gud_file import GudFile
+
+
+class DataFile:
+ def __init__(self, filename, isSampleDataFile: bool = False):
+ self.filename = filename
+ self.name = utils.replace_unwanted_chars(
+ os.path.splitext(filename)[0]
+ )
+ self.ext = os.path.splitext(filename)[1]
+ self.outputFolder = ""
+ self._outputs = {}
+ self.isSampleDataFile = isSampleDataFile
+
+ self.yamlignore = {
+ "str",
+ "yamlignore"
+ }
+
+ def addOutput(self, path):
+ ext = os.path.splitext(path)[1]
+ self._outputs[ext] = path
+
+ def outputs(self, ext):
+ return self._outputs[ext]
+
+ def __str__(self):
+ return self.filename
+
+ @property
+ def gudFile(self):
+ if not self.isSampleDataFile:
+ return None
+ gudPath = self._outputs.get(".gud", None)
+ if not gudPath:
+ return None
+ return GudFile(gudPath)
+
+ @property
+ def mintFile(self):
+ return self._outputs.get(".mint01", None)
+
+ @property
+ def mdcsFile(self):
+ mdcsFile = self._outputs.get(".mdcs01", None)
+ if mdcsFile:
+ return mdcsFile
+ # If units are d-spacing
+ mdcsdFile = self._outputs.get(".mdcsd01", None)
+ if mdcsdFile:
+ return mdcsdFile
+ # If units are energy
+ mdcseFile = self._outputs.get(".mdcse01", None)
+ if mdcseFile:
+ return mdcseFile
+ # If units are time-of-flight
+ mdcstFile = self._outputs.get(".mdcst01", None)
+ if mdcstFile:
+ return mdcstFile
+ # If units are wavelength
+ mdcswFile = self._outputs.get(".mdcsw01", None)
+ if mdcswFile:
+ return mdcswFile
+
+ @property
+ def msubwFile(self):
+ return self._outputs.get(".msubw01", None)
+
+ @property
+ def mcdsFile(self):
+ return self._outputs.get(".mcds01", None)
+
+ @property
+ def mdorFile(self):
+ return self._outputs.get(".mdor01", None)
+
+ @property
+ def mgorFile(self):
+ return self._outputs.get(".mgor01", None)
class DataFiles:
@@ -17,7 +99,7 @@ class DataFiles:
-------
"""
- def __init__(self, dataFiles, name):
+ def __init__(self, dataFiles, name, isSampleDataFile=False):
"""
Constructs all the necessary attributes for the DataFiles object.
@@ -28,7 +110,11 @@ def __init__(self, dataFiles, name):
name : str
Name of the parent of the data files, e.g. Sample Background
"""
- self.dataFiles = dataFiles
+
+        self._dataFiles: list[DataFile] = []
+ self.isSampleDataFile = isSampleDataFile
+ self.setFiles(dataFiles)
+
self.name = name
self.yamlignore = {
@@ -36,6 +122,16 @@ def __init__(self, dataFiles, name):
"yamlignore"
}
+ self.outputFolders = {}
+ self._outputs = {}
+
+ def setFiles(self, dataFilenames):
+ self._dataFiles.clear()
+ for dataFile in dataFilenames:
+ if not dataFile:
+ continue
+ self._dataFiles.append(DataFile(dataFile, self.isSampleDataFile))
+
def __str__(self):
"""
Returns the string representation of the DataFiles object.
@@ -50,11 +146,20 @@ def __str__(self):
String representation of DataFiles.
"""
self.str = [
- df + config.spc10 + self.name + " data files"
- for df in self.dataFiles
+ df.filename + config.spc10 + self.name + " data files"
+ for df in self._dataFiles
]
return """\n""".join(self.str)
+ @property
+ def dataFilenames(self):
+ dfNames = [df.filename for df in self._dataFiles]
+ return dfNames
+
+ @property
+ def dataFiles(self):
+ return self._dataFiles
+
def __len__(self):
"""
Returns the length of the dataFiles list member.
@@ -68,15 +173,19 @@ def __len__(self):
int
Number of data files,
"""
- return len(self.dataFiles)
+ return len(self._dataFiles)
def __getitem__(self, n):
- return self.dataFiles[n]
+ if not self._dataFiles:
+ return None
+ return self._dataFiles[n]
def __setitem__(self, n, item):
+ if not self._dataFiles:
+ return None
if n >= len(self):
- self.dataFiles.extend(n+1)
- self.dataFiles[n] = item
+            self._dataFiles.extend([None] * (n + 1 - len(self._dataFiles)))
+ self._dataFiles[n] = DataFile(item)
def __iter__(self):
- return iter(self.dataFiles)
+ return iter(self._dataFiles)
diff --git a/gudpy/core/file_library.py b/gudpy/core/file_library.py
index 77d7acda..041e6202 100644
--- a/gudpy/core/file_library.py
+++ b/gudpy/core/file_library.py
@@ -1,9 +1,9 @@
import os
from zipfile import ZipFile, ZIP_DEFLATED
-from pathlib import Path
from core import utils
from core.enums import CrossSectionSource
+from core.io.gudpy_io import GudPyIO
class GudPyFileLibrary:
@@ -64,8 +64,8 @@ def __init__(self, gudrunFile):
}
self.dataFiles = [
- *gudrunFile.normalisation.dataFiles.dataFiles,
- *gudrunFile.normalisation.dataFilesBg.dataFiles,
+ *gudrunFile.normalisation.dataFiles.dataFilenames,
+ *gudrunFile.normalisation.dataFilesBg.dataFilenames,
]
# If NXS files are being used
@@ -90,15 +90,15 @@ def __init__(self, gudrunFile):
# that file too.
for sampleBackground in gudrunFile.sampleBackgrounds:
- self.dataFiles.extend(sampleBackground.dataFiles.dataFiles)
+ self.dataFiles.extend(sampleBackground.dataFiles.dataFilenames)
for sample in sampleBackground.samples:
- self.dataFiles.extend(sample.dataFiles.dataFiles)
+ self.dataFiles.extend(sample.dataFiles.dataFilenames)
if sample.totalCrossSectionSource == CrossSectionSource.FILE:
self.files[sample.name] = sample.crossSectionFilename
for container in sample.containers:
- self.dataFiles.extend(container.dataFiles.dataFiles)
+ self.dataFiles.extend(container.dataFiles.dataFilenames)
if container.totalCrossSectionSource == (
CrossSectionSource.FILE
):
@@ -166,21 +166,16 @@ def checkFilesExist(self):
def exportMintData(
self,
samples,
+ exportTo="",
renameDataFiles=False,
- exportTo=None,
includeParams=False,
):
- if not exportTo:
- exportTo = os.path.join(
- self.gudrunFile.projectDir,
- Path(self.gudrunFile.path()).stem + ".zip",
- )
with ZipFile(exportTo, "w", ZIP_DEFLATED) as zipFile:
for sample in samples:
if len(sample.dataFiles.dataFiles):
path = os.path.join(
self.gudrunFile.projectDir,
- sample.dataFiles.dataFiles[0].replace(
+ sample.dataFiles.dataFilenames[0].replace(
self.gudrunFile.instrument.dataFileType, "mint01"
),
)
@@ -199,10 +194,10 @@ def exportMintData(
self.gudrunFile.projectDir,
safeSampleName + ".sample",
)
+ sample.sampleFile = path
+
if not os.path.exists(path):
- sample.write_out(
- self.gudrunFile.projectDir
- )
+ GudPyIO.writeObject(sample, path)
zipFile.write(path, arcname=os.path.basename(path))
return zipFile.filename
diff --git a/gudpy/core/gud_file.py b/gudpy/core/gud_file.py
index 8a430dfa..561876ac 100644
--- a/gudpy/core/gud_file.py
+++ b/gudpy/core/gud_file.py
@@ -71,13 +71,6 @@ class GudFile:
Contents of the .gud file.
output : str
Output for use in the GUI.
- Methods
- -------
- parse():
- Parses the GudFile from path, assigning values
- to each of the attributes.
- write_out(overwrite=False)
- Writes out the string representation of the GudFile to a file.
"""
def __init__(self, path):
@@ -99,7 +92,7 @@ def __init__(self, path):
raise ParserException(f"Attempted to parse {path}" +
"\nOnly .gud files can be parsed.")
- if not os.path.isfile(path):
+ if not os.path.exists(path):
raise ParserException(f"{path} is not a valid path.")
self.path = path
@@ -350,22 +343,3 @@ def __str__(self):
f'{self.suggestedTweakFactor}\n'
)
-
- def write_out(self, path):
- """
- Writes out the string representation of the GudFile.
- If 'overwrite' is True, then the initial file is overwritten.
- Otherwise, it is written to 'gudpy_{initial filename}.gud'.
-
- Parameters
- ----------
- overwrite : bool, optional
- Overwrite the initial file? (default is False).
-
- Returns
- -------
- None
- """
- f = open(path, "w", encoding="utf-8")
- f.write(str(self))
- f.close()
diff --git a/gudpy/core/gudpy.py b/gudpy/core/gudpy.py
index 1072d5ea..9b42936b 100644
--- a/gudpy/core/gudpy.py
+++ b/gudpy/core/gudpy.py
@@ -16,6 +16,8 @@
import core.output_file_handler as handlers
from core.file_library import GudPyFileLibrary
from core import data_files
+from core.io.gudpy_io import GudPyIO
+import core.optimise as opti
SUFFIX = ".exe" if os.name == "nt" else ""
@@ -25,24 +27,31 @@ def __init__(
self
):
self.originalGudrunFile: GudrunFile = None
- self.gudrunFile: GudrunFile = None
- self.purgeFile = None
+ self._gudrunFile: GudrunFile = None
+ self.io = GudPyIO()
self.purge: Purge = None
self.gudrun: Gudrun = None
- self.runModes: RunModes = None
+ self.runModes: RunModes = RunModes()
self.gudrunIterator: GudrunIterator = None
self.batchProcessor: BatchProcessing = None
+ self.optimiser = None
- self.gudrunOutput = None
+ @property
+ def gudrunFile(self):
+ return self._gudrunFile
- self.projectDir = ""
- self.autosaveLocation = ""
+ @gudrunFile.setter
+ def gudrunFile(self, gudrunFile: GudrunFile):
+ self._gudrunFile = gudrunFile
- def loadFromFile(
+ @property
+ def projectDir(self):
+ return self.io.projectDir
+
+ def loadFromGudrunFile(
self,
loadFile: str,
- format: enums.Format,
config: bool = False
):
"""Loads GudPy from an input file
@@ -51,8 +60,6 @@ def loadFromFile(
----------
loadFile : str
Path of input file to load from
- format : enums.Format
- Format of the input file (YAML or TXT)
config : bool, optional
If loading from preset config, by default False
@@ -62,19 +69,30 @@ def loadFromFile(
Raised if input file does not exist
"""
if not os.path.exists(loadFile):
- raise FileNotFoundError("Input file does not exist.")
+ raise FileNotFoundError(f"Input file '{loadFile}' does not exist.")
- self.gudrunFile = GudrunFile(
- loadFile=loadFile,
- format=format,
- config=config
- )
+ self.gudrunFile = self.io.importGudrunFile(loadFile, config)
+
+ def loadFromYamlFile(
+ self,
+ loadFile: str,
+ ):
+ """Loads GudPy from a YAML input file
+
+ Parameters
+ ----------
+ loadFile : str
+ Path of input file to load from
- self.originalGudrunFile = copy.deepcopy(self.gudrunFile)
- self.originalGudrunFile.filename = "original"
+ Raises
+ ------
+ FileNotFoundError
+ Raised if input file does not exist
+ """
+ if not os.path.exists(loadFile):
+ raise FileNotFoundError("Input file does not exist.")
- self.projectDir == ""
- self.autosaveLocation = ""
+ self.gudrunFile = self.io.importFromYamlFile(loadFile)
def loadFromProject(self, projectDir: str):
"""Loads GudPy from a project directory
@@ -90,29 +108,10 @@ def loadFromProject(self, projectDir: str):
Raised if there is no YAML input file in the
project directory
"""
- loadFile = ""
+ if not os.path.exists(projectDir):
+ raise FileNotFoundError("Directory does not exist.")
- if os.path.exists(os.path.join(
- projectDir,
- f"{os.path.basename(projectDir)}.yaml"
- )):
- # If default file exists
- loadFile = os.path.join(
- projectDir,
- f"{os.path.basename(projectDir)}.yaml"
- )
- else:
- # Try to find yaml files
- for f in os.listdir(projectDir):
- if os.path.splitext(f)[1] == ".yaml":
- # If file is yaml
- loadFile = os.path.join(projectDir, f)
- if not loadFile:
- raise FileNotFoundError(
- "Could not find GudPy input file within the project")
-
- self.loadFromFile(loadFile=loadFile, format=enums.Format.YAML)
- self.setSaveLocation(projectDir)
+ self.gudrunFile = self.io.importProject(projectDir)
def checkSaveLocation(self):
"""Checks if user has set the save location
@@ -122,7 +121,7 @@ def checkSaveLocation(self):
bool
If the save location is set or not
"""
- return bool(self.projectDir)
+ return self.io.checkSaveLocation()
def setSaveLocation(self, projectDir: str):
"""Sets the save location
@@ -132,28 +131,12 @@ def setSaveLocation(self, projectDir: str):
projectDir : str
Path to the desired save location of project
"""
- self.projectDir = projectDir
- self.gudrunFile.filename = f"{os.path.basename(projectDir)}.yaml"
- self.gudrunFile.projectDir = projectDir
- self.originalGudrunFile.projectDir = projectDir
- self.autosaveLocation = (
- f"{os.path.basename(projectDir)}.autosave"
- )
+ self.io.setSaveLocation(projectDir)
- def save(self, path: str = "", format: enums.Format = enums.Format.YAML):
+ def save(self):
"""Saves current GudPy input file
-
- Parameters
- ----------
- path : str, optional
- Path to desired save location, by default ""
- format : enums.Format, optional
- Desired format of save file, by default enums.Format.YAML
"""
- if not path:
- path = self.gudrunFile.path()
- self.originalGudrunFile.save(path=path)
- self.gudrunFile.save(path=path, format=format)
+ self.io.save(self.gudrunFile)
def saveAs(self, targetDir: str):
"""Save GudPy project to a new location, set current
@@ -169,26 +152,7 @@ def saveAs(self, targetDir: str):
IsADirectoryError
Raised if the requested save location already exists
"""
- if os.path.exists(targetDir):
- raise IsADirectoryError("Cannot be an existing directory")
-
- oldDir = self.projectDir
- self.setSaveLocation(targetDir)
- os.makedirs(targetDir)
-
- if os.path.exists(os.path.join(oldDir, "Purge")):
- shutil.copytree(
- os.path.join(oldDir, "Purge"),
- os.path.join(targetDir, "Purge")
- )
- if os.path.exists(os.path.join(oldDir, "Gudrun")):
- shutil.copytree(
- os.path.join(oldDir, "Gudrun"),
- os.path.join(targetDir, "Gudrun")
- )
- self.gudrunFile.filename = os.path.basename(targetDir)
- self.gudrunFile.save(path=self.gudrunFile.path(),
- format=enums.Format.YAML)
+ self.io.exportProject(self.gudrunFile, targetDir)
self.loadFromProject(projectDir=targetDir)
def checkFilesExist(self):
@@ -231,9 +195,9 @@ def runPurge(self):
"""
self.prepareRun()
self.purge = Purge()
- self.purgeFile = PurgeFile(self.gudrunFile)
- exitcode = self.purge.purge(self.purgeFile)
- self.gudrunFile.save()
+ purgeFile = PurgeFile(self.gudrunFile)
+ exitcode = self.purge.purge(purgeFile)
+ self.save()
if exitcode:
raise exc.PurgeException(
"Purge failed to run with the following output:\n"
@@ -257,6 +221,7 @@ def runGudrun(self, gudrunFile: GudrunFile = None):
if not gudrunFile:
gudrunFile = self.gudrunFile
+ self.save()
self.gudrun = Gudrun()
exitcode = self.gudrun.gudrun(gudrunFile=gudrunFile)
if exitcode:
@@ -264,7 +229,7 @@ def runGudrun(self, gudrunFile: GudrunFile = None):
"Gudrun failed to run with the following output:\n"
f"{self.gudrun.error}"
)
- self.gudrunOutput = self.gudrun.gudrunOutput
+ self.save()
def iterateGudrun(self, iterator: iterators.Iterator):
"""Runs gudrun_dcs iteratively while tweaking parameters
@@ -290,7 +255,6 @@ def iterateGudrun(self, iterator: iterators.Iterator):
f"{error}"
)
self.gudrunFile = self.gudrunIterator.gudrunFile
- self.gudrunOutput = self.gudrunIterator.gudrunOutput
def iterateComposition(self, iterator: iterators.Composition):
"""Runs gudrun_dcs iteratively while tweaking the composition
@@ -328,6 +292,33 @@ def runFilesIndividually(self):
gudrunFile = self.runModes.partition(self.gudrunFile)
self.runGudrun(gudrunFile=gudrunFile)
+ def optimiseExponentialSubtractions(
+ self,
+ samples: list,
+ limit: float = 0.5,
+ nIters: int = 15,
+ ):
+ gudrunFileCopy = copy.deepcopy(self.gudrunFile)
+ optimiser = opti.BayesianOptimisation(
+ gudrunFile=gudrunFileCopy,
+ samples=samples,
+ purge=self.purge,
+ limit=limit,
+ nIters=nIters,
+ verbose=False,
+ )
+ optimiser.optimise()
+
+ self.runGudrun()
+
+ def optimiseInelasticity(self):
+ optimiser = opti.InelasticityOptimisation(
+ gudrunFile=self.gudrunFile,
+ purge=self.purge
+ )
+ results = optimiser.optimise(purge=self.purge)
+ return results
+
def batchProcessing(
self,
iterator: iterators.Iterator = None,
@@ -437,7 +428,7 @@ def organiseOutput(self, procDir: str, projectDir: str):
def purge(self, purgeFile: PurgeFile):
self.checkBinary()
with tempfile.TemporaryDirectory() as tmp:
- purgeFile.write_out(os.path.join(
+ GudPyIO.writeObject(purgeFile, os.path.join(
tmp,
f"{self.PROCESS}.dat"
))
@@ -460,7 +451,7 @@ def purge(self, purgeFile: PurgeFile):
return self.exitcode
self.purgeLocation = self.organiseOutput(
- tmp, purgeFile.gudrunFile.projectDir)
+ tmp, GudPyIO.projectDir)
self.exitcode = 0
return self.exitcode
@@ -477,23 +468,46 @@ def organiseOutput(
exclude: list[str] = [],
head: str = "",
overwrite: bool = True
- ) -> handlers.GudrunOutput:
+ ) -> None:
outputHandler = handlers.GudrunOutputHandler(
gudrunFile=gudrunFile, head=head, overwrite=overwrite
)
- gudrunOutput = outputHandler.organiseOutput(exclude=exclude)
- return gudrunOutput
+ outputHandler.organiseOutput(exclude=exclude)
+
+ def obtainDependencies(
+ self,
+ gudrunFile: GudrunFile
+ ) -> list[str]:
+ if not gudrunFile.instrument.subWavelengthBinnedData:
+ return []
+
+ if not GudPyIO.projectDir:
+ cli.echoWarning(
+ "Cannot source run dependencies - no previous GudPy"
+ " project referenced. Running without wavelength binning"
+ " data."
+ )
+
+ dependencies = []
+
+ for sample in gudrunFile.runSamples():
+ if sample.crossSectionFilename:
+ dependencies.append(sample.crossSectionFilename)
+ if sample.selfScatteringFilePath:
+ dependencies.append(sample.selfScatteringFilePath)
+
+ return dependencies
def gudrun(
self,
gudrunFile: GudrunFile,
purge: Purge = None,
iterator: iterators.Iterator = None,
- save: bool = True
+ supress=True
) -> int:
self.checkBinary()
- if not purge:
+ if not purge and not supress:
cli.echoWarning("Gudrun running without purge")
with tempfile.TemporaryDirectory() as tmp:
purgeFiles = []
@@ -510,7 +524,17 @@ def gudrun(
tmp,
gudrunFile.OUTPATH
)
- gudrunFile.write_out(path)
+
+ gudrunFile.clearOutputs()
+
+ deps = self.obtainDependencies(gudrunFile)
+ for dep in deps:
+ shutil.copyfile(
+ dep, os.path.join(
+ tmp, os.path.basename(dep)
+ ))
+
+ GudPyIO.writeGudrunFile(gudrunFile, tmp)
with subprocess.Popen(
[self.BINARY_PATH, path], cwd=tmp,
stdout=subprocess.PIPE,
@@ -518,29 +542,20 @@ def gudrun(
) as gudrun:
for line in gudrun.stdout:
line = "\n".join(line.decode("utf8").split("\n"))
+ self._outputChanged(line)
if self.checkError(line):
return self.exitcode
- self._outputChanged(line)
if gudrun.stderr:
self.error = gudrun.stderr.decode("utf8")
self.exitcode = 1
return self.exitcode
if iterator:
- self.gudrunOutput = iterator.organiseOutput(
+ iterator.organiseOutput(
gudrunFile, exclude=purgeFiles)
else:
- self.gudrunOutput = self.organiseOutput(
+ self.organiseOutput(
gudrunFile, exclude=purgeFiles)
- if save:
- gudrunFile.save(
- path=os.path.join(
- gudrunFile.projectDir,
- f"{gudrunFile.filename}"
- ),
- format=enums.Format.YAML
- )
- gudrunFile.setGudrunDir(self.gudrunOutput.path)
self.exitcode = 0
return self.exitcode
@@ -557,8 +572,8 @@ def __init__(
self.gudrunFile = copy.deepcopy(gudrunFile)
self.iterator = iterator
self.gudrunObjects = []
- self.exitcode = (1, "Operation incomplete")
- self.gudrunOutput = None
+ self.exitcode = 0
+ self.error = ""
self.result = {}
for _ in range(
@@ -574,45 +589,40 @@ def singleIteration(
gudrunFile: GudrunFile,
gudrun: Gudrun,
purge: Purge,
- prevOutput: handlers.GudrunOutput,
- save=True
) -> typ.Tuple[int, str]: # (exitcode, error)
- modGfFile = self.iterator.performIteration(gudrunFile, prevOutput)
- exitcode = gudrun.gudrun(modGfFile, purge, self.iterator, save=save)
- if exitcode:
- return exitcode
- self.gudrunOutput = gudrun.gudrunOutput
- return 0
-
- def iterate(self, purge, save=True) -> typ.Tuple[int, str]:
- prevOutput = None
+ try:
+ modGfFile = self.iterator.performIteration(gudrunFile)
+ exitcode = gudrun.gudrun(modGfFile, purge, self.iterator)
+ if exitcode:
+ return (exitcode, gudrun.error)
+ return (0, "")
+ except RuntimeError as e:
+ return (1, e.args)
+ def iterate(self, purge) -> typ.Tuple[int, str]:
# If the iterator requires a prelimenary run
if self.iterator.requireDefault:
exitcode = self.gudrunObjects[0].gudrun(
- self.gudrunFile, purge, self.iterator, save)
+ self.gudrunFile, purge, self.iterator)
if exitcode: # An exit code != 0 indicates failure
self.exitcode = (exitcode, self.gudrunObjects[0].error)
return self.exitcode
- prevOutput = self.gudrunObjects[0].gudrunOutput
# Iterate through gudrun objects
for gudrun in self.gudrunObjects:
if gudrun.output:
# If object has already been run, skip
continue
- exitcode = self.singleIteration(
- self.gudrunFile, gudrun, purge, prevOutput, save)
+ exitcode, error = self.singleIteration(
+ self.gudrunFile, gudrun, purge)
if exitcode: # An exit code != 0 indicates failure
- self.exitcode = (exitcode, gudrun.error)
+ self.exitcode = exitcode
+ self.error = error
return self.exitcode
- prevOutput = gudrun.gudrunOutput
-
self.result = self.iterator.result
- self.exitcode = (0, "")
- return self.exitcode
+ return (self.exitcode, self.error)
class CompositionIterator:
@@ -627,6 +637,8 @@ def __init__(
self.result = {}
self.compositionMap = None
self.currentIteration = 0
+ self.exitcode = 0
+ self.error = ""
def iterate(self, purge) -> typ.Tuple[int, str]:
for sampleArg in self.iterator.sampleArgs:
@@ -640,8 +652,11 @@ def iterate(self, purge) -> typ.Tuple[int, str]:
purge=purge,
iterator=self.iterator)
if exitcode: # An exit code != 0 indicates failure
- self.exitcode = (exitcode, gudrunNC.error)
- return self.exitcode
+ self.exitcode = exitcode
+ self.error = gudrunNC.error
+ return (self.exitcode, self.error)
+
+ newCenterGudFile = self.gudrunFile.runSamples()[0].gudFile
# Run the current center
currentCenter = self.iterator.iterateCurrentCenter(
@@ -651,16 +666,17 @@ def iterate(self, purge) -> typ.Tuple[int, str]:
purge=purge,
iterator=self.iterator)
if exitcode: # An exit code != 0 indicates failure
- self.exitcode = (exitcode, gudrunCC.error)
- return self.exitcode
+ self.exitcode = exitcode
+ self.error = gudrunCC.error
+ return (self.exitcode, self.error)
+
+ currentCenterGudFile = self.gudrunFile.runSamples()[0].gudFile
# Compare the cost of the two centers
self.iterator.compareCost(
sampleArg=sampleArg,
- currentCenterGudFile=gudrunCC.gudrunOutput.gudFile(
- name=sampleArg["background"].samples[0].name),
- newCenterGudFile=gudrunNC.gudrunOutput.gudFile(
- name=sampleArg["background"].samples[0].name)
+ currentCenterGudFile=currentCenterGudFile,
+ newCenterGudFile=newCenterGudFile
)
# Check if result has been achieved
@@ -681,10 +697,10 @@ def iterate(self, purge) -> typ.Tuple[int, str]:
" use the Component(s) selected for iteration."
)
if not self.result:
- self.exitcode = (1, error)
- else:
- self.exitcode = (0, "")
- return self.exitcode
+ self.exitcode = 1
+ self.error = error
+
+ return (self.exitcode, self.error)
class RunModes:
@@ -697,7 +713,7 @@ def convertContainersToSample(self, gudrunFile: GudrunFile):
containersAsSamples.append(
container.convertToSample()
)
- newGudrunFile.sampleBackground.samples = containersAsSamples
+ sampleBackground.samples = containersAsSamples
return newGudrunFile
def partition(self, gudrunFile):
@@ -746,7 +762,8 @@ def __init___(
):
self.iterator = iterator
self.iterationMode = None
- self.exitcode = (1, "Operation incomplete")
+ self.exitcode = 0
+        self.error = ""
self.BATCH_SIZE = batchSize
self.STEP_SIZE = stepSize
self.OFFSET = offset
@@ -876,21 +893,22 @@ def bactchProcess(
batchSize: int,
iterator: iterators.Iterator = None,
):
- self.batchedGudrunFile.projectDir = (os.path.join(
- self.batchedGudrunFile.projectDir,
+ self.batchedGudrunFile.outputFolder = (os.path.join(
+ GudPyIO.projectDir,
f"BATCH_PROCESSING_BATCH_SIZE{batchSize}"
))
exitcode = gudrun.gudrun(GudrunFile, purge, iterator)
self.writeDiagnosticsFile(
os.path.join(
- self.batchedGudrunFile.path(),
+ self.batchedGudrunFile.outputFolder,
"batch_processing_diagnostics.txt",
),
self.batchedGudrunFile,
self.iterationMode
)
- self.exitcode = (exitcode, gudrun.error)
- return self.exitcode
+ self.exitcode = exitcode
+ self.error = gudrun.error
+ return (self.exitcode, self.error)
def iterate(
self,
@@ -898,8 +916,6 @@ def iterate(
batchedFile: GudrunFile,
outputFolder: str,
) -> int:
- prevOutput = None
-
batchedFile.projectDir = os.path.join(
batchedFile.projectDir,
f"BATCH_PROCESSING_BATCH_SIZE{self.BATCH_SIZE}",
@@ -914,18 +930,16 @@ def iterate(
if exitcode: # An exit code != 0 indicates failure
self.exitcode = (exitcode, error)
return self.exitcode
- prevOutput = gudrunIterator.defaultRun.gudrunOutput
# Iterate through gudrun objects
for i, gudrun in enumerate(gudrunIterator.gudrunObjects):
if self.canConverge(batchedFile, self.RTOL):
# Keep only the processed objects in the list
gudrunIterator.gudrunObjects = gudrunIterator.gudrunObjects[:i]
- self.exitcode = (0, )
- return self.exitcode
+ return (self.exitcode, self.error)
exitcode = gudrunIterator.singleIteration(
- batchedFile, gudrun, prevOutput)
+ batchedFile, gudrun)
self.writeDiagnosticsFile(
os.path.join(batchedFile.path(),
@@ -933,13 +947,11 @@ def iterate(
)
if exitcode:
- self.exitcode = (exitcode, gudrun.error)
- return self.exitcode
+ self.exitcode = exitcode
+ self.error = gudrun.error
+ return (self.exitcode, self.error)
- prevOutput = gudrun.gudrunOutput
-
- self.exitcode = (0, )
- return self.exitcode
+ return (self.exitcode, self.error)
def process(
self,
@@ -971,7 +983,7 @@ def process(
self.batchedGudrunFile,
iterator
))
- exitcode, error = self.iterate(
+ self.exitcode, self.error = self.iterate(
gudrunIterator=self.gudrunIterators["REST"],
gudrunFile=self.batchedGudrunFile,
outputFolder="REST"
diff --git a/gudpy/core/gudpy_yaml.py b/gudpy/core/gudpy_yaml.py
deleted file mode 100644
index 4682db6c..00000000
--- a/gudpy/core/gudpy_yaml.py
+++ /dev/null
@@ -1,253 +0,0 @@
-from abc import abstractmethod
-from enum import Enum
-from ruamel.yaml import YAML as yaml
-from ruamel.yaml import YAMLError
-import os
-
-from core.composition import (
- Component, Components, Composition, WeightedComponent
-)
-from core.data_files import DataFiles
-from core.element import Element
-from core.exception import YAMLException
-from core.gui_config import GUIConfig
-from core import utils
-
-from core.instrument import Instrument
-from core.beam import Beam
-from core.normalisation import Normalisation
-from core.sample_background import SampleBackground
-from core.sample import Sample
-from core.container import Container
-from core import config
-
-
-class YAML:
-
- def __init__(self):
- self.yaml = self.getYamlModule()
-
- def getYamlModule(self):
- yaml_ = yaml()
- yaml_.preserve_quotes = True
- yaml_.default_flow_style = None
- yaml_.encoding = 'utf-8'
- return yaml_
-
- def parseYaml(self, path):
- self.path = path
- try:
- parsedYAML = self.constructClasses(self.yamlToDict(path))
- except YAMLError as e:
- # Exception caused by yaml parsing library
- raise YAMLException(f"Invalid YAML file: {str(e)}")
- except YAMLException as e:
- # Exception caused by invalid arguments
- raise YAMLException(e)
- else:
- return parsedYAML
-
- def yamlToDict(self, path):
- # Read the input stream into our attribute.
- with open(path, encoding=self.yaml.encoding) as fp:
- return self.yaml.load(fp)
-
- def constructClasses(self, yamldict):
- instrument = Instrument()
- if "Instrument" in yamldict:
- self.maskYAMLDicttoClass(instrument, yamldict["Instrument"])
- instrument.GudrunInputFileDir = os.path.dirname(
- os.path.abspath(
- self.path
- )
- )
-
- beam = Beam()
- if "Beam" in yamldict:
- self.maskYAMLDicttoClass(beam, yamldict["Beam"])
-
- components = Components()
- if "Components" in yamldict:
- self.maskYAMLSeqtoClass(components, yamldict["Components"])
-
- normalisation = Normalisation()
- if "Normalisation" in yamldict:
- self.maskYAMLDicttoClass(normalisation, yamldict["Normalisation"])
-
- sampleBackgrounds = []
- if "SampleBackgrounds" in yamldict:
- for sbyaml in yamldict["SampleBackgrounds"]:
- sampleBackground = SampleBackground()
- self.maskYAMLDicttoClass(sampleBackground, sbyaml)
- sampleBackgrounds.append(sampleBackground)
-
- GUI = GUIConfig()
- if "GUI" in yamldict:
- self.maskYAMLDicttoClass(GUI, yamldict["GUI"])
-
- return (
- instrument, beam, components,
- normalisation, sampleBackgrounds, GUI
- )
-
- @abstractmethod
- def maskYAMLDicttoClass(self, cls, yamldict):
- for k, v in yamldict.items():
- if not hasattr(cls, k):
- # If attribute is not valid
- raise YAMLException(
- f"Invalid attribute '{k}' given to '{type(cls).__name__}'")
-
- if isinstance(cls.__dict__[k], Enum):
- setattr(cls, k, type(cls.__dict__[k])[v])
-
- elif isinstance(cls.__dict__[k], DataFiles):
- setattr(
- cls, k,
- DataFiles(
- [v_ for v_ in v["dataFiles"]], v["name"])
- )
-
- elif (
- isinstance(
- cls,
- (Component, Composition)
- )
- and k == "elements"
- ):
- elements = []
- for idx, element in enumerate(v):
- # Ensuring correct arguements are provided
- if (
- "atomicSymbol" not in element
- or "massNo" not in element
- or "abundance" not in element
- ):
- raise YAMLException(
- "Insufficient arguments given to element"
- + f" {idx + 1}. Expects 'atomicSymbol', 'massNo'"
- + " and 'abundance'"
- )
-
- # Setting element properties
- try:
- element_ = Element(
- **{
- "atomicSymbol": element["atomicSymbol"],
- "massNo": float(element["massNo"]),
- "abundance": float(element["abundance"])
- }
- )
- elements.append(element_)
- except ValueError:
- raise YAMLException(
- f"Invalid number given to element {idx + 1}"
- + f" in '{type(cls).__name__}")
- setattr(cls, k, elements)
-
- elif isinstance(cls, Composition) and k == "weightedComponents":
- weightedComponents = []
- for weightedComponent in v:
- if (
- "component" not in weightedComponent
- or "ratio" not in weightedComponent
- ):
- raise YAMLException(
- "Weighted Component expects 'component' and"
- + " 'ratio' to be provided")
- component = Component()
- self.maskYAMLDicttoClass(
- component, weightedComponent["component"]
- )
- ratio = weightedComponent["ratio"]
- try:
- weightedComponents.append(
- WeightedComponent(
- component, float(ratio))
- )
- except ValueError:
- raise YAMLException(
- "Invalid ratio given to Weighted Component")
- setattr(cls, k, weightedComponents)
-
- elif (
- isinstance(
- cls,
- (Normalisation, Sample, Container)
- )
- and k == "composition"
- ):
- self.maskYAMLDicttoClass(cls.__dict__[k], v)
-
- elif isinstance(cls, SampleBackground) and k == "samples":
- for sampleyaml in yamldict[k]:
- sample = Sample()
- self.maskYAMLDicttoClass(sample, sampleyaml)
- sample.name = utils.replace_unwanted_chars(sample.name)
- cls.samples.append(sample)
-
- elif isinstance(cls, Sample) and k == "containers":
- for contyaml in yamldict[k]:
- container = Container()
- self.maskYAMLDicttoClass(container, contyaml)
- cls.containers.append(container)
-
- else:
- setattr(cls, k, type(cls.__dict__[k])(self.toBuiltin(v)))
-
- def maskYAMLSeqtoClass(self, cls, yamlseq):
- if isinstance(cls, Components):
- components = []
- for component in yamlseq:
- component_ = Component()
- self.maskYAMLDicttoClass(component_, component)
- components.append(component_)
- setattr(cls, "components", components)
-
- def writeYAML(self, base, path):
- with open(path, "wb") as fp:
- outyaml = {
- "Instrument": base.instrument,
- "Beam": base.beam,
- "Components": base.components.components,
- "Normalisation": base.normalisation,
- "SampleBackgrounds": base.sampleBackgrounds,
- "GUI": config.GUI
- }
- self.yaml.dump(
- {k: self.toYaml(v) for k, v in outyaml.items()},
- fp
- )
-
- @abstractmethod
- def toYaml(self, var):
- if var.__class__.__module__ == "ruamel.yaml.scalarfloat":
- return float(var)
- if var.__class__.__module__ == "builtins":
- if isinstance(var, (list, tuple)):
- return type(var)([self.toYaml(v) for v in var])
- else:
- return var
- elif isinstance(var, Enum):
- return type(var)(var.value).name
- elif isinstance(var, (
- Instrument, Beam, Components, Normalisation,
- SampleBackground, Sample, Container, WeightedComponent,
- Component, Composition, Element, DataFiles, GUIConfig
- )):
- return {
- k: self.toYaml(v)
- for k, v in var.__dict__.items()
- if k not in var.yamlignore
- }
-
- @abstractmethod
- def toBuiltin(self, yamlvar):
- if isinstance(yamlvar, (list, tuple)):
- return [self.toBuiltin(v) for v in yamlvar]
- elif yamlvar.__class__.__module__ == "builtins":
- return yamlvar
- elif yamlvar.__class__.__module__ == "ruamel.yaml.scalarfloat":
- return float(yamlvar)
- elif yamlvar.__class__.__module__ == "ruamel.yaml.scalarstring":
- return str(yamlvar)
diff --git a/gudpy/core/gudrun_file.py b/gudpy/core/gudrun_file.py
index 56b75327..75ff635c 100644
--- a/gudpy/core/gudrun_file.py
+++ b/gudpy/core/gudrun_file.py
@@ -1,43 +1,20 @@
import os
import time
-import re
from copy import deepcopy
-from core.utils import (
- extract_nums_from_string,
- firstword, boolifyNum,
- extract_ints_from_string,
- extract_floats_from_string,
- firstNFloats,
- firstNInts,
- nthfloat,
- nthint,
- uniquifyName
-)
from core.instrument import Instrument
from core.beam import Beam
from core.normalisation import Normalisation
-from core.sample import Sample
from core.sample_background import SampleBackground
-from core.container import Container
-from core.composition import Component, Components, Composition
-from core.element import Element
-from core.data_files import DataFiles
-from core.enums import (
- CrossSectionSource, Format, Instruments, FTModes, UnitsOfDensity,
- MergeWeights, Scales, NormalisationType, OutputUnits,
- Geometry
-)
-from core import utils
+from core.composition import Components
from core import config as cfg
-from core.gudpy_yaml import YAML
-from core.exception import ParserException, YAMLException
from core.gud_file import GudFile
SUFFIX = ".exe" if os.name == "nt" else ""
class GudrunFile:
+ OUTPATH = "gudpy.txt"
"""
Class to represent a GudFile (files with .gud extension).
.gud files are outputted by gudrun_dcs, via merge_routines
@@ -47,10 +24,6 @@ class GudrunFile:
Attributes
----------
- path : str
- Path to the file.
- OUTPATH : str
- Path to write to, when not overwriting the initial file.
instrument : Instrument
Instrument object extracted from the input file.
beam : Beam
@@ -59,11 +32,6 @@ class GudrunFile:
Normalisation object extracted from the input file.
sampleBackgrounds : SampleBackground[]
List of SampleBackgrounds extracted from the input file.
- purged : bool
- Have the detectors been purged?
- stream : str[]
- List of strings, where each item represents a line
- in the input stream.
Methods
-------
getNextToken():
@@ -109,21 +77,15 @@ class GudrunFile:
sampleBackgroundHelper():
Parses the SampleBackground, its Samples and their Containers.
Returns the SampleBackground object.
- parse():
- Parse the GudrunFile from its path.
- Assign objects from the file to the attributes of the class.
- write_out(overwrite=False)
- Writes out the string representation of the GudrunFile to a file.
- purge():
- Create a PurgeFile from the GudrunFile, and run purge_det on it.
"""
def __init__(
self,
- projectDir=None,
- loadFile=None,
- format=Format.YAML,
- config=False
+ instrument=Instrument(),
+ beam=Beam(),
+ normalisation=Normalisation(),
+ sampleBackgrounds: list[SampleBackground] = [],
+ components=Components()
):
"""
Constructs all the necessary attributes for the GudrunFile object.
@@ -141,37 +103,13 @@ def __init__(
config : bool
If a new input file should be constructed from a config
"""
-
- self.yaml = YAML()
- self.format = format
-
- # Construct the outpath of generated input file
- self.OUTPATH = "gudpy.txt"
- self.gudrunOutput = None
- self.projectDir = projectDir
- self.loadFile = loadFile
- self.filename = None
- self.stream = None
-
- self.instrument = Instrument()
- self.normalisation = Normalisation()
- self.beam = Beam()
- self.sampleBackgrounds = []
- self.components = Components()
-
- if not projectDir and not loadFile:
- raise RuntimeError(
- "GudrunFile needs to be initialised with either"
- " a project directory or load file specified"
- )
-
- if loadFile:
- self.setGudrunDir(os.path.dirname(loadFile))
-
- if not config:
- self.setGudrunDir(os.path.dirname(loadFile))
-
- self.parse(loadFile, config=config)
+ self.instrument = instrument
+ self.normalisation = normalisation
+ self.beam = beam
+        self.sampleBackgrounds = list(sampleBackgrounds)
+ self.components = components
+ self.purged = False
+ self.outputFolder = ""
def __deepcopy__(self, memo):
result = self.__class__.__new__(self.__class__)
@@ -182,1231 +120,65 @@ def __deepcopy__(self, memo):
setattr(result, k, deepcopy(v, memo))
return result
- def path(self):
- if not self.projectDir:
- return None
- else:
- return os.path.join(self.projectDir, self.filename)
-
- def checkNormDataFiles(self):
- return (len(self.normalisation.dataFiles)
- and len(self.normalisation.dataFilesBg))
-
- def getNextToken(self):
- """
- Pops the 'next token' from the stream and returns it.
- Essentially removes the first line in the stream and returns it.
+ def clearOutputs(self):
+ self.outputFolder = ""
+ for sbg in self.sampleBackgrounds:
+ sbg.outputFolder = ""
+ for sample in self.runSamples():
+ for df in sample.dataFiles:
+ df._outputs = {}
+ df.outputFolder = ""
- Parameters
- ----------
- None
- Returns
- -------
- str | None
- """
- return self.stream.pop(0) if self.stream else None
-
- def peekNextToken(self):
- """
- Returns the next token in the input stream, without removing it.
-
- Parameters
- ----------
- None
- Returns
- -------
- str | None
- """
- return self.stream[0] if self.stream else None
-
- def consumeTokens(self, n):
- """
- Consume n tokens from the input stream.
-
- Parameters
- ----------
- None
- Returns
- -------
- None
- """
- for _ in range(n):
- self.getNextToken()
-
- def consumeUpToDelim(self, delim):
- """
- Consume tokens iteratively, until a delimiter is reached.
-
- Parameters
- ----------
- None
- Returns
- -------
- None
- """
- line = self.getNextToken()
- while line[0] != delim:
- line = self.getNextToken()
-
- def consumeWhitespace(self):
- """
- Consume tokens iteratively, while they are whitespace.
-
- Parameters
- ----------
- None
- Returns
- -------
- None
- """
- line = self.peekNextToken()
- if line and line.isspace():
- self.getNextToken()
- line = self.peekNextToken()
-
- def parseInstrument(self):
- """
- Intialises an Instrument object and assigns it to the
- instrument attribute.
- Parses the attributes of the Instrument from the input stream.
- Raises a ParserException if any mandatory attributes are missing.
-
-
- Parameters
- ----------
- None
- Returns
- -------
- None
- """
- try:
- self.consumeWhitespace()
-
- # For string attributes,
- # we simply extract the firstword in the line.
- self.instrument.name = Instruments[firstword(self.getNextToken())]
- self.consumeTokens(1)
- self.instrument.dataFileDir = os.path.abspath(
- firstword(self.getNextToken())) + os.path.sep
- self.instrument.dataFileType = firstword(self.getNextToken())
- self.instrument.detectorCalibrationFileName = (
- firstword(self.getNextToken())
- )
-
- # For single integer attributes,
- # we extract the zeroth int from the line.
- self.instrument.columnNoPhiVals = nthint(self.getNextToken(), 0)
- self.instrument.groupFileName = firstword(self.getNextToken())
- self.instrument.deadtimeConstantsFileName = (
- firstword(self.getNextToken())
- )
-
- # For N integer attributes,
- # we extract the first N integers from the line.
- self.instrument.spectrumNumbersForIncidentBeamMonitor = (
- extract_ints_from_string(self.getNextToken())
- )
-
- # For integer pair attributes,
- # we extract the first 2 integers from the line.
- self.instrument.wavelengthRangeForMonitorNormalisation = (
- firstNFloats(self.getNextToken(), 2)
- )
-
- if all(
- self.instrument.wavelengthRangeForMonitorNormalisation
- ) == 0.0:
- self.instrument.wavelengthRangeForMonitorNormalisation = [
- 0, 0
- ]
-
- self.instrument.spectrumNumbersForTransmissionMonitor = (
- extract_ints_from_string(self.getNextToken())
- )
-
- # For single float attributes,
- # we extract the zeroth float from the line.
- self.instrument.incidentMonitorQuietCountConst = (
- nthfloat(self.getNextToken(), 0)
- )
- self.instrument.transmissionMonitorQuietCountConst = (
- nthfloat(self.getNextToken(), 0)
- )
-
- self.instrument.channelNosSpikeAnalysis = (
- firstNInts(self.getNextToken(), 2)
- )
- self.instrument.spikeAnalysisAcceptanceFactor = (
- nthfloat(self.getNextToken(), 0)
- )
-
- # Extract wavelength range
- # Which consists of the first 3 floats
- # (min, max, step) in the line.
- wavelengthRange = firstNFloats(self.getNextToken(), 3)
- self.instrument.wavelengthMin = wavelengthRange[0]
- self.instrument.wavelengthMax = wavelengthRange[1]
- self.instrument.wavelengthStep = wavelengthRange[2]
-
- self.instrument.NoSmoothsOnMonitor = nthint(self.getNextToken(), 0)
-
- # Extract X range
- # Which consists of the first 3 floats
- # (min, max, step) in the line.
- XRange = firstNFloats(self.getNextToken(), 3)
-
- self.instrument.XMin = XRange[0]
- self.instrument.XMax = XRange[1]
- self.instrument.XStep = XRange[2]
-
- # Extract the grouping parameter panel.
- # Each row in the panel consists of the first 4 ints
- # (Group, XMin, XMax, Background Factor) in the line.
- # If the marker line is encountered,
- # then the panel has been parsed.
-
- line = self.getNextToken()
- while "to end input of specified values" not in line:
- group = nthint(line, 0)
- xMin = nthfloat(line, 1)
- xMax = nthfloat(line, 2)
- backgroundFactor = nthfloat(line, 3)
- self.instrument.groupingParameterPanel.append(
- [group, xMin, xMax, backgroundFactor]
- )
- line = self.getNextToken()
-
- self.instrument.groupsAcceptanceFactor = (
- nthfloat(self.getNextToken(), 0)
- )
- self.instrument.mergePower = nthint(self.getNextToken(), 0)
-
- # For boolean attributes, we convert the first
- # integer in the line to its boolean value.
- self.instrument.subSingleAtomScattering = (
- boolifyNum(nthint(self.getNextToken(), 0))
- )
-
- # For enumerated attributes, where the value of the attribute is
- # the first integer in the line, and we must get the member,
- # we do this: Enum[Enum(value).name]
- self.instrument.mergeWeights = (
- MergeWeights[MergeWeights(nthint(self.getNextToken(), 0)).name]
- )
- self.instrument.incidentFlightPath = (
- nthfloat(self.getNextToken(), 0)
- )
- self.instrument.spectrumNumberForOutputDiagnosticFiles = (
- nthint(self.getNextToken(), 0)
- )
-
- self.instrument.neutronScatteringParametersFile = (
- firstword(self.getNextToken())
-
- )
- self.instrument.scaleSelection = (
- Scales[Scales(nthint(self.getNextToken(), 0)).name]
- )
- self.instrument.subWavelengthBinnedData = (
- boolifyNum(nthint(self.getNextToken(), 0))
- )
- self.consumeTokens(2)
- self.instrument.logarithmicStepSize = (
- nthfloat(self.getNextToken(), 0)
- )
- self.instrument.hardGroupEdges = (
- boolifyNum(nthint(self.getNextToken(), 0))
- )
-
- # If NeXus files are being used, then we expect a NeXus definition
- # file to be present, and extract it.
- if (
- self.instrument.dataFileType == "NXS"
- or self.instrument.dataFileType == "nxs"
- ):
- self.instrument.nxsDefinitionFile = (
- firstword(self.getNextToken())
- )
-
- if self.config:
- self.instrument.goodDetectorThreshold = nthint(
- self.getNextToken(), 0
- )
-
- # Consume whitespace and the closing brace.
- self.consumeUpToDelim("}")
-
- # Resolve the paths, to make them relative.
- # First construct the regular expression to match against.
- pattern = re.compile(r"StartupFiles\S*")
-
- match = re.search(
- pattern,
- self.instrument.detectorCalibrationFileName
- )
-
- if match:
- self.instrument.detectorCalibrationFileName = match.group()
-
- match = re.search(
- pattern,
- self.instrument.groupFileName
- )
-
- if match:
- self.instrument.groupFileName = match.group()
-
- match = re.search(
- pattern,
- self.instrument.deadtimeConstantsFileName
- )
-
- if match:
- self.instrument.deadtimeConstantsFileName = match.group()
-
- match = re.search(
- pattern,
- self.instrument.neutronScatteringParametersFile
- )
-
- if match:
- self.instrument.neutronScatteringParametersFile = match.group()
-
- match = re.search(
- pattern,
- self.instrument.neutronScatteringParametersFile
- )
-
- if match:
- self.instrument.neutronScatteringParametersFile = match.group()
-
- except Exception as e:
- raise ParserException(
- "Whilst parsing Instrument, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing."
- f"{str(e)}"
- ) from e
-
- def parseBeam(self):
- """
- Intialises a Beam object and assigns it to the
- beam attribute.
- Parses the attributes of the Beam from the input stream.
- Raises a ParserException if any mandatory attributes are missing.
-
-
- Parameters
- ----------
- None
- Returns
- -------
- None
- """
-
- try:
- # Initialise beam attribute to a new instance of Beam.
- self.beam = Beam()
-
- self.consumeWhitespace()
-
- # For enumerated attributes,
- # where the member name of the attribute is
- # the first 'word' in the line, and we must get the member,
- # we do this: Enum[memberName].
- self.beam.sampleGeometry = Geometry[firstword(self.getNextToken())]
-
- # Set the global geometry.
- cfg.geometry = self.beam.sampleGeometry
-
- # Ignore the number of beam values.
- self.consumeTokens(1)
-
- # For N float attributes,
- # we extract the first N floats from the line.
- self.beam.beamProfileValues = (
- extract_floats_from_string(self.getNextToken())
- )
-
- # For single float attributes,
- # we extract the zeroth float from the line.
- range = self.getNextToken()
- self.beam.stepSizeAbsorption = nthfloat(range, 0)
- self.beam.stepSizeMS = nthfloat(range, 1)
- self.beam.noSlices = nthint(range, 2)
- self.beam.angularStepForCorrections = (
- nthint(self.getNextToken(), 0)
- )
-
- # Extract the incident beam edges
- # relative to the centroid of the sample.
- incidentBeamEdges = self.getNextToken()
- self.beam.incidentBeamLeftEdge = nthfloat(incidentBeamEdges, 0)
- self.beam.incidentBeamRightEdge = nthfloat(incidentBeamEdges, 1)
- self.beam.incidentBeamBottomEdge = nthfloat(incidentBeamEdges, 2)
- self.beam.incidentBeamTopEdge = nthfloat(incidentBeamEdges, 3)
-
- # Extract the scattered beam edges
- # relative to the centroid of the sample.
- scatteredBeamEdges = self.getNextToken()
- self.beam.scatteredBeamLeftEdge = nthfloat(scatteredBeamEdges, 0)
- self.beam.scatteredBeamRightEdge = nthfloat(scatteredBeamEdges, 1)
- self.beam.scatteredBeamBottomEdge = nthfloat(scatteredBeamEdges, 2)
- self.beam.scatteredBeamTopEdge = nthfloat(scatteredBeamEdges, 3)
-
- # For string attributes,
- # we simply extract the firstword in the line.
- self.beam.filenameIncidentBeamSpectrumParams = (
- firstword(self.getNextToken())
- )
-
- # Now match it against a pattern,
- # to resolve the path to be relative.
- pattern = re.compile(r"StartupFiles\S*")
-
- match = re.search(
- pattern,
- self.beam.filenameIncidentBeamSpectrumParams
- )
-
- if match:
- self.beam.filenameIncidentBeamSpectrumParams = match.group()
-
- self.beam.overallBackgroundFactor = (
- nthfloat(self.getNextToken(), 0)
- )
- self.beam.sampleDependantBackgroundFactor = (
- nthfloat(self.getNextToken(), 0)
- )
- self.beam.shieldingAttenuationCoefficient = (
- nthfloat(self.getNextToken(), 0)
- )
-
- # Consume whitespace and the closing brace.
- self.consumeUpToDelim("}")
-
- except Exception as e:
- raise ParserException(
- "Whilst parsing Beam, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing."
- ) from e
-
- def parseNormalisation(self):
- """
- Intialises a Normalisation object and assigns it to the
- normalisation attribute.
- Parses the attributes of the Normalisation from the input stream.
- Raises a ParserException if any mandatory attributes are missing.
-
-
- Parameters
- ----------
- None
- Returns
- -------
- None
- """
-
- try:
- # Initialise normalisation attribute
- # to a new instance of Normalisation.
- self.normalisation = Normalisation()
-
- self.consumeWhitespace()
-
- # The number of files and period number are both stored
- # on the same line.
- # So we extract the 0th integer for the number of files,
- # and the 1st integer for the period number.
- dataFileInfo = self.getNextToken()
- numberOfFiles = nthint(dataFileInfo, 0)
- self.normalisation.periodNumber = nthint(dataFileInfo, 1)
-
- # Extract data files
- dataFiles = []
- for _ in range(numberOfFiles):
- dataFiles.append(firstword(self.getNextToken()))
- # Sorts list so that it is in ascending order
- dataFiles.sort()
-
- # Create a DataFiles object from the dataFiles list constructed.
- self.normalisation.dataFiles = (
- DataFiles(dataFiles, "NORMALISATION")
- )
-
- # The number of background files and
- # background period number are both stored
- # on the same line.
- # So we extract the 0th integer for the number of background files,
- # and the 1st integer for the background riod number.
- dataFileInfoBg = self.getNextToken()
- numberOfFilesBg = nthint(dataFileInfoBg, 0)
- self.normalisation.periodNumberBg = nthint(dataFileInfoBg, 1)
-
- # Extract background data files
- dataFilesBg = []
- for j in range(numberOfFilesBg):
- dataFilesBg.append(firstword(self.getNextToken()))
-
- # Sorts list so that it is in ascending order
- dataFilesBg.sort()
-
- # Create a DataFiles object from the dataFiles list constructed.
- self.normalisation.dataFilesBg = (
- DataFiles(dataFilesBg, "NORMALISATION BACKGROUND")
- )
-
- # For boolean attributes, we convert the first
- # integer in the line to its boolean value.
- self.normalisation.forceCalculationOfCorrections = (
- boolifyNum(nthint(self.getNextToken(), 0))
- )
-
- # Construct composition
- composition = []
- line = self.getNextToken()
- # Extract the composition.
- # Each element in the composition consists of the first 'word',
- # integer at the second position, and float at the third position,
- # (Atomic Symbol, MassNo, Abundance) in the line.
- # If the marker line is encountered,
- # then the panel has been parsed.
- while "end of composition input" not in line:
- atomicSymbol = firstword(line)
- massNo = nthfloat(line, 1)
- abundance = nthfloat(line, 2)
-
- # Create an Element object and append to the composition list.
- composition.append(
- Element(atomicSymbol, massNo, abundance)
- )
- line = self.getNextToken()
-
- # Create a Composition object from the dataFiles list constructed.
- self.normalisation.composition = (
- Composition("Normalisation", elements=composition)
- )
-
- # For enumerated attributes,
- # where the member name of the attribute is
- # the first 'word' in the line, and we must get the member,
- # we do this: Enum[memberName].
- self.normalisation.geometry = (
- Geometry[firstword(self.getNextToken())]
- )
-
- # Is the geometry FLATPLATE?
- if (
- (
- self.normalisation.geometry == Geometry.SameAsBeam
- and cfg.geometry == Geometry.FLATPLATE
- )
- or self.normalisation.geometry == Geometry.FLATPLATE):
- # If is is FLATPLATE, then extract the upstream and downstream
- # thickness, the angle of rotation and sample width.
- thickness = self.getNextToken()
- self.normalisation.upstreamThickness = nthfloat(thickness, 0)
- self.normalisation.downstreamThickness = (
- nthfloat(thickness, 1)
- )
- geometryInfo = self.getNextToken()
- self.normalisation.angleOfRotation = nthfloat(geometryInfo, 0)
- self.normalisation.sampleWidth = nthfloat(geometryInfo, 1)
- else:
-
- # Otherwise, it is CYLINDRICAL,
- # then extract the inner and outer
- # radii and the sample height.
- radii = self.getNextToken()
- self.normalisation.innerRadius = nthfloat(radii, 0)
- self.normalisation.outerRadius = nthfloat(radii, 1)
- self.normalisation.sampleHeight = (
- nthfloat(self.getNextToken(), 0)
- )
-
- # Extract the density.
- density = nthfloat(self.getNextToken(), 0)
-
- # Take the absolute value of the density - since it could be -ve.
- self.normalisation.density = abs(density)
-
- # Decide on the units of density.
- # -ve density means it is atomic (atoms/A^3)
- # +ve means it is chemical (gm/cm^3)
- self.normalisation.densityUnits = (
- UnitsOfDensity.ATOMIC if
- density < 0
- else UnitsOfDensity.CHEMICAL
- )
-
- self.normalisation.tempForNormalisationPC = (
- nthfloat(self.getNextToken(), 0)
- )
- crossSectionSource = firstword(self.getNextToken())
- if (
- crossSectionSource == "TABLES"
- or crossSectionSource == "TRANSMISSION"
- ):
- self.normalisation.totalCrossSectionSource = (
- CrossSectionSource[crossSectionSource]
- )
- else:
- self.normalisation.totalCrossSectionSource = (
- CrossSectionSource.FILE
- )
- self.normalisation.crossSectionFilename = crossSectionSource
-
- self.normalisation.normalisationDifferentialCrossSectionFile = (
- firstword(self.getNextToken())
- )
-
- self.normalisation.lowerLimitSmoothedNormalisation = (
- nthfloat(self.getNextToken(), 0)
- )
- self.normalisation.normalisationDegreeSmoothing = (
- nthfloat(self.getNextToken(), 0)
- )
- self.normalisation.minNormalisationSignalBR = (
- nthfloat(self.getNextToken(), 0)
- )
-
- # Consume whitespace and the closing brace.
- self.consumeUpToDelim("}")
-
- # Resolve to relative.
- pattern = re.compile(r"StartupFiles\S*")
-
- match = re.search(
- pattern,
- self.normalisation.
- normalisationDifferentialCrossSectionFile
- )
-
- if match:
- (
- self.normalisation.
- normalisationDifferentialCrossSectionFile
- ) = match.group()
-
- except Exception as e:
- raise ParserException(
- "Whilst parsing Normalisation, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing."
- ) from e
-
- def parseSampleBackground(self):
- """
- Intialises a SampleBackground object.
- Parses the attributes of the SampleBackground from the input stream.
- Raises a ParserException if any mandatory attributes are missing.
- Returns the parsed object.
-
- Parameters
- ----------
- None
- Returns
- -------
- sampleBackground : SampleBackground
- The SampleBackground that was parsed from the input lines.
- """
-
- try:
- sampleBackground = SampleBackground()
- line = self.peekNextToken()
- if "SAMPLE BACKGROUND" in line and "{" in line:
- self.consumeTokens(1)
- self.consumeWhitespace()
- dataFileInfo = self.getNextToken()
- numberOfFiles = nthint(dataFileInfo, 0)
- sampleBackground.periodNumber = nthint(dataFileInfo, 1)
-
- dataFiles = []
- for _ in range(numberOfFiles):
- dataFiles.append(firstword(self.getNextToken()))
- sampleBackground.dataFiles = (
- DataFiles(dataFiles, "SAMPLE BACKGROUND")
- )
-
- # Consume whitespace and the closing brace.
- self.consumeUpToDelim("}")
-
- return sampleBackground
- except Exception as e:
- raise ParserException(
- "Whilst parsing Sample Background, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing."
- ) from e
-
- def parseSample(self):
- """
- Intialises a Sample object.
- Parses the attributes of the Sample from the input stream.
- Raises a ParserException if any mandatory attributes are missing.
- Returns the parsed object.
-
- Parameters
- ----------
- None
- Returns
- -------
- sample : Sample
- The Sample that was parsed from the input lines.
- """
-
- try:
- # Create a new instance of Sample.
- sample = Sample()
-
- # Extract the sample name, and then discard whitespace lines.
- sample.name = (
- str(self.getNextToken()[:-2]).strip()
- .replace("SAMPLE", "").strip()
- )
- sample.name = utils.replace_unwanted_chars(sample.name)
- print(sample.name)
- self.consumeWhitespace()
- # The number of files and period number are both stored
- # on the same line.
- # So we extract the 0th integer for the number of files,
- # and the 1st integer for the period number.
- dataFileInfo = self.getNextToken()
- numberOfFiles = nthint(dataFileInfo, 0)
- sample.periodNumber = nthint(dataFileInfo, 1)
-
- # Extract data files
- dataFiles = []
- for _ in range(numberOfFiles):
- dataFiles.append(firstword(self.getNextToken()))
- # Create a DataFiles object from the dataFiles list constructed.
- sample.dataFiles = DataFiles(dataFiles, sample.name)
-
- # For boolean attributes, we convert the first
- # integer in the line to its boolean value.
- sample.forceCalculationOfCorrections = (
- boolifyNum(nthint(self.getNextToken(), 0))
- )
-
- # Construct composition
- composition = []
- line = self.getNextToken()
-
- # Extract the composition.
- # Each element in the composition consists of the first 'word',
- # integer at the second position, and float t the first position,
- # (Atomic Symbol, MassNo, Abundance) in the line.
- # If the marker line is encountered,
- # then the panel has been parsed.
- while "end of composition input" not in line:
-
- atomicSymbol = firstword(line)
- massNo = nthfloat(line, 1)
- abundance = nthfloat(line, 2)
-
- # Create an Element object and append to the composition list.
- composition.append(Element(atomicSymbol, massNo, abundance))
- line = self.getNextToken()
-
- # Create a Composition object from the dataFiles list constructed.
- sample.composition = Composition("Sample", elements=composition)
-
- # For enumerated attributes,
- # where the member name of the attribute is
- # the first 'word' in the line, and we must get the member,
- # we do this: Enum[memberName].
- sample.geometry = Geometry[firstword(self.getNextToken())]
-
- # Is the geometry FLATPLATE?
- if (
- (
- sample.geometry == Geometry.SameAsBeam
- and cfg.geometry == Geometry.FLATPLATE
- )
- or sample.geometry == Geometry.FLATPLATE):
- # If is is FLATPLATE, then extract the upstream and downstream
- # thickness, the angle of rotation and sample width.
- thickness = self.getNextToken()
- sample.upstreamThickness = nthfloat(thickness, 0)
- sample.downstreamThickness = nthfloat(thickness, 1)
-
- geometryInfo = self.getNextToken()
- sample.angleOfRotation = nthfloat(geometryInfo, 0)
- sample.sampleWidth = nthfloat(geometryInfo, 1)
- else:
-
- # Otherwise, it is CYLINDRICAL,
- # then extract the inner and outer
- # radii and the sample height.
- radii = self.getNextToken()
- sample.innerRadius = nthfloat(radii, 0)
- sample.outerRadius = nthfloat(radii, 1)
- sample.sampleHeight = nthfloat(self.getNextToken(), 0)
-
- # Extract the density.
- density = nthfloat(self.getNextToken(), 0)
-
- # Decide on the units of density.
- # -ve density means it is atomic (atoms/A^3)
- # +ve means it is chemical (gm/cm^3)
- sample.density = abs(density)
- sample.densityUnits = (
- UnitsOfDensity.ATOMIC if
- density < 0
- else UnitsOfDensity.CHEMICAL
- )
- sample.tempForNormalisationPC = nthfloat(self.getNextToken(), 0)
- crossSectionSource = firstword(self.getNextToken())
- if (
- crossSectionSource == "TABLES"
- or crossSectionSource == "TRANSMISSION"
- ):
- sample.totalCrossSectionSource = (
- CrossSectionSource[crossSectionSource]
- )
- else:
- sample.totalCrossSectionSource = CrossSectionSource.FILE
- sample.crossSectionFilename = crossSectionSource
- sample.sampleTweakFactor = nthfloat(self.getNextToken(), 0)
-
- topHatW = nthfloat(self.getNextToken(), 0)
- if topHatW == 0:
- sample.topHatW = 0
- sample.FTMode = FTModes.NO_FT
- elif topHatW < 0:
- sample.topHatW = abs(topHatW)
- sample.FTMode = FTModes.SUB_AVERAGE
- else:
- sample.topHatW = topHatW
- sample.FTMode = FTModes.ABSOLUTE
-
- sample.minRadFT = nthfloat(self.getNextToken(), 0)
- sample.grBroadening = nthfloat(self.getNextToken(), 0)
-
- # Extract the resonance values.
- # Each row consists of the first 2 floats.
- # (minWavelength, maxWavelength) in the line.
- # If the marker line is encountered,
- # then the values has been parsed.
- line = self.getNextToken()
- while (
- "to finish specifying wavelength range of resonance"
- not in line
- ):
- sample.resonanceValues.append(
- extract_floats_from_string(line)
- )
- line = self.getNextToken()
-
- # Extract the exponential values.
- # Each row consists of the first 3 numbers.
- # (Amplitude, Decay, N) in the line.
- # If the marker line is encountered,
- # then the values has been parsed.
- line = self.getNextToken()
- if "to specify end of exponential parameter input" not in line:
- sample.exponentialValues = []
- while "to specify end of exponential parameter input" not in line:
- sample.exponentialValues.append(
- extract_nums_from_string(line)
- )
-
- line = self.getNextToken()
-
- sample.normalisationCorrectionFactor = (
- nthfloat(self.getNextToken(), 0)
- )
- sample.fileSelfScattering = firstword(self.getNextToken())
- sample.normaliseTo = (
- NormalisationType[
- NormalisationType(nthint(self.getNextToken(), 0)).name
- ]
- )
- sample.maxRadFT = nthfloat(self.getNextToken(), 0)
- sample.outputUnits = (
- OutputUnits[OutputUnits(nthint(self.getNextToken(), 0)).name]
- )
- sample.powerForBroadening = nthfloat(self.getNextToken(), 0)
- sample.stepSize = nthfloat(self.getNextToken(), 0)
- sample.runThisSample = boolifyNum(nthint(self.getNextToken(), 0))
- environmentValues = self.getNextToken()
- sample.scatteringFraction = nthfloat(environmentValues, 0)
- sample.attenuationCoefficient = nthfloat(environmentValues, 1)
-
- # Consume whitespace and the closing brace.
- self.consumeUpToDelim("}")
-
- return sample
-
- except Exception as e:
- raise ParserException(
- "Whilst parsing Sample, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing."
- ) from e
-
- def parseContainer(self):
- """
- Intialises a Container object.
- Parses the attributes of the Container from the input stream.
- Raises a ParserException if any mandatory attributes are missing.
- Returns the parsed object.
-
- Parameters
- ----------
- None
- Returns
- -------
- container : Container
- The Container that was parsed from the input lines.
- """
-
- try:
- # Create a new instance of Container.
- container = Container()
-
- # Extract the name from the lines,
- # and then discard the unnecessary lines.
- container.name = (
- str(self.getNextToken()[:-2]).strip()
- .replace("CONTAINER", "").strip()
- )
- self.consumeWhitespace()
-
- # The number of files and period number are both stored
- # on the same line.
- # So we extract the 0th integer for the number of files,
- # and the 1st integer for the period number.
- dataFileInfo = self.getNextToken()
- numberOfFiles = nthint(dataFileInfo, 0)
- container.periodNumber = nthint(dataFileInfo, 1)
-
- # Extract data files
- dataFiles = []
- for _ in range(numberOfFiles):
- dataFiles.append(firstword(self.getNextToken()))
-
- # Create a DataFiles object from the dataFiles list constructed.
- container.dataFiles = DataFiles(dataFiles, container.name)
-
- # Construct composition
- composition = []
- line = self.getNextToken()
- # Extract the composition.
- # Each element in the composition consists of the first 'word',
- # integer at the second position, and float t the first position,
- # (Atomic Symbol, MassNo, Abundance) in the line.
- # If the marker line is encountered,
- # then the panel has been parsed.
- while "end of composition input" not in line:
-
- atomicSymbol = firstword(line)
- massNo = nthfloat(line, 1)
- abundance = nthfloat(line, 2)
-
- # Create an Element object and append to the composition list.
- composition.append(Element(atomicSymbol, massNo, abundance))
- line = self.getNextToken()
- # Create a Composition object from the dataFiles list constructed.
- container.composition = Composition(
- "Container",
- elements=composition
- )
-
- # For enumerated attributes,
- # where the member name of the attribute is
- # the first 'word' in the line, and we must get the member,
- # we do this: Enum[memberName].
- container.geometry = Geometry[firstword(self.getNextToken())]
-
- # Is the geometry FLATPLATE?
- if (
- (
- container.geometry == Geometry.SameAsBeam
- and cfg.geometry == Geometry.FLATPLATE
- )
- or container.geometry == Geometry.FLATPLATE):
- # If is is FLATPLATE, then extract the upstream and downstream
- # thickness, the angle of rotation and sample width.
- thickness = self.getNextToken()
- container.upstreamThickness = nthfloat(thickness, 0)
- container.downstreamThickness = nthfloat(thickness, 1)
-
- geometryValues = self.getNextToken()
- container.angleOfRotation = nthfloat(geometryValues, 0)
- container.sampleWidth = nthfloat(geometryValues, 1)
- else:
-
- # Otherwise, it is CYLINDRICAL,
- # then extract the inner and outer
- # radii and the sample height.
- radii = self.getNextToken()
- container.innerRadius = nthfloat(radii, 0)
- container.outerRadius = nthfloat(radii, 1)
- container.sampleHeight = nthfloat(self.getNextToken(), 0)
-
- # Extract the density.
- density = nthfloat(self.getNextToken(), 0)
-
- # Take the absolute value of the density - since it could be -ve.
- container.density = abs(density)
-
- # Decide on the units of density.
- # -ve density means it is atomic (atoms/A^3)
- # +ve means it is chemical (gm/cm^3)
- container.densityUnits = (
- UnitsOfDensity.ATOMIC if
- density < 0
- else UnitsOfDensity.CHEMICAL
- )
- crossSectionSource = firstword(self.getNextToken())
- if (
- crossSectionSource == "TABLES"
- or crossSectionSource == "TRANSMISSION"
- ):
- container.totalCrossSectionSource = (
- CrossSectionSource[crossSectionSource]
- )
- else:
- container.totalCrossSectionSource = CrossSectionSource.FILE
- container.crossSectionFilename = crossSectionSource
- container.tweakFactor = nthfloat(self.getNextToken(), 0)
-
- environmentValues = self.getNextToken()
- container.scatteringFraction = nthfloat(environmentValues, 0)
- container.attenuationCoefficient = nthfloat(environmentValues, 1)
-
- # Consume whitespace and the closing brace.
- self.consumeUpToDelim("}")
-
- return container
-
- except Exception as e:
- raise ParserException(
- "Whilst parsing Container, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing."
- ) from e
-
- def parseComponents(self):
- try:
- while self.stream:
- component = self.parseComponent()
- if component:
- self.components.addComponent(component)
- except Exception as e:
- raise ParserException(
- "Whilst parsing Components, an exception occured."
- " The input file is most likely of an incorrect format."
- ) from e
-
- def parseComponent(self):
- name = self.getNextToken().rstrip()
- component = Component(name)
- line = self.peekNextToken()
- if "(" in line:
- self.consumeTokens(1)
- else:
- return
- line = self.getNextToken()
- while line and ")" not in line:
- atomicSymbol, massNo, abundance = line.split()
- element = Element(atomicSymbol, float(massNo), float(abundance))
- component.addElement(element)
- line = self.getNextToken()
- return component
-
- def makeParse(self, key):
- """
- Calls a parsing function from a dictionary of parsing functions
- by the input key.
- Returns the result of the called parsing function.
- Only use case is as a helper function during parsing.
-
- Parameters
- ----------
- key : str
- Parsing function to call
- (INSTRUMENT/BEAM/NORMALISATION/SAMPLE BACKGROUND/SAMPLE/CONTAINER)
- Returns
- -------
- NoneType
- if parsing INSTRUMENT/BEAM/NORMALISATION
- SampleBackground
- if parsing SAMPLE BACKGROUND
- Sample
- if parsing Sample
- Container
- if parsing Container
- None
- if parsing Components
- """
-
- parsingFunctions = {
- "INSTRUMENT": self.parseInstrument,
- "BEAM": self.parseBeam,
- "NORMALISATION": self.parseNormalisation,
- "SAMPLE BACKGROUND": self.parseSampleBackground,
- "SAMPLE": self.parseSample,
- "CONTAINER": self.parseContainer,
- "COMPONENTS": self.parseComponents
- }
- # Return the result of the parsing function that was called.
- return parsingFunctions[key]()
-
- def sampleBackgroundHelper(self):
- """
- Helper method for parsing Sample Background and its
- Samples and their Containers.
- Returns the SampleBackground object.
- Parameters
- ----------
- None
- Returns
- -------
- SampleBackground
- The SampleBackground parsed from the lines.
- """
-
- # Parse sample background.
- sampleBackground = self.makeParse("SAMPLE BACKGROUND")
+ def setGudrunDir(self, dir):
+ self.instrument.GudrunInputFileDir = dir
- self.consumeWhitespace()
- line = self.peekNextToken()
+ def convertToSample(self, container, persist=False):
- # Parse all Samples and Containers belonging to the sample background.
- while "END" not in line and "SAMPLE BACKGROUND" not in line:
- if not line:
- raise ParserException("Unexpected EOF during parsing.")
- elif "GO" in line:
- self.getNextToken()
- elif "SAMPLE" in line and firstword(line) == "SAMPLE":
- sample = self.makeParse("SAMPLE")
- if not sample.name:
- sample.name = utils.replace_unwanted_chars(uniquifyName(
- "SAMPLE",
- [s.name for s in sampleBackground.samples],
- sep="",
- incFirst=True
- ))
- sampleBackground.samples.append(sample)
- elif "CONTAINER" in line and firstword(line) == "CONTAINER":
- container = self.makeParse("CONTAINER")
- if not container.name:
- container.name = uniquifyName(
- "CONTAINER",
- [c.name
- for c in sampleBackground.samples[-1].containers],
- sep="",
- incFirst=True)
- sampleBackground.samples[-1].containers.append(
- container
- )
- self.consumeWhitespace()
- line = self.peekNextToken()
- return sampleBackground
+ sample = container.convertToSample()
- def parse(self, path, config=False):
- """
- Parse the GudrunFile from its path.
- Assign objects from the file to the attributes of the class.
- Raises ParserException if Instrument,
- Beam or Normalisation are missing.
+ if persist:
+ for i, sampleBackground in enumerate(self.sampleBackgrounds):
+ for sample in sampleBackground.samples:
+ if container in sample.containers:
+ sample.containers.remove(container)
+ break
+ self.sampleBackgrounds[i].append(sample)
+ return sample
- Parameters
- ----------
- None
- Returns
- -------
- None
- """
- self.config = config
- # Ensure only valid files are given.
- if not path:
- raise ParserException(
- "Path not supplied. Cannot parse from an empty path!"
- )
- if not os.path.exists(path):
- raise ParserException(
- "The path supplied is invalid.\
- Cannot parse from an invalid path" + path
+ def determineError(self, sample):
+ gudPath = sample.dataFiles[0].replace(
+ self.instrument.dataFileType,
+ "gud"
+ )
+ gudFile = GudFile(
+ os.path.join(
+ self.instrument.GudrunInputFileDir, gudPath
)
- if self.format == Format.YAML:
- # YAML Files
- try:
- (
- self.instrument,
- self.beam,
- self.components,
- self.normalisation,
- self.sampleBackgrounds,
- cfg.GUI
- ) = self.yaml.parseYaml(path)
- except YAMLException as e:
- raise ParserException(e)
- else:
- # TXT Files
- parsing = ""
- KEYWORDS = {
- "INSTRUMENT": False,
- "BEAM": False,
- "NORMALISATION": False
- }
-
- # Decide the encoding
- import chardet
- with open(path, 'rb') as fp:
- encoding = chardet.detect(fp.read())['encoding']
-
- # Read the input stream into our attribute.
- with open(path, encoding=encoding) as fp:
- self.stream = fp.readlines()
-
- # Here we go! Get the first token and begin parsing.
- line = self.getNextToken()
+ )
+ error = round(
+ (
+ 1.0 - (gudFile.averageLevelMergedDCS / gudFile.expectedDCS)
+ ) * 100, 1
+ )
+ return error
- # Iterate through the file,
- # parsing the Instrument, Beam and Normalisation.
- while (
- self.stream
- and not all(value for value in KEYWORDS.values())
- ):
- if (
- firstword(line) in KEYWORDS.keys()
- and not KEYWORDS[firstword(line)]
- ):
- parsing = firstword(line)
- self.makeParse(parsing)
- KEYWORDS[parsing] = True
- line = self.getNextToken()
+ def runSamples(self):
+ return [
+ s for sb in self.sampleBackgrounds
+ for s in sb.samples
+ if s.runThisSample]
- # If we didn't parse each one of the keywords, then panic.
- if not all(KEYWORDS.values()) and not config:
- raise ParserException((
- 'INSTRUMENT, BEAM and NORMALISATION'
- ' were not parsed. It\'s possible the file'
- ' supplied is of an incorrect format!'
- ))
- elif not KEYWORDS["INSTRUMENT"] and config:
- raise ParserException((
- 'INSTRUMENT was not parsed. It\'s possible the file'
- ' supplied is of an incorrect format!'
- ))
+ def samples(self):
+ return [
+ s for sb in self.sampleBackgrounds
+ for s in sb.samples]
- # Ignore whitespace.
- self.consumeWhitespace()
- line = self.peekNextToken()
+ def containers(self):
+ return [c for s in self.samples() for c in s.containers]
- # Parse sample backgrounds, alongside their samples and containers.
- while self.stream:
- if "SAMPLE BACKGROUND" in line and "{" in line:
- self.sampleBackgrounds.append(
- self.sampleBackgroundHelper()
- )
- elif "COMPONENTS:" in line:
- self.makeParse("COMPONENTS")
- line = self.getNextToken()
+ def checkNormDataFiles(self):
+ return (len(self.normalisation.dataFiles)
+ and len(self.normalisation.dataFilesBg))
def __str__(self):
"""
@@ -1468,119 +240,3 @@ def __str__(self):
+ footer
+ components
)
-
- def save(self, path='', format=None):
- if not path:
- path = self.path()
-
- if not format:
- format = self.format
- if format == Format.TXT:
- self.write_out(
- path=f"{os.path.splitext(path)[0]}.txt", overwrite=True)
- elif format == Format.YAML:
- self.write_yaml(path=f"{os.path.splitext(path)[0]}.yaml")
-
- def write_yaml(self, path):
- self.yaml = YAML()
- self.yaml.writeYAML(self, path)
-
- def write_out(self, path='', overwrite=False, writeParameters=True):
- """
- Writes out the string representation of the GudrunFile.
- If 'overwrite' is True, then the initial file is overwritten.
- Otherwise, it is written to 'gudpy_{initial filename}.txt'.
-
- Parameters
- ----------
- overwrite : bool, optional
- Overwrite the initial file? (default is False).
- path : str, optional
- Path to write to.
- Returns
- -------
- None
- """
- if path:
- if not overwrite:
- assert (not os.path.exists(path))
- f = open(
- path, "w", encoding="utf-8"
- )
- elif not overwrite:
- assert (not os.path.exists(os.path.join(
- self.instrument.GudrunInputFileDir,
- self.OUTPATH)
- ))
- f = open(
- os.path.join(
- self.instrument.GudrunInputFileDir,
- self.OUTPATH
- ), "w", encoding="utf-8")
- else:
- if not self.path():
- path = os.path.join(
- self.instrument.GudrunInputFileDir,
- self.OUTPATH)
- f = open(path, "w", encoding="utf-8")
-
- if os.path.basename(f.name) == self.OUTPATH:
- for sampleBackground in self.sampleBackgrounds:
- sampleBackground.writeAllSamples = False
- f.write(str(self))
- f.close()
-
- if writeParameters:
- for sb in self.sampleBackgrounds:
- for s in sb.samples:
- if s.runThisSample:
- gf = deepcopy(self)
- gf.sampleBackgrounds = [deepcopy(sb)]
- gf.sampleBackgrounds[0].samples = [deepcopy(s)]
- gf.write_out(
- path=os.path.join(
- self.instrument.GudrunInputFileDir,
- s.pathName(),
- ),
- overwrite=True,
- writeParameters=False
- )
-
- def setGudrunDir(self, dir):
- self.instrument.GudrunInputFileDir = dir
-
- def convertToSample(self, container, persist=False):
-
- sample = container.convertToSample()
-
- if persist:
- for i, sampleBackground in enumerate(self.sampleBackgrounds):
- for sample in sampleBackground.samples:
- if container in sample.containers:
- sample.containers.remove(container)
- break
- self.sampleBackgrounds[i].append(sample)
- return sample
-
- def determineError(self, sample):
- gudPath = sample.dataFiles[0].replace(
- self.instrument.dataFileType,
- "gud"
- )
- gudFile = GudFile(
- os.path.join(
- self.instrument.GudrunInputFileDir, gudPath
- )
- )
- error = round(
- (
- 1.0 - (gudFile.averageLevelMergedDCS / gudFile.expectedDCS)
- ) * 100, 1
- )
- return error
-
-
-Container.getNextToken = GudrunFile.getNextToken
-Container.peekNextToken = GudrunFile.peekNextToken
-Container.consumeUpToDelim = GudrunFile.consumeUpToDelim
-Container.consumeWhitespace = GudrunFile.consumeWhitespace
diff --git a/gudpy/__init__.py b/gudpy/core/io/__init__.py
similarity index 100%
rename from gudpy/__init__.py
rename to gudpy/core/io/__init__.py
diff --git a/gudpy/core/io/gudpy_io.py b/gudpy/core/io/gudpy_io.py
new file mode 100644
index 00000000..c8a9fc89
--- /dev/null
+++ b/gudpy/core/io/gudpy_io.py
@@ -0,0 +1,485 @@
+from abc import abstractmethod
+from enum import Enum
+from ruamel.yaml import YAML as yaml
+from ruamel.yaml import YAMLError
+import shutil
+import os
+import typing as typ
+
+from core.composition import (
+ Component, Components, Composition, WeightedComponent
+)
+from core.data_files import DataFile, DataFiles
+from core.element import Element
+from core.exception import YAMLException
+from core.gui_config import GUIConfig
+from core import utils
+from core.instrument import Instrument
+from core.beam import Beam
+from core.normalisation import Normalisation
+from core.sample_background import SampleBackground
+from core.sample import Sample
+from core.container import Container
+from core.gudrun_file import GudrunFile
+from core.io.gudrun_file_parser import GudrunFileParser
+from core import config
+
+
+class GudPyIO:
+ projectDir = ""
+ loadFile = ""
+
+ def __init__(self):
+ self.yaml = self._getYamlModule()
+ self.gudrunFileParser = GudrunFileParser()
+
+ @classmethod
+ def projectName(cls) -> str:
+ return os.path.basename(cls.projectDir)
+
+ @classmethod
+ def autosavePath(cls) -> str:
+ return f"{os.path.basename(cls.projectDir)}.autosave"
+
+ @classmethod
+ def path(cls):
+ """Returns the path to the yaml file
+
+ Returns
+ -------
+ str
+ Path to yaml file
+
+ Raises
+ ------
+ RuntimeError
+ Raised if project directory is not set
+ """
+ if not cls.projectDir:
+ raise RuntimeError("Save location not set.")
+ return os.path.join(cls.projectDir, f"{cls.projectName()}.yaml")
+
+ @classmethod
+ def setSaveLocation(cls, projectDir: str):
+ """Sets the save location/project directory
+
+ Parameters
+ ----------
+ projectDir : str
+ Path to new save location
+ """
+ cls.projectDir = projectDir
+
+ @classmethod
+ def checkSaveLocation(cls) -> bool:
+ """Checks if user has set the save location
+ """
+ return bool(cls.projectDir)
+
+ def save(self, gudrunFile):
+ """Saves yaml file to project directory
+ """
+ self._writeYAML(gudrunFile, self.path())
+
+ def importGudrunFile(self, path, config=False) -> GudrunFile:
+ GudPyIO.loadFile = path
+ return self.gudrunFileParser.parseFromPath(path, config)
+
+ def importProject(self, projectDir) -> GudrunFile:
+ """Imports from a project directory
+
+ Parameters
+ ----------
+ projectDir : str
+ Path to GudPy project folder
+
+ Raises
+ ------
+ FileNotFoundError
+ Raised if there is no YAML input file in the
+ project directory
+
+ Returns
+ -------
+ GudrunFile
+ Genererated GudrunFile from input
+ """
+ loadFile = ""
+
+ if os.path.exists(os.path.join(
+ projectDir,
+ f"{os.path.basename(projectDir)}.yaml"
+ )):
+ # If default file exists
+ loadFile = os.path.join(
+ projectDir,
+ f"{os.path.basename(projectDir)}.yaml"
+ )
+ else:
+ # Try to find yaml files
+ for f in os.listdir(projectDir):
+ if os.path.splitext(f)[1] == ".yaml":
+ # If file is yaml
+ loadFile = os.path.join(projectDir, f)
+ if not loadFile:
+ raise FileNotFoundError(
+ "Could not find GudPy input file within the project")
+
+ self.setSaveLocation(projectDir)
+ return self._parseYaml(loadFile)
+
+ def importFromYamlFile(self, loadFile) -> GudrunFile:
+ return self._parseYaml(loadFile)
+
+ def exportProject(self, gudrunFile, targetDir):
+ if os.path.exists(targetDir):
+ raise IsADirectoryError("Cannot be an existing directory")
+
+ oldFile = os.path.join(targetDir, os.path.basename(self.loadFile))
+ os.makedirs(targetDir)
+
+ if os.path.exists(self.projectDir) and os.path.exists(oldFile):
+ shutil.copytree(self.projectDir, targetDir)
+ os.rename(oldFile, self.path())
+ else:
+ yamlPath = os.path.join(
+ targetDir,
+ f"{os.path.basename(targetDir)}.yaml"
+ )
+ self.exportYamlFile(gudrunFile, yamlPath)
+
+ def exportYamlFile(self, gudrunFile, path):
+ self._writeYAML(gudrunFile, path)
+
+ @classmethod
+ def exportGudrunFile(cls, gudrunFile, path):
+ return GudrunFileParser.export(gudrunFile, path)
+
+ @classmethod
+ def writeGudrunFile(cls, gudrunFile, path):
+ GudrunFileParser.writeGudrunFile(gudrunFile, path)
+
+ @classmethod
+ def writeObject(cls, obj, path):
+ with open(path, "w", encoding="utf-8") as f:
+ f.write(str(obj))
+ f.close()
+
+# =================================================================
+# PARSING HELPERS
+# =================================================================
+
+ def _getYamlModule(self) -> yaml:
+ yaml_ = yaml()
+ yaml_.preserve_quotes = True
+ yaml_.default_flow_style = None
+ yaml_.encoding = 'utf-8'
+ return yaml_
+
+ def _parseYaml(self, path) -> GudrunFile:
+ self.loadFile = path
+ yamldict = self._yamlToDict(path)
+ gudrunFile = GudrunFile()
+ for k, v in yamldict.items():
+ if k == "GUI":
+ GUI = GUIConfig()
+ self._yamlToObject(GUI, v)
+ config.GUIConfig = GUI
+ continue
+ if not hasattr(gudrunFile, k):
+ # If attribute is not valid
+ print(f"Ignoring depreciated attribute '{k}'")
+ continue
+
+ parsingFunc = self._parsingFuncMap(k)
+ if parsingFunc:
+ try:
+ obj = parsingFunc(v)
+ setattr(gudrunFile, k, obj)
+ except YAMLException as e:
+ raise YAMLException(e)
+ except YAMLError as e:
+ # Exception caused by yaml parsing library
+ raise YAMLException(f"Invalid YAML file: {str(e)}")
+ except Exception as e:
+ raise YAMLException(
+ f"Failed at: '{k}'. Details: {e}")
+ return gudrunFile
+
+ def _writeYAML(self, gudrunFile: GudrunFile, path: str) -> None:
+ with open(path, "wb") as fp:
+ outyaml = {
+ "instrument": gudrunFile.instrument,
+ "beam": gudrunFile.beam,
+ "components": gudrunFile.components.components,
+ "normalisation": gudrunFile.normalisation,
+ "sampleBackgrounds": gudrunFile.sampleBackgrounds,
+ "GUI": config.GUI
+ }
+ self.yaml.dump(
+ {k: self.toYaml(v) for k, v in outyaml.items()},
+ fp
+ )
+
+ def _parsingFuncMap(self, key) -> typ.Union[typ.Callable, None]:
+ parsingFuncs = {
+ "instrument": self._parseYamlInstrument,
+ "beam": self._parseYamlBeam,
+ "normalisation": self._parseYamlNormalisation,
+ "sampleBackgrounds": self._parseYamlSampleBackground,
+ "sample": self._parseYamlSample,
+ "composition": self._parseYamlComposition,
+ "component": self._parseYamlComponent,
+ "components": self._parseYamlComponents,
+ "elements": self._parseYamlElements,
+ "dataFiles": self._parseYamlDataFiles,
+ "dataFilesBg": self._parseYamlDataFiles
+ }
+ if key in parsingFuncs.keys():
+ return parsingFuncs[key]
+ else:
+ return None
+
+ def _yamlToDict(self, path) -> any:
+ # Read the input stream into our attribute.
+ with open(path, encoding=self.yaml.encoding) as fp:
+ return self.yaml.load(fp)
+
+ def _yamlToObject(self, obj: any, yamldict: any) -> any:
+ for k, v in yamldict.items():
+ self._assignYamlToAttr(obj, k, v)
+ return obj
+
+ def _assignYamlToAttr(self, obj: any, key, val) -> None:
+ if not hasattr(obj, key):
+ # If attribute is not valid
+ print(
+ f"Ignoring depreciated attribute '{key}'"
+ f"given to '{type(obj).__name__}'"
+ )
+ return
+
+ try:
+ if isinstance(obj.__dict__[key], Enum):
+ setattr(obj, key, type(obj.__dict__[key])[val])
+ return
+
+ if key == "outputFolder":
+ if not os.path.exists(val):
+ return
+
+ setattr(obj, key, type(obj.__dict__[key])(self.toBuiltin(val)))
+ except Exception as e:
+ raise YAMLException(
+ "Parsing failed while trying to assign attribute"
+ f"'{key}' to {type(obj).__name__}") from e
+
+ def _parseYamlInstrument(self, yamldict: any) -> Instrument:
+ instrument = Instrument()
+ instrument = self._yamlToObject(instrument, yamldict)
+ return instrument
+
+ def _parseYamlBeam(self, yamldict: any) -> Normalisation:
+ beam = Beam()
+ beam = self._yamlToObject(beam, yamldict)
+ return beam
+
+ def _parseYamlNormalisation(self, yamldict: any) -> Normalisation:
+ normalisation = Normalisation()
+ for k, v in yamldict.items():
+ if k == "composition":
+ composition = self._parseYamlComposition(v)
+ setattr(normalisation, k, composition)
+ elif k == "dataFiles" or k == "dataFilesBg":
+ dataFiles = self._parseYamlDataFiles(v)
+ setattr(normalisation, k, dataFiles)
+ else:
+ self._assignYamlToAttr(normalisation, k, v)
+ return normalisation
+
+ def _parseYamlSample(self, yamldict: any) -> Sample:
+ sample = Sample()
+ for k, v in yamldict.items():
+ if k == "dataFiles":
+ dataFiles = self._parseYamlDataFiles(v)
+ for dataFile in dataFiles:
+ dataFile.isSampleDataFile = True
+ setattr(sample, k, dataFiles)
+ elif k == "composition":
+ composition = self._parseYamlComposition(v)
+ setattr(sample, k, composition)
+ elif k == "containers":
+ for contyaml in yamldict[k]:
+ container = self._parseYamlContainer(contyaml)
+ sample.containers.append(container)
+ else:
+ self._assignYamlToAttr(sample, k, v)
+ return sample
+
+ def _parseYamlSampleBackground(
+ self, yamllist: list
+ ) -> list[SampleBackground]:
+ sampleBackgrounds = []
+ for sbg in yamllist:
+ sampleBg = SampleBackground()
+ for k, v in sbg.items():
+ if k == "samples":
+ for sampleyaml in v:
+ sample = self._parseYamlSample(sampleyaml)
+ sample.name = utils.replace_unwanted_chars(sample.name)
+ sampleBg.samples.append(sample)
+ elif k == "dataFiles":
+ dataFiles = self._parseYamlDataFiles(v)
+ setattr(sampleBg, k, dataFiles)
+ else:
+ self._assignYamlToAttr(sampleBg, k, v)
+ sampleBackgrounds.append(sampleBg)
+ return sampleBackgrounds
+
+ def _parseYamlComposition(self, yamldict: any) -> Composition:
+ composition = Composition(yamldict["type_"])
+ for k, v in yamldict.items():
+ if k == "elements":
+ elements = self._parseYamlElements(yamldict[k])
+ setattr(composition, k, elements)
+ elif k == "weightedComponents":
+ weightedComponents = []
+ for wc in yamldict[k]:
+ component = self._parseYamlComponent(wc)
+ ratio = wc["ratio"]
+ try:
+ weightedComponents.append(
+ WeightedComponent(
+ component, float(ratio))
+ )
+ except ValueError:
+ raise YAMLException(
+ "Invalid ratio given to Weighted Component")
+ setattr(composition, k, weightedComponents)
+ else:
+ self._assignYamlToAttr(composition, k, v)
+ return composition
+
+ def _parseYamlContainer(self, yamldict: any) -> Container:
+ container = Container()
+ for k, v in yamldict.items():
+ if k == "composition":
+ composition = self._parseYamlComposition(v)
+ setattr(container, k, composition)
+ elif k == "dataFiles":
+ dataFiles = self._parseYamlDataFiles(v)
+ setattr(container, k, dataFiles)
+ else:
+ self._assignYamlToAttr(container, k, v)
+ return container
+
+ def _parseYamlDataFiles(self, yamldict: any) -> DataFiles:
+ dataFiles = DataFiles([], yamldict["name"])
+ for df in yamldict["_dataFiles"]:
+ dataFile = DataFile(df["filename"])
+ for k, v in df.items():
+ if k == "_outputs":
+ if not v:
+ dataFile._outputs = {}
+ continue
+ outDict = {}
+ for k_, v_ in v.items():
+ if os.path.exists(v_):
+ outDict[k_] = v_
+ dataFile._outputs = outDict
+ else:
+ self._assignYamlToAttr(dataFile, k, v)
+ dataFiles.dataFiles.append(dataFile)
+ return dataFiles
+
+ def _parseYamlComponent(self, yamldict: any) -> Component:
+ if (
+ "component" not in yamldict
+ or "ratio" not in yamldict
+ ):
+ raise YAMLException(
+ "Weighted Component expects 'component' and"
+ + " 'ratio' to be provided")
+ component = Component()
+ self._yamlToObject(
+ component, yamldict["component"]
+ )
+ return component
+
+ def _parseYamlComponents(self, yamllist: list[dict]) -> Components:
+ components = []
+ for c in yamllist:
+ components.append(self._parseYamlComponent(c))
+ return Components(components)
+
+ def _parseYamlElements(self, yamllist: list[str]) -> list[Element]:
+ elements = []
+ for idx, element in enumerate(yamllist):
+ # Ensuring correct arguements are provided
+ if (
+ "atomicSymbol" not in element
+ or "massNo" not in element
+ or "abundance" not in element
+ ):
+ raise YAMLException(
+ "Insufficient arguments given to element"
+ + f" {idx + 1}. Expects 'atomicSymbol', 'massNo'"
+ + " and 'abundance'"
+ )
+
+ # Setting element properties
+ try:
+ element_ = Element(
+ **{
+ "atomicSymbol": element["atomicSymbol"],
+ "massNo": float(element["massNo"]),
+ "abundance": float(element["abundance"])
+ }
+ )
+ elements.append(element_)
+ except ValueError as e:
+ raise YAMLException(
+ f"Invalid number given to element {idx + 1}:"
+ f"{e}")
+ return elements
+
+    @abstractmethod
+    def toYaml(self, var: any):
+        """Convert `var` into a structure ruamel.yaml can serialise.
+
+        NOTE(review): decorated @abstractmethod yet carries a full
+        implementation — subclasses presumably call it via super();
+        confirm the decorator is intentional. Values of unrecognised
+        types fall through every branch and return None.
+        """
+        # ruamel scalar floats round-trip as plain Python floats.
+        if var.__class__.__module__ == "ruamel.yaml.scalarfloat":
+            return float(var)
+        if var.__class__.__module__ == "builtins":
+            # Containers are converted element-wise (preserving the
+            # list/tuple type); other builtins pass through unchanged.
+            if isinstance(var, (list, tuple)):
+                return type(var)([self.toYaml(v) for v in var])
+            else:
+                return var
+        elif isinstance(var, Enum):
+            # Enums serialise as their member name.
+            return type(var)(var.value).name
+        elif isinstance(var, (
+            Instrument, Beam, Components, Normalisation,
+            SampleBackground, Sample, Container, WeightedComponent,
+            Component, Composition, Element, DataFiles, DataFile, GUIConfig
+        )):
+            # Project objects become mappings of their attributes, minus
+            # anything the class lists in `yamlignore`.
+            return {
+                k: self.toYaml(v)
+                for k, v in var.__dict__.items()
+                if k not in var.yamlignore
+            }
+
+ @abstractmethod
+ def toBuiltin(self, yamlvar: any):
+ if yamlvar is None:
+ return None
+ if isinstance(yamlvar, (list, tuple)):
+ return [self.toBuiltin(v) for v in yamlvar]
+ elif (yamlvar.__class__.__module__ ==
+ "ruamel.yaml.comments.CommentedMap"):
+ dict = {}
+ for k, v in yamlvar.items():
+ dict[k] = v
+ return dict
+ elif yamlvar.__class__.__module__ == "builtins":
+ return yamlvar
+ elif yamlvar.__class__.__module__ == "ruamel.yaml.scalarfloat":
+ return float(yamlvar)
+ elif yamlvar.__class__.__module__ == "ruamel.yaml.scalarstring":
+ return str(yamlvar)
diff --git a/gudpy/core/io/gudrun_file_parser.py b/gudpy/core/io/gudrun_file_parser.py
new file mode 100644
index 00000000..c82c52e6
--- /dev/null
+++ b/gudpy/core/io/gudrun_file_parser.py
@@ -0,0 +1,1201 @@
+import re
+import os
+import typing as typ
+
+from core.composition import (
+ Component, Components, Composition
+)
+from core.data_files import DataFiles
+from core.element import Element
+from core.exception import ParserException
+from core import utils
+from core.instrument import Instrument
+from core.beam import Beam
+from core.normalisation import Normalisation
+from core.sample_background import SampleBackground
+from core.sample import Sample
+from core.container import Container
+from core.gudrun_file import GudrunFile
+from core import config
+import core.enums as enums
+
+
+class GudrunFileParser:
+ """Class to manage the parsing of Gudrun input files
+ """
+
+    def __init__(self):
+        """Create a parser with no input loaded yet."""
+        # Text stream: list of raw lines, consumed front-to-back by the
+        # token helpers (_getNextToken / _peekNextToken).
+        self.stream = None
+        # True when parsing a bare configuration file, in which case
+        # only the INSTRUMENT section is mandatory (see parse()).
+        self.config = False
+
+ @classmethod
+ def writeGudrunFileTo(cls, gudrunFile: GudrunFile, path: str):
+ gudrunFile.setGudrunDir(os.path.dirname(path))
+ for sampleBackground in gudrunFile.sampleBackgrounds:
+ sampleBackground.writeAllSamples = False
+
+ with open(path, "w", encoding="utf-8") as f:
+ f.write(str(gudrunFile))
+ f.close()
+
+ @classmethod
+ def writeGudrunFile(cls, gudrunFile: GudrunFile, runDir: str):
+ cls.writeGudrunFileTo(gudrunFile, os.path.join(
+ runDir, GudrunFile.OUTPATH))
+
+ @classmethod
+ def export(cls, gudrunFile: GudrunFile, path: str):
+ for sampleBackground in gudrunFile.sampleBackgrounds:
+ sampleBackground.writeAllSamples = True
+
+ with open(path, "w", encoding="utf-8") as f:
+ f.write(str(gudrunFile))
+ f.close()
+
+ def parse(self, lines: list[str], config=False) -> GudrunFile:
+ """
+ Parse the GudrunFile from a list of lines.
+ Assign objects from the file to the attributes of the class.
+ Raises ParserException if Instrument,
+ Beam or Normalisation are missing.
+ """
+ instrument = Instrument()
+ beam = Beam()
+ components = Components()
+ normalisation = Normalisation()
+ sampleBackgrounds = []
+
+ KEYWORDS = {
+ "INSTRUMENT": False,
+ "BEAM": False,
+ "NORMALISATION": False
+ }
+ self.stream = lines
+ # Here we go! Get the first token and begin parsing.
+ line = self._getNextToken()
+
+ # Iterate through the file,
+ # parsing the Instrument, Beam and Normalisation.
+ while (self.stream and not all(value for value in KEYWORDS.values())):
+ line = self._getNextToken()
+ kwd = utils.firstword(line)
+ if kwd == "INSTRUMENT":
+ instrument = self._parseInstrument()
+ KEYWORDS[kwd] = True
+ elif kwd == "BEAM":
+ beam = self._parseBeam()
+ KEYWORDS[kwd] = True
+ elif kwd == "NORMALISATION":
+ normalisation = self._parseNormalisation()
+ KEYWORDS[kwd] = True
+ # If we didn't parse each one of the keywords, then panic.
+ if not all(KEYWORDS.values()) and not config:
+ unparsed = []
+ for k, v in KEYWORDS.items():
+ if not v:
+ unparsed.append(k)
+ raise ParserException((
+ 'INSTRUMENT, BEAM, NORMALISATION,'
+ ' were not parsed. It\'s possible the file'
+ ' supplied is of an incorrect format!'
+ ))
+ elif not KEYWORDS["INSTRUMENT"] and config:
+ raise ParserException((
+ 'INSTRUMENT was not parsed. It\'s possible the file'
+ ' supplied is of an incorrect format!'
+ ))
+
+ # Ignore whitespace.
+ self._consumeWhitespace()
+ line = self._peekNextToken()
+
+ # Parse sample backgrounds, alongside their samples and containers.
+ while self.stream:
+ if "SAMPLE BACKGROUND" in line and "{" in line:
+ sampleBackgrounds.append(
+ self._sampleBackgroundHelper()
+ )
+ elif "COMPONENTS:" in line:
+ components = self._parseComponents(line)
+ line = self._getNextToken()
+
+ return GudrunFile(
+ instrument=instrument,
+ beam=beam,
+ normalisation=normalisation,
+ sampleBackgrounds=sampleBackgrounds,
+ components=components
+ )
+
+ def parseFromPath(
+ self, path: str, config=False
+ ) -> typ.Tuple[
+ Instrument, Beam, Normalisation,
+ list[SampleBackground], Components]:
+ """
+ Parse the GudrunFile from its path.
+ Assign objects from the file to the attributes of the class.
+ Raises ParserException if Instrument,
+ Beam or Normalisation are missing.
+ """
+ self.config = config
+
+ # Ensure only valid files are given.
+ if not path:
+ raise ParserException(
+ "Path not supplied. Cannot parse from an empty path!"
+ )
+ if not os.path.exists(path):
+ raise ParserException(
+ "The path supplied is invalid.\
+ Cannot parse from an invalid path" + path
+ )
+
+ # Decide the encoding
+ import chardet
+ with open(path, 'rb') as fp:
+ encoding = chardet.detect(fp.read())['encoding']
+ # Read the input stream into our attribute.
+ with open(path, encoding=encoding) as fp:
+ lines = fp.readlines()
+ return self.parse(lines, config)
+
+# =================================================================
+# PARSING HELPERS
+# =================================================================
+
+ def _getNextToken(self) -> typ.Union[str, None]:
+ """
+ Pops the 'next token' from the stream and returns it.
+ Essentially removes the first line in the stream and returns it.
+ """
+ return self.stream.pop(0) if self.stream else None
+
+ def _peekNextToken(self) -> typ.Union[str, None]:
+ """
+ Returns the next token in the input stream, without removing it.
+ """
+ return self.stream[0] if self.stream else None
+
+ def _consumeTokens(self, n: int) -> None:
+ """
+ Consume n tokens from the input stream.
+ """
+ for _ in range(n):
+ self._getNextToken()
+
+ def _consumeUpToDelim(self, delim: str) -> None:
+ """
+ Consume tokens iteratively, until a delimiter is reached.
+ """
+ line = self._getNextToken()
+ while line[0] != delim:
+ line = self._getNextToken()
+
+ def _consumeWhitespace(self) -> None:
+ """
+ Consume tokens iteratively, while they are whitespace.
+ """
+ line = self._peekNextToken()
+ if line and line.isspace():
+ self._getNextToken()
+ line = self._peekNextToken()
+
+ def _parseInstrument(self) -> Instrument:
+ """
+ Intialises an Instrument object and assigns it to the
+ instrument attribute.
+ Parses the attributes of the Instrument from the input stream.
+ Raises a ParserException if any mandatory attributes are missing.
+ """
+ try:
+ instrument = Instrument()
+ self._consumeWhitespace()
+
+ # For string attributes,
+ # we simply extract the utils.firstword in the line.
+ instrument.name = enums.Instruments[
+ utils.firstword(self._getNextToken())]
+ self._consumeTokens(1)
+ instrument.dataFileDir = os.path.abspath(
+ utils.firstword(self._getNextToken())) + os.path.sep
+ instrument.dataFileType = utils.firstword(self._getNextToken())
+ instrument.detectorCalibrationFileName = (
+ utils.firstword(self._getNextToken())
+ )
+
+ # For single integer attributes,
+ # we extract the zeroth int from the line.
+ instrument.columnNoPhiVals = utils.nthint(self._getNextToken(), 0)
+ instrument.groupFileName = utils.firstword(self._getNextToken())
+ instrument.deadtimeConstantsFileName = (
+ utils.firstword(self._getNextToken())
+ )
+
+ # For N integer attributes,
+ # we extract the first N integers from the line.
+ instrument.spectrumNumbersForIncidentBeamMonitor = (
+ utils.extract_ints_from_string(self._getNextToken())
+ )
+
+ # For integer pair attributes,
+ # we extract the first 2 integers from the line.
+ instrument.wavelengthRangeForMonitorNormalisation = (
+ utils.firstNFloats(self._getNextToken(), 2)
+ )
+
+ if all(
+ instrument.wavelengthRangeForMonitorNormalisation
+ ) == 0.0:
+ instrument.wavelengthRangeForMonitorNormalisation = [
+ 0, 0
+ ]
+
+ instrument.spectrumNumbersForTransmissionMonitor = (
+ utils.extract_ints_from_string(self._getNextToken())
+ )
+
+ # For single float attributes,
+ # we extract the zeroth float from the line.
+ instrument.incidentMonitorQuietCountConst = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+ instrument.transmissionMonitorQuietCountConst = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+
+ instrument.channelNosSpikeAnalysis = (
+ utils.firstNInts(self._getNextToken(), 2)
+ )
+ instrument.spikeAnalysisAcceptanceFactor = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+
+ # Extract wavelength range
+ # Which consists of the first 3 floats
+ # (min, max, step) in the line.
+ wavelengthRange = utils.firstNFloats(self._getNextToken(), 3)
+ instrument.wavelengthMin = wavelengthRange[0]
+ instrument.wavelengthMax = wavelengthRange[1]
+ instrument.wavelengthStep = wavelengthRange[2]
+
+ instrument.NoSmoothsOnMonitor = utils.nthint(
+ self._getNextToken(), 0)
+
+ # Extract X range
+ # Which consists of the first 3 floats
+ # (min, max, step) in the line.
+ XRange = utils.firstNFloats(self._getNextToken(), 3)
+
+ instrument.XMin = XRange[0]
+ instrument.XMax = XRange[1]
+ instrument.XStep = XRange[2]
+
+ # Extract the grouping parameter panel.
+ # Each row in the panel consists of the first 4 ints
+ # (Group, XMin, XMax, Background Factor) in the line.
+ # If the marker line is encountered,
+ # then the panel has been parsed.
+
+ line = self._getNextToken()
+ while "to end input of specified values" not in line:
+ group = utils.nthint(line, 0)
+ xMin = utils.nthfloat(line, 1)
+ xMax = utils.nthfloat(line, 2)
+ backgroundFactor = utils.nthfloat(line, 3)
+ instrument.groupingParameterPanel.append(
+ [group, xMin, xMax, backgroundFactor]
+ )
+ line = self._getNextToken()
+
+ instrument.groupsAcceptanceFactor = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+ instrument.mergePower = utils.nthint(self._getNextToken(), 0)
+
+ # For boolean attributes, we convert the first
+ # integer in the line to its boolean value.
+ instrument.subSingleAtomScattering = (
+ utils.boolifyNum(utils.nthint(self._getNextToken(), 0))
+ )
+
+ # For enumerated attributes, where the value of the attribute is
+ # the first integer in the line, and we must get the member,
+ # we do this: Enum[Enum(value).name]
+ instrument.mergeWeights = (
+ enums.MergeWeights[enums.MergeWeights(
+ utils.nthint(self._getNextToken(), 0)).name]
+ )
+ instrument.incidentFlightPath = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+ instrument.spectrumNumberForOutputDiagnosticFiles = (
+ utils.nthint(self._getNextToken(), 0)
+ )
+
+ instrument.neutronScatteringParametersFile = (
+ utils.firstword(self._getNextToken())
+
+ )
+ instrument.scaleSelection = (
+ enums.Scales[enums.Scales(
+ utils.nthint(self._getNextToken(), 0)).name]
+ )
+ instrument.subWavelengthBinnedData = (
+ utils.boolifyNum(utils.nthint(self._getNextToken(), 0))
+ )
+ self._consumeTokens(2)
+ instrument.logarithmicStepSize = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+ instrument.hardGroupEdges = (
+ utils.boolifyNum(utils.nthint(self._getNextToken(), 0))
+ )
+
+ # If NeXus files are being used, then we expect a NeXus definition
+ # file to be present, and extract it.
+ if (
+ instrument.dataFileType == "NXS"
+ or instrument.dataFileType == "nxs"
+ ):
+ instrument.nxsDefinitionFile = (
+ utils.firstword(self._getNextToken())
+ )
+
+ if self.config:
+ instrument.goodDetectorThreshold = utils.nthint(
+ self._getNextToken(), 0
+ )
+
+ # Consume whitespace and the closing brace.
+ self._consumeUpToDelim("}")
+
+ # Resolve the paths, to make them relative.
+ # First construct the regular expression to match against.
+ pattern = re.compile(r"StartupFiles\S*")
+
+ match = re.search(
+ pattern,
+ instrument.detectorCalibrationFileName
+ )
+
+ if match:
+ instrument.detectorCalibrationFileName = match.group()
+
+ match = re.search(
+ pattern,
+ instrument.groupFileName
+ )
+
+ if match:
+ instrument.groupFileName = match.group()
+
+ match = re.search(
+ pattern,
+ instrument.deadtimeConstantsFileName
+ )
+
+ if match:
+ instrument.deadtimeConstantsFileName = match.group()
+
+ match = re.search(
+ pattern,
+ instrument.neutronScatteringParametersFile
+ )
+
+ if match:
+ instrument.neutronScatteringParametersFile = match.group()
+
+ match = re.search(
+ pattern,
+ instrument.neutronScatteringParametersFile
+ )
+
+ if match:
+ instrument.neutronScatteringParametersFile = match.group()
+
+ return instrument
+
+ except Exception as e:
+ raise ParserException(
+ "Whilst parsing Instrument, an exception occured."
+ " The input file is most likely of an incorrect format, "
+ "and some attributes were missing."
+ f"{str(e)}"
+ ) from e
+
+    def _parseBeam(self) -> Beam:
+        """
+        Initialises a Beam object and assigns it to the
+        beam attribute.
+        Parses the attributes of the Beam from the input stream.
+        Raises a ParserException if any mandatory attributes are missing.
+
+        Returns
+        -------
+        Beam
+            The Beam parsed from the input lines.
+        """
+
+        try:
+            # Initialise beam attribute to a new instance of Beam.
+            beam = Beam()
+
+            self._consumeWhitespace()
+
+            # For enumerated attributes,
+            # where the member name of the attribute is
+            # the first 'word' in the line, and we must get the member,
+            # we do this: Enum[memberName].
+            beam.sampleGeometry = enums.Geometry[utils.firstword(
+                self._getNextToken())]
+
+            # Set the global geometry.
+            config.geometry = beam.sampleGeometry
+
+            # Ignore the number of beam values.
+            self._consumeTokens(1)
+
+            # For N float attributes,
+            # we extract the first N floats from the line.
+            beam.beamProfileValues = (
+                utils.extract_floats_from_string(self._getNextToken())
+            )
+
+            # The absorption step size, multiple-scattering step size and
+            # number of slices all share a single line.
+            # (NOTE: the local `range` shadows the builtin of that name.)
+            range = self._getNextToken()
+            beam.stepSizeAbsorption = utils.nthfloat(range, 0)
+            beam.stepSizeMS = utils.nthfloat(range, 1)
+            beam.noSlices = utils.nthint(range, 2)
+            beam.angularStepForCorrections = (
+                utils.nthint(self._getNextToken(), 0)
+            )
+
+            # Extract the incident beam edges
+            # relative to the centroid of the sample.
+            incidentBeamEdges = self._getNextToken()
+            beam.incidentBeamLeftEdge = utils.nthfloat(incidentBeamEdges, 0)
+            beam.incidentBeamRightEdge = utils.nthfloat(incidentBeamEdges, 1)
+            beam.incidentBeamBottomEdge = utils.nthfloat(incidentBeamEdges, 2)
+            beam.incidentBeamTopEdge = utils.nthfloat(incidentBeamEdges, 3)
+
+            # Extract the scattered beam edges
+            # relative to the centroid of the sample.
+            scatteredBeamEdges = self._getNextToken()
+            beam.scatteredBeamLeftEdge = utils.nthfloat(scatteredBeamEdges, 0)
+            beam.scatteredBeamRightEdge = utils.nthfloat(scatteredBeamEdges, 1)
+            beam.scatteredBeamBottomEdge = utils.nthfloat(
+                scatteredBeamEdges, 2)
+            beam.scatteredBeamTopEdge = utils.nthfloat(scatteredBeamEdges, 3)
+
+            # For string attributes,
+            # we simply extract the utils.firstword in the line.
+            beam.filenameIncidentBeamSpectrumParams = (
+                utils.firstword(self._getNextToken())
+            )
+
+            # Now match it against a pattern,
+            # to resolve the path to be relative.
+            pattern = re.compile(r"StartupFiles\S*")
+
+            match = re.search(
+                pattern,
+                beam.filenameIncidentBeamSpectrumParams
+            )
+
+            if match:
+                beam.filenameIncidentBeamSpectrumParams = match.group()
+
+            beam.overallBackgroundFactor = (
+                utils.nthfloat(self._getNextToken(), 0)
+            )
+            beam.sampleDependantBackgroundFactor = (
+                utils.nthfloat(self._getNextToken(), 0)
+            )
+            beam.shieldingAttenuationCoefficient = (
+                utils.nthfloat(self._getNextToken(), 0)
+            )
+
+            # Consume whitespace and the closing brace.
+            self._consumeUpToDelim("}")
+
+            return beam
+
+        except Exception as e:
+            raise ParserException(
+                "Whilst parsing Beam, an exception occured."
+                " The input file is most likely of an incorrect format, "
+                "and some attributes were missing."
+                f"{e}"
+            ) from e
+
+ def _parseNormalisation(self) -> Normalisation:
+ """
+ Intialises a Normalisation object and assigns it to the
+ normalisation attribute.
+ Parses the attributes of the Normalisation from the input stream.
+ Raises a ParserException if any mandatory attributes are missing.
+ """
+
+ try:
+ # Initialise normalisation attribute
+ # to a new instance of Normalisation.
+ normalisation = Normalisation()
+
+ self._consumeWhitespace()
+
+ # The number of files and period number are both stored
+ # on the same line.
+ # So we extract the 0th integer for the number of files,
+ # and the 1st integer for the period number.
+ dataFileInfo = self._getNextToken()
+ numberOfFiles = utils.nthint(dataFileInfo, 0)
+ normalisation.periodNumber = utils.nthint(dataFileInfo, 1)
+
+ # Extract params files
+ dataFiles = []
+ for _ in range(numberOfFiles):
+ dataFiles.append(utils.firstword(self._getNextToken()))
+ # Sorts list so that it is in ascending order
+ dataFiles.sort()
+
+ # Create a DataFiles object from the dataFiles list constructed.
+ normalisation.dataFiles = (
+ DataFiles(dataFiles, "NORMALISATION")
+ )
+
+ # The number of background files and
+ # background period number are both stored
+ # on the same line.
+ # So we extract the 0th integer for the number of background files,
+ # and the 1st integer for the background riod number.
+ dataFileInfoBg = self._getNextToken()
+ numberOfFilesBg = utils.nthint(dataFileInfoBg, 0)
+ normalisation.periodNumberBg = utils.nthint(dataFileInfoBg, 1)
+
+ # Extract background params files
+ dataFilesBg = []
+ for j in range(numberOfFilesBg):
+ dataFilesBg.append(utils.firstword(self._getNextToken()))
+
+ # Sorts list so that it is in ascending order
+ dataFilesBg.sort()
+
+ # Create a DataFiles object from the dataFiles list constructed.
+ normalisation.dataFilesBg = (
+ DataFiles(dataFilesBg, "NORMALISATION BACKGROUND")
+ )
+
+ # For boolean attributes, we convert the first
+ # integer in the line to its boolean value.
+ normalisation.forceCalculationOfCorrections = (
+ utils.boolifyNum(utils.nthint(self._getNextToken(), 0))
+ )
+
+ # Construct composition
+ composition = []
+ line = self._getNextToken()
+ # Extract the composition.
+ # Each element in the composition consists of the first 'word',
+ # integer at the second position, and float at the third position,
+ # (Atomic Symbol, MassNo, Abundance) in the line.
+ # If the marker line is encountered,
+ # then the panel has been parsed.
+ while "end of composition input" not in line:
+ atomicSymbol = utils.firstword(line)
+ massNo = utils.nthfloat(line, 1)
+ abundance = utils.nthfloat(line, 2)
+
+ # Create an Element object and append to the composition list.
+ composition.append(
+ Element(atomicSymbol, massNo, abundance)
+ )
+ line = self._getNextToken()
+
+ # Create a Composition object from the dataFiles list constructed.
+ normalisation.composition = (
+ Composition("Normalisation", elements=composition)
+ )
+
+ # For enumerated attributes,
+ # where the member name of the attribute is
+ # the first 'word' in the line, and we must get the member,
+ # we do this: Enum[memberName].
+ normalisation.geometry = (
+ enums.Geometry[utils.firstword(self._getNextToken())]
+ )
+
+ # Is the geometry FLATPLATE?
+ if (
+ (
+ normalisation.geometry == enums.Geometry.SameAsBeam
+ and config.geometry == enums.Geometry.FLATPLATE
+ )
+ or normalisation.geometry == enums.Geometry.FLATPLATE):
+ # If is is FLATPLATE, then extract the upstream and downstream
+ # thickness, the angle of rotation and sample width.
+ thickness = self._getNextToken()
+ normalisation.upstreamThickness = utils.nthfloat(thickness, 0)
+ normalisation.downstreamThickness = (
+ utils.nthfloat(thickness, 1)
+ )
+ geometryInfo = self._getNextToken()
+ normalisation.angleOfRotation = utils.nthfloat(geometryInfo, 0)
+ normalisation.sampleWidth = utils.nthfloat(geometryInfo, 1)
+ else:
+
+ # Otherwise, it is CYLINDRICAL,
+ # then extract the inner and outer
+ # radii and the sample height.
+ radii = self._getNextToken()
+ normalisation.innerRadius = utils.nthfloat(radii, 0)
+ normalisation.outerRadius = utils.nthfloat(radii, 1)
+ normalisation.sampleHeight = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+
+ # Extract the density.
+ density = utils.nthfloat(self._getNextToken(), 0)
+
+ # Take the absolute value of the density - since it could be -ve.
+ normalisation.density = abs(density)
+
+ # Decide on the units of density.
+ # -ve density means it is atomic (atoms/A^3)
+ # +ve means it is chemical (gm/cm^3)
+ normalisation.densityUnits = (
+ enums.UnitsOfDensity.ATOMIC if
+ density < 0
+ else enums.UnitsOfDensity.CHEMICAL
+ )
+
+ normalisation.tempForNormalisationPC = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+ crossSectionSource = utils.firstword(self._getNextToken())
+ if (
+ crossSectionSource == "TABLES"
+ or crossSectionSource == "TRANSMISSION"
+ ):
+ normalisation.totalCrossSectionSource = (
+ enums.CrossSectionSource[crossSectionSource]
+ )
+ else:
+ normalisation.totalCrossSectionSource = (
+ enums.CrossSectionSource.FILE
+ )
+ normalisation.crossSectionFilename = crossSectionSource
+
+ normalisation.normalisationDifferentialCrossSectionFile = (
+ utils.firstword(self._getNextToken())
+ )
+
+ normalisation.lowerLimitSmoothedNormalisation = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+ normalisation.normalisationDegreeSmoothing = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+ normalisation.minNormalisationSignalBR = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+
+ # Consume whitespace and the closing brace.
+ self._consumeUpToDelim("}")
+
+ # Resolve to relative.
+ pattern = re.compile(r"StartupFiles\S*")
+
+ match = re.search(
+ pattern,
+ normalisation.
+ normalisationDifferentialCrossSectionFile
+ )
+
+ if match:
+ (
+ normalisation.
+ normalisationDifferentialCrossSectionFile
+ ) = match.group()
+
+ return normalisation
+
+ except Exception as e:
+ raise ParserException(
+ "Whilst parsing Normalisation, an exception occured."
+ " The input file is most likely of an incorrect format, "
+ "and some attributes were missing."
+ ) from e
+
+    def _parseSampleBackground(self) -> SampleBackground:
+        """
+        Initialises a SampleBackground object.
+        Parses the attributes of the SampleBackground from the input stream.
+        Raises a ParserException if any mandatory attributes are missing.
+        Returns the parsed object.
+
+        NOTE(review): if the next token is not a "SAMPLE BACKGROUND {"
+        header, the body is skipped and a default-constructed
+        SampleBackground is returned silently — confirm that callers
+        only invoke this after peeking the header.
+
+        Parameters
+        ----------
+        None
+        Returns
+        -------
+        sampleBackground : SampleBackground
+            The SampleBackground that was parsed from the input lines.
+        """
+
+        try:
+            sampleBackground = SampleBackground()
+            line = self._peekNextToken()
+            if "SAMPLE BACKGROUND" in line and "{" in line:
+                self._consumeTokens(1)
+                self._consumeWhitespace()
+                # File count and period number share one line.
+                dataFileInfo = self._getNextToken()
+                numberOfFiles = utils.nthint(dataFileInfo, 0)
+                sampleBackground.periodNumber = utils.nthint(dataFileInfo, 1)
+
+                dataFiles = []
+                for _ in range(numberOfFiles):
+                    dataFiles.append(utils.firstword(self._getNextToken()))
+                sampleBackground.dataFiles = (
+                    DataFiles(dataFiles, "SAMPLE BACKGROUND")
+                )
+
+                # Consume whitespace and the closing brace.
+                self._consumeUpToDelim("}")
+
+            return sampleBackground
+        except Exception as e:
+            raise ParserException(
+                "Whilst parsing Sample Background, an exception occured."
+                " The input file is most likely of an incorrect format, "
+                "and some attributes were missing."
+            ) from e
+
+ def _parseSample(self) -> Sample:
+ """
+ Intialises a Sample object.
+ Parses the attributes of the Sample from the input stream.
+ Raises a ParserException if any mandatory attributes are missing.
+ Returns the parsed object.
+ """
+
+ try:
+ # Create a new instance of Sample.
+ sample = Sample()
+
+ # Extract the sample name, and then discard whitespace lines.
+ sample.name = (
+ str(self._getNextToken()[:-2]).strip()
+ .replace("SAMPLE", "").strip()
+ )
+ sample.name = utils.replace_unwanted_chars(sample.name)
+ self._consumeWhitespace()
+ # The number of files and period number are both stored
+ # on the same line.
+ # So we extract the 0th integer for the number of files,
+ # and the 1st integer for the period number.
+ dataFileInfo = self._getNextToken()
+ numberOfFiles = utils.nthint(dataFileInfo, 0)
+ sample.periodNumber = utils.nthint(dataFileInfo, 1)
+
+ # Extract params files
+ dataFiles = []
+ for _ in range(numberOfFiles):
+ dataFiles.append(utils.firstword(self._getNextToken()))
+ # Create a DataFiles object from the dataFiles list constructed.
+ sample.dataFiles = DataFiles(dataFiles, sample.name, True)
+
+ # For boolean attributes, we convert the first
+ # integer in the line to its boolean value.
+ sample.forceCalculationOfCorrections = (
+ utils.boolifyNum(utils.nthint(self._getNextToken(), 0))
+ )
+
+ # Construct composition
+ composition = []
+ line = self._getNextToken()
+
+ # Extract the composition.
+ # Each element in the composition consists of the first 'word',
+ # integer at the second position, and float t the first position,
+ # (Atomic Symbol, MassNo, Abundance) in the line.
+ # If the marker line is encountered,
+ # then the panel has been parsed.
+ while "end of composition input" not in line:
+
+ atomicSymbol = utils.firstword(line)
+ massNo = utils.nthfloat(line, 1)
+ abundance = utils.nthfloat(line, 2)
+
+ # Create an Element object and append to the composition list.
+ composition.append(Element(atomicSymbol, massNo, abundance))
+ line = self._getNextToken()
+
+ # Create a Composition object from the dataFiles list constructed.
+ sample.composition = Composition("Sample", elements=composition)
+
+ # For enumerated attributes,
+ # where the member name of the attribute is
+ # the first 'word' in the line, and we must get the member,
+ # we do this: Enum[memberName].
+ sample.geometry = enums.Geometry[utils.firstword(
+ self._getNextToken())]
+
+ # Is the geometry FLATPLATE?
+ if (
+ (
+ sample.geometry == enums.Geometry.SameAsBeam
+ and config.geometry == enums.Geometry.FLATPLATE
+ )
+ or sample.geometry == enums.Geometry.FLATPLATE):
+ # If is is FLATPLATE, then extract the upstream and downstream
+ # thickness, the angle of rotation and sample width.
+ thickness = self._getNextToken()
+ sample.upstreamThickness = utils.nthfloat(thickness, 0)
+ sample.downstreamThickness = utils.nthfloat(thickness, 1)
+
+ geometryInfo = self._getNextToken()
+ sample.angleOfRotation = utils.nthfloat(geometryInfo, 0)
+ sample.sampleWidth = utils.nthfloat(geometryInfo, 1)
+ else:
+
+ # Otherwise, it is CYLINDRICAL,
+ # then extract the inner and outer
+ # radii and the sample height.
+ radii = self._getNextToken()
+ sample.innerRadius = utils.nthfloat(radii, 0)
+ sample.outerRadius = utils.nthfloat(radii, 1)
+ sample.sampleHeight = utils.nthfloat(self._getNextToken(), 0)
+
+ # Extract the density.
+ density = utils.nthfloat(self._getNextToken(), 0)
+
+ # Decide on the units of density.
+ # -ve density means it is atomic (atoms/A^3)
+ # +ve means it is chemical (gm/cm^3)
+ sample.density = abs(density)
+ sample.densityUnits = (
+ enums.UnitsOfDensity.ATOMIC if
+ density < 0
+ else enums.UnitsOfDensity.CHEMICAL
+ )
+ sample.tempForNormalisationPC = utils.nthfloat(
+ self._getNextToken(), 0)
+ crossSectionSource = utils.firstword(self._getNextToken())
+ if (
+ crossSectionSource == "TABLES"
+ or crossSectionSource == "TRANSMISSION"
+ ):
+ sample.totalCrossSectionSource = (
+ enums.CrossSectionSource[crossSectionSource]
+ )
+ else:
+ sample.totalCrossSectionSource = enums.CrossSectionSource.FILE
+ sample.crossSectionFilename = crossSectionSource
+ sample.sampleTweakFactor = utils.nthfloat(self._getNextToken(), 0)
+
+ topHatW = utils.nthfloat(self._getNextToken(), 0)
+ if topHatW == 0:
+ sample.topHatW = 0
+ sample.FTMode = enums.FTModes.NO_FT
+ elif topHatW < 0:
+ sample.topHatW = abs(topHatW)
+ sample.FTMode = enums.FTModes.SUB_AVERAGE
+ else:
+ sample.topHatW = topHatW
+ sample.FTMode = enums.FTModes.ABSOLUTE
+
+ sample.minRadFT = utils.nthfloat(self._getNextToken(), 0)
+ sample.grBroadening = utils.nthfloat(self._getNextToken(), 0)
+
+ # Extract the resonance values.
+ # Each row consists of the first 2 floats.
+ # (minWavelength, maxWavelength) in the line.
+ # If the marker line is encountered,
+            # then the values have been parsed.
+ line = self._getNextToken()
+ while (
+ "to finish specifying wavelength range of resonance"
+ not in line
+ ):
+ sample.resonanceValues.append(
+ utils.extract_floats_from_string(line)
+ )
+ line = self._getNextToken()
+
+ # Extract the exponential values.
+ # Each row consists of the first 3 numbers.
+ # (Amplitude, Decay, N) in the line.
+ # If the marker line is encountered,
+            # then the values have been parsed.
+ line = self._getNextToken()
+ if "to specify end of exponential parameter input" not in line:
+ sample.exponentialValues = []
+ while "to specify end of exponential parameter input" not in line:
+ sample.exponentialValues.append(
+ utils.extract_nums_from_string(line)
+ )
+
+ line = self._getNextToken()
+
+ sample.normalisationCorrectionFactor = (
+ utils.nthfloat(self._getNextToken(), 0)
+ )
+ sample.fileSelfScattering = utils.firstword(self._getNextToken())
+ sample.normaliseTo = (
+ enums.NormalisationType[
+ enums.NormalisationType(utils.nthint(
+ self._getNextToken(), 0)).name
+ ]
+ )
+ sample.maxRadFT = utils.nthfloat(self._getNextToken(), 0)
+ sample.outputUnits = (
+ enums.OutputUnits[enums.OutputUnits(
+ utils.nthint(self._getNextToken(), 0)).name]
+ )
+ sample.powerForBroadening = utils.nthfloat(self._getNextToken(), 0)
+ sample.stepSize = utils.nthfloat(self._getNextToken(), 0)
+ sample.runThisSample = utils.boolifyNum(
+ utils.nthint(self._getNextToken(), 0))
+ environmentValues = self._getNextToken()
+ sample.scatteringFraction = utils.nthfloat(environmentValues, 0)
+ sample.attenuationCoefficient = utils.nthfloat(
+ environmentValues, 1)
+
+ # Consume whitespace and the closing brace.
+ self._consumeUpToDelim("}")
+
+ return sample
+
+ except Exception as e:
+ raise ParserException(
+ "Whilst parsing Sample, an exception occured."
+ " The input file is most likely of an incorrect format, "
+ "and some attributes were missing."
+ ) from e
+
+ def _parseContainer(self) -> Container:
+ """
+        Initialises a Container object.
+ Parses the attributes of the Container from the input stream.
+ Raises a ParserException if any mandatory attributes are missing.
+ Returns the parsed object.
+ """
+
+ try:
+ # Create a new instance of Container.
+ container = Container()
+
+ # Extract the name from the lines,
+ # and then discard the unnecessary lines.
+ container.name = (
+ str(self._getNextToken()[:-2]).strip()
+ .replace("CONTAINER", "").strip()
+ )
+ self._consumeWhitespace()
+
+ # The number of files and period number are both stored
+ # on the same line.
+ # So we extract the 0th integer for the number of files,
+ # and the 1st integer for the period number.
+ dataFileInfo = self._getNextToken()
+ numberOfFiles = utils.nthint(dataFileInfo, 0)
+ container.periodNumber = utils.nthint(dataFileInfo, 1)
+
+ # Extract params files
+ dataFiles = []
+ for _ in range(numberOfFiles):
+ dataFiles.append(utils.firstword(self._getNextToken()))
+
+ # Create a DataFiles object from the dataFiles list constructed.
+ container.dataFiles = DataFiles(dataFiles, container.name, True)
+
+ # Construct composition
+ composition = []
+ line = self._getNextToken()
+ # Extract the composition.
+ # Each element in the composition consists of the first 'word',
+            # integer at the second position, and float at the first position,
+ # (Atomic Symbol, MassNo, Abundance) in the line.
+ # If the marker line is encountered,
+            # then the composition has been parsed.
+ while "end of composition input" not in line:
+
+ atomicSymbol = utils.firstword(line)
+ massNo = utils.nthfloat(line, 1)
+ abundance = utils.nthfloat(line, 2)
+
+ # Create an Element object and append to the composition list.
+ composition.append(Element(atomicSymbol, massNo, abundance))
+ line = self._getNextToken()
+ # Create a Composition object from the dataFiles list constructed.
+ container.composition = Composition(
+ "Container",
+ elements=composition
+ )
+
+ # For enumerated attributes,
+ # where the member name of the attribute is
+ # the first 'word' in the line, and we must get the member,
+ # we do this: Enum[memberName].
+ container.geometry = enums.Geometry[utils.firstword(
+ self._getNextToken())]
+
+ # Is the geometry FLATPLATE?
+ if (
+ (
+ container.geometry == enums.Geometry.SameAsBeam
+ and config.geometry == enums.Geometry.FLATPLATE
+ )
+ or container.geometry == enums.Geometry.FLATPLATE):
+                # If it is FLATPLATE, then extract the upstream and downstream
+ # thickness, the angle of rotation and sample width.
+ thickness = self._getNextToken()
+ container.upstreamThickness = utils.nthfloat(thickness, 0)
+ container.downstreamThickness = utils.nthfloat(thickness, 1)
+
+ geometryValues = self._getNextToken()
+ container.angleOfRotation = utils.nthfloat(geometryValues, 0)
+ container.sampleWidth = utils.nthfloat(geometryValues, 1)
+ else:
+
+ # Otherwise, it is CYLINDRICAL,
+ # then extract the inner and outer
+ # radii and the sample height.
+ radii = self._getNextToken()
+ container.innerRadius = utils.nthfloat(radii, 0)
+ container.outerRadius = utils.nthfloat(radii, 1)
+ container.sampleHeight = utils.nthfloat(
+ self._getNextToken(), 0)
+
+ # Extract the density.
+ density = utils.nthfloat(self._getNextToken(), 0)
+
+ # Take the absolute value of the density - since it could be -ve.
+ container.density = abs(density)
+
+ # Decide on the units of density.
+ # -ve density means it is atomic (atoms/A^3)
+ # +ve means it is chemical (gm/cm^3)
+ container.densityUnits = (
+ enums.UnitsOfDensity.ATOMIC if
+ density < 0
+ else enums.UnitsOfDensity.CHEMICAL
+ )
+ crossSectionSource = utils.firstword(self._getNextToken())
+ if (
+ crossSectionSource == "TABLES"
+ or crossSectionSource == "TRANSMISSION"
+ ):
+ container.totalCrossSectionSource = (
+ enums.CrossSectionSource[crossSectionSource]
+ )
+ else:
+ container.totalCrossSectionSource = (
+ enums.CrossSectionSource.FILE
+ )
+ container.crossSectionFilename = crossSectionSource
+ container.tweakFactor = utils.nthfloat(self._getNextToken(), 0)
+
+ environmentValues = self._getNextToken()
+ container.scatteringFraction = utils.nthfloat(environmentValues, 0)
+ container.attenuationCoefficient = utils.nthfloat(
+ environmentValues, 1)
+
+ # Consume whitespace and the closing brace.
+ self._consumeUpToDelim("}")
+
+ return container
+
+ except Exception as e:
+ raise ParserException(
+ "Whilst parsing Container, an exception occured."
+ " The input file is most likely of an incorrect format, "
+ "and some attributes were missing."
+ ) from e
+
+ def _parseComponents(self) -> Components:
+ components = Components()
+ try:
+ while self.stream:
+ component = self._parseComponent()
+ if component:
+ components.addComponent(component)
+ return components
+ except Exception as e:
+ raise ParserException(
+ "Whilst parsing Components, an exception occured."
+ " The input file is most likely of an incorrect format."
+ ) from e
+
+ def _parseComponent(self) -> Component:
+ name = self._getNextToken().rstrip()
+ component = Component(name)
+ line = self._peekNextToken()
+ if "(" in line:
+ self._consumeTokens(1)
+ else:
+ return
+ line = self._getNextToken()
+ while line and ")" not in line:
+ atomicSymbol, massNo, abundance = line.split()
+ element = Element(atomicSymbol, float(massNo), float(abundance))
+ component.addElement(element)
+ line = self._getNextToken()
+ return component
+
+ def _sampleBackgroundHelper(self) -> SampleBackground:
+ """
+ Helper method for parsing Sample Background and its
+ Samples and their Containers.
+ Returns the SampleBackground object.
+ Parameters
+ ----------
+ None
+ Returns
+ -------
+ SampleBackground
+ The SampleBackground parsed from the lines.
+ """
+
+ # Parse sample background.
+ sampleBackground = self._parseSampleBackground()
+
+ self._consumeWhitespace()
+ line = self._peekNextToken()
+
+ # Parse all Samples and Containers belonging to the sample background.
+ while "END" not in line and "SAMPLE BACKGROUND" not in line:
+ if not line:
+ raise ParserException("Unexpected EOF during parsing.")
+ elif "GO" in line:
+ self._getNextToken()
+ elif "SAMPLE" in line and utils.firstword(line) == "SAMPLE":
+ sample = self._parseSample()
+ if not sample.name:
+ sample.name = utils.replace_unwanted_chars(
+ utils.uniquifyName(
+ "SAMPLE",
+ [s.name for s in sampleBackground.samples],
+ sep="",
+ incFirst=True
+ ))
+ sampleBackground.samples.append(sample)
+ elif "CONTAINER" in line and utils.firstword(line) == "CONTAINER":
+ container = self._parseContainer()
+ if not container.name:
+ container.name = utils.uniquifyName(
+ "CONTAINER",
+ [c.name
+ for c in sampleBackground.samples[-1].containers],
+ sep="",
+ incFirst=True)
+ sampleBackground.samples[-1].containers.append(
+ container
+ )
+ self._consumeWhitespace()
+ line = self._peekNextToken()
+ return sampleBackground
+
+
+Container._getNextToken = GudrunFileParser._getNextToken
+Container._peekNextToken = GudrunFileParser._peekNextToken
+Container._consumeUpToDelim = GudrunFileParser._consumeUpToDelim
+Container._consumeWhitespace = GudrunFileParser._consumeWhitespace
diff --git a/gudpy/core/iterators.py b/gudpy/core/iterators.py
index 23360693..4187acea 100644
--- a/gudpy/core/iterators.py
+++ b/gudpy/core/iterators.py
@@ -1,10 +1,12 @@
from copy import deepcopy
+import os
import math
from enum import Enum
from core.gud_file import GudFile
from core.enums import Scales, IterationModes
from core.gudrun_file import GudrunFile
+from core import utils
import core.output_file_handler as handlers
@@ -61,7 +63,6 @@ def __init__(self, nTotal):
def performIteration(
self,
gudrunFile: GudrunFile,
- prevOutput: handlers.GudrunOutput
) -> GudrunFile:
"""
Performs a single iteration of the current workflow.
@@ -75,18 +76,18 @@ def performIteration(
s for s in sampleBackground.samples
if s.runThisSample and len(s.dataFiles)
]:
- gudFile = prevOutput.gudFile(name=sample.name)
+ gudFile = sample.gudFile
# Calculate coefficient: actualDCSLevel / expectedDCSLevel
coefficient = (
gudFile.averageLevelMergedDCS / gudFile.expectedDCS
)
# Apply the coefficient.
self.applyCoefficientToAttribute(
- sample, coefficient, prevOutput)
+ sample, coefficient)
self.nCurrent += 1
return gudrunFile
- def applyCoefficientToAttribute(self, sample, coefficient, prevOutput):
+ def applyCoefficientToAttribute(self, sample, coefficient):
"""
Stub method to be overriden by sub-classes.
The idea is that this method applies the 'coefficient'
@@ -101,14 +102,14 @@ def applyCoefficientToAttribute(self, sample, coefficient, prevOutput):
"""
pass
- def organiseOutput(self, gudrunFile, exclude=[]):
+ def organiseOutput(self, gudrunFile, exclude=[]) -> None:
"""
This organises the output of the iteration.
"""
outputHandler = handlers.GudrunOutputHandler(
gudrunFile=gudrunFile,
)
- return outputHandler.organiseOutput(exclude=exclude)
+ outputHandler.organiseOutput(exclude=exclude)
class Radius(Iterator):
@@ -136,7 +137,7 @@ def __init__(self, nTotal, target="inner"):
self.iterationMode = None
self.setTargetRadius(target)
- def applyCoefficientToAttribute(self, sample, coefficient, prevOutput):
+ def applyCoefficientToAttribute(self, sample, coefficient):
if not self.result.get(sample.name, ""):
self.result[sample.name] = {}
self.result[sample.name]["Old"] = {
@@ -182,11 +183,11 @@ class Thickness(Iterator):
"""
def __init__(self, nTotal):
- super().__init__(nTotal)
self.name = "Thickness"
+ super().__init__(nTotal)
self.iterationMode = IterationModes.THICKNESS
- def applyCoefficientToAttribute(self, sample, coefficient, prevOutput):
+ def applyCoefficientToAttribute(self, sample, coefficient):
if not self.result.get(sample.name, ""):
self.result[sample.name] = {}
self.result[sample.name]["Old"] = {
@@ -230,11 +231,11 @@ class TweakFactor(Iterator):
"""
def __init__(self, nTotal):
- super().__init__(nTotal)
self.name = "TweakFactor"
+ super().__init__(nTotal)
self.iterationMode = IterationModes.TWEAK_FACTOR
- def performIteration(self, gudrunFile, prevOutput) -> GudrunFile:
+ def performIteration(self, gudrunFile) -> GudrunFile:
"""
Performs a single iteration of the current workflow.
@@ -256,7 +257,7 @@ def performIteration(self, gudrunFile, prevOutput) -> GudrunFile:
self.result[sample.name]["Old"] = {
"Tweak Factor": sample.sampleTweakFactor
}
- gudFile = prevOutput.gudFile(name=sample.name)
+ gudFile = sample.gudFile
tweakFactor = float(gudFile.suggestedTweakFactor)
sample.sampleTweakFactor = tweakFactor
@@ -284,11 +285,11 @@ class Density(Iterator):
"""
def __init__(self, nTotal):
- super().__init__(nTotal)
self.name = "Density"
+ super().__init__(nTotal)
self.iterationMode = IterationModes.DENSITY
- def applyCoefficientToAttribute(self, sample, coefficient, prevOutput):
+ def applyCoefficientToAttribute(self, sample, coefficient):
"""
Multiplies a sample's density by a given coefficient.
Overrides the implementation from the base class.
@@ -373,8 +374,8 @@ def __init__(self, nTotal):
Input GudrunFile that we will be using for iterating.
"""
nTotal *= 2
- super().__init__(nTotal)
self.name = "Inelasticity Subtraction"
+ super().__init__(nTotal)
self.iterationMode = IterationModes.INELASTICITY
# Does a default iteration first (no changes)
self.iterationType = "QIteration"
@@ -387,12 +388,10 @@ def __init__(self, nTotal):
self.QMax = 0.
self.QMin = 0.
self.QStep = 0.
- self.gudrunOutputs = []
def setIteration(self, iterType):
self.iterationType = iterType
- name = "Inelasticity Subtraction"
- self.name = f"{name} ({iterType})"
+ self.name = f"{iterType}"
def enableLogarithmicBinning(self, gudrunFile):
"""
@@ -476,34 +475,35 @@ def collectTopHatWidths(self, gudrunFile):
if sample.runThisSample:
self.topHatWidths.append(sample.topHatW)
- def setSelfScatteringFiles(self, scale, gudrunFile, prevOutput=None):
+ def setSelfScatteringFiles(self, scale, gudrunFile):
"""
Alters file extensions of self scattering files for samples being run.
If the scale selected is the Q-scale, then set self scattering file
extensions to msubw01. If the scale selected is the wavelength-scale,
then set self scattering file extensions to mint01.
"""
- # Dict to pick suffix based on scale
- suffix = {Scales.Q: ".msubw01", Scales.WAVELENGTH: ".mint01"}[scale]
# Iterate through all of the samples, and set the suffixes of
# all of their data files to the suffix
# relevant to the specified scale
- for sampleBackground in gudrunFile.sampleBackgrounds:
- for sample in sampleBackground.samples:
- if sample.runThisSample and len(sample.dataFiles):
- target = sample
- filename = target.dataFiles[0]
- targetFile = (
- prevOutput.output(
- sample.name, filename, suffix)
- if prevOutput else ""
- )
- target.fileSelfScattering = (
- targetFile
+ if self.iterationCount == 0:
+ return
+ for sample in gudrunFile.runSamples():
+ if len(sample.dataFiles):
+ if scale == Scales.Q:
+ targetFile = sample.dataFiles[0].msubwFile
+ else:
+ targetFile = sample.dataFiles[0].mintFile
+ if not targetFile:
+ raise RuntimeError(
+ "There was an issue with Gudrun, please try again."
)
+ sample.fileSelfScattering = (
+ os.path.basename(targetFile)
+ )
+ sample.selfScatteringFilePath = targetFile
- def wavelengthIteration(self, gudrunFile, prevOutput):
+ def wavelengthIteration(self, gudrunFile):
"""
Performs one iteration on the wavelength scale.
If the iteration is the first iteration,
@@ -540,9 +540,9 @@ def wavelengthIteration(self, gudrunFile, prevOutput):
Scales.WAVELENGTH
)
self.zeroTopHatWidths(gudrunFile)
- self.setSelfScatteringFiles(Scales.WAVELENGTH, gudrunFile, prevOutput)
+ self.setSelfScatteringFiles(Scales.WAVELENGTH, gudrunFile)
- def QIteration(self, gudrunFile, prevOutput):
+ def QIteration(self, gudrunFile):
"""
Performs one iteration on the Q scale.
Enables subtracting of wavelength-binned data.
@@ -563,15 +563,15 @@ def QIteration(self, gudrunFile, prevOutput):
self.disableLogarithmicBinning(gudrunFile)
gudrunFile.instrument.scaleSelection = Scales.Q
self.resetTopHatWidths(gudrunFile)
- self.setSelfScatteringFiles(Scales.Q, gudrunFile, prevOutput)
+ self.setSelfScatteringFiles(Scales.Q, gudrunFile)
- def performIteration(self, gudrunFile, prevOutput):
+ def performIteration(self, gudrunFile):
if self.iterationType == "QIteration":
- self.wavelengthIteration(gudrunFile, prevOutput)
+ self.wavelengthIteration(gudrunFile)
self.iterationCount += 1
else:
- self.QIteration(gudrunFile, prevOutput)
- self.nCurrent += 1
+ self.QIteration(gudrunFile)
+ self.nCurrent += 1
return gudrunFile
def organiseOutput(self, gudrunFile, exclude=[]):
@@ -580,14 +580,19 @@ def organiseOutput(self, gudrunFile, exclude=[]):
"""
overwrite = (self.iterationCount == 1 and
self.iterationType == "WavelengthIteration")
+ head = os.path.join(
+ f"{utils.replace_unwanted_chars(self.name)}",
+ f"{self.iterationType}_{self.iterationCount}"
+ )
+
+ # Organise output history (don't overwrite)
+
outputHandler = handlers.GudrunOutputHandler(
gudrunFile=gudrunFile,
- head=f"{self.iterationType}_{self.iterationCount}",
+ head=head,
overwrite=overwrite
)
- output = outputHandler.organiseOutput(exclude=exclude)
- self.gudrunOutputs.append(output)
- return output
+ outputHandler.organiseOutput(exclude=exclude)
def calculateTotalMolecules(components, sample):
@@ -652,9 +657,9 @@ def __init__(
ratio=1,
components=[],
):
+ self.name = "Composition"
super().__init__(nTotal)
self.requireDefault = False
- self.name = "Composition"
self.originalGudrunFile = gudrunFile
self.mode = mode
self.nCurrent = 0
diff --git a/gudpy/core/optimise.py b/gudpy/core/optimise.py
new file mode 100644
index 00000000..9423035a
--- /dev/null
+++ b/gudpy/core/optimise.py
@@ -0,0 +1,255 @@
+import skopt
+import os
+import tempfile
+import copy
+from scipy.optimize import minimize
+import shutil
+
+import gudpy_cli as cli
+from core import data
+from core import gudpy
+from core import utils
+from core.gudrun_file import GudrunFile
+from core.sample import Sample
+from core.io import gudpy_io
+import core.exception as exc
+
+
+class InelasticityOptimisation:
+ def __init__(
+ self,
+ gudrunIterator: "gudpy.GudrunIterator",
+ samples: list[Sample],
+ purge=None,
+ ):
+ self.samples = []
+ self.purge = purge
+ self.gudrunIterator = gudrunIterator
+ self.mse = {}
+ self.results = {}
+ self.exitcode = 0
+ self.error = ""
+
+ self.mseRec = {}
+
+ for sample in self.gudrunFile.samples():
+ sample.runThisSample = False
+ if sample.name in [s.name for s in samples]:
+ sample.runThisSample = True
+ self.samples.append(sample)
+
+ self.mse[sample.name] = {}
+ self.mse[sample.name]["initial"] = copy.deepcopy(sample)
+ self.results[sample.name] = {}
+
+ @property
+ def gudrunFile(self):
+ return self.gudrunIterator.gudrunFile
+
+ def optimise(self):
+ projectDir = gudpy_io.GudPyIO.projectDir
+ gudpy_io.GudPyIO.projectDir = os.path.join(projectDir, "Optimise")
+
+ for gudrun in self.gudrunIterator.gudrunObjects:
+ exitcode, error = self.gudrunIterator.singleIteration(
+ self.gudrunFile, gudrun, self.purge)
+ self.exitcode = exitcode
+ if exitcode: # An exit code != 0 indicates failure
+ self.error = error
+ return (exitcode, error)
+ if self.gudrunIterator.iterator.iterationType == "QIteration":
+ for sample in self.samples:
+ i = self.gudrunIterator.iterator.iterationCount
+ self.mse[sample.name][i] = copy.deepcopy(sample)
+
+ for sample in self.samples:
+ self.results[sample.name]["bestSample"] = (
+ self.mse[sample.name]["initial"]
+ )
+ self.results[sample.name]["bestIt"] = "None"
+ for iteration, sample in self.mse[sample.name].items():
+ if (sample.mse()
+ < self.results[sample.name]["bestSample"].mse()):
+ self.results[sample.name]["bestSample"] = sample
+ self.results[sample.name]["bestIt"] = iteration
+
+ gudpy_io.GudPyIO.projectDir = projectDir
+
+ return (0, self.mseRec)
+
+ def exportResults(self, path):
+ utils.makeDir(path)
+ for sample in self.samples:
+ bestMint = (
+ self.results[sample.name]["bestSample"].dataFiles[0].mintFile
+ )
+ shutil.copyfile(bestMint, os.path.join(
+ path, f"{sample.name}.mint01"
+ ))
+
+
+class BayesianOptimisation:
+ def __init__(
+ self,
+ gudrunFile: GudrunFile,
+ samples: list[Sample],
+ purge: "gudpy.Purge" = None,
+ limit: float = 0.5,
+ nIters: int = 15,
+ verbose=False
+ ) -> None:
+ self.limit = limit
+ self.nCalls = nIters
+ self.nIters = nIters * len(samples)
+ self.purge = purge
+ self.gudrunFile = copy.deepcopy(gudrunFile)
+ self.samples = []
+ self.sample = None
+ self.nCurrent = 1
+ self.mse = None
+ self.result = None
+
+ self.exitcode = 0
+ self.error = ""
+
+ for sample in samples:
+ for copySample in self.gudrunFile.runSamples():
+ if sample.name == copySample.name:
+ self.samples.append(copySample)
+ self.sample = self.samples[0]
+
+ self.gudrun = gudpy.Gudrun()
+ self.gudrunObjects = []
+ self.gudrunQueue = []
+ for _ in range(nIters * len(self.samples)):
+ self.gudrunQueue.append(gudpy.Gudrun())
+
+ self.simulation = data.NpDataSet(
+ self.sample.referenceDataFile, self.limit)
+ self.actual = data.NpDataSet(
+ self.sample.dataFiles[0].mintFile, self.limit)
+ self.verbose = verbose
+
+ def tweakExponent(self, exponents):
+ if self.verbose:
+ cli.echoIndent("Running parameters: " + str(exponents))
+
+ Amplitude1, Decay1 = exponents[:2]
+ Amplitude2, Decay2 = exponents[2:4]
+
+ self.sample.exponentialValues = []
+ self.sample.exponentialValues.append(
+ [Amplitude1, Decay1, 0.0]
+ )
+ self.sample.exponentialValues.append(
+ [Amplitude2, Decay2, 0.0]
+ )
+
+ gudrun = self.gudrunQueue.pop()
+
+ self.gudrunObjects.append(gudrun)
+ exitcode = gudrun.gudrun(
+ gudrunFile=self.gudrunFile,
+ purge=self.purge
+ )
+
+ if exitcode:
+ raise exc.GudrunException(gudrun.error)
+
+ mintFile = self.sample.dataFiles[0].mintFile
+
+ self.actual = data.NpDataSet(mintFile, self.limit)
+
+ error = data.meanSquaredError(self.actual, self.simulation)
+
+ self.nCurrent += 1
+ self.mse = round(error, 5)
+
+ if self.verbose:
+ cli.echoIndent(f"MSE: {error}\n")
+ return error
+
+ def optimiseSample(self):
+ self.mse = None
+ for sample in self.gudrunFile.runSamples():
+ if sample.name != self.sample.name:
+ sample.runThisSample = False
+ else:
+ sample.runThisSample = True
+
+ initError = self.sample.mse(limit=self.limit)
+ initExp = self.sample.exponentialValues
+ paramSpace = [
+ skopt.space.Real(0, 3, name='amplitdute1'),
+ skopt.space.Real(0, 5, name='decay1'),
+ skopt.space.Real(0, 3, name='amplitdute2'),
+ skopt.space.Real(0, 5, name='decay2'),
+ ]
+
+ projectFolder = gudpy_io.GudPyIO.projectDir
+ outputPath = os.path.join(projectFolder, "Optimise", "Exponents")
+ utils.makeDir(outputPath)
+ gudpy_io.GudPyIO.projectDir = outputPath
+ result = skopt.gp_minimize(
+ self.tweakExponent, paramSpace, n_calls=self.nCalls
+ )
+ self.result = result
+ mse = round(result.fun, 5)
+ self.mse = mse
+ optExponents = [result.x[0:2] + [0.0],
+ result.x[2:] + [0.0]]
+
+ gudpy_io.GudPyIO.projectDir = projectFolder
+
+ if initError < mse:
+ optExponents = initExp
+ self.mse = initError
+
+ return optExponents
+
+ def optimise(self):
+ projectFolder = gudpy_io.GudPyIO.projectDir
+ try:
+ for sample in self.samples:
+ self.sample = sample
+ self.simulation = data.NpDataSet(
+ sample.referenceDataFile, self.limit)
+ self.actual = data.NpDataSet(
+ sample.dataFiles[0].mintFile, self.limit)
+ optExponents = self.optimiseSample()
+ self.sample.exponentialValues = optExponents
+ self.nCurrent = 1
+ self.nCurrent = "Final Run"
+ for sample in self.gudrunFile.samples():
+ sample.runThisSample = True
+ outputPath = os.path.join(projectFolder, "Optimise", "Exponents")
+ gudpy_io.GudPyIO.projectDir = outputPath
+ self.gudrun.gudrun(gudrunFile=self.gudrunFile, purge=self.purge)
+ gudpy_io.GudPyIO.projectDir = projectFolder
+ except exc.GudrunException as e:
+ gudpy_io.GudPyIO.projectDir = projectFolder
+ self.error = e.args
+ return (1, e)
+ return (0, "")
+
+ def gradientDescent(self, maxiter=15):
+ cli.echoIndent("Gradient Descent")
+ initial = [el for sl in self.sample.exponentialValues
+ for el in sl[:-1]]
+ initial.extend([0.0] * (4 - len(initial)))
+ bounds = [(0, 3), (0, 3), (0, 3), (0, 3)]
+ with tempfile.TemporaryDirectory() as tmp:
+ dir = os.path.join(tmp, "Optimisation")
+ utils.makeDir(os.path.join(dir))
+ self.gudrunFile.projectDir = dir
+ result = minimize(
+ fun=self.tweakExponent,
+ x0=initial,
+ bounds=bounds,
+ method='Nelder-Mead',
+ options={"maxiter": maxiter}
+ )
+ mse = round(result.fun, 5)
+ result.x = result.x.tolist()
+ optExponents = [result.x[0:2] + [0.0], result.x[2:] + [0.0]]
+ return mse, optExponents
diff --git a/gudpy/core/output_file_handler.py b/gudpy/core/output_file_handler.py
index 98631a58..6f6f3d4a 100644
--- a/gudpy/core/output_file_handler.py
+++ b/gudpy/core/output_file_handler.py
@@ -1,49 +1,11 @@
import os
import shutil
-import typing
-from dataclasses import dataclass
import tempfile
import core.utils as utils
from core.gud_file import GudFile
from core.gudrun_file import GudrunFile
-
-
-@dataclass
-class SampleOutput:
- sampleFile: str
- gudFile: GudFile
- outputs: typing.Dict[str, typing.Dict[str, str]]
- diagnostics: typing.Dict[str, typing.Dict[str, str]]
-
-
-@dataclass
-class GudrunOutput:
- path: str
- inputFilePath: str
- sampleOutputs: typing.Dict[str, SampleOutput]
-
- def gudFiles(self) -> list[str]:
- return [so.gudFile for so in self.sampleOutputs.values()]
-
- def gudFile(self, idx: int = None, *, name: str = None) -> GudFile:
- try:
- if idx is not None:
- asList = list(self.sampleOutputs.values())
- return asList[idx].gudFile
- elif name is not None:
- return self.sampleOutputs[name].gudFile
- except KeyError:
- return None
-
- def output(self, name: str, dataFile: str, type: str) -> str:
- try:
- if type in GudrunOutputHandler.outputExts:
- return (self.sampleOutputs[name].outputs[dataFile][type])
- else:
- return (self.sampleOutputs[name].diagnostics[dataFile][type])
- except KeyError:
- return None
+from core.io.gudpy_io import GudPyIO
class OutputHandler:
@@ -109,7 +71,7 @@ def __init__(
self,
gudrunFile: GudrunFile,
head: str = "",
- overwrite: bool = True
+ overwrite: bool = True,
):
"""
Initialise `GudrunOutputHandler`
@@ -127,27 +89,32 @@ def __init__(
super().__init__(
gudrunFile.instrument.GudrunInputFileDir,
- gudrunFile.projectDir,
+ GudPyIO.projectDir,
"Gudrun",
)
self.overwrite = overwrite
- # Append head to path
- self.outputDir = os.path.join(self.outputDir, f"{head}")
+ self.parentDir = self.outputDir
# List of run samples
self.samples = []
# Directory where Gudrun files are outputted (temp)
self.gudrunDir = self.procDir
self.gudrunFile = gudrunFile
+ self.head = head
# Make sure it is a temporary directory
assert (self.gudrunDir.startswith(tempfile.gettempdir()))
# Temporary output dir paths
self.tempOutDir = os.path.join(self.gudrunDir, "Gudrun")
if head:
+ # Append head to path
+ self.outputDir = os.path.join(
+ self.outputDir, head)
self.tempOutDir = os.path.join(
- self.tempOutDir, f"{head}")
+ self.tempOutDir, head)
+
+ self.gudrunFile.outputFolder = self.outputDir
# Files that have been copied
self.copiedFiles = []
@@ -161,34 +128,23 @@ def __init__(
def organiseOutput(self, exclude: list[str] = []):
"""Organises Gudrun outputs
-
- Returns
- -------
- GudrunOutput : GudrunOutput
- Dataclass containing information about important paths
"""
+
# Create normalisation and sample background folders
self._createNormDir(self.tempOutDir)
self._createSampleBgDir(self.tempOutDir)
# Create sample folders
- sampleOutputs = self._createSampleDir(self.tempOutDir)
+ self._createSampleDir(self.tempOutDir)
# Create additonal output folders
- inputFilePath = self._createAddOutDir(self.tempOutDir, exclude)
+ self._createAddOutDir(self.tempOutDir, exclude)
# If overwrite, move previous directory
- if self.overwrite and os.path.exists(
- os.path.join(self.gudrunFile.projectDir, "Gudrun")):
+ if self.overwrite and os.path.exists(self.parentDir):
with tempfile.TemporaryDirectory() as tmp:
- shutil.move(os.path.join(self.gudrunFile.projectDir, "Gudrun"),
- os.path.join(tmp, "prev"))
+ shutil.move(self.parentDir, os.path.join(tmp, "prev"))
# Move over folders to output directory
- shutil.move(self.tempOutDir, utils.uniquify(self.outputDir))
-
- return GudrunOutput(path=self.outputDir,
- inputFilePath=inputFilePath,
- sampleOutputs=sampleOutputs
- )
+ shutil.move(self.tempOutDir, self.outputDir)
def _createNormDir(self, dest: str):
"""
@@ -203,12 +159,15 @@ def _createNormDir(self, dest: str):
# Create normalisation folders and move datafiles
for normFile in self.gudrunFile.normalisation.dataFiles:
self._copyOutputs(
- normFile, os.path.join(
- dest, "Normalisation"))
+ normFile, dest, "Normalisation")
+ self.gudrunFile.normalisation.outputDir = os.path.join(
+ self.outputDir, "Normalisation"
+ )
for normBgFile in self.gudrunFile.normalisation.dataFilesBg:
- self._copyOutputs(normBgFile,
- os.path.join(dest,
- "NormalisationBackground"))
+ self._copyOutputs(normBgFile, dest, "NormalisationBackground")
+ self.gudrunFile.normalisation.backgroundOutputDir = os.path.join(
+ self.outputDir, "NormalisationBackground"
+ )
def _createSampleBgDir(self, dest: str):
"""
@@ -227,10 +186,15 @@ def _createSampleBgDir(self, dest: str):
for dataFile in sampleBackground.dataFiles:
self._copyOutputs(
dataFile,
+ dest,
os.path.join(
- dest, "SampleBackgrounds",
- f"SampleBackground{count + 1}")
- )
+ "SampleBackgrounds",
+ f"SampleBackground{count + 1}"
+ ))
+ sampleBackground.outputDir = os.path.join(
+ self.outputDir, "SampleBackgrounds",
+ f"SampleBackground{count + 1}"
+ )
def _createSampleDir(self, dest: str):
"""
@@ -254,14 +218,8 @@ def _createSampleDir(self, dest: str):
of their useful outputs
"""
- sampleOutputs = {}
# Create sample folders within background folders
for sample in self.samples:
- sampleFile = ""
- gudFile = None
- sampleOutput = {}
- sampleDiag = {}
-
samplePath = os.path.join(
dest,
utils.replace_unwanted_chars(sample.name)
@@ -273,10 +231,6 @@ def _createSampleDir(self, dest: str):
samplePath,
utils.replace_unwanted_chars(sample.name)
)
- if idx == 0:
- gudFile = gf
- sampleOutput[dataFile] = out
- sampleDiag[dataFile] = diag
# Copy over .sample file
if os.path.exists(os.path.join(
@@ -288,28 +242,45 @@ def _createSampleDir(self, dest: str):
)
self.copiedFiles.append(sample.pathName())
+ # Find self scattering file if it exists
+ if sample.fileSelfScattering:
+ msubw01File = diag.get(".msubw01", None)
+ if msubw01File and (
+ os.path.basename(msubw01File) == sample.fileSelfScattering
+ ):
+ sample.selfScatteringFilePath = msubw01File
+
+ # Find cross section file if it exists
+ if sample.fileSelfScattering:
+ msubw01File = diag.get(".msubw01", None)
+ if msubw01File and (
+ os.path.basename(msubw01File) == sample.fileSelfScattering
+ ):
+ sample.selfScatteringFilePath = msubw01File
+
# Path to sample file output
- sampleFile = os.path.join(
+ sample.sampleFile = os.path.join(
self.outputDir,
utils.replace_unwanted_chars(sample.name),
sample.pathName())
- sampleOutputs[sample.name] = SampleOutput(
- sampleFile, gudFile, sampleOutput, sampleDiag)
+ sample.outputFolder = os.path.join(
+ self.outputDir,
+ utils.replace_unwanted_chars(sample.name)
+
+ )
# Create container folders within sample folder
for container in sample.containers:
- containerPath = os.path.join(
- samplePath,
- (utils.replace_unwanted_chars(container.name)
- if container.name != "CONTAINER"
- else "Container"))
+ name = (utils.replace_unwanted_chars(container.name)
+ if container.name != "CONTAINER"
+ else "Container")
for dataFile in container.dataFiles:
- self._copyOutputs(
+ self._copyOutputsByExt(
dataFile,
- containerPath
+ os.path.join(samplePath, name),
+ name
)
- return sampleOutputs
def _createAddOutDir(self, dest: str, exclude: list[str] = []):
"""
@@ -349,7 +320,7 @@ def _createAddOutDir(self, dest: str, exclude: list[str] = []):
continue
return inputFile
- def _copyOutputs(self, fpath, dest):
+ def _copyOutputs(self, dataFile, dest, folderName):
"""
Copy all files with the same basename
as the provided filepath, except the original file.
@@ -363,13 +334,15 @@ def _copyOutputs(self, fpath, dest):
dest : str
Directory for the files to be copied to
"""
- fname = os.path.splitext(fpath)[0]
+ dest = os.path.join(dest, folderName)
+ fname = os.path.splitext(dataFile.filename)[0]
runDir = os.path.join(dest, fname)
dirCreated = False
for f in os.listdir(self.gudrunDir):
# Get files with the same filename but not the same
# extension
- if os.path.splitext(f)[0] == fname:
+ name, ext = os.path.splitext(f)
+ if name == fname:
if not dirCreated:
utils.makeDir(runDir)
dirCreated = True
@@ -377,9 +350,15 @@ def _copyOutputs(self, fpath, dest):
os.path.join(self.gudrunDir, f),
os.path.join(runDir, f)
)
+ dataFile.outputFolder = os.path.join(
+ self.outputDir, folderName, fname
+ )
+ dataFile.addOutput(os.path.join(
+ self.outputDir, folderName, fname, f
+ ))
self.copiedFiles.append(f)
- def _copyOutputsByExt(self, fpath, dest, folderName):
+ def _copyOutputsByExt(self, dataFile, dest, folderName):
"""
Copy all files with the same basename
as the provided filepath and splits them into outputs
@@ -404,7 +383,7 @@ def _copyOutputsByExt(self, fpath, dest, folderName):
Dictionary mapping output extension to filepath
"""
# Data filename
- fname = os.path.splitext(fpath)[0]
+ fname = os.path.splitext(dataFile.filename)[0]
# Path to folder which will hold all outputs from the run
runDir = os.path.join(dest, fname)
# Has the run dir been created?
@@ -431,9 +410,14 @@ def _copyOutputsByExt(self, fpath, dest, folderName):
gudFile = GudFile(os.path.join(self.gudrunDir, f))
outputs[ext] = os.path.join(
self.outputDir, folderName, fname, "Outputs", f)
+ dataFile.addOutput(os.path.join(
+ self.outputDir, folderName, fname, "Outputs", f))
+
else:
diagnostics[ext] = os.path.join(
self.outputDir, folderName, fname, "Diagnostics", f)
+ dataFile.addOutput(os.path.join(
+ self.outputDir, folderName, fname, "Diagnostics", f))
shutil.copyfile(
os.path.join(self.gudrunDir, f),
os.path.join(dir, f)
diff --git a/gudpy/core/purge_file.py b/gudpy/core/purge_file.py
index ef5e44c8..3da03398 100644
--- a/gudpy/core/purge_file.py
+++ b/gudpy/core/purge_file.py
@@ -10,7 +10,6 @@
class PurgeFile:
"""
Class to represent a PurgeFile.
-
...
Attributes
@@ -25,12 +24,6 @@ class PurgeFile:
standard deviation.
ignoreBad : bool
Ignore any existing bad spectrum files (spec.bad, spec.dat)?
- Methods
- -------
- write_out()
- Writes out the string representation of the PurgeFile to purge_det.dat
- purge()
- Writes out the file, and then calls purge_det on that file.
"""
def __init__(
@@ -53,28 +46,6 @@ def __init__(
self.standardDeviation = standardDeviation
self.ignoreBad = ignoreBad
- def write_out(self, path=""):
- """
- Writes out the string representation of the PurgeFile to
- purge_det.dat.
-
- Parameters
- ----------
- None
- Returns
- -------
- None
- """
- # Write out the string representation of the PurgeFile
- # To purge_det.dat.
- if not path:
- f = open("purge_det.dat", "w", encoding="utf-8")
- f.write(str(self))
- else:
- f = open(path, "w", encoding="utf-8")
- f.write(str(self))
- f.close()
-
def __str__(self):
"""
Returns the string representation of the PurgeFile object.
diff --git a/gudpy/core/run_batch_files.py b/gudpy/core/run_batch_files.py
index 945981e9..3ac778d8 100644
--- a/gudpy/core/run_batch_files.py
+++ b/gudpy/core/run_batch_files.py
@@ -167,7 +167,6 @@ def process(
iterator.performIteration(i)
initial.organiseOutput(
head=os.path.join(
- self.gudrunFile.instrument.GudrunInputFileDir,
f"BATCH_PROCESSING_BATCH_SIZE{batchSize}",
"FIRST_BATCH",
f"{dirText}_{i+1}"
@@ -196,7 +195,6 @@ def process(
iterator.performIteration(i)
self.batchedGudrunFile.organiseOutput(
head=os.path.join(
- self.gudrunFile.instrument.GudrunInputFileDir,
f"BATCH_PROCESSING_BATCH_SIZE{batchSize}",
"REST",
f"{dirText}_{i+1}"
diff --git a/gudpy/core/sample.py b/gudpy/core/sample.py
index 2f86ba95..f7ac82e5 100644
--- a/gudpy/core/sample.py
+++ b/gudpy/core/sample.py
@@ -1,3 +1,5 @@
+import os
+
from core.utils import bjoin, numifyBool
from core.data_files import DataFiles
from core.composition import Composition
@@ -7,6 +9,7 @@
)
from core import utils
from core import config
+from core import data
class Sample:
@@ -108,7 +111,7 @@ def __init__(self):
"""
self.name = ""
self.periodNumber = 1
- self.dataFiles = DataFiles([], "SAMPLE")
+ self.dataFiles = DataFiles([], "SAMPLE", True)
self.forceCalculationOfCorrections = True
self.composition = Composition("SAMPLE")
self.geometry = Geometry.SameAsBeam
@@ -145,9 +148,48 @@ def __init__(self):
self.containers = []
self.yamlignore = {
- "yamlignore"
+ "yamlignore",
}
+ self.outputFolder = ""
+ self.sampleFile = ""
+ self.selfScatteringFilePath = ""
+
+ self._referenceDataFile = ""
+
+ @property
+ def gudFile(self):
+ if not self.dataFiles:
+ return None
+ return self.dataFiles[0].gudFile
+
+ @property
+ def referenceDataFile(self):
+ return self._referenceDataFile
+
+ @referenceDataFile.setter
+ def referenceDataFile(self, path):
+ self._referenceDataFile = path
+
+ @property
+ def referenceData(self):
+ return data.NpDataSet(self._referenceDataFile)
+
+ def mse(self, limit=None):
+ if not self.dataFiles or not self.dataFiles[0].mintFile:
+ return None
+ if not os.path.exists(self.dataFiles[0].mintFile):
+ return None
+ if (not self.referenceDataFile
+ or not os.path.exists(self.referenceDataFile)):
+ return None
+ try:
+ refData = data.NpDataSet(self.referenceDataFile, lim=limit)
+ expData = data.NpDataSet(self.dataFiles[0].mintFile, lim=limit)
+ except TypeError:
+ return None
+ return data.meanSquaredError(refData, expData)
+
def pathName(self):
return utils.replace_unwanted_chars(self.name).translate(
{ord(x): '' for x in r'/\!*~,&|[]'}
diff --git a/gudpy/core/sample_background.py b/gudpy/core/sample_background.py
index 4e454899..2c4596dd 100644
--- a/gudpy/core/sample_background.py
+++ b/gudpy/core/sample_background.py
@@ -18,6 +18,7 @@ class SampleBackground:
Methods
-------
"""
+
def __init__(self):
"""
Constructs all the necessary attributes for the
@@ -37,6 +38,8 @@ def __init__(self):
"yamlignore"
}
+ self.outputFolder = ""
+
def __str__(self):
"""
Returns the string representation of the SampleBackground object.
diff --git a/gudpy/core/utils.py b/gudpy/core/utils.py
index 32dfe053..bcd7560a 100644
--- a/gudpy/core/utils.py
+++ b/gudpy/core/utils.py
@@ -23,6 +23,10 @@ def firstword(string):
return string.split(" ")[0]
+def lastword(string):
+ return string.split(" ")[-1]
+
+
def replace_unwanted_chars(string):
unwanted = [" "]
for char in unwanted:
@@ -183,7 +187,8 @@ def makeDir(targetPath):
dirPath : str
Path of created directory
"""
- if not os.path.isdir(targetPath):
+
+ if not os.path.exists(targetPath):
os.makedirs(targetPath)
return targetPath
diff --git a/gudpy/gudpy_cli.py b/gudpy/gudpy_cli.py
index 9bd3a637..7aaf3e22 100644
--- a/gudpy/gudpy_cli.py
+++ b/gudpy/gudpy_cli.py
@@ -1,43 +1,21 @@
+from core import utils
+from core import data
+from core import config
+from core import enums
+from core import gudpy as gp
+
import click
import os
import sys
-
-from core import gudpy as gp
-from core import enums
-from core import config
+import ast
def loadProject(ctx, project):
if not project:
- project = click.prompt("Path to project", type=click.Path())
- return
+ return echoError("No project directory specified")
ctx.obj = gp.GudPy()
ctx.obj.loadFromProject(project)
- echoIndent(click.style(u"\u2714", fg="green", bold=True) +
- f" GudPy project sucessfuly loaded at {project}")
-
-
-def loadFile(ctx, value):
- print(value)
- file, format = value
- if not file:
- file = click.prompt("Path to load file", type=click.Path())
- if not format:
- format = click.prompt(
- "File type",
- click.Tuple([click.Path(), click.Choice(["yaml", "txt"])]))
- if format == "yaml":
- format = enums.Format.YAML
- elif format == "txt":
- format = enums.Format.TXT
- else:
- return
- if not file or format:
- return
- ctx.obj = gp.GudPy()
- ctx.obj.loadFromFile(file, format)
- click.echo(click.style(u"\u2714", fg="green", bold=True) +
- f" GudPy input file {file} sucessfuly loaded")
+ echoTick(f"GudPy project successfully loaded at {project}")
def loadConfig(ctx, cfg):
@@ -63,34 +41,43 @@ def echoIndent(text):
click.echo(" " + text)
+def echoTick(text):
+ echoIndent(click.style(u"\u2714 ", fg="green", bold=True) + text)
+
+
def echoWarning(text):
click.secho(" (!) " + f"WARNING: {text}\n",
- fg='yellow', bold=True)
+ fg="yellow", bold=True)
def echoProcess(name):
- click.secho("\n " + f">> {name}\n", bold=True, fg='cyan')
+ click.secho("\n " + f">> {name}\n", bold=True, fg="cyan")
+
+
+def echoError(msg):
+ click.secho(" (X) " + f"ERROR: {msg}",
+ fg="red", bold=True)
+ return 1
@click.group()
+def cli():
+ pass
+
+
+@cli.group()
@click.option(
"--project", "-p",
type=click.Path(exists=True),
help="Loads from a project"
)
-@click.option(
- "--file", "-f",
- type=click.Tuple([click.Path(exists=True), click.Choice(["yaml", "txt"])]),
- nargs=2,
- help="Loads from a file"
-)
@click.option(
"--config",
type=click.Choice(["NIMROD2012", "SANDALS2011"]),
help="Loads from a config file"
)
@click.pass_context
-def cli(ctx, project, file, config):
+def gudpy(ctx, project, config):
click.echo("============================================================"
"============================================================")
click.secho(" "
@@ -100,9 +87,8 @@ def cli(ctx, project, file, config):
"\n")
if project:
- loadProject(ctx, project)
- elif file:
- loadFile(ctx, file)
+ if loadProject(ctx, project):
+ sys.exit()
elif config:
loadConfig(ctx, config)
else:
@@ -111,25 +97,32 @@ def cli(ctx, project, file, config):
"See --help for options.", err=True)
-@cli.command()
+@gudpy.command()
@click.option(
"--verbose", "-v",
is_flag=True,
default=False,
help="Run processes verbosely, displaying the output"
)
+@click.option(
+ "--purge", "p",
+ is_flag=True,
+ default=False,
+ help="Purge detectors before running"
+)
@click.pass_context
-def gudrun(ctx, verbose):
+def gudrun(ctx, verbose, p):
echoProcess("gudrun_dcs")
+ if p:
+ ctx.invoke(purge, verbose=verbose)
ctx.obj.runGudrun()
if verbose:
click.echo_via_pager(ctx.obj.gudrun.output)
- echoIndent(click.style(u"\u2714", fg="green", bold=True) +
- " Gudrun Complete")
- echoIndent(f" Outputs avaliable at {ctx.obj.projectDir}/Gudrun")
+ echoTick("Gudrun Complete")
+ echoTick(f"Outputs available at {ctx.obj.projectDir}/Gudrun")
-@cli.command()
+@gudpy.command()
@click.option(
"--verbose", "-v",
is_flag=True,
@@ -142,8 +135,7 @@ def purge(ctx, verbose):
ctx.obj.runPurge()
if verbose:
click.echo_via_pager(ctx.obj.purge.output)
- echoIndent(click.style(u"\u2714", fg="green", bold=True) +
- " Purge Complete")
+ echoTick("Purge Complete")
echoIndent("Number of Good Detectors: " +
click.style(f"{ctx.obj.purge.detectors}", bold=True))
thresh = ctx.obj.gudrunFile.instrument.goodDetectorThreshold
@@ -153,5 +145,148 @@ def purge(ctx, verbose):
f"{thresh}", fg="yellow", bold=True)
-if __name__ == '__main__':
+@gudpy.command()
+@click.option(
+ "--sample",
+ type=str
+)
+@click.argument(
+ "attribute",
+ type=str
+)
+@click.argument(
+ "value",
+ type=str
+)
+@click.argument(
+ "path",
+ type=click.Path()
+)
+@click.pass_context
+def edit(ctx, sample, attribute, value, path):
+ echoProcess("Modifying GudrunFile")
+
+ targetObj = None
+ for sbg in ctx.obj.gudrunFile.sampleBackgrounds:
+ for s in sbg.samples:
+ if s.name == sample:
+ targetObj = s
+
+ if sample and targetObj is None:
+ return echoError("No sample found")
+ elif not sample:
+ targetObj = ctx.obj.gudrunFile
+
+ if not hasattr(targetObj, attribute):
+ return echoError(f"Invalid Attribute: {attribute}")
+ literalValue = ast.literal_eval(value)
+ attr = targetObj.__dict__[attribute]
+
+ if not isinstance(literalValue, type(attr)):
+ return echoError(
+ f"Value is of incorrect type. {attribute} is of type {type(attr)}"
+ )
+
+ setattr(targetObj, attribute, literalValue)
+
+ projectDir = utils.makeDir(path)
+ ctx.obj.setSaveLocation(projectDir)
+ ctx.obj.save()
+
+ echoTick(
+ f"{attribute} has been set to: {targetObj.__dict__[attribute]}"
+ )
+ echoTick(f"Updated GudPy Project saved to {projectDir}")
+
+
+@gudpy.command()
+@click.option(
+ "--verbose", "-v",
+ is_flag=True,
+ default=False,
+ help="Run processes verbosely, displaying the output"
+)
+@click.argument(
+ "simulationData",
+ type=click.Path(exists=True),
+ nargs=1
+)
+@click.argument(
+ "samplename",
+ type=str,
+ nargs=1
+)
+@click.option(
+ "--niters", "-n",
+ type=int,
+ help="Number of calls for optimisation"
+)
+@click.option(
+ "--output", "-o",
+ type=click.Path(exists=False),
+ help="Location to create new project"
+)
+@click.pass_context
+def optimise(ctx, verbose, simulationdata, samplename, niters, output):
+ echoProcess("Bayesian Optimisation")
+ dir = output if output else os.path.join(
+ ctx.obj.projectDir,
+ f"BayesianOptimisation_{samplename}"
+ )
+ ctx.obj.setSaveLocation(dir)
+ echoProcess("gudrun_dcs")
+
+ ctx.obj.gudrun = gp.Gudrun()
+
+ sample = None
+ for sbg in ctx.obj.gudrunFile.sampleBackgrounds:
+ for s in sbg.samples:
+ if s.name == samplename:
+ sample = s
+
+ if sample is None:
+ return echoError("No sample found")
+
+ sample.referenceDataFile = simulationdata
+ samples = [sample]
+
+ echoIndent(f"Initial MSE: {round(sample.mse(), 5)}\n")
+
+ ctx.obj.optimiseExponentialSubtractions(
+ samples=samples)
+
+ echoTick("Optimisation complete")
+
+ echoTick(f"Optimum exponentialValues: {sample.exponentialValues}")
+ echoTick(f"MSE: {sample.mse()}")
+
+ for sbg in ctx.obj.gudrunFile.sampleBackgrounds:
+ for s in sbg.samples:
+ if s.name == samplename:
+ print(s.exponentialValues)
+
+ echoProcess("gudrun_dcs")
+ ctx.obj.runGudrun()
+ echoTick("Gudrun Complete")
+
+
+@cli.command()
+@click.argument(
+ 'data1',
+ type=click.Path(exists=True)
+)
+@click.argument(
+ 'data2',
+ type=click.Path(exists=True)
+)
+def mse(data1, data2):
+ echoProcess("Mean Squared Error")
+ d1 = data.NpDataSet(data1, 2.0)
+ d2 = data.NpDataSet(data2, 2.0)
+ err = data.meanSquaredError(d1, d2)
+
+ echoIndent(str(round(err, 5)))
+
+
+if __name__ == "__main__":
cli()
diff --git a/gudpy/gudpy_gui.py b/gudpy/gudpy_gui.py
index ec4cc015..72b6094d 100644
--- a/gudpy/gudpy_gui.py
+++ b/gudpy/gudpy_gui.py
@@ -10,7 +10,14 @@ def __init__(self, args):
super(GudPy, self).__init__(args)
self.gudpy = GudPyController()
self.aboutToQuit.connect(self.gudpy.cleanup)
- sys.exit(self.exec_())
+
+ if len(sys.argv) > 1:
+ self.gudpy.gudpy.loadFromProject(sys.argv[1])
+ self.gudpy.mainWidget.updateWidgets(self.gudpy.gudrunFile)
+ self.gudpy.mainWidget.setWindowTitle(
+ f"GudPy - {self.gudpy.gudpy.io.projectName}[*]")
+
+ sys.exit(self.exec())
def onException(self, cls, exception, traceback):
self.gudpy.onException(cls, exception, traceback)
diff --git a/gudpy/gui/widgets/charts/beam_plot.py b/gudpy/gui/widgets/charts/beam_plot.py
index e797ebb3..c65092eb 100644
--- a/gudpy/gui/widgets/charts/beam_plot.py
+++ b/gudpy/gui/widgets/charts/beam_plot.py
@@ -3,7 +3,6 @@
class BeamChart(QChart):
-
def __init__(self):
super().__init__()
self.legend().setVisible(False)
diff --git a/gudpy/gui/widgets/charts/chart.py b/gudpy/gui/widgets/charts/chart.py
index c51aa7fc..6b1518bc 100644
--- a/gudpy/gui/widgets/charts/chart.py
+++ b/gudpy/gui/widgets/charts/chart.py
@@ -8,16 +8,11 @@
from gui.widgets.charts.sample_plot_config import SamplePlotConfig
from gui.widgets.charts.enums import PlotModes, SeriesTypes
from gui.widgets.charts.enums import Axes
-import core.output_file_handler as handlers
class GudPyChart(QChart):
-
- def __init__(self, gudrunOutput: handlers.GudrunOutput, parent=None):
-
+ def __init__(self, parent=None):
super().__init__(parent)
- self.gudrunOutput = gudrunOutput
-
self.legend().setMarkerShape(QLegend.MarkerShapeFromSeries)
self.legend().setAlignment(Qt.AlignRight)
self.samples = []
@@ -42,6 +37,7 @@ def connectMarkers(self):
def disconnectMarkers(self):
for marker in self.legend().markers():
try:
+
marker.clicked.disconnect(self.handleMarkerClicked)
except RuntimeError:
continue
@@ -136,7 +132,6 @@ def plot(self, plotMode=None):
offsetY = 0
plotConfig = SamplePlotConfig(
sample,
- self.gudrunOutput,
offsetX,
offsetY,
self
diff --git a/gudpy/gui/widgets/charts/sample_plot_config.py b/gudpy/gui/widgets/charts/sample_plot_config.py
index 6cd07227..20d5a13c 100644
--- a/gudpy/gui/widgets/charts/sample_plot_config.py
+++ b/gudpy/gui/widgets/charts/sample_plot_config.py
@@ -6,24 +6,27 @@
Mdor01Plot, Mgor01Plot, Mint01Plot
)
from gui.widgets.charts.enums import PlotModes
+from core.sample import Sample
class SamplePlotConfig():
-
- def __init__(self, sample, gudrunOutput, offsetX, offsetY, parent):
+ def __init__(self, sample, offsetX, offsetY, parent):
self.sample = sample
- self.gudrunOutput = gudrunOutput
self.parent = parent
+ self.mint01DataSet = None
+ self.mint01Series = None
+ self.mdcs01DataSet = None
+ self.mdcs01Series = None
+ self.dcsLevel = None
+ self.dcsSeries = None
+ self.mdor01DataSet = None
+ self.mdor01Series = None
self.constructDataSets(offsetX, offsetY)
def constructDataSets(self, offsetX, offsetY):
if len(self.sample.dataFiles):
# mint01 dataset.
- mintPath = self.gudrunOutput.output(
- name=self.sample.name,
- dataFile=self.sample.dataFiles[0],
- type=".mint01"
- )
+ mintPath = self.sample.dataFiles[0].mintFile
hasMintData = False
if mintPath and os.path.exists(mintPath):
@@ -37,11 +40,7 @@ def constructDataSets(self, offsetX, offsetY):
self.mint01Series.setName(f"{self.sample.name} mint01")
# mdcs01 dataset.
- mdcsPath = self.gudrunOutput.output(
- name=self.sample.name,
- dataFile=self.sample.dataFiles[0],
- type=".mdcs01"
- )
+ mdcsPath = self.sample.dataFiles[0].mdcsFile
hasMdcsData = False
if mdcsPath and os.path.exists(mdcsPath):
@@ -55,9 +54,9 @@ def constructDataSets(self, offsetX, offsetY):
self.mdcs01Series.setName(f"{self.sample.name} mdcs01")
# gud data, for dcs level.
- gudFile = self.gudrunOutput.gudFile(
- name=self.sample.name
- )
+ gudFile = None
+ if isinstance(self.sample, Sample):
+ gudFile = self.sample.gudFile
hasDCSData = False
if gudFile and os.path.exists(gudFile.path):
@@ -76,11 +75,7 @@ def constructDataSets(self, offsetX, offsetY):
)
# mdor01 dataset.
- mdorPath = self.gudrunOutput.output(
- name=self.sample.name,
- dataFile=self.sample.dataFiles[0],
- type=".mdor01"
- )
+ mdorPath = self.sample.dataFiles[0].mdorFile
hasMdorData = False
if mdorPath and os.path.exists(mdorPath):
@@ -94,11 +89,7 @@ def constructDataSets(self, offsetX, offsetY):
self.mdor01Series.setName(f"{self.sample.name} mdor01")
# mgor01 dataset.
- mgorPath = self.gudrunOutput.output(
- name=self.sample.name,
- dataFile=self.sample.dataFiles[0],
- type=".mgor01"
- )
+ mgorPath = self.sample.dataFiles[0].mgorFile
hasMgorData = False
if mgorPath and os.path.exists(mgorPath):
diff --git a/gudpy/gui/widgets/charts/sample_plot_data.py b/gudpy/gui/widgets/charts/sample_plot_data.py
index d390baec..998619d1 100644
--- a/gudpy/gui/widgets/charts/sample_plot_data.py
+++ b/gudpy/gui/widgets/charts/sample_plot_data.py
@@ -2,14 +2,13 @@
from PySide6.QtCharts import QLineSeries
from PySide6.QtCore import QPoint, QPointF
+from core import data
from core.gud_file import GudFile
-class Point():
+class GuiPoint(data.Point):
def __init__(self, x, y, err):
- self.x = x
- self.y = y
- self.err = err
+ super().__init__(x=x, y=y, err=err)
def toQPointF(self):
return QPointF(self.x, self.y)
@@ -18,16 +17,12 @@ def toQPoint(self):
return QPoint(self.x, self.y)
-class GudPyPlot():
+class GudPyPlot(data.DataSet):
# mint01 / mdcs01 / mdor01 / mgor01 / dcs
def __init__(self, path, exists):
- if not exists:
- self.dataSet = None
- else:
- self.dataSet = self.constructDataSet(path)
+ super().__init__(path=path, exists=exists)
- @abstractmethod
- def constructDataSet(self, path):
+ def constructDataSet(self, path, lim=None):
dataSet = []
with open(path, "r", encoding="utf-8") as fp:
for dataLine in fp.readlines():
@@ -36,8 +31,18 @@ def constructDataSet(self, path):
if dataLine[0] == "#":
continue
- x, y, err, *__ = [float(n) for n in dataLine.split()]
- dataSet.append(Point(x, y, err))
+ splitLine = [float(n) for n in dataLine.split()]
+ if len(splitLine) > 2:
+ x, y, err, *__ = splitLine
+ if lim and x > lim:
+ return dataSet
+ dataSet.append(GuiPoint(x, y, err))
+ else:
+ x, y = splitLine
+ if lim and x > lim:
+ return dataSet
+ dataSet.append(GuiPoint(x, y))
+
return dataSet
def toQPointList(self):
diff --git a/gudpy/gui/widgets/core/control.py b/gudpy/gui/widgets/core/control.py
index 730cb312..87957a41 100644
--- a/gudpy/gui/widgets/core/control.py
+++ b/gudpy/gui/widgets/core/control.py
@@ -1,5 +1,4 @@
import os
-import re
import sys
import traceback
import typing as typ
@@ -31,7 +30,6 @@ def __init__(self):
super().__init__()
self.gudpy: gp.GudPy = gp.GudPy()
self.mainWidget: QtWidgets.QMainWindow = GudPyMainWindow()
- self.purged: bool = False
# Current process thread running
self.workerThread: QtCore.QThread = None
@@ -83,7 +81,7 @@ def connectUiSlots(self):
self.mainWidget.ui.exportInputFile.triggered.connect(
self.exportInputFile)
self.mainWidget.ui.viewLiveInputFile.triggered.connect(
- self.mainWidget.viewInput)
+ self.viewInput)
self.mainWidget.ui.insertSampleBackground.triggered.connect(
self.mainWidget.ui.objectTree.insertSampleBackground
)
@@ -110,6 +108,29 @@ def connectUiSlots(self):
self.mainWidget.updateComponents)
self.mainWidget.ui.exportArchive.triggered.connect(self.exportArchive)
self.mainWidget.ui.exit.triggered.connect(self.exit_)
+ (self.mainWidget.optimiseTab.runOptimiseExponents.connect(
+ self.runOptimiseExponents
+ ))
+ (self.mainWidget.optimiseTab.runOptimiseInelasticity.connect(
+ self.runOptimiseInelasticity
+ ))
+ (self.mainWidget.optimiseTab.exponentialGroupBox
+ .setParamsButton.clicked.connect(
+ lambda: self.mainWidget.optimiseTab
+ .exponentialGroupBox.setParameters(
+ self.gudrunFile
+ )
+ ))
+
+ @property
+ def gudrunFile(self):
+ return self.gudpy.gudrunFile
+
+ @gudrunFile.setter
+ def gudrunFile(self, gudrunFile: gudrunFile) -> None:
+ self.gudpy = gp.GudPy()
+ self.gudpy.gudrunFile = gudrunFile
+ self.mainWidget.updateWidgets(self.gudpy.gudrunFile)
"""
@@ -119,12 +140,12 @@ def connectUiSlots(self):
def tryLoadAutosaved(self, projectDir):
for f in os.listdir(projectDir):
- if f == self.gudpy.autosaveLocation:
+ if f == self.gudpy.io.autosavePath:
path = os.path.join(projectDir, f)
autoFileInfo = QtCore.QFileInfo(path)
autoDate = autoFileInfo.lastModified()
- fileInfo = QtCore.QFileInfo(self.gudpy.gudrunFile.path())
+ fileInfo = QtCore.QFileInfo(self.gudpy.io.loadFile)
currentDate = fileInfo.lastModified()
if autoDate > currentDate:
@@ -160,7 +181,13 @@ def loadFromFile(self):
fmt = filters[filter]
try:
gudpy = gp.GudPy()
- gudpy.loadFromFile(loadFile=filename, format=fmt)
+ if fmt == enums.Format.TXT:
+ gudpy.loadFromGudrunFile(filename)
+ elif fmt == enums.Format.YAML:
+ gudpy.loadFromYamlFile(filename)
+ else:
+ raise RuntimeError(f"Unsupported format: {fmt}")
+
self.gudpy = gudpy
except (FileNotFoundError, exc.ParserException) as e:
self.mainWidget.sendError(e)
@@ -168,9 +195,9 @@ def loadFromFile(self):
except IOError:
self.mainWidget.sendError("Could not open file.")
return
- self.mainWidget.updateWidgets(self.gudpy.gudrunFile)
+ self.mainWidget.updateWidgets(self.gudrunFile)
self.mainWidget.setWindowTitle(
- f"GudPy - {self.gudpy.gudrunFile.filename}[*]")
+ f"GudPy - {os.path.basename(filename)}[*]")
def loadFromProject(self):
"""Load from previous GudPy project
@@ -187,7 +214,7 @@ def loadFromProject(self):
if autosave:
filename = autosave
gudpy = gp.GudPy()
- gudpy.loadFromFile(loadFile=filename)
+ gudpy.loadFromGudrunFile(loadFile=filename)
self.gudpy = gudpy
except (FileNotFoundError, exc.ParserException) as e:
self.mainWidget.sendError(e)
@@ -195,12 +222,15 @@ def loadFromProject(self):
except IOError:
self.mainWidget.sendError("Could not open file.")
return
- self.mainWidget.updateWidgets(self.gudpy.gudrunFile)
+ except exc.YAMLException as e:
+ self.mainWidget.sendError(f"Could not parse input file: {e}")
+ return
+ self.mainWidget.updateWidgets(self.gudrunFile)
self.mainWidget.setWindowTitle(
- f"GudPy - {self.gudpy.gudrunFile.filename}[*]")
+ f"GudPy - {self.gudpy.io.projectName}[*]")
def newProject(self):
- if self.gudpy.gudrunFile:
+ if self.gudrunFile:
save = QtWidgets.QMessageBox.question(
self.mainWidget,
"GudPy",
@@ -217,12 +247,11 @@ def newProject(self):
if not configurationDialog.cancelled and result:
self.gudpy = gp.GudPy()
- self.gudpy.loadFromFile(
+ self.gudpy.loadFromGudrunFile(
loadFile=configurationDialog.configuration,
- format=enums.Format.TXT,
config=True
)
- self.mainWidget.updateWidgets(self.gudpy.gudrunFile)
+ self.mainWidget.updateWidgets(self.gudrunFile)
def setSaveLocation(self, saveAs=False):
"""Function to let the user choose where the project is saved to
@@ -275,35 +304,37 @@ def exportInputFile(self):
"""
Saves the current state of the input file as...
"""
- filename, filter = QFileDialog.getSaveFileName(
- self,
+ filename, _ = QFileDialog.getSaveFileName(
+ self.mainWidget,
"Export input file as..",
".",
- "YAML (*.yaml);;Gudrun Compatible (*.txt)",
)
- fmt = enums.Format.YAML
if filename:
- ext = re.search(r"\((.+?)\)", filter).group(1).replace("*", "")
- fmt = enums.Format.TXT if ext == ".txt" else enums.Format.YAML
- if filter and sys.platform.startswith("linux"):
- filename += ext
+ filename = filename.replace(".txt", "")
+ filename += ".txt"
if os.path.dirname(filename) == self.gudpy.projectDir:
self.mainWidget.sendWarning("Do not modify project folder.")
return
- self.gudpy.save(path=filename, format=fmt)
- self.setUnModified()
+ self.gudpy.io.exportGudrunFile(self.gudrunFile, filename)
+ self.setUnModified()
def exportArchive(self):
if not self.gudpy.checkSaveLocation():
if not self.setSaveLocation():
return
exportDialog = dialogs.io.ExportDialog(
- self.gudpy.gudrunFile, self.mainWidget)
+ self.gudrunFile, self.mainWidget)
exportDialog.widget.exec()
def autosave(self):
if self.gudpy.checkSaveLocation() and not self.workerThread:
- self.gudpy.save(path=self.gudpy.autosaveLocation)
+ self.gudpy.save()
+
+ def viewInput(self):
+ text = self.mainWidget.viewInput()
+ self.gudrunFile = self.gudpy.io.gudrunFileParser.parse(
+ text.splitlines())
+ self.mainWidget.updateWidgets(self.gudrunFile)
"""
@@ -313,7 +344,7 @@ def autosave(self):
def checkFilesExist(self, showSuccessDialog: bool = False):
result = file_library.GudPyFileLibrary(
- self.gudpy.gudrunFile).checkFilesExist()
+ self.gudrunFile).checkFilesExist()
if not all(r[0] for r in result[0]) or not all(r[0]
for r in result[1]):
undefined = [
@@ -323,7 +354,7 @@ def checkFilesExist(self, showSuccessDialog: bool = False):
missingFilesDialog = dialogs.io.MissingFilesDialog(
undefined, unresolved, self.mainWidget
)
- missingFilesDialog.widget.exec_()
+ missingFilesDialog.widget.exec()
return False
if showSuccessDialog:
@@ -348,21 +379,35 @@ def connectProcessSignals(
self.mainWidget.processStopped)
process.finished.connect(onFinish)
+ def dissconnectProcessSignals(
+ self,
+ process,
+ onFinish
+ ):
+ process.started.disconnect(self.mainWidget.processStarted)
+ process.outputChanged.disconnect(
+ self.mainWidget.outputSlots.setOutputStream)
+ process.progressChanged.disconnect(
+ self.mainWidget.updateProgressBar)
+ process.finished.disconnect(
+ self.mainWidget.processStopped)
+ process.finished.disconnect(onFinish)
+
def createPurgeProcess(self) -> bool:
if not self.prepareRun():
return False
self.mainWidget.setControlsEnabled(False)
purgeDialog = dialogs.purge.PurgeDialog(
- self.gudpy.gudrunFile, self.mainWidget)
- result = purgeDialog.widget.exec_()
+ self.gudrunFile, self.mainWidget)
+ result = purgeDialog.widget.exec()
if (purgeDialog.cancelled or result == QDialogButtonBox.No):
self.mainWidget.setControlsEnabled(True)
return False
- self.gudpy.purgeFile = PurgeFile(self.gudpy.gudrunFile)
+ self.gudpy.purgeFile = PurgeFile(self.gudrunFile)
self.gudpy.purge = worker.PurgeWorker(
purgeFile=self.gudpy.purgeFile,
- gudrunFile=self.gudpy.gudrunFile,
+ gudrunFile=self.gudrunFile,
)
self.connectProcessSignals(
process=self.gudpy.purge, onFinish=self.purgeFinished
@@ -374,7 +419,7 @@ def prepareRun(self) -> bool:
if not self.checkFilesExist():
return False
- if not self.gudpy.gudrunFile.checkNormDataFiles():
+ if not self.gudrunFile.checkNormDataFiles():
self.mainWidget.sendWarning(
"Please specify normalisation data files.")
return False
@@ -396,7 +441,7 @@ def startProcess(self) -> None:
os.path.join(
self.gudpy.projectDir, "Purge", "purge_det.dat"
)
- ):
+ ) or not self.gudrunFile.purged:
purgeResult = self.mainWidget.purgeOptionsMessageBox(
"purge_det.dat found, but wasn't run in this session. "
"Run Purge?",
@@ -410,7 +455,7 @@ def startProcess(self) -> None:
if purgeResult == QMessageBox.Yes:
if self.createPurgeProcess():
self.gudpy.purge.start()
- self.gudpy.purge.finished.connect(self.startProcess)
+ self.gudpy.purge.finished.connect(self.purgeFinished)
return
elif purgeResult != QMessageBox.No:
self.mainWidget.processStopped()
@@ -424,37 +469,44 @@ def runPurge(self) -> bool:
self.startProcess()
def purgeFinished(self, exitcode):
- self.purged = True
+ self.gudrunFile.purged = True
if exitcode != 0:
self.mainWidget.sendError(
"Purge failed with the following output: "
f"{self.gudpy.purge.error}"
)
+ self.workerThread = None
return
- thresh = self.gudpy.gudrunFile.instrument.goodDetectorThreshold
+ thresh = self.gudrunFile.instrument.goodDetectorThreshold
if thresh and self.gudpy.purge.detectors < thresh:
self.mainWidget.sendWarning(
f"{self.gudpy.purge.detectors} "
"detectors made it through the purge.\n"
" The acceptable minimum for "
- f"{self.gudpy.gudrunFile.instrument.name} is {thresh}"
+ f"{self.gudrunFile.instrument.name} is {thresh}"
)
self.mainWidget.ui.goodDetectorsLabel.setText(
f"Number of Good Detectors: {self.gudpy.purge.detectors}"
)
self.mainWidget.outputSlots.setOutput(
self.gudpy.purge.output, "purge_det",
- gudrunFile=self.gudpy.gudrunFile
+ self.gudrunFile
)
if isinstance(self.workerThread, gp.Purge):
+ self.dissconnectProcessSignals(
+ self.workerThread,
+ self.purgeFinished
+ )
self.workerThread = None
+ if self.workerThread:
+ self.startProcess()
def runGudrun(self, gudrunFile=None):
if not gudrunFile:
- gudrunFile = self.gudpy.gudrunFile
+ gudrunFile = self.gudrunFile
if not self.prepareRun():
return
@@ -469,7 +521,7 @@ def runGudrun(self, gudrunFile=None):
def iterateGudrun(self, dialog):
iterationDialog = dialog(
- self.mainWidget, self.gudpy.gudrunFile)
+ self.mainWidget, self.gudrunFile)
iterationDialog.widget.exec()
if not iterationDialog.params:
return
@@ -477,7 +529,7 @@ def iterateGudrun(self, dialog):
return
# If it is a Composition iteration, the gudrunFile must be specified
if iterationDialog.iteratorType == iterators.Composition:
- iterationDialog.params["gudrunFile"] = self.gudpy.gudrunFile
+ iterationDialog.params["gudrunFile"] = self.gudrunFile
self.gudpy.iterator = iterationDialog.iteratorType(
**iterationDialog.params)
@@ -485,7 +537,7 @@ def iterateGudrun(self, dialog):
# If Composition iterator, initialise Composition Worker
if iterationDialog.iteratorType == iterators.Composition:
self.gudpy.gudrunIterator = worker.CompositionWorker(
- self.gudpy.iterator, self.gudpy.gudrunFile, self.gudpy.purge)
+ self.gudpy.iterator, self.gudrunFile, self.gudpy.purge)
self.connectProcessSignals(
process=self.gudpy.gudrunIterator,
onFinish=self.compositionIterationFinished
@@ -493,7 +545,7 @@ def iterateGudrun(self, dialog):
# Else use standard GudrunIteratorWorker
else:
self.gudpy.gudrunIterator = worker.GudrunIteratorWorker(
- self.gudpy.iterator, self.gudpy.gudrunFile, self.gudpy.purge)
+ self.gudpy.iterator, self.gudrunFile, self.gudpy.purge)
self.connectProcessSignals(
process=self.gudpy.gudrunIterator,
onFinish=self.gudrunFinished
@@ -503,17 +555,22 @@ def iterateGudrun(self, dialog):
self.startProcess()
def gudrunFinished(self, exitcode):
- if self.workerThread == self.gudpy.gudrunIterator:
- if self.gudpy.gudrunIterator.exitcode[0] != 0:
- self.mainWidget.sendError(
- f"Gudrun Iteration failed with the following output: "
- f"\n{self.gudpy.gudrunIterator.error}"
- )
- return
+ self.mainWidget.setModified()
+ if self.workerThread.isRunning():
+ return
+
+ if exitcode != 0:
+ self.mainWidget.sendError(
+ f"Process failed with the following output: "
+ f"\n{self.workerThread.error}"
+ )
+ return
+ if self.workerThread == self.gudpy.gudrunIterator:
self.mainWidget.outputSlots.setOutput(
self.gudpy.gudrunIterator.output,
- f"Gudrun {self.gudpy.gudrunIterator.iterator.name}")
+ f"Gudrun {self.gudpy.gudrunIterator.iterator.name}",
+ self.gudrunFile)
self.mainWidget.sampleSlots.setSample(
self.mainWidget.sampleSlots.sample)
self.mainWidget.iterationResultsDialog(
@@ -521,21 +578,21 @@ def gudrunFinished(self, exitcode):
self.gudpy.gudrunIterator.iterator.name)
self.mainWidget.updateWidgets(
gudrunFile=self.gudpy.gudrunIterator.gudrunFile,
- gudrunOutput=self.gudpy.gudrunIterator.gudrunOutput
)
- elif self.workerThread == self.gudpy.gudrun:
- if exitcode != 0:
- self.mainWidget.sendError(
- f"Gudrun failed with the following output: "
- f"\n{self.gudpy.gudrun.error}"
- )
- return
- self.mainWidget.outputSlots.setOutput(
- self.gudpy.gudrun.output, "Gudrun")
+
+ if self.workerThread == self.gudpy.optimiser:
+ self.mainWidget.optimiseTab.setResults(
+ self.gudpy.optimiser.gudrunFile.samples(),
+ self.gudpy.optimiser
+ )
+
+ self.mainWidget.outputSlots.setOutput(
+ self.workerThread.output, "Gudrun", self.gudrunFile)
+ if not self.workerThread == self.gudpy.optimiser:
self.mainWidget.updateWidgets(
- gudrunFile=self.gudpy.gudrunFile,
- gudrunOutput=self.gudpy.gudrun.gudrunOutput
+ gudrunFile=self.gudrunFile,
)
+ self.gudpy.io.save(self.gudrunFile)
self.workerThread = None
def compositionIterationFinished(self, exitcode):
@@ -556,14 +613,14 @@ def runContainersAsSamples(self):
if not self.prepareRun():
return
gudrunFile = self.gudpy.runModes.convertContainersToSample(
- self.gudpy.gudrunFile
+ self.gudrunFile
)
self.runGudrun(gudrunFile=gudrunFile)
def runFilesIndividually(self):
if not self.prepareRun():
return
- gudrunFile = self.gudpy.runModes.partition(self.gudpy.gudrunFile)
+ gudrunFile = self.gudpy.runModes.partition(self.gudrunFile)
self.runGudrun(gudrunFile=gudrunFile)
def runBatchProcessing(self):
@@ -573,7 +630,7 @@ def runBatchProcessing(self):
self.mainWidget
)
self.gudpy.gudrunIterator = worker.BatchWorker(
- gudrunFile=self.gudpy.gudrunFile,
+ gudrunFile=self.gudrunFile,
purge=self.gudpy.purge,
iterator=dialog.iterator,
batchSize=dialog.batchSize,
@@ -590,6 +647,70 @@ def runBatchProcessing(self):
self.workerThread = self.gudpy.gudrunIterator
self.startProcess()
+ def runOptimiseExponents(self):
+ if not self.prepareRun():
+ return
+
+ samples = []
+ for sw in self.mainWidget.optimiseTab.sampleWidgets.values():
+ if sw.runSample:
+ samples.append(sw.sample)
+
+ self.gudpy.optimiser = worker.OptimiseExponentsWorker(
+ gudrunFile=self.gudrunFile,
+ samples=samples,
+ purge=self.gudpy.purge,
+ limit=(self.mainWidget.optimiseTab
+ .exponentialGroupBox.limitSpinBox.value()),
+ nIters=(self.mainWidget.optimiseTab
+ .exponentialGroupBox.nIterationsSpinBox.value())
+ )
+
+ self.mainWidget.optimiseTab.setSamples(
+            self.gudrunFile.runSamples()
+ )
+
+ self.connectProcessSignals(
+ process=self.gudpy.optimiser, onFinish=self.gudrunFinished
+ )
+
+ self.gudpy.optimiser.progressChanged.connect(
+ self.mainWidget.optimiseTab.refreshWidgets
+ )
+
+ self.workerThread = self.gudpy.optimiser
+ self.startProcess()
+
+ def setNewExponentialParameters(self):
+ self.gudrunFile = self.gudpy.optimiser.gudrunFile
+ self.runGudrun()
+
+ def runOptimiseInelasticity(self):
+ if not self.prepareRun():
+ return
+
+ samples = []
+ for sw in self.mainWidget.optimiseTab.sampleWidgets.values():
+ if sw.runSample:
+ samples.append(sw.sample)
+
+ self.gudpy.optimiser = worker.OptimiseInelasticityWorker(
+ self.gudrunFile,
+ samples,
+ purge=self.gudpy.purge
+ )
+
+ self.connectProcessSignals(
+ process=self.gudpy.optimiser, onFinish=self.gudrunFinished
+ )
+
+ self.gudpy.optimiser.progressChanged.connect(
+ self.mainWidget.optimiseTab.refreshWidgets
+ )
+
+ self.workerThread = self.gudpy.optimiser
+ self.startProcess()
+
def stopProcess(self):
if self.workerThread:
self.workerThread.requestInterruption()
@@ -625,5 +746,5 @@ def exit_(self):
)
if result == QtWidgets.QMessageBox.Yes:
- self.gudpy.gudrunFile.save()
+ self.gudrunFile.save()
sys.exit(0)
diff --git a/gudpy/gui/widgets/core/gudpy_tree.py b/gudpy/gui/widgets/core/gudpy_tree.py
index 506e86d5..3d5bc171 100644
--- a/gudpy/gui/widgets/core/gudpy_tree.py
+++ b/gudpy/gui/widgets/core/gudpy_tree.py
@@ -275,7 +275,7 @@ def setData(self, index, value, role):
if not index.isValid():
return False
elif role == Qt.CheckStateRole and self.isSample(index):
- if value == Qt.Checked:
+ if Qt.CheckState.Checked == Qt.CheckState(value):
index.internalPointer().runThisSample = True
else:
index.internalPointer().runThisSample = False
@@ -285,11 +285,24 @@ def setData(self, index, value, role):
role == Qt.EditRole
and (self.isSample(index) or self.isContainer(index))
):
- index.internalPointer().name = uniquifyName(
- value,
- [s.name for s in self.gudrunFile.sampleBackground.samples],
- sep="",
- incFirst=True)
+ if self.isSample(index):
+ index.internalPointer().name = uniquifyName(
+ value,
+ [s.name for s in self.findParent(
+ index.internalPointer()).samples
+ if s != index.internalPointer()
+ ],
+ sep="_",
+ incFirst=False)
+ elif self.isContainer(index):
+ index.internalPointer().name = uniquifyName(
+ value,
+ [c.name for c in self.findParent(
+ index.internalPointer()).containers
+ if c != index.internalPointer()
+ ],
+ sep="_",
+ incFirst=False)
self.dataChanged.emit(index, index)
return True
else:
@@ -1007,7 +1020,17 @@ def insertSample(self, sample=None):
"""
if not sample:
sample = Sample()
- sample.name = "SAMPLE" # for now, give a default name.
+ samples = []
+ if isinstance(self.currentObject(), Sample):
+ samples = [s.name for s in self.model().findParent(
+ self.currentObject()).samples]
+ elif isinstance(self.currentObject(), SampleBackground):
+ samples = [s.name for s in self.currentObject().samples]
+ sample.name = uniquifyName(
+ "SAMPLE",
+ samples,
+ sep="_",
+ incFirst=False) # for now, give a default name.
self.insertRow(sample)
def insertContainer(self, container=None):
diff --git a/gudpy/gui/widgets/core/main_window.py b/gudpy/gui/widgets/core/main_window.py
index ef90960f..42a76379 100644
--- a/gudpy/gui/widgets/core/main_window.py
+++ b/gudpy/gui/widgets/core/main_window.py
@@ -6,6 +6,7 @@
from core.container import Container
from core.sample import Sample
+from core.io.gudpy_io import GudPyIO
from gui.widgets.dialogs.iterators import (
CompositionIterationDialog,
@@ -15,6 +16,7 @@
ThicknessIterationDialog,
TweakFactorIterationDialog,
)
+from gui.widgets.core.optimise_tab import OptimiseTab
from gui.widgets.dialogs.purge import PurgeDialog
from gui.widgets.dialogs.view_input_dialog import ViewInputDialog
@@ -27,7 +29,6 @@
from gui.widgets.dialogs.io import ExportDialog, MissingFilesDialog
from gui.widgets.dialogs.batch import BatchProcessingDialog
from gui.widgets.core.gudpy_tree import GudPyTreeView
-from gui.widgets.core.output_tree import OutputTreeView
from gui.widgets.tables.composition_table import CompositionTable
from gui.widgets.tables.ratio_composition_table import RatioCompositionTable
@@ -52,7 +53,7 @@
from gui.widgets.slots.container_slots import ContainerSlots
from gui.widgets.slots.sample_background_slots import SampleBackgroundSlots
from gui.widgets.slots.sample_slots import SampleSlots
-from gui.widgets.slots.output_slots import OutputSlots
+from gui.widgets.slots.output_slots import OutputSlots, OutputTreeView
# from gui.widgets.resources import resources_rc # noqa
from core import enums
from core import config
@@ -101,11 +102,12 @@ def __init__(self):
self.modified = False
self.clipboard = None
self.results = {}
- self.gudrunOutput = None
self.allPlots = []
self.plotModes = {}
+ self.optimiseTab = OptimiseTab(self)
+
self.initComponents()
self.instrumentSlots = InstrumentSlots(self.ui, self)
@@ -323,6 +325,9 @@ def initComponents(self):
self.ui.insertContainerMenu.addMenu(
insertContainerFromTemplate
)
+ self.ui.tabWidget.addTab(self.optimiseTab, "Optimise")
+
+ self.ui.runContainersAsSamples.setEnabled(False)
self.setActionsEnabled(False)
self.ui.tabWidget.setVisible(False)
@@ -357,7 +362,7 @@ def handleObjectsChanged(self):
if not self.widgetsRefreshing:
self.setModified()
- def updateWidgets(self, gudrunFile, gudrunOutput=None):
+ def updateWidgets(self, gudrunFile):
self.gudrunFile = gudrunFile
self.ui.gudrunFile = gudrunFile
self.widgetsRefreshing = True
@@ -366,6 +371,22 @@ def updateWidgets(self, gudrunFile, gudrunOutput=None):
self.beamSlots.setBeam(self.gudrunFile.beam)
self.componentSlots.setComponents(self.gudrunFile.components)
self.normalisationSlots.setNormalisation(self.gudrunFile.normalisation)
+ self.optimiseTab.setSamples(self.gudrunFile.runSamples())
+
+ if not self.allPlots:
+ allTopChart = GudPyChart()
+ samples = [*gudrunFile.samples(), *gudrunFile.containers()]
+ allTopChart.addSamples(samples)
+ allTopChart.plot(
+ self.ui.allPlotComboBox.itemData(
+ self.ui.allPlotComboBox.currentIndex()
+ )
+ )
+ allBottomChart = GudPyChart()
+ allBottomChart.addSamples(samples)
+ self.allPlots = [allTopChart, allBottomChart]
+ self.ui.allSampleTopPlot.setChart(allTopChart)
+ self.ui.allSampleBottomPlot.setChart(allBottomChart)
if len(self.gudrunFile.sampleBackgrounds):
self.sampleBackgroundSlots.setSampleBackground(
@@ -390,8 +411,10 @@ def updateWidgets(self, gudrunFile, gudrunOutput=None):
self.ui.objectTree.model().dataChanged.connect(
self.handleObjectsChanged
)
- if gudrunOutput:
- self.updateResults(gudrunOutput)
+ self.ui.objectTree.model().dataChanged.connect(
+ lambda: self.optimiseTab.setSamples(self.gudrunFile.runSamples())
+ )
+ self.updateResults()
self.widgetsRefreshing = False
def updateGeometries(self):
@@ -430,7 +453,7 @@ def updateCompositions(self):
self.ui.containerCompositionTable.farmCompositions()
def focusResult(self):
- if not self.gudrunOutput:
+ if not self.gudrunFile.outputFolder:
return
self.updateSamples()
@@ -566,17 +589,16 @@ def updateProgressBar(self, progress: int, taskName: str):
self.ui.currentTaskLabel.setText(taskName)
def updateSamples(self):
- if not self.gudrunOutput:
- return
-
samples = [
*self.ui.objectTree.getSamples(),
*self.ui.objectTree.getContainers(),
]
for sample in samples:
- topChart = GudPyChart(self.gudrunOutput)
+ if sample in self.results.keys():
+ continue
+ topChart = GudPyChart()
topChart.addSample(sample)
- bottomChart = GudPyChart(self.gudrunOutput)
+ bottomChart = GudPyChart()
bottomChart.addSample(sample)
if sample not in self.plotModes.keys():
plotMode = (
@@ -592,31 +614,31 @@ def updateSamples(self):
bottomChart.plot(bottom)
else:
topChart.plot(plotMode)
- gf = self.gudrunOutput.gudFile(name=sample.name)
+ gf = sample.gudFile
self.results[sample] = [
topChart, bottomChart, gf if gf else None]
def updateAllSamples(self):
+ return
samples = [
*self.ui.objectTree.getSamples(),
*self.ui.objectTree.getContainers(),
]
- allTopChart = GudPyChart(self.gudrunOutput)
+ allTopChart = GudPyChart()
allTopChart.addSamples(samples)
allTopChart.plot(
self.ui.allPlotComboBox.itemData(
self.ui.allPlotComboBox.currentIndex()
)
)
- allBottomChart = GudPyChart(self.gudrunOutput)
+ allBottomChart = GudPyChart()
allBottomChart.addSamples(samples)
self.allPlots = [allTopChart, allBottomChart]
self.ui.allSampleTopPlot.setChart(allTopChart)
self.ui.allSampleBottomPlot.setChart(allBottomChart)
- def updateResults(self, gudrunOutput):
+ def updateResults(self):
self.ui.plotTab.setEnabled(True)
- self.gudrunOutput = gudrunOutput
self.updateSamples()
self.updateAllSamples()
self.focusResult()
@@ -706,7 +728,7 @@ def purgeOptionsMessageBox(self, text):
def setModified(self):
if not self.modified:
- if self.gudrunFile.path():
+ if GudPyIO.projectDir:
self.modified = True
self.ui.setWindowModified(True)
self.ui.save.setEnabled(True)
@@ -738,7 +760,7 @@ def setControlsEnabled(self, state):
self.ui.viewLiveInputFile.setEnabled(state)
self.ui.save.setEnabled(
state & self.modified
- if self.gudrunFile.path()
+ if GudPyIO.projectDir
else False
)
self.ui.exportInputFile.setEnabled(state)
@@ -749,7 +771,8 @@ def setControlsEnabled(self, state):
self.ui.new_.setEnabled(state)
self.ui.checkFilesExist.setEnabled(state)
self.ui.runFilesIndividually.setEnabled(state)
- self.ui.runContainersAsSamples.setEnabled(state)
+ self.ui.runContainersAsSamples.setEnabled(False)
+ self.optimiseTab.setEnabled(state)
def setActionsEnabled(self, state):
self.setTreeActionsEnabled(state)
@@ -760,7 +783,7 @@ def setActionsEnabled(self, state):
self.ui.runFilesIndividually.setEnabled(state)
self.ui.checkFilesExist.setEnabled(state)
self.ui.runFilesIndividually.setEnabled(state)
- self.ui.runContainersAsSamples.setEnabled(state)
+ self.ui.runContainersAsSamples.setEnabled(False)
self.ui.batchProcessing.setEnabled(state)
self.ui.viewLiveInputFile.setEnabled(state)
self.ui.save.setEnabled(state & self.modified)
@@ -779,7 +802,8 @@ def setTreeActionsEnabled(self, state):
def viewInput(self):
self.currentState = str(self.gudrunFile)
viewInputDialog = ViewInputDialog(self.gudrunFile, self)
- viewInputDialog.widget.exec_()
+ text = viewInputDialog.widget.exec()
+ return text
def handleAllPlotModeChanged(self, index):
plotMode = self.ui.allPlotComboBox.itemData(index)
diff --git a/gudpy/gui/widgets/core/optimise_tab.py b/gudpy/gui/widgets/core/optimise_tab.py
new file mode 100644
index 00000000..90232519
--- /dev/null
+++ b/gudpy/gui/widgets/core/optimise_tab.py
@@ -0,0 +1,474 @@
+from PySide6 import QtWidgets, QtCore
+import os
+import shutil
+
+from core import utils
+import core.optimise as opt
+
+
+class SampleWidget(QtWidgets.QWidget):
+ refDataSet = QtCore.Signal(int)
+
+ def __init__(self, parent, sample):
+ super().__init__(parent)
+ self._parent = parent
+ self.sample = sample
+ self.runSample: bool = False
+ self.initUI()
+
+ def initUI(self):
+ layout = QtWidgets.QHBoxLayout()
+
+ self.sampleNameLabel = QtWidgets.QLabel(self.sample.name, self)
+ layout.addWidget(self.sampleNameLabel)
+
+ self.checkBox = QtWidgets.QCheckBox("Optimise", self)
+ self.checkBox.setChecked(False)
+ self.checkBox.stateChanged.connect(self.checkChanged)
+ layout.addWidget(self.checkBox)
+
+ self.lineEdit = QtWidgets.QLineEdit(self)
+ self.lineEdit.editingFinished.connect(self.checkFile)
+ if self.sample.referenceDataFile:
+ self.lineEdit.setText(self.sample.referenceDataFile)
+ self.checkBox.setChecked(True)
+ self.runSample = True
+ layout.addWidget(self.lineEdit)
+
+ self.browseButton = QtWidgets.QPushButton("Browse", self)
+ self.browseButton.clicked.connect(self.browseFiles)
+ layout.addWidget(self.browseButton)
+ self.setLayout(layout)
+
+ def checkChanged(self):
+ self.runSample = self.checkBox.isChecked()
+
+ def browseFiles(self):
+ filepath, _ = QtWidgets.QFileDialog.getOpenFileName(
+ self,
+ "Select reference data (.r)",
+ filter="*.r")
+ if filepath:
+ self.lineEdit.setText(filepath)
+ self.checkFile()
+
+ def checkFile(self):
+ if not os.path.exists(self.lineEdit.text()):
+ if self.runSample:
+ self.sampleNameLabel.setStyleSheet("color: darkRed;")
+ else:
+ self.sampleNameLabel.setStyleSheet("")
+ self.sample.referenceDataFile = ""
+ else:
+ self.lineEdit.setStyleSheet("")
+ self.sample.referenceDataFile = self.lineEdit.text()
+ self.refDataSet.emit(1)
+
+
+class OptimiseResultsModel(QtCore.QAbstractTableModel):
+ def __init__(self, headings):
+ super().__init__()
+ self.samples = []
+ self.initSamples = []
+ self.headings = headings
+ self.dataFn = None
+
+ def rowCount(
+ self, parent: QtCore.QModelIndex) -> int:
+ return len(self.initSamples)
+
+ def columnCount(self, parent: QtCore.QModelIndex) -> int:
+ return len(self.headings)
+
+ def headerData(self, section: int, orientation: QtCore.Qt.Orientation,
+ role: int = ...) -> QtWidgets:
+ if role == QtCore.Qt.DisplayRole:
+ if orientation == QtCore.Qt.Horizontal:
+ return self.headings[section]
+ else:
+ return ""
+
+ def setSamples(self, initSamples):
+ self.initSamples = initSamples
+ bottomRight = self.createIndex(len(self.samples), len(self.headings))
+ self.dataChanged.emit(self.createIndex(0, 0), bottomRight)
+
+ def setResults(self, resultSamples):
+ self.samples = []
+ for s in self.initSamples:
+ for rs in resultSamples:
+ if rs.name == s.name:
+ self.samples.append(rs)
+
+ def refresh(self):
+ bottomRight = self.createIndex(len(self.samples), len(self.headings))
+ self.dataChanged.emit(self.createIndex(0, 0), bottomRight)
+
+ def round(self, val, precision):
+ try:
+ return round(val, precision)
+ except TypeError:
+ return "-"
+
+
+class OptimiseExponents(QtWidgets.QGroupBox):
+ class OptimiseExponentsResultsModel(OptimiseResultsModel):
+ def __init__(self, limitSpinBox):
+ super().__init__(("Sample", "Initial MSE", "Final MSE",
+ "Optimum Parameters"))
+ self.limitSpinBox = limitSpinBox
+
+ def data(self, index: QtCore.QModelIndex, role: int):
+ if role == QtCore.Qt.DisplayRole:
+ if index.column() == 0:
+ return self.initSamples[index.row()].name
+ if index.column() == 1:
+ if not self.initSamples[index.row()].mse(
+ self.limitSpinBox.value()):
+ return "-"
+ return str(self.round(self.initSamples[index.row()].mse(
+ self.limitSpinBox.value()), 5))
+ if index.column() == 2:
+ if not self.samples:
+ return "-"
+ if not self.samples[index.row()].mse(
+ self.limitSpinBox.value()):
+ return "-"
+ return str(self.round(self.samples[index.row()].mse(
+ self.limitSpinBox.value()), 5))
+ if index.column() == 3:
+ rounded = ""
+ samples = (self.samples if self.samples
+ else self.initSamples)
+ for set in samples[index.row()].exponentialValues:
+ roundedSet = [self.round(v, 3) for v in set]
+ if rounded:
+ rounded += ", "
+ rounded += str(roundedSet)
+ return rounded
+
+ def __init__(self, parent=None):
+ super().__init__(parent, title="Optimise Exponential Subtractions")
+ self.exponentialLayout = QtWidgets.QVBoxLayout()
+
+ self.optionsLayout = QtWidgets.QHBoxLayout()
+ self.options = QtWidgets.QVBoxLayout()
+
+ self.nIterationsSpinBox = QtWidgets.QSpinBox()
+ self.nIterationsSpinBox.setValue(15)
+ self.nIterationsSpinBox.setMinimum(10)
+ self.nIterationsSpinBox.setSizePolicy(
+ QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
+ self.nIterationsSpinBox.setMinimumWidth(100)
+
+ self.options.addWidget(QtWidgets.QLabel(text="Iterations"))
+ self.options.addWidget(self.nIterationsSpinBox)
+
+ self.limitSpinBox = QtWidgets.QDoubleSpinBox()
+ self.limitSpinBox.setValue(0.5)
+ self.limitSpinBox.setSizePolicy(
+ QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
+ self.nIterationsSpinBox.setMinimumWidth(100)
+
+ self.options.addWidget(QtWidgets.QLabel(text="Q Limit"))
+ self.options.addWidget(self.limitSpinBox)
+ self.options.addItem(QtWidgets.QSpacerItem(
+ 0, 0, QtWidgets.QSizePolicy.Minimum,
+ QtWidgets.QSizePolicy.Expanding))
+ self.options.setContentsMargins(10, 0, 0, 0)
+
+ self.resultsLayout = QtWidgets.QVBoxLayout()
+
+ self.resultsTable = QtWidgets.QTableView()
+ self.resultsModel = self.OptimiseExponentsResultsModel(
+ self.limitSpinBox)
+ self.resultsTable.setModel(self.resultsModel)
+ self.resultsModel.dataChanged.connect(self.resultsTable.update)
+ self.resultsTable.verticalHeader().hide()
+ self.resultsTable.horizontalHeader().setSectionResizeMode(
+ QtWidgets.QHeaderView.ResizeMode.Stretch
+ )
+ self.resultsTable.setSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
+ self.resultsLayout.addWidget(self.resultsTable)
+
+ self.buttonLayout = QtWidgets.QHBoxLayout()
+ self.runOptimiseExponents = QtWidgets.QPushButton("Run")
+
+ self.setParamsButton = QtWidgets.QPushButton("Set new parameters")
+ self.setParamsButton.setEnabled(False)
+
+ self.exportDataButton = QtWidgets.QPushButton("Export Data")
+ self.exportDataButton.setEnabled(False)
+ self.exportDataButton.clicked.connect(self.exportData)
+
+ self.buttonLayout.addWidget(self.runOptimiseExponents)
+ self.buttonLayout.addItem(QtWidgets.QSpacerItem(
+ 0, 0, QtWidgets.QSizePolicy.Expanding,
+ QtWidgets.QSizePolicy.Minimum))
+ self.buttonLayout.addWidget(self.setParamsButton)
+ self.buttonLayout.addWidget(self.exportDataButton)
+ self.resultsLayout.addItem(self.buttonLayout)
+ self.optionsLayout.addItem(self.resultsLayout)
+
+ self.optionsLayout.addItem(self.options)
+
+ self.exponentialLayout.addItem(self.optionsLayout)
+
+ self.setLayout(self.exponentialLayout)
+
+ def setSamples(self, samples):
+ self.resultsModel.setSamples(samples)
+ self.resultsModel.refresh()
+
+ def setResults(self, resultSamples):
+ self.resultsModel.setResults(resultSamples)
+ self.resultsModel.refresh()
+ self.setParamsButton.setEnabled(True)
+ self.exportDataButton.setEnabled(True)
+
+ def setParameters(self, gudrunFile):
+ for optSample in self.resultsModel.samples:
+ for sample in gudrunFile.samples():
+ if optSample.name == sample.name:
+ sample.exponentialValues = optSample.exponentialValues
+ message = QtWidgets.QMessageBox()
+ message.setWindowTitle("GudPy Info")
+ message.setText(
+ "New parameters applied to the samples. " +
+ "Run Gudrun to apply the changes.")
+ message.exec()
+
+ def exportData(self):
+ dirname, _ = QtWidgets.QFileDialog.getSaveFileName(
+ caption="Export mint files to...",
+ )
+ if not dirname:
+ return False
+ utils.makeDir(dirname)
+ for sample in self.resultsModel.samples:
+ mintFile = sample.dataFiles[0].mintFile
+ shutil.copyfile(mintFile, os.path.join(
+ dirname, os.path.basename(mintFile)))
+
+
+class OptimiseInelasticity(QtWidgets.QGroupBox):
+ class OptimiseInelasticityResultsModel(OptimiseResultsModel):
+ def __init__(self):
+ super().__init__(("Sample", "Initial MSE",
+ "Final MSE", "Optimum iterations"))
+ self.results = {}
+
+ def data(self, index: QtCore.QModelIndex, role: int):
+ if role == QtCore.Qt.DisplayRole:
+ sample = self.initSamples[index.row()]
+ if index.column() == 0:
+ return sample.name
+ if index.column() == 1:
+ if not sample.mse():
+ return "-"
+ return str(self.round(sample.mse(), 5))
+ if index.column() == 2:
+ if not self.results.get(sample.name, None):
+ if not sample.mse():
+ return "-"
+ return str(self.round(sample.mse(), 5))
+ bestSamp = self.results[sample.name]["bestSample"]
+ return str(self.round(bestSamp.mse(), 5))
+ if index.column() == 3:
+ if not self.results.get(sample.name, None):
+ return "-"
+
+ return (self.results[
+ self.samples[index.row()].name]["bestIt"]
+ if self.results else "-")
+
+ def __init__(self, parent=None):
+ super().__init__(parent, title="Optimise Inelasticity Subtractions")
+ self._parent = parent
+ self.results = {}
+ self.inelasticityLayout = QtWidgets.QVBoxLayout()
+
+ self.resultsLayout = QtWidgets.QHBoxLayout()
+
+ self.resultsTable = QtWidgets.QTableView()
+
+ self.resultsModel = self.OptimiseInelasticityResultsModel()
+ self.resultsTable.setModel(self.resultsModel)
+ self.resultsModel.dataChanged.connect(self.resultsTable.update)
+ self.resultsTable.horizontalHeader().setSectionResizeMode(
+ QtWidgets.QHeaderView.ResizeMode.ResizeToContents
+ )
+ self.resultsTable.verticalHeader().hide()
+ self.resultsTable.horizontalHeader().setSectionResizeMode(
+ QtWidgets.QHeaderView.ResizeMode.Stretch
+ )
+ self.resultsTable.setSizePolicy(
+ QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
+ self.resultsLayout.addWidget(self.resultsTable)
+ self.inelasticityLayout.addItem(self.resultsLayout)
+
+ self.buttonLayout = QtWidgets.QHBoxLayout()
+ self.runInelasticityButton = QtWidgets.QPushButton("Run")
+ self.exportDataButton = QtWidgets.QPushButton("Export Data")
+ self.exportDataButton.clicked.connect(self.exportData)
+ self.exportDataButton.setEnabled(False)
+ self.buttonLayout.addWidget(self.runInelasticityButton)
+ self.buttonLayout.addItem(QtWidgets.QSpacerItem(
+ 0, 0, QtWidgets.QSizePolicy.Expanding,
+ QtWidgets.QSizePolicy.Minimum))
+ self.buttonLayout.addWidget(self.exportDataButton)
+
+ self.inelasticityLayout.addItem(self.buttonLayout)
+ self.setLayout(self.inelasticityLayout)
+
+ def setSamples(self, samples):
+ self.resultsModel.setSamples(samples)
+ self.resultsModel.refresh()
+
+ def setResults(self, resultSamples, results: dict):
+ self.resultsModel.setResults(resultSamples)
+ self.resultsModel.results = results
+ self.resultsModel.refresh()
+ self.exportDataButton.setEnabled(True)
+
+ def exportData(self):
+ dirname, _ = QtWidgets.QFileDialog.getSaveFileName(
+ caption="Export mint files to...",
+ )
+ if not dirname:
+ return False
+ utils.makeDir(dirname)
+
+ for sample in self.resultsModel.samples:
+ mintFile = None
+            optSample = self.resultsModel.results.get(sample.name, None)
+ if not optSample:
+ mintFile = sample.dataFiles[0].mintFile
+ else:
+ mintFile = optSample["bestSample"].mintFile
+ shutil.copyfile(mintFile, os.path.join(
+ dirname, os.path.basename(mintFile)))
+
+
+class OptimiseTab(QtWidgets.QWidget):
+ runOptimiseExponents = QtCore.Signal(int)
+ runOptimiseInelasticity = QtCore.Signal(int)
+
+ def __init__(self, parent=None):
+ super().__init__(parent)
+ self._parent = parent
+ self.samples = []
+ self.sampleWidgets = {}
+ self.mainLayout = QtWidgets.QVBoxLayout()
+
+ self.sampleGroupBox = QtWidgets.QGroupBox(
+ title="Sample Reference Files")
+ self.sampleLayout = QtWidgets.QVBoxLayout()
+ self.sampleLayout.setSpacing(1)
+ self.sampleGroupBox.setLayout(self.sampleLayout)
+
+ self.optimiseOptionsLayout = QtWidgets.QVBoxLayout()
+ self.exponentialGroupBox = OptimiseExponents(self)
+ self.exponentialGroupBox.runOptimiseExponents.clicked.connect(
+ self.handleOptimiseExponentsButton
+ )
+
+ self.mainLayout.addWidget(self.sampleGroupBox)
+
+ self.inelasticityGroupBox = OptimiseInelasticity(self._parent)
+ self.inelasticityGroupBox.runInelasticityButton.clicked.connect(
+ self.handleOptimiseInelasticityButton
+ )
+
+ self.optimiseOptionsLayout.addWidget(self.exponentialGroupBox)
+ self.optimiseOptionsLayout.addItem(
+ QtWidgets.QSpacerItem(
+ 0, 20, QtWidgets.QSizePolicy.Expanding,
+ QtWidgets.QSizePolicy.Preferred)
+ )
+ self.optimiseOptionsLayout.addWidget(self.inelasticityGroupBox)
+ self.mainLayout.addItem(self.optimiseOptionsLayout)
+
+ self.setLayout(self.mainLayout)
+
+ def refreshWidgets(self):
+ self.exponentialGroupBox.resultsModel.refresh()
+ self.inelasticityGroupBox.resultsModel.refresh()
+
+ def setSamples(self, samples):
+ self.inelasticityGroupBox.setSamples(samples)
+ self.exponentialGroupBox.setSamples(samples)
+
+ if samples == self.samples:
+ return
+
+ self.samples = samples
+
+ while self.sampleLayout.count():
+ item = self.sampleLayout.takeAt(0)
+ widget = item.widget()
+ if widget is not None:
+ widget.deleteLater()
+
+ for sample in samples:
+ sampleWidget = self.sampleWidgets.get(sample.name)
+ if not sampleWidget:
+ sampleWidget = SampleWidget(self._parent, sample)
+ sampleWidget.refDataSet.connect(
+ lambda: (
+ self.exponentialGroupBox.resultsModel.setSamples(
+ self.samples)))
+ sampleWidget.refDataSet.connect(
+ lambda: (
+ self.inelasticityGroupBox.resultsModel.setSamples(
+ self.samples)))
+ self.sampleWidgets[sample.name] = sampleWidget
+ self.sampleLayout.addWidget(sampleWidget)
+
+        for sname in list(self.sampleWidgets.keys()):
+ if sname not in [s.name for s in samples]:
+ self.sampleWidgets.pop(sname, None)
+
+ if not self._parent.widgetsRefreshing:
+ self._parent.setModified()
+
+ def setResults(self, samples, optimiser):
+ self.exponentialGroupBox.setResults(samples)
+
+ if isinstance(optimiser, opt.InelasticityOptimisation):
+ self.inelasticityGroupBox.setResults(samples, optimiser.results)
+
+ if not self._parent.widgetsRefreshing:
+ self._parent.setModified()
+
+ def validateRun(self):
+ sampleSelected = False
+ for sw in self.sampleWidgets.values():
+ if sw.runSample:
+ sampleSelected = True
+ if not sw.sample.referenceDataFile:
+ self._parent.sendError(
+ f"Reference data not provided for {sw.sample.name}"
+ )
+ return False
+ if (not sw.sample.dataFiles or
+ not sw.sample.dataFiles[0].mintFile):
+ self._parent.sendError(
+ "Please run Gudrun before optimising."
+ )
+ return False
+ if not sampleSelected:
+ self._parent.sendError(
+ "No samples selected"
+ )
+ return False
+ return True
+
+ def handleOptimiseExponentsButton(self):
+ if self.validateRun():
+ self.runOptimiseExponents.emit(1)
+
+ def handleOptimiseInelasticityButton(self):
+ if self.validateRun():
+ self.runOptimiseInelasticity.emit(1)
diff --git a/gudpy/gui/widgets/core/output_tree.py b/gudpy/gui/widgets/core/output_tree.py
deleted file mode 100644
index b8f07eb7..00000000
--- a/gudpy/gui/widgets/core/output_tree.py
+++ /dev/null
@@ -1,216 +0,0 @@
-from copy import deepcopy
-from PySide6.QtWidgets import QTreeView
-from PySide6.QtCore import (
- QAbstractItemModel,
- QModelIndex,
- QPersistentModelIndex,
- Qt
-)
-
-from core.gudrun_file import GudrunFile
-from core.instrument import Instrument
-from core.sample import Sample
-
-
-class OutputTreeModel(QAbstractItemModel):
-
- def __init__(self, output, gudrunFile, keyMap=None, parent=None):
- super().__init__(parent)
- if isinstance(output, str):
- output = {0: output}
- self.keyMap = keyMap
- self.output = output
- self.gudrunFile = gudrunFile
- self.map = {}
- self.refs = []
- self.data_ = {}
-
- for idx, key in enumerate(output.keys()):
- self.data_[idx] = {
- "name": key,
- "outputs": []
- }
-
- self.persistentIndexes = {}
- self.setupData()
-
- def setupData(self):
- for idx, [name, output] in enumerate(self.output.items()):
- gf = deepcopy(self.gudrunFile)
- gf.name = name
- gf.output = output
- self.refs.append(gf)
- # Indexes for each sample background
- offsets = [
- n for n, l in
- enumerate(output.splitlines(keepends=True))
- if "Got to: SAMPLE BACKGROUND" in l
- ]
-
- if not offsets:
- i = deepcopy(self.gudrunFile.instrument)
- i.output = output
- i.name = "General"
- self.data_[idx]["outputs"].append(i)
- return
-
- sbindicies = []
- for i in range(len(offsets) - 1):
- sbindicies.append([offsets[i], offsets[i + 1] - 1])
-
- sbindicies.append(
- [
- offsets[-1], len(output.splitlines(keepends=True))
- ]
- )
-
- instrument = deepcopy(self.gudrunFile.instrument)
- instrument.output = "".join(
- output.splitlines(keepends=True)
- [0: sbindicies[0][0]]
- )
- instrument.name = "General"
- self.data_[idx]["outputs"].append(instrument)
- prev = None
- for start, end in sbindicies:
- splicedOutput = (
- output.splitlines(keepends=True)[start:end]
- )
- indices = [
- n for n, l in
- enumerate(splicedOutput) if "Got to: SAMPLE" in l
- ][1:]
- for sample, index in zip(
- [
- s for sb in
- self.gudrunFile.sampleBackgrounds
- for s in sb.samples
- if s.runThisSample
- ], indices
- ):
- s = deepcopy(sample)
- s.output = index + start
-
- # If iterator output
- if len(self.data_.keys()) != 1:
- if prev:
- prev.output = "".join(
- output.splitlines(keepends=True)
- [prev.output:index + start - 1]
- )
-
- prev = s
- self.data_[idx]["outputs"].append(s)
- if prev:
- prev.output = "".join(
- output.splitlines(keepends=True)
- [prev.output:end]
- )
-
- def index(self, row, column, parent=QModelIndex()):
- # Check for invalid index.
- if not self.hasIndex(row, column, parent):
- return QModelIndex()
- elif not parent.isValid():
- if len(self.data_.keys()) == 1:
- obj = self.data_[0]["outputs"][row]
- elif len(self.data_.keys()) > 1:
- try:
- obj = self.refs[row]
- except IndexError:
- return QModelIndex()
- else:
- return QModelIndex()
- elif parent.isValid():
- obj = self.data_[
- self.refs.index(
- parent.internalPointer())]["outputs"][row]
- index = self.createIndex(row, 0, obj)
- self.persistentIndexes[obj] = QPersistentModelIndex(index)
- return index
-
- def findParent(self, obj):
- for parent, items in self.data_.items():
- if obj in items["outputs"]:
- return self.refs[parent]
-
- def parent(self, index):
- if not index.isValid():
- return QModelIndex()
- if isinstance(index.internalPointer(), GudrunFile):
- return QModelIndex()
- elif isinstance(index.internalPointer(), (Instrument, Sample)):
- if len(self.data_.keys()) > 1:
- parent = self.findParent(index.internalPointer())
- return QModelIndex(self.persistentIndexes[parent])
- else:
- return QModelIndex()
- else:
- return QModelIndex()
-
- def rowCount(self, parent=QModelIndex()):
- if not parent.isValid():
- if len(self.data_.keys()) == 1:
- # get length of first value
- return len(self.data_[0]["outputs"])
- elif len(self.data_.keys()) > 1:
- return len(self.data_.keys())
- else:
- return 0
- parentObj = parent.internalPointer()
- if isinstance(parentObj, GudrunFile):
- return len(
- self.data_[
- self.refs.index(
- parent.internalPointer())]["outputs"]
- )
- else:
- return 0
-
- def columnCount(self, parent=QModelIndex()):
- return 1
-
- def data(self, index, role=Qt.DisplayRole):
- if not index.isValid():
- return None
- if role == Qt.DisplayRole or role == Qt.EditRole:
- obj = index.internalPointer()
- if isinstance(obj, GudrunFile):
- if self.keyMap:
- return self.keyMap[obj.name]
- else:
- return obj.name
- elif isinstance(obj, (Instrument, Sample)):
- return obj.name
- else:
- return None
-
-
-class OutputTreeView(QTreeView):
-
- def __init__(self, parent):
- super().__init__(parent)
-
- def buildTree(self, gudrunFile, output, parent, keyMap=None):
- self.gudrunFile = gudrunFile
- self.output = output
- self.parent = parent
- self.keyMap = keyMap
- self.makeModel()
- self.setCurrentIndex(self.model().index(0, 0))
- self.setHeaderHidden(True)
- self.expandAll()
-
- def makeModel(self):
- self.model_ = OutputTreeModel(
- self.output, self.gudrunFile,
- keyMap=self.keyMap, parent=self
- )
- self.setModel(self.model_)
-
- def currentChanged(self, current, previous):
- if current.internalPointer():
- self.parent.widget.outputTextEdit.setText(
- current.internalPointer().output
- )
- return super().currentChanged(current, previous)
diff --git a/gudpy/gui/widgets/core/worker.py b/gudpy/gui/widgets/core/worker.py
index f27ef063..e8bc8afc 100644
--- a/gudpy/gui/widgets/core/worker.py
+++ b/gudpy/gui/widgets/core/worker.py
@@ -6,8 +6,10 @@
import core.exception as exc
from core.gudrun_file import GudrunFile
from core.purge_file import PurgeFile
-from core.iterators import Iterator
+from core.iterators import Iterator, InelasticitySubtraction
from core import iterators, config
+from core import optimise
+from core.sample import Sample
SUFFIX = ".exe" if os.name == "nt" else ""
@@ -37,17 +39,22 @@ def __init__(self, purgeFile: PurgeFile, gudrunFile: GudrunFile):
self.dataFileType = gudrunFile.instrument.dataFileType
self.dataFiles = [gudrunFile.instrument.groupFileName]
- self.appendDataFiless(gudrunFile.normalisation.dataFiles[0])
- self.appendDataFiless(gudrunFile.normalisation.dataFilesBg[0])
- self.appendDataFiless([df for sb in gudrunFile.sampleBackgrounds
- for df in sb.dataFiles])
+ self.appendDataFiles(
+ [df.filename for df in gudrunFile.normalisation.dataFiles])
+ self.appendDataFiles(
+ [df.filename for df in gudrunFile.normalisation.dataFilesBg])
+ self.appendDataFiles([df.filename for sb
+ in gudrunFile.sampleBackgrounds
+ for df in sb.dataFiles])
if not purgeFile.excludeSampleAndCan:
- self.appendDataFiless([df for sb in gudrunFile.sampleBackgrounds
- for s in sb.samples for df in s.dataFiles
- if s.runThisSample])
- self.appendDataFiless([df for sb in gudrunFile.sampleBackgrounds
- for s in sb.samples for c in s.containers
- for df in c.dataFiles if s.runThisSample])
+ self.appendDataFiles([df.filename for sb
+ in gudrunFile.sampleBackgrounds
+ for s in sb.samples for df in s.dataFiles
+ if s.runThisSample])
+ self.appendDataFiles([df.filename for sb
+ in gudrunFile.sampleBackgrounds
+ for s in sb.samples for c in s.containers
+ for df in c.dataFiles if s.runThisSample])
def _progressChanged(self):
stepSize = math.ceil(100 / len(self.dataFiles))
@@ -57,7 +64,7 @@ def _progressChanged(self):
progress += stepSize
return progress
- def appendDataFiless(self, dfs):
+ def appendDataFiles(self, dfs):
if isinstance(dfs, str):
dfs = [dfs]
for df in dfs:
@@ -88,23 +95,26 @@ def __init__(
self.progress = 0
# Number of GudPy objects
- self.markers = (
- config.NUM_GUDPY_CORE_OBJECTS - 1
- + len(self.gudrunFile.sampleBackgrounds) + sum([sum([
- len([
- sample
- for sample in sampleBackground.samples
- if sample.runThisSample
- ]),
- *[
- len(sample.containers)
- for sample in sampleBackground.samples
- if sample.runThisSample
- ]])
- for sampleBackground in self.gudrunFile.sampleBackgrounds
- ]))
+ self.markers = None
def _progressChanged(self):
+ if not self.markers:
+ self.markers = (
+ config.NUM_GUDPY_CORE_OBJECTS - 1
+ + len(self.gudrunFile.sampleBackgrounds) + sum([sum([
+ len([
+ sample
+ for sample in sampleBackground.samples
+ if sample.runThisSample
+ ]),
+ *[
+ len(sample.containers)
+ for sample in sampleBackground.samples
+ if sample.runThisSample
+ ]])
+ for sampleBackground in self.gudrunFile.sampleBackgrounds
+ ]))
+
stepSize = math.ceil(100 / self.markers)
self.progress = stepSize * sum([
self.output.count("Got to: INSTRUMENT"),
@@ -114,8 +124,6 @@ def _progressChanged(self):
self.output.count("Finished merging data for sample"),
self.output.count("Got to: CONTAINER"),
])
- if isinstance(self.iterator, iterators.InelasticitySubtraction):
- self.progress /= 2
return self.progress
def run(self):
@@ -144,7 +152,7 @@ def __init__(
for _ in range(iterator.nTotal
+ (1 if iterator.requireDefault else 0)):
- worker = GudrunWorker(gudrunFile, iterator)
+ worker = GudrunWorker(self.gudrunFile, iterator)
worker.outputChanged.connect(self._outputChanged)
worker.progressChanged.connect(self._progressChanged)
self.gudrunObjects.append(worker)
@@ -153,6 +161,12 @@ def _outputChanged(self, output):
idx = (f"{self.iterator.name} {self.iterator.nCurrent}"
if self.iterator.nCurrent != 0
or not self.iterator.requireDefault else "Default run")
+
+ if isinstance(self.iterator, InelasticitySubtraction):
+ idx = (
+ f"{self.iterator.iterationType} {self.iterator.iterationCount}"
+ )
+
currentOutput = self.output.get(idx, "")
self.output[idx] = currentOutput + output
self.outputChanged.emit(output)
@@ -240,3 +254,153 @@ def run(self):
except exc.GudrunException as e:
self.error = str(e)
self.finished.emit(1)
+
+
+class OptimiseExponentsWorker(QThread, optimise.BayesianOptimisation):
+ outputChanged = Signal(str)
+ progressChanged = Signal(int, str)
+ finished = Signal(int)
+
+ def __init__(
+ self,
+ gudrunFile: GudrunFile,
+ samples: list[Sample],
+ purge: PurgeWorker = None,
+ limit: float = 0.5,
+ nIters: int = 15,
+ verbose=False
+ ) -> None:
+ super().__init__(
+ gudrunFile=gudrunFile,
+ samples=samples,
+ limit=limit,
+ nIters=nIters,
+ verbose=verbose
+ )
+ self.output = {}
+
+ self.gudrunObjects = []
+ self.gudrunQueue = []
+ for _ in range(nIters * len(samples)):
+ gudrunWorker = GudrunWorker(
+ gudrunFile=self.gudrunFile, purge=purge)
+ self.gudrunObjects.append(gudrunWorker)
+ self.gudrunQueue.append(gudrunWorker)
+ gudrunWorker.outputChanged.connect(self._outputChanged)
+ gudrunWorker.progressChanged.connect(self._progressChanged)
+
+ self.gudrun = GudrunWorker(gudrunFile=self.gudrunFile, purge=purge)
+ self.gudrun.outputChanged.connect(self._outputChanged)
+ self.gudrun.progressChanged.connect(self._progressChanged)
+
+ def _outputChanged(self, output):
+ idx = f"{self.sample.name} {self.nCurrent}"
+
+ currentOutput = self.output.get(idx, "")
+ self.output[idx] = currentOutput + output
+ self.outputChanged.emit(output)
+
+ def _progressChanged(self, progress):
+ rounded = ""
+
+ for set in self.sample.exponentialValues:
+ roundedSet = [round(v, 3) for v in set]
+ if rounded:
+ rounded += ", "
+ rounded += str(roundedSet)
+
+ if self.nCurrent == "Final Run":
+ text = (
+ "Final Run with params " +
+ f"- {rounded}"
+ )
+ else:
+ text = (
+ f"Optimise {self.sample.name} " +
+ f"- {rounded}" +
+ (f" MSE: {self.mse} " if self.mse else "") +
+ f"- {self.nCurrent}/{int(self.nIters/len(self.samples))}"
+ )
+
+ self.progressChanged.emit(
+ progress,
+ text
+ )
+
+ def _progressChangedFinalRun(self, progress):
+ rounded = ""
+
+ for set in self.sample.exponentialValues:
+ roundedSet = [round(v, 3) for v in set]
+ if rounded:
+ rounded += ", "
+ rounded += str(roundedSet)
+
+ text = (
+ "Final run with params " +
+ f"- {rounded}" +
+ f" MSE: {self.mse}"
+ )
+
+ self.progressChanged.emit(
+ progress,
+ text
+ )
+
+ def run(self):
+ exitcode, error = self.optimise()
+ self.error = error
+ self.finished.emit(exitcode)
+
+
+class OptimiseInelasticityWorker(QThread, optimise.InelasticityOptimisation):
+ outputChanged = Signal(str)
+ progressChanged = Signal(int, str)
+ finished = Signal(int)
+
+ def __init__(
+ self,
+ gudrunFile: GudrunFile,
+ samples: list,
+ purge: PurgeWorker = None,
+ ) -> None:
+ self.gudrunIterator = GudrunIteratorWorker(
+ gudrunFile=gudrunFile,
+ iterator=iterators.InelasticitySubtraction(3)
+ )
+
+ super().__init__(
+ gudrunIterator=self.gudrunIterator,
+ samples=samples,
+ purge=purge
+ )
+
+ for gudrunWorker in self.gudrunIterator.gudrunObjects:
+ gudrunWorker.outputChanged.connect(self._outputChanged)
+ gudrunWorker.progressChanged.connect(self._progressChanged)
+
+ @property
+ def output(self):
+ return self.gudrunIterator.output
+
+ def _outputChanged(self, output):
+ idx = (f"{self.gudrunIterator.iterator.nCurrent}")
+
+ currentOutput = self.output.get(idx, "")
+ self.output[idx] = currentOutput + output
+ self.outputChanged.emit(output)
+
+ def _progressChanged(self, progress):
+ text = ("Optimise Inelasticity - " +
+ f"{self.gudrunIterator.iterator.iterationType} " +
+ f"{self.gudrunIterator.iterator.iterationCount} / 3")
+
+ self.progressChanged.emit(
+ progress,
+ text
+ )
+
+ def run(self):
+ exitcode, res = self.optimise()
+ self.error = res
+ self.finished.emit(exitcode)
diff --git a/gudpy/gui/widgets/dialogs/io.py b/gudpy/gui/widgets/dialogs/io.py
index a5510ac5..9285ebdc 100644
--- a/gudpy/gui/widgets/dialogs/io.py
+++ b/gudpy/gui/widgets/dialogs/io.py
@@ -95,8 +95,6 @@ class ExportDialog(QDialog):
Toggles renaming files to the sample name.
performExport(filename)
Performs an export to a filename.
- export()
- Performs a standard export.
exportAs()
Allows exporting to a specific file.
"""
@@ -174,14 +172,10 @@ def loadFilesList(self, rename=False):
def toggleRename(self, state):
self.loadFilesList(rename=bool(state))
- def performExport(self, filename=None):
+ def performExport(self, filename):
fl = GudPyFileLibrary(self.gudrunFile)
archive = fl.exportMintData(
- [
- s
- for sb in self.gudrunFile.sampleBackgrounds
- for s in sb.samples
- ],
+ self.gudrunFile.samples(),
renameDataFiles=self.widget.renameCheckBox.checkState(),
exportTo=filename,
includeParams=self.widget.includeParamsCheckBox.checkState()
@@ -193,11 +187,7 @@ def performExport(self, filename=None):
)
self.widget.close()
- def export(self):
- self.performExport()
-
def exportAs(self):
-
dialog = QFileDialog()
dialog.setDefaultSuffix("zip")
dialog.setWindowTitle("Export to..")
diff --git a/gudpy/gui/widgets/dialogs/view_input_dialog.py b/gudpy/gui/widgets/dialogs/view_input_dialog.py
index bcce525e..c45faaea 100644
--- a/gudpy/gui/widgets/dialogs/view_input_dialog.py
+++ b/gudpy/gui/widgets/dialogs/view_input_dialog.py
@@ -3,6 +3,7 @@
from PySide6.QtCore import QFile
from PySide6.QtWidgets import QDialog
from PySide6.QtUiTools import QUiLoader
+from core.io.gudpy_io import GudPyIO
class ViewInputDialog(QDialog):
@@ -52,7 +53,7 @@ def initComponents(self):
)
loader = QUiLoader()
self.widget = loader.load(uifile)
- self.widget.setWindowTitle(self.gudrunFile.path())
+ self.widget.setWindowTitle(GudPyIO.projectName)
self.widget.saveAndCloseButton.clicked.connect(self.save)
self.widget.closeButton.clicked.connect(self.widget.close)
self.widget.textEdit.setText(str(self.gudrunFile))
@@ -67,7 +68,6 @@ def save(self):
"""
Saves the input file and updates the UI appropiately.
"""
- with open(self.gudrunFile.path(), "w", encoding="utf-8") as fp:
- fp.write(self.widget.textEdit.toPlainText())
+ text = self.widget.textEdit.toPlainText()
self.widget.close()
- self.parent.updateFromFile()
+ return text
diff --git a/gudpy/gui/widgets/slots/container_slots.py b/gudpy/gui/widgets/slots/container_slots.py
index 87391a14..7aab92bf 100644
--- a/gudpy/gui/widgets/slots/container_slots.py
+++ b/gudpy/gui/widgets/slots/container_slots.py
@@ -26,7 +26,7 @@ def setContainer(self, container):
# Populate the data files list.
self.widget.containerDataFilesList.makeModel(
- self.container.dataFiles.dataFiles
+ self.container.dataFiles.dataFilenames
)
self.widget.containerDataFilesList.model().dataChanged.connect(
@@ -640,7 +640,7 @@ def handleDataFilesAltered(self):
if not self.widgetsRefreshing:
self.parent.setModified()
self.parent.gudrunFile.purged = False
- self.container.dataFiles.dataFiles = (
+ self.container.dataFiles.setFiles(
self.widget.containerDataFilesList.model().stringList()
)
diff --git a/gudpy/gui/widgets/slots/normalisation_slots.py b/gudpy/gui/widgets/slots/normalisation_slots.py
index 235ff8eb..494726a2 100644
--- a/gudpy/gui/widgets/slots/normalisation_slots.py
+++ b/gudpy/gui/widgets/slots/normalisation_slots.py
@@ -20,10 +20,10 @@ def setNormalisation(self, normalisation):
self.widgetsRefreshing = True
self.widget.dataFilesList.makeModel(
- self.normalisation.dataFiles.dataFiles
+ self.normalisation.dataFiles.dataFilenames
)
self.widget.backgroundDataFilesList.makeModel(
- self.normalisation.dataFilesBg.dataFiles
+ self.normalisation.dataFilesBg.dataFilenames
)
self.widget.dataFilesList.setSelectionMode(
@@ -645,7 +645,7 @@ def handleDataFilesAltered(self):
if not self.widgetsRefreshing:
self.parent.setModified()
self.parent.gudrunFile.purged = False
- self.normalisation.dataFiles.dataFiles = (
+ self.normalisation.dataFiles.setFiles(
self.widget.dataFilesList.model().stringList()
)
@@ -659,7 +659,7 @@ def handleDataFilesBgAltered(self):
if not self.widgetsRefreshing:
self.parent.setModified()
self.parent.gudrunFile.purged = False
- self.normalisation.dataFilesBg.dataFiles = (
+ self.normalisation.dataFilesBg.setFiles(
self.widget.backgroundDataFilesList.model().stringList()
)
diff --git a/gudpy/gui/widgets/slots/output_slots.py b/gudpy/gui/widgets/slots/output_slots.py
index f8623c0e..efd37b66 100644
--- a/gudpy/gui/widgets/slots/output_slots.py
+++ b/gudpy/gui/widgets/slots/output_slots.py
@@ -1,5 +1,12 @@
-class OutputSlots():
+from PySide6.QtCore import (
+ QAbstractItemModel,
+ QModelIndex,
+ Qt
+)
+from PySide6.QtWidgets import QTreeView
+
+class OutputSlots():
def __init__(self, widget, parent):
self.widget = widget
self.parent = parent
@@ -9,15 +16,182 @@ def setOutputStream(self, stdout):
"".join(stdout.split("\n"))
)
- def setOutput(self, output, task, gudrunFile=None, keyMap=None):
+ def setScroll(self):
+ self.widget.outputTextEdit.verticalScrollBar().setValue(
+ self.widget.outputTextEdit.verticalScrollBar(
+ ).maximum()
+ )
+
+ def setOutput(self, output, task, gudrunFile):
if not gudrunFile:
gudrunFile = self.parent.gudrunFile
self.output = output
self.task = task
self.widget.outputTree.buildTree(
- gudrunFile, output, self, keyMap=keyMap
+ gudrunFile, output, self
)
- self.widget.outputTextEdit.verticalScrollBar().setValue(
- self.widget.outputTextEdit.verticalScrollBar(
- ).maximum()
+ self.setScroll()
+
+
+class OutputNode:
+ def __init__(self, name, data: str) -> None:
+ self._name = name
+ self._data = data
+ self._parent = None
+ self._children: list[OutputNode] = []
+ self._row = 0
+
+ def name(self):
+ return self._name
+
+ def data(self):
+ return self._data
+
+ def parent(self):
+ return self._parent
+
+ def row(self):
+ return self._row
+
+ def columnCount(self):
+ return 1
+
+ def childCount(self):
+ return len(self._children)
+
+ def child(self, row):
+ if row >= 0 and row < self.childCount():
+ return self._children[row]
+
+ def addChild(self, child):
+ child._parent = self
+ child._row = len(self._children)
+ self._children.append(child)
+
+
+class OutputTreeModel(QAbstractItemModel):
+ def __init__(self, processName, parent=None):
+ super().__init__(parent)
+ self.processName = processName
+ self._root = OutputNode(processName, "")
+
+ def setOutput(self, output, gudrunFile):
+ self.beginResetModel()
+ if isinstance(output, dict):
+ self._setupDictData(output, gudrunFile)
+ else:
+ self._setupSingleData(self._root, output, gudrunFile)
+ self.endResetModel()
+
+ def addChild(self, parent, node):
+ self.beginInsertRows(
+ self.createIndex(parent.childCount() + 1, 0, node),
+ parent.childCount(),
+ parent.childCount())
+ parent.addChild(node)
+ self.endInsertRows()
+
+ def splitOutput(self, outputLines, marker):
+ splitOutput = [[]]
+ currentIndex = 0
+ for line in outputLines:
+ if line.strip() == f"gudrun_dcs> Got to: {marker}":
+ splitOutput.append([])
+ currentIndex += 1
+ splitOutput[currentIndex].append(line)
+ return splitOutput
+
+ def _setupSingleData(self, root, output, gudrunFile):
+ root._data = output
+ outputlines = output.splitlines(keepends=True)
+ # Indexes for each sample background
+ samples = [s for sb in gudrunFile.sampleBackgrounds
+ for s in sb.samples if s.runThisSample]
+
+ sampleBgOutputs = self.splitOutput(
+ outputlines, "SAMPLE BACKGROUND")
+
+ self.addChild(root, OutputNode(
+ "General", "".join(sampleBgOutputs[0])))
+ sampleBgOutputs.pop(0)
+
+ for sbg, sbgOutput in zip(
+ gudrunFile.sampleBackgrounds, sampleBgOutputs):
+ sampleOutputs = self.splitOutput(sbgOutput, "SAMPLE")
+
+ sbgNode = OutputNode(
+ "Sample Background", "".join(sampleOutputs[0]))
+ sampleOutputs.pop(0)
+
+ samples = [s for s in sbg.samples if s.runThisSample]
+
+ for sample, sampleOutput in zip(samples, sampleOutputs):
+ self.addChild(sbgNode, OutputNode(
+ sample.name, "".join(sampleOutput)))
+ self.addChild(root, sbgNode)
+
+ def _setupDictData(self, output, gudrunFile):
+ for key, value in output.items():
+ iterationOutput = OutputNode(key, value)
+ self.addChild(self._root, iterationOutput)
+ self._setupSingleData(iterationOutput, value, gudrunFile)
+
+ def index(self, row, column, parent=QModelIndex()):
+ if not self.hasIndex(row, column, parent):
+ return QModelIndex()
+ if not parent.isValid():
+ parentNode = self._root
+ else:
+ parentNode = parent.internalPointer()
+ child = parentNode.child(row)
+ if child:
+ return self.createIndex(row, column, child)
+ return QModelIndex()
+
+ def parent(self, index):
+ if not index.isValid():
+ return QModelIndex()
+ child = index.internalPointer()
+ parentNode = child.parent()
+ if parentNode == self._root or not parentNode:
+ return QModelIndex()
+ return self.createIndex(parentNode.row(), 0, parentNode)
+
+ def rowCount(self, index):
+ if index.isValid():
+ return index.internalPointer().childCount()
+ return self._root.childCount()
+
+ def columnCount(self, parent=QModelIndex()):
+ return 1
+
+ def data(self, index, role=Qt.DisplayRole):
+ if index.isValid() and role == Qt.DisplayRole:
+ return index.internalPointer().name()
+ return None
+
+
+class OutputTreeView(QTreeView):
+ def __init__(self, parent):
+ super().__init__(parent)
+ self.model_ = None
+
+ def buildTree(self, gudrunFile, output, parent):
+ self.parent = parent
+ self.model_ = OutputTreeModel(
+ "Gudrun", parent=self
)
+ self.setModel(self.model_)
+
+ self.model().setOutput(output, gudrunFile)
+
+ self.setCurrentIndex(self.model().index(0, 0))
+ self.setHeaderHidden(True)
+ self.expandAll()
+
+ def currentChanged(self, current, previous):
+ if current.internalPointer():
+ self.parent.widget.outputTextEdit.setText(
+ current.internalPointer().data()
+ )
+ return super().currentChanged(current, previous)
diff --git a/gudpy/gui/widgets/slots/sample_background_slots.py b/gudpy/gui/widgets/slots/sample_background_slots.py
index 3049e29e..ad7ece9b 100644
--- a/gudpy/gui/widgets/slots/sample_background_slots.py
+++ b/gudpy/gui/widgets/slots/sample_background_slots.py
@@ -21,7 +21,7 @@ def setSampleBackground(self, sampleBackground):
# Populate data files list.
self.widget.sampleBackgroundDataFilesList.makeModel(
- self.sampleBackground.dataFiles.dataFiles
+ self.sampleBackground.dataFiles.dataFilenames
)
self.widget.sampleBackgroundDataFilesList.model().dataChanged.connect(
@@ -63,7 +63,7 @@ def handleDataFilesAltered(self):
if not self.widgetsRefreshing:
self.parent.setModified()
self.parent.gudrunFile.purged = False
- self.sampleBackground.dataFiles.dataFiles = (
+ self.sampleBackground.dataFiles.setFiles(
self.widget.sampleBackgroundDataFilesList.model().stringList()
)
diff --git a/gudpy/gui/widgets/slots/sample_slots.py b/gudpy/gui/widgets/slots/sample_slots.py
index e807cbd7..02277b18 100644
--- a/gudpy/gui/widgets/slots/sample_slots.py
+++ b/gudpy/gui/widgets/slots/sample_slots.py
@@ -27,7 +27,7 @@ def setSample(self, sample):
# Populate the data files list.
self.widget.sampleDataFilesList.makeModel(
- self.sample.dataFiles.dataFiles
+ self.sample.dataFiles.dataFilenames
)
self.widget.sampleDataFilesList.model().dataChanged.connect(
@@ -889,7 +889,7 @@ def handleDataFilesAltered(self):
if not self.widgetsRefreshing:
self.parent.setModified()
self.parent.gudrunFile.purged = False
- self.sample.dataFiles.dataFiles = (
+ self.sample.dataFiles.setFiles(
self.widget.sampleDataFilesList.model().stringList()
)
@@ -1149,7 +1149,7 @@ def updateExpectedDCSLevel(self, _=None, __=None):
actualDcsLevel = nthfloat(self.widget.dcsLabel.text(), 2)
try:
error = round(
- ((actualDcsLevel - dcsLevel) / actualDcsLevel)*100, 1
+ ((actualDcsLevel - dcsLevel) / actualDcsLevel)*100, 1
)
except ZeroDivisionError:
error = 100.
diff --git a/gudpy/gui/widgets/ui_files/mainWindow.ui b/gudpy/gui/widgets/ui_files/mainWindow.ui
index ca7068e1..e11ea0d9 100644
--- a/gudpy/gui/widgets/ui_files/mainWindow.ui
+++ b/gudpy/gui/widgets/ui_files/mainWindow.ui
@@ -2298,21 +2298,18 @@
-
+
+
+ Qt::ScrollBarAlwaysOn
+
-
- 626
- 286
-
-
-
-
- 16777215
- 16777215
+ 900
+ 1000
@@ -2375,12 +2372,6 @@
0
-
-
- 16777215
- 16777215
-
-
-
@@ -2410,22 +2401,10 @@
0
-
-
- 16777215
- 16777215
-
-
-
-
-
- 16777215
- 16777215
-
-
no. slices
@@ -2531,6 +2510,12 @@
-
+
+
+ 800
+ 250
+
+
QFrame::StyledPanel
@@ -2935,6 +2920,19 @@
+ -
+
+
+ Qt::Vertical
+
+
+
+ 20
+ 40
+
+
+
+
-
@@ -2946,21 +2944,15 @@
-
-
+
0
0
- 0
- 0
-
-
-
-
- 16777215
- 16777215
+ 400
+ 400
@@ -3032,19 +3024,6 @@
- -
-
-
- Qt::Horizontal
-
-
-
- 40
- 20
-
-
-
-
@@ -3053,7 +3032,7 @@
-
+
-
@@ -3245,7 +3224,7 @@
-
+
-
@@ -3260,8 +3239,8 @@
- 0
- 0
+ 400
+ 400
@@ -4331,7 +4310,7 @@
-
+
-
diff --git a/gudpy/test/TestData/NIMROD-water/good_water/good_water.yaml b/gudpy/test/TestData/NIMROD-water/good_water/good_water.yaml
index 17c96259..a9d3613b 100644
--- a/gudpy/test/TestData/NIMROD-water/good_water/good_water.yaml
+++ b/gudpy/test/TestData/NIMROD-water/good_water/good_water.yaml
@@ -1,6 +1,6 @@
-Instrument:
+instrument:
name: NIMROD
- dataFileDir: TestData/NIMROD-water/raw/
+ dataFileDir: test/TestData/NIMROD-water/raw/
dataFileType: raw
detectorCalibrationFileName: StartupFiles/NIMROD/NIMROD84modules+9monitors+LAB5Oct2012Detector.dat
columnNoPhiVals: 4
@@ -34,7 +34,7 @@ Instrument:
logarithmicStepSize: 0.04
hardGroupEdges: true
nxsDefinitionFile: ''
-Beam:
+beam:
sampleGeometry: FLATPLATE
beamProfileValues: [1.0, 1.0]
stepSizeAbsorption: 0.05
@@ -53,16 +53,78 @@ Beam:
overallBackgroundFactor: 1.0
sampleDependantBackgroundFactor: 0.0
shieldingAttenuationCoefficient: 0.0
-Components: []
-Normalisation:
+components: []
+normalisation:
periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00016702_V.raw]
+ _dataFiles:
+ - filename: NIMROD00016702_V.raw
+ name: NIMROD00016702_V
+ ext: .raw
+ outputFolder:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V
+ _outputs: {.smomon:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.smomon,
+ .rebinq01:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.rebinq01,
+ .mul01:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.mul01,
+ .mut01:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.mut01,
+ .trans01:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.trans01,
+ .transnofit01:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.transnofit01,
+ .rawtrans:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.rawtrans,
+ .rawmon:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.rawmon,
+ .pla01:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.pla01,
+ .vanrat:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Normalisation/NIMROD00016702_V/NIMROD00016702_V.vanrat}
+ isSampleDataFile: false
+ isSampleDataFile: false
name: NORMALISATION
+ outputFolders: {}
+ _outputs: {}
periodNumberBg: 1
dataFilesBg:
- dataFiles: [NIMROD00016698_EmptyInst.raw, NIMROD00016703_EmptyInst.raw]
+ _dataFiles:
+ - filename: NIMROD00016698_EmptyInst.raw
+ name: NIMROD00016698_EmptyInst
+ ext: .raw
+ outputFolder:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016698_EmptyInst
+ _outputs: {.rawmon:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.rawmon,
+ .rawtrans:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.rawtrans,
+ .smomon:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.smomon,
+ .rat:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.rat,
+ .grp:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.grp,
+ .bad:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.bad}
+ isSampleDataFile: false
+ - filename: NIMROD00016703_EmptyInst.raw
+ name: NIMROD00016703_EmptyInst
+ ext: .raw
+ outputFolder:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016703_EmptyInst
+ _outputs: {.rat:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016703_EmptyInst/NIMROD00016703_EmptyInst.rat,
+ .grp:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016703_EmptyInst/NIMROD00016703_EmptyInst.grp,
+ .bad:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/NormalisationBackground/NIMROD00016703_EmptyInst/NIMROD00016703_EmptyInst.bad}
+ isSampleDataFile: false
+ isSampleDataFile: false
name: NORMALISATION BACKGROUND
+ outputFolders: {}
+ _outputs: {}
forceCalculationOfCorrections: true
composition:
type_: Normalisation
@@ -86,17 +148,87 @@ Normalisation:
lowerLimitSmoothedNormalisation: 0.01
normalisationDegreeSmoothing: 1.0
minNormalisationSignalBR: 0.0
-SampleBackgrounds:
+sampleBackgrounds:
- periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00016698_EmptyInst.raw, NIMROD00016703_EmptyInst.raw]
+ _dataFiles:
+ - filename: NIMROD00016698_EmptyInst.raw
+ name: NIMROD00016698_EmptyInst
+ ext: .raw
+ outputFolder:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016698_EmptyInst
+ _outputs: {.rawmon:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.rawmon,
+ .rawtrans:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.rawtrans,
+ .smomon:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.smomon,
+ .rat:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.rat,
+ .grp:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.grp,
+ .bad:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016698_EmptyInst/NIMROD00016698_EmptyInst.bad}
+ isSampleDataFile: false
+ - filename: NIMROD00016703_EmptyInst.raw
+ name: NIMROD00016703_EmptyInst
+ ext: .raw
+ outputFolder:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016703_EmptyInst
+ _outputs: {.rat:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016703_EmptyInst/NIMROD00016703_EmptyInst.rat,
+ .grp:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016703_EmptyInst/NIMROD00016703_EmptyInst.grp,
+ .bad:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/SampleBackgrounds/SampleBackground1/NIMROD00016703_EmptyInst/NIMROD00016703_EmptyInst.bad}
+ isSampleDataFile: false
+ isSampleDataFile: false
name: SAMPLE BACKGROUND
+ outputFolders: {}
+ _outputs: {}
samples:
- name: H2O,_Can_N9
periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00016608_H2O_in_N9.raw, NIMROD00016610_H2O_in_N9.raw]
+ _dataFiles:
+ - filename: NIMROD00016608_H2O_in_N9.raw
+ name: NIMROD00016608_H2O_in_N9
+ ext: .raw
+ outputFolder: ''
+ _outputs:
+ .mdcs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Outputs/NIMROD00016608_H2O_in_N9.mdcs01'
+ .rawmon: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.rawmon'
+ .gr2: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.gr2'
+ .transnofit01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.transnofit01'
+ .mint01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Outputs/NIMROD00016608_H2O_in_N9.mint01'
+ .bak: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.bak'
+ .trans01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.trans01'
+ .msub01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.msub01'
+ .mdor01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Outputs/NIMROD00016608_H2O_in_N9.mdor01'
+ .mgor01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Outputs/NIMROD00016608_H2O_in_N9.mgor01'
+ .mut01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.mut01'
+ .gud: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Outputs/NIMROD00016608_H2O_in_N9.gud'
+ .mul01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.mul01'
+ .gr1: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.gr1'
+ .rawtrans: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.rawtrans'
+ .abs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.abs01'
+ .dcs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Outputs/NIMROD00016608_H2O_in_N9.dcs01'
+ .chksum: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.chksum'
+ .sub01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.sub01'
+ .smomon: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.smomon'
+ .pla01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.pla01'
+ .samrat: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/NIMROD00016608_H2O_in_N9/Diagnostics/NIMROD00016608_H2O_in_N9.samrat'
+ isSampleDataFile: true
+ - filename: NIMROD00016610_H2O_in_N9.raw
+ name: NIMROD00016610_H2O_in_N9
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: true
+ isSampleDataFile: false
name: H2O,_Can_N9
+ outputFolders: {}
+ _outputs: {}
forceCalculationOfCorrections: true
composition:
type_: Sample
@@ -139,8 +271,29 @@ SampleBackgrounds:
- name: N9
periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00016694_Empty_N9.raw, NIMROD00016699_Empty_N9.raw, NIMROD00016704_Empty_N9.raw]
+ _dataFiles:
+ - filename: NIMROD00016694_Empty_N9.raw
+ name: NIMROD00016694_Empty_N9
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: false
+ - filename: NIMROD00016699_Empty_N9.raw
+ name: NIMROD00016699_Empty_N9
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: false
+ - filename: NIMROD00016704_Empty_N9.raw
+ name: NIMROD00016704_Empty_N9
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: false
+ isSampleDataFile: false
name: N9
+ outputFolders: {}
+ _outputs: {}
composition:
type_: Container
elements:
@@ -162,11 +315,54 @@ SampleBackgrounds:
tweakFactor: 1.0
scatteringFraction: 1.0
attenuationCoefficient: 0.0
+ outputFolder: ''
+ outputFolder: /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9
+ sampleFile:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/H2O,_Can_N9/H2O_Can_N9.sample
+ selfScatteringFilePath: ''
+ _referenceDataFile: /Users/noella/Documents/dev/gp-test/dissolve-water/water-orig/H2O-SimulatedFR.r
- name: D2O,_Can_N10
periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00016609_D2O_in_N10.raw, NIMROD00016611_D2O_in_N10.raw]
+ _dataFiles:
+ - filename: NIMROD00016609_D2O_in_N10.raw
+ name: NIMROD00016609_D2O_in_N10
+ ext: .raw
+ outputFolder: ''
+ _outputs:
+ .sub01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.sub01'
+ .mgor01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Outputs/NIMROD00016609_D2O_in_N10.mgor01'
+ .mdor01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Outputs/NIMROD00016609_D2O_in_N10.mdor01'
+ .bak: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.bak'
+ .dcs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Outputs/NIMROD00016609_D2O_in_N10.dcs01'
+ .abs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.abs01'
+ .gr2: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.gr2'
+ .mint01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Outputs/NIMROD00016609_D2O_in_N10.mint01'
+ .gud: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Outputs/NIMROD00016609_D2O_in_N10.gud'
+ .mdcs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Outputs/NIMROD00016609_D2O_in_N10.mdcs01'
+ .rawmon: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.rawmon'
+ .msub01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.msub01'
+ .pla01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.pla01'
+ .smomon: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.smomon'
+ .trans01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.trans01'
+ .samrat: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.samrat'
+ .gr1: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.gr1'
+ .mul01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.mul01'
+ .rawtrans: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.rawtrans'
+ .chksum: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.chksum'
+ .transnofit01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.transnofit01'
+ .mut01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/NIMROD00016609_D2O_in_N10/Diagnostics/NIMROD00016609_D2O_in_N10.mut01'
+ isSampleDataFile: true
+ - filename: NIMROD00016611_D2O_in_N10.raw
+ name: NIMROD00016611_D2O_in_N10
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: true
+ isSampleDataFile: false
name: D2O,_Can_N10
+ outputFolders: {}
+ _outputs: {}
forceCalculationOfCorrections: true
composition:
type_: Sample
@@ -209,8 +405,29 @@ SampleBackgrounds:
- name: N10
periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00016695_Empty_N10.raw, NIMROD00016700_Empty_N10.raw, NIMROD00016705_Empty_N10.raw]
+ _dataFiles:
+ - filename: NIMROD00016695_Empty_N10.raw
+ name: NIMROD00016695_Empty_N10
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: false
+ - filename: NIMROD00016700_Empty_N10.raw
+ name: NIMROD00016700_Empty_N10
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: false
+ - filename: NIMROD00016705_Empty_N10.raw
+ name: NIMROD00016705_Empty_N10
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: false
+ isSampleDataFile: false
name: N10
+ outputFolders: {}
+ _outputs: {}
composition:
type_: Container
elements:
@@ -232,11 +449,54 @@ SampleBackgrounds:
tweakFactor: 1.0
scatteringFraction: 1.0
attenuationCoefficient: 0.0
+ outputFolder: ''
+ outputFolder: /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10
+ sampleFile:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/D2O,_Can_N10/D2O_Can_N10.sample
+ selfScatteringFilePath: ''
+ _referenceDataFile: /Users/noella/Documents/dev/gp-test/dissolve-water/water-orig/D2O-SimulatedFR.r
- name: HDO,_Can_N6
periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00016741_HDO_in_N6.raw, NIMROD00016743_HDO_in_N6.raw]
+ _dataFiles:
+ - filename: NIMROD00016741_HDO_in_N6.raw
+ name: NIMROD00016741_HDO_in_N6
+ ext: .raw
+ outputFolder: ''
+ _outputs:
+ .rawtrans: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.rawtrans'
+ .sub01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.sub01'
+ .transnofit01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.transnofit01'
+ .abs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.abs01'
+ .dcs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Outputs/NIMROD00016741_HDO_in_N6.dcs01'
+ .gr1: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.gr1'
+ .chksum: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.chksum'
+ .smomon: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.smomon'
+ .samrat: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.samrat'
+ .trans01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.trans01'
+ .pla01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.pla01'
+ .mint01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Outputs/NIMROD00016741_HDO_in_N6.mint01'
+ .rawmon: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.rawmon'
+ .mdcs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Outputs/NIMROD00016741_HDO_in_N6.mdcs01'
+ .msub01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.msub01'
+ .bak: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.bak'
+ .gr2: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.gr2'
+ .gud: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Outputs/NIMROD00016741_HDO_in_N6.gud'
+ .mul01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.mul01'
+ .mgor01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Outputs/NIMROD00016741_HDO_in_N6.mgor01'
+ .mdor01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Outputs/NIMROD00016741_HDO_in_N6.mdor01'
+ .mut01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/NIMROD00016741_HDO_in_N6/Diagnostics/NIMROD00016741_HDO_in_N6.mut01'
+ isSampleDataFile: true
+ - filename: NIMROD00016743_HDO_in_N6.raw
+ name: NIMROD00016743_HDO_in_N6
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: true
+ isSampleDataFile: false
name: HDO,_Can_N6
+ outputFolders: {}
+ _outputs: {}
forceCalculationOfCorrections: true
composition:
type_: Sample
@@ -280,8 +540,17 @@ SampleBackgrounds:
- name: N6
periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00014908_Empty_N6.raw]
+ _dataFiles:
+ - filename: NIMROD00014908_Empty_N6.raw
+ name: NIMROD00014908_Empty_N6
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: false
+ isSampleDataFile: false
name: N6
+ outputFolders: {}
+ _outputs: {}
composition:
type_: Container
elements:
@@ -303,11 +572,54 @@ SampleBackgrounds:
tweakFactor: 1.0
scatteringFraction: 1.0
attenuationCoefficient: 0.0
+ outputFolder: ''
+ outputFolder: /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6
+ sampleFile:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/HDO,_Can_N6/HDO_Can_N6.sample
+ selfScatteringFilePath: ''
+ _referenceDataFile: /Users/noella/Documents/dev/gp-test/dissolve-water/water-orig/HDO-SimulatedFR.r
- name: Null_Water,_Can_N8
periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00016742_NullWater_in_N8.raw, NIMROD00016744_NullWater_in_N8.raw]
+ _dataFiles:
+ - filename: NIMROD00016742_NullWater_in_N8.raw
+ name: NIMROD00016742_NullWater_in_N8
+ ext: .raw
+ outputFolder: ''
+ _outputs:
+ .smomon: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.smomon'
+ .samrat: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.samrat'
+ .gud: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Outputs/NIMROD00016742_NullWater_in_N8.gud'
+ .trans01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.trans01'
+ .rawtrans: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.rawtrans'
+ .bak: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.bak'
+ .mut01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.mut01'
+ .gr2: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.gr2'
+ .mul01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.mul01'
+ .chksum: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.chksum'
+ .mgor01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Outputs/NIMROD00016742_NullWater_in_N8.mgor01'
+ .abs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.abs01'
+ .dcs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Outputs/NIMROD00016742_NullWater_in_N8.dcs01'
+ .mdor01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Outputs/NIMROD00016742_NullWater_in_N8.mdor01'
+ .sub01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.sub01'
+ .mint01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Outputs/NIMROD00016742_NullWater_in_N8.mint01'
+ .pla01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.pla01'
+ .gr1: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.gr1'
+ .rawmon: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.rawmon'
+ .mdcs01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Outputs/NIMROD00016742_NullWater_in_N8.mdcs01'
+ .msub01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.msub01'
+ .transnofit01: '/Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/NIMROD00016742_NullWater_in_N8/Diagnostics/NIMROD00016742_NullWater_in_N8.transnofit01'
+ isSampleDataFile: true
+ - filename: NIMROD00016744_NullWater_in_N8.raw
+ name: NIMROD00016744_NullWater_in_N8
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: true
+ isSampleDataFile: false
name: Null_Water,_Can_N8
+ outputFolders: {}
+ _outputs: {}
forceCalculationOfCorrections: true
composition:
type_: Sample
@@ -351,8 +663,17 @@ SampleBackgrounds:
- name: N8
periodNumber: 1
dataFiles:
- dataFiles: [NIMROD00016994_Empty_N8.raw]
+ _dataFiles:
+ - filename: NIMROD00016994_Empty_N8.raw
+ name: NIMROD00016994_Empty_N8
+ ext: .raw
+ outputFolder: ''
+ _outputs: {}
+ isSampleDataFile: false
+ isSampleDataFile: false
name: N8
+ outputFolders: {}
+ _outputs: {}
composition:
type_: Container
elements:
@@ -374,4 +695,11 @@ SampleBackgrounds:
tweakFactor: 1.0
scatteringFraction: 1.0
attenuationCoefficient: 0.0
+ outputFolder: ''
+ outputFolder: /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8
+ sampleFile:
+ /Users/noella/Documents/dev/gp-test/water-updated/Gudrun/Null_Water,_Can_N8/Null_Water_Can_N8.sample
+ selfScatteringFilePath: ''
+ _referenceDataFile: ''
+ outputFolder: ''
GUI: {useComponents: false}
diff --git a/gudpy/test/TestData/NIMROD-water/water.txt b/gudpy/test/TestData/NIMROD-water/water.txt
index a6ecea35..ec85923c 100644
--- a/gudpy/test/TestData/NIMROD-water/water.txt
+++ b/gudpy/test/TestData/NIMROD-water/water.txt
@@ -287,7 +287,7 @@ NIMROD00016742_NullWater_in_N8.msubw01 Name of file containing self sca
CONTAINER N8 {
-1 1 Number of files and period number
+1 1 Number of files and period number
NIMROD00016994_Empty_N8.raw CONTAINER N8 data files
Ti 0 7.16 Composition
Zr 0 3.438 Composition
diff --git a/gudpy/test/test_gud_file.py b/gudpy/test/test_gud_file.py
index a4480d10..6c85fb8c 100644
--- a/gudpy/test/test_gud_file.py
+++ b/gudpy/test/test_gud_file.py
@@ -4,6 +4,7 @@
from core.exception import ParserException
from core.gud_file import GudFile
from core import gudpy
+from core.io.gudpy_io import GudPyIO
from test.test_gudpy_workflows import GudPyContext
@@ -250,13 +251,6 @@ def setUp(self) -> None:
self.gudpy.gudrunFile.instrument.dataFileDir = str(dataFileDir) + "/"
return super().setUp()
- def tearDown(self) -> None:
- [os.remove(f) for f in os.listdir() if f not in self.keepsakes]
- [os.remove(os.path.join(self.gudpy.projectDir, f))
- for f in os.listdir(self.gudpy.projectDir)
- if f not in self.keepsakes]
- return super().tearDown()
-
def testEmptyPath(self):
emptyPath = ""
self.assertRaises(ParserException, GudFile, emptyPath)
@@ -272,13 +266,13 @@ def testInvalidPath(self):
def testValidPath(self):
with GudPyContext() as gudpy:
gudpy.runGudrun()
- gf = gudpy.gudrun.gudrunOutput.gudFile(0)
+ gf = gudpy.gudrunFile.runSamples()[0].gudFile
self.assertIsInstance(gf, GudFile)
def loadGudFile(self, index):
with GudPyContext() as gudpy:
gudpy.runGudrun()
- gf = gudpy.gudrun.gudrunOutput.gudFile(index)
+ gf = gudpy.gudrunFile.runSamples()[index].gudFile
self.assertIsInstance(gf, GudFile)
@@ -332,12 +326,12 @@ def testLoadGudFileD(self):
def testWriteGudFileA(self):
with GudPyContext() as gudpy:
gudpy.runGudrun()
- gf = gudpy.gudrun.gudrunOutput.gudFile(0)
+ gf = gudpy.gudrunFile.runSamples()[0].gudFile
path = os.path.join(
gudpy.projectDir,
gf.fname
)
- gf.write_out(path)
+ GudPyIO.writeObject(gf, path)
gf1 = GudFile(path)
dicA = gf.__dict__
@@ -358,12 +352,12 @@ def testWriteGudFileA(self):
def testWriteGudFileB(self):
with GudPyContext() as gudpy:
gudpy.runGudrun()
- gf = gudpy.gudrun.gudrunOutput.gudFile(1)
+ gf = gudpy.gudrunFile.runSamples()[1].gudFile
path = os.path.join(
gudpy.projectDir,
gf.fname
)
- gf.write_out(path)
+ GudPyIO.writeObject(gf, path)
gf1 = GudFile(path)
dicA = gf.__dict__
diff --git a/gudpy/test/test_gudpy_io.py b/gudpy/test/test_gudpy_io.py
index 591a6274..215d1899 100644
--- a/gudpy/test/test_gudpy_io.py
+++ b/gudpy/test/test_gudpy_io.py
@@ -22,9 +22,10 @@
from core.enums import (
CrossSectionSource, FTModes, Instruments, Scales, UnitsOfDensity,
MergeWeights, NormalisationType, OutputUnits,
- Geometry, Format
+ Geometry
)
from core import gudpy as gp
+from core.io.gudrun_file_parser import GudrunFileParser
class GudPyContext:
@@ -36,24 +37,27 @@ def __init__(self):
)
self.gudpy = gp.GudPy()
+ self.gudpy.testFilePath = os.path.join(testDir, "test_data.txt")
- self.gudpy.loadFromFile(
+ self.gudpy.loadFromGudrunFile(
loadFile=path,
- format=Format.TXT
)
- gPath = os.path.join(self.tempdir.name, "good_water.txt")
- self.gudpy.gudrunFile.write_out(gPath, overwrite=True)
+ gPath = os.path.join(self.tempdir.name, GudrunFile.OUTPATH)
+ GudrunFileParser.writeGudrunFileTo(self.gudpy.gudrunFile, gPath)
- self.gudpy.loadFromFile(
+ self.gudpy.loadFromGudrunFile(
loadFile=gPath,
- format=Format.TXT
)
self.gudpy.setSaveLocation(os.path.join(
self.tempdir.name, "good_water"
))
+ os.makedirs(os.path.join(
+ self.tempdir.name, "good_water"
+ ))
+
def __enter__(self):
return self.gudpy
@@ -237,6 +241,7 @@ def setUp(self) -> None:
"grBroadening": 0.0,
"powerForBroadening": 0.0,
"stepSize": 0.0,
+ "outputFolder": '',
"yamlignore": {
"runAsSample",
"topHatW",
@@ -292,6 +297,7 @@ def setUp(self) -> None:
"grBroadening": 0.0,
"powerForBroadening": 0.0,
"stepSize": 0.0,
+ "outputFolder": "",
"yamlignore": {
"runAsSample",
"topHatW",
@@ -342,6 +348,7 @@ def setUp(self) -> None:
"grBroadening": 0.0,
"powerForBroadening": 0.0,
"stepSize": 0.0,
+ "outputFolder": '',
"yamlignore": {
"runAsSample",
"topHatW",
@@ -392,6 +399,7 @@ def setUp(self) -> None:
"grBroadening": 0.0,
"powerForBroadening": 0.0,
"stepSize": 0.0,
+ "outputFolder": '',
"yamlignore": {
"runAsSample",
"topHatW",
@@ -455,6 +463,10 @@ def setUp(self) -> None:
"scatteringFraction": 1.0,
"attenuationCoefficient": 0.0,
"containers": [self.expectedContainerA],
+ "outputFolder": '',
+ "sampleFile": '',
+ "selfScatteringFilePath": '',
+ "_referenceDataFile": "",
"yamlignore": {
"yamlignore",
}
@@ -510,6 +522,10 @@ def setUp(self) -> None:
"scatteringFraction": 1.0,
"attenuationCoefficient": 0.0,
"containers": [self.expectedContainerB],
+ "outputFolder": '',
+ "sampleFile": '',
+ "selfScatteringFilePath": '',
+ "_referenceDataFile": "",
"yamlignore": {
"yamlignore"
}
@@ -565,6 +581,10 @@ def setUp(self) -> None:
"scatteringFraction": 1.0,
"attenuationCoefficient": 0.0,
"containers": [self.expectedContainerC],
+ "outputFolder": '',
+ "sampleFile": '',
+ "selfScatteringFilePath": '',
+ "_referenceDataFile": "",
"yamlignore": {
"yamlignore"
}
@@ -621,6 +641,10 @@ def setUp(self) -> None:
"scatteringFraction": 1.0,
"attenuationCoefficient": 0.0,
"containers": [self.expectedContainerD],
+ "outputFolder": '',
+ "sampleFile": '',
+ "selfScatteringFilePath": '',
+ "_referenceDataFile": "",
"yamlignore": {
"yamlignore"
}
@@ -646,6 +670,7 @@ def setUp(self) -> None:
self.expectedSampleB,
self.expectedSampleC,
],
+ "outputFolder": '',
"writeAllSamples": True,
"yamlignore": {
"writeAllSamples",
@@ -751,7 +776,6 @@ def testLoadGudrunFile(self):
)
)
for key_ in sampleAttrsDict.keys():
-
if key_ == "containers":
for j, container in enumerate(sample[key_]):
containerAttrsDict = (
@@ -762,7 +786,6 @@ def testLoadGudrunFile(self):
)
for _key in containerAttrsDict.keys():
-
if isinstance(
container[_key],
(DataFiles, Composition),
@@ -804,12 +827,18 @@ def testLoadGudrunFile(self):
sampleBackgroundsAttrsDict[key],
)
+ def testSaveAsProject(self):
+ with GudPyContext() as gudpy:
+ gudpy.saveAs(os.path.join(
+ gudpy.projectDir, "test"
+ ))
+
def testWriteGudrunFile(self):
with GudPyContext() as gudpy:
- gudpy.gudrunFile.write_out(
- gudpy.gudrunFile.loadFile, overwrite=True)
+ GudrunFileParser.writeGudrunFileTo(
+ gudpy.gudrunFile, gudpy.io.loadFile)
with open(
- gudpy.gudrunFile.loadFile,
+ gudpy.io.loadFile,
encoding="utf-8"
) as f:
outlines = "\n".join(f.readlines()[:-5])
@@ -872,7 +901,7 @@ def valueInLines(value, lines):
else:
valueInLines(value, outlines)
inlines = ""
- with open(gudpy.gudrunFile.loadFile, encoding="utf-8") as f:
+ with open(gudpy.io.loadFile, encoding="utf-8") as f:
inlines = f.read()
for dic in self.dicts:
for value in dic.values():
@@ -896,19 +925,16 @@ def valueInLines(value, lines):
def testRewriteGudrunFile(self):
with GudPyContext() as gudpy:
- gudpy.gudrunFile.write_out(
- gudpy.gudrunFile.loadFile, overwrite=True)
+ GudrunFileParser.writeGudrunFileTo(
+ gudpy.gudrunFile, gudpy.io.loadFile)
copyPath = os.path.join(
gudpy.gudrunFile.instrument.GudrunInputFileDir,
"copyGF.txt"
)
- g1 = GudrunFile(
- loadFile=gudpy.gudrunFile.loadFile,
- format=Format.TXT
- )
+ g1 = gudpy.io.importGudrunFile(gudpy.io.loadFile)
g1.instrument.GudrunInputFileDir = (
gudpy.gudrunFile.instrument.GudrunInputFileDir)
- g1.write_out(copyPath, overwrite=True)
+ GudrunFileParser.writeGudrunFileTo(g1, copyPath)
def compareString(string1, string2):
return string1 == string2
@@ -937,7 +963,7 @@ def compareString(string1, string2):
with open(
os.path.join(
- gudpy.gudrunFile.loadFile
+ gudpy.io.loadFile
),
encoding="utf-8"
) as fg:
@@ -950,12 +976,9 @@ def compareString(string1, string2):
def testReloadGudrunFile(self):
with GudPyContext() as gudpy:
- gudpy.gudrunFile.write_out(
- gudpy.gudrunFile.loadFile, overwrite=True)
- g1 = GudrunFile(
- loadFile=gudpy.gudrunFile.loadFile,
- format=Format.TXT
- )
+ GudrunFileParser.writeGudrunFileTo(
+ gudpy.gudrunFile, gudpy.io.loadFile)
+ g1 = gudpy.io.importGudrunFile(gudpy.io.loadFile)
g1.instrument.GudrunInputFileDir = (
gudpy.gudrunFile.instrument.GudrunInputFileDir)
self.assertEqual(
@@ -964,128 +987,90 @@ def testReloadGudrunFile(self):
)
def testLoadEmptyGudrunFile(self):
- f = open("test_data.txt", "w", encoding="utf-8")
- f.close()
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual((
- 'INSTRUMENT, BEAM and NORMALISATION'
- ' were not parsed. It\'s possible the file'
- ' supplied is of an incorrect format!'
- ),
- str(cm.exception),
- )
+ with GudPyContext() as gudpy:
+ f = open(gudpy.testFilePath, "w", encoding="utf-8")
+ f.close()
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingInstrument(self):
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}\n\n")
- f.write(
- "NORMALISATION {\n\n"
- + str(self.goodNormalisation)
- + "\n\n}\n\n"
- )
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual((
- 'INSTRUMENT, BEAM and NORMALISATION'
- ' were not parsed. It\'s possible the file'
- ' supplied is of an incorrect format!'
- ),
- str(cm.exception),
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}\n\n")
+ f.write(
+ "NORMALISATION {\n\n"
+ + str(self.goodNormalisation)
+ + "\n\n}\n\n"
+ )
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingBeam(self):
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n" + str(self.goodInstrument)
- + "\n\n}\n\n"
- )
- f.write(
- "NORMALISATION {\n\n"
- + str(self.goodNormalisation)
- + "\n\n}"
- )
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual((
- 'INSTRUMENT, BEAM and NORMALISATION'
- ' were not parsed. It\'s possible the file'
- ' supplied is of an incorrect format!'
- ),
- str(cm.exception),
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n" + str(self.goodInstrument)
+ + "\n\n}\n\n"
+ )
+ f.write(
+ "NORMALISATION {\n\n"
+ + str(self.goodNormalisation)
+ + "\n\n}"
+ )
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingNormalisation(self):
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n" + str(self.goodInstrument)
- + "\n\n}\n\n"
- )
- f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}\n\n")
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual((
- 'INSTRUMENT, BEAM and NORMALISATION'
- ' were not parsed. It\'s possible the file'
- ' supplied is of an incorrect format!'
- ),
- str(cm.exception),
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n" + str(self.goodInstrument)
+ + "\n\n}\n\n"
+ )
+ f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}\n\n")
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingInstrumentAndBeam(self):
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "NORMALISATION {\n\n"
- + str(self.goodNormalisation)
- + "\n\n}"
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "NORMALISATION {\n\n"
+ + str(self.goodNormalisation)
+ + "\n\n}"
+ )
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual((
- 'INSTRUMENT, BEAM and NORMALISATION'
- ' were not parsed. It\'s possible the file supplied'
- ' is of an incorrect format!'),
- str(cm.exception),
- )
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile, gudpy.testFilePath)
def testLoadMissingInstrumentAndNormalisation(self):
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}")
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual((
- 'INSTRUMENT, BEAM and NORMALISATION'
- ' were not parsed. It\'s possible the file'
- ' supplied is of an incorrect format!'
- ),
- str(cm.exception),
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write("BEAM {\n\n" + str(self.goodBeam) + "\n\n}")
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingNormalisationAndBeam(self):
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n" + str(self.goodInstrument) + "\n\n}"
- )
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual((
- 'INSTRUMENT, BEAM and NORMALISATION'
- ' were not parsed. It\'s possible the file'
- ' supplied is of an incorrect format!'
- ),
- str(cm.exception),
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n" +
+ str(self.goodInstrument) + "\n\n}"
+ )
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingInstrumentAttributesSeq(self):
expectedInstrument = deepcopy(self.expectedInstrument)
@@ -1099,25 +1084,20 @@ def testLoadMissingInstrumentAttributesSeq(self):
expectedInstrument.pop("goodDetectorThreshold", None)
expectedInstrument.pop("yamlignore", None)
for i in range(len(expectedInstrument.keys())):
-
badInstrument = str(self.goodInstrument).split("\n")
del badInstrument[i]
badInstrument = "\n".join(badInstrument)
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n" + str(badInstrument) + "\n\n}"
- )
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Instrument, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n" +
+ str(badInstrument) + "\n\n}"
+ )
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingInstrumentAttributesRand(self):
expectedInstrument = deepcopy(self.expectedInstrument)
@@ -1131,7 +1111,6 @@ def testLoadMissingInstrumentAttributesRand(self):
expectedInstrument.pop("goodDetectorThreshold", None)
expectedInstrument.pop("yamlignore", None)
for i in range(50):
-
key = random.choice(list(expectedInstrument))
j = list(expectedInstrument).index(key)
@@ -1139,19 +1118,16 @@ def testLoadMissingInstrumentAttributesRand(self):
del badInstrument[j]
badInstrument = "\n".join(badInstrument)
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n" + str(badInstrument) + "\n\n}"
- )
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Instrument, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n" +
+ str(badInstrument) + "\n\n}"
+ )
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingBeamAttributesSeq(self):
expectedBeam = deepcopy(self.expectedBeam)
@@ -1169,24 +1145,18 @@ def testLoadMissingBeamAttributesSeq(self):
badBeam = str(self.goodBeam).split("\n")
del badBeam[i]
badBeam = "\n".join(badBeam)
-
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n"
- + str(self.goodInstrument)
- + "\n\n}"
- )
- f.write("\n\nBEAM {\n\n" + str(badBeam) + "\n\n}")
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Beam, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n"
+ + str(self.goodInstrument)
+ + "\n\n}"
+ )
+ f.write("\n\nBEAM {\n\n" + str(badBeam) + "\n\n}")
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingBeamAttributesRand(self):
expectedBeam = deepcopy(self.expectedBeam)
@@ -1209,23 +1179,18 @@ def testLoadMissingBeamAttributesRand(self):
del badBeam[j]
badBeam = "\n".join(badBeam)
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n"
- + str(self.goodInstrument)
- + "\n\n}"
- )
- f.write("\n\nBEAM {\n\n" + str(badBeam) + "\n\n}")
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Beam, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n"
+ + str(self.goodInstrument)
+ + "\n\n}"
+ )
+ f.write("\n\nBEAM {\n\n" + str(badBeam) + "\n\n}")
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingNormalisationAttributesSeq(self):
expectedNormalisation = deepcopy(self.expectedNormalisation)
@@ -1237,6 +1202,7 @@ def testLoadMissingNormalisationAttributesSeq(self):
expectedNormalisation.pop("outerRadius", None)
expectedNormalisation.pop("sampleHeight", None)
expectedNormalisation.pop("crossSectionFilename")
+ expectedNormalisation.pop("outputFolder", None)
expectedNormalisation.pop("yamlignore", None)
self.goodNormalisation.dataFiles = DataFiles([], "")
@@ -1244,33 +1210,27 @@ def testLoadMissingNormalisationAttributesSeq(self):
Composition("")
)
for i in range(len(expectedNormalisation.keys())):
-
badNormalisation = str(self.goodNormalisation).split("\n")
del badNormalisation[i]
badNormalisation = "\n".join(badNormalisation)
-
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n"
- + str(self.goodInstrument)
- + "\n\n}"
- )
- f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
- f.write(
- "\n\nNORMALISATION {\n\n"
- + str(badNormalisation)
- + "\n\n}"
- )
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Beam, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n"
+ + str(self.goodInstrument)
+ + "\n\n}"
+ )
+ f.write(
+ "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
+ f.write(
+ "\n\nNORMALISATION {\n\n"
+ + str(badNormalisation)
+ + "\n\n}"
+ )
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingNormalisationAttributesRand(self):
expectedNormalisation = deepcopy(self.expectedNormalisation)
@@ -1282,6 +1242,7 @@ def testLoadMissingNormalisationAttributesRand(self):
expectedNormalisation.pop("outerRadius", None)
expectedNormalisation.pop("sampleHeight", None)
expectedNormalisation.pop("crossSectionFilename")
+ expectedNormalisation.pop("outputFolder", None)
expectedNormalisation.pop("yamlignore", None)
self.goodNormalisation.dataFiles = DataFiles([], "")
@@ -1289,7 +1250,6 @@ def testLoadMissingNormalisationAttributesRand(self):
Composition("")
)
for i in range(50):
-
key = random.choice(list(expectedNormalisation))
j = list(expectedNormalisation).index(key)
@@ -1298,53 +1258,46 @@ def testLoadMissingNormalisationAttributesRand(self):
del badNormalisation[j]
badNormalisation = "\n".join(badNormalisation)
- with open("test_data.txt", "w", encoding="utf-8") as f:
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n"
+ + str(self.goodInstrument)
+ + "\n\n}"
+ )
+ f.write(
+ "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
+ f.write(
+ "\n\nNORMALISATION {\n\n"
+ + str(badNormalisation)
+ + "\n\n}"
+ )
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
+
+ def testLoadMissingSampleBackgroundAttributes(self):
+ badSampleBackground = str(self.goodSampleBackground).split("\n")
+ del badSampleBackground[2]
+ badSampleBackground = "\n".join(badSampleBackground)
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
f.write("' ' ' ' '/'\n\n")
f.write(
- "INSTRUMENT {\n\n"
- + str(self.goodInstrument)
- + "\n\n}"
+ "INSTRUMENT {\n\n" +
+ str(self.goodInstrument) + "\n\n}"
)
f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
f.write(
"\n\nNORMALISATION {\n\n"
- + str(badNormalisation)
+ + str(self.goodNormalisation)
+ "\n\n}"
)
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Normalisation, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
-
- def testLoadMissingSampleBackgroundAttributes(self):
- badSampleBackground = str(self.goodSampleBackground).split("\n")
- del badSampleBackground[2]
- badSampleBackground = "\n".join(badSampleBackground)
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n" + str(self.goodInstrument) + "\n\n}"
- )
- f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
- f.write(
- "\n\nNORMALISATION {\n\n"
- + str(self.goodNormalisation)
- + "\n\n}"
- )
- f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Sample Background, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingSampleAttributesSeq(self):
expectedSampleA = deepcopy(self.expectedSampleA)
@@ -1360,6 +1313,11 @@ def testLoadMissingSampleAttributesSeq(self):
expectedSampleA.pop("exponentialValues", None)
expectedSampleA.pop("crossSectionFilename", None)
expectedSampleA.pop("FTMode", None)
+ expectedSampleA.pop("outputFolder", None)
+ expectedSampleA.pop("sampleFile", None)
+ expectedSampleA.pop("selfScatteringFilePath", None)
+ expectedSampleA.pop("_referenceDataFile", None)
+
expectedSampleA.pop("yamlignore", None)
self.goodSampleBackground.samples[0].dataFiles = DataFiles([], "")
@@ -1377,29 +1335,25 @@ def testLoadMissingSampleAttributesSeq(self):
badSampleBackground = sbgStr.split("\n")
del badSampleBackground[i + 10]
badSampleBackground = "\n".join(badSampleBackground)
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n"
- + str(self.goodInstrument)
- + "\n\n}"
- )
- f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
- f.write(
- "\n\nNORMALISATION {\n\n"
- + str(self.goodNormalisation)
- + "\n\n}"
- )
- f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Sample, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n"
+ + str(self.goodInstrument)
+ + "\n\n}"
+ )
+ f.write(
+ "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
+ f.write(
+ "\n\nNORMALISATION {\n\n"
+ + str(self.goodNormalisation)
+ + "\n\n}"
+ )
+ f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingSampleAttributesRand(self):
expectedSampleA = deepcopy(self.expectedSampleA)
@@ -1415,6 +1369,10 @@ def testLoadMissingSampleAttributesRand(self):
expectedSampleA.pop("exponentialValues", None)
expectedSampleA.pop("crossSectionFilename", None)
expectedSampleA.pop("FTMode", None)
+ expectedSampleA.pop("outputFolder", None)
+ expectedSampleA.pop("sampleFile", None)
+ expectedSampleA.pop("selfScatteringFilePath", None)
+ expectedSampleA.pop("_referenceDataFile", None)
expectedSampleA.pop("yamlignore", None)
self.goodSampleBackground.samples[0].dataFiles = DataFiles([], "")
@@ -1435,29 +1393,25 @@ def testLoadMissingSampleAttributesRand(self):
del badSampleBackground[j + 10]
badSampleBackground = "\n".join(badSampleBackground)
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n"
- + str(self.goodInstrument)
- + "\n\n}"
- )
- f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
- f.write(
- "\n\nNORMALISATION {\n\n"
- + str(self.goodNormalisation)
- + "\n\n}"
- )
- f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
-
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Sample, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n"
+ + str(self.goodInstrument)
+ + "\n\n}"
+ )
+ f.write(
+ "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
+ f.write(
+ "\n\nNORMALISATION {\n\n"
+ + str(self.goodNormalisation)
+ + "\n\n}"
+ )
+ f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testLoadMissingContainerAttributesSeq(self):
expectedContainerA = deepcopy(self.expectedContainerA)
@@ -1480,6 +1434,7 @@ def testLoadMissingContainerAttributesSeq(self):
expectedContainerA.pop("powerForBroadening", None)
expectedContainerA.pop("stepSize", None)
expectedContainerA.pop("yamlignore", None)
+ expectedContainerA.pop("outputFolder", None)
self.goodSampleBackground.samples[0].containers[0].dataFiles = (
DataFiles([], "")
@@ -1496,40 +1451,39 @@ def testLoadMissingContainerAttributesSeq(self):
del badSampleBackground[i + 44]
badSampleBackground = "\n".join(badSampleBackground)
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n"
- + str(self.goodInstrument)
- + "\n\n}"
- )
- f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
- f.write(
- "\n\nNORMALISATION {\n\n"
- + str(self.goodNormalisation)
- + "\n\n}"
- )
- f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Container, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n"
+ + str(self.goodInstrument)
+ + "\n\n}"
+ )
+ f.write(
+ "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
+ f.write(
+ "\n\nNORMALISATION {\n\n"
+ + str(self.goodNormalisation)
+ + "\n\n}"
+ )
+ f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testAppendExponentialValues(self):
with GudPyContext() as gudpy:
# Remove last element of exponential values list
gudpy.gudrunFile.sampleBackgrounds[
0].samples[0].exponentialValues[0].pop()
- gudpy.gudrunFile.write_out()
- gudrunFile = GudrunFile(loadFile=os.path.join(
- gudpy.gudrunFile.instrument.GudrunInputFileDir,
+ gfFilePath = os.path.join(
+ gudpy.io.projectDir,
gudpy.gudrunFile.OUTPATH
- ), format=Format.TXT)
+ )
+
+ GudrunFileParser.writeGudrunFileTo(gudpy.gudrunFile, gfFilePath)
+ gudrunFile = gudpy.io.importGudrunFile(gfFilePath)
# Test that a default value is appended
self.assertEqual(
self.expectedSampleA["exponentialValues"],
@@ -1557,6 +1511,7 @@ def testLoadMissingContainerAttributesRand(self):
expectedContainerA.pop("powerForBroadening", None)
expectedContainerA.pop("stepSize", None)
expectedContainerA.pop("yamlignore", None)
+ expectedContainerA.pop("outputFolder", None)
self.goodSampleBackground.samples[0].containers[0].dataFiles = (
DataFiles([], "")
@@ -1573,37 +1528,35 @@ def testLoadMissingContainerAttributesRand(self):
]
sbgStr = str(self.goodSampleBackground)
badSampleBackground = sbgStr.split("\n")
+ if not badSampleBackground[j + 44]:
+ continue
del badSampleBackground[j + 44]
badSampleBackground = "\n".join(badSampleBackground)
- with open("test_data.txt", "w", encoding="utf-8") as f:
- f.write("' ' ' ' '/'\n\n")
- f.write(
- "INSTRUMENT {\n\n"
- + str(self.goodInstrument)
- + "\n\n}"
- )
- f.write("\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
- f.write(
- "\n\nNORMALISATION {\n\n"
- + str(self.goodNormalisation)
- + "\n\n}"
- )
- f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
- with self.assertRaises(ParserException) as cm:
- GudrunFile(loadFile="test_data.txt", format=Format.TXT)
- self.assertEqual(
- "Whilst parsing Container, an exception occured."
- " The input file is most likely of an incorrect format, "
- "and some attributes were missing.",
- str(cm.exception)
- )
+ with GudPyContext() as gudpy:
+ with open(gudpy.testFilePath, "w", encoding="utf-8") as f:
+ f.write("' ' ' ' '/'\n\n")
+ f.write(
+ "INSTRUMENT {\n\n"
+ + str(self.goodInstrument)
+ + "\n\n}"
+ )
+ f.write(
+ "\n\nBEAM {\n\n" + str(self.goodBeam) + "\n\n}")
+ f.write(
+ "\n\nNORMALISATION {\n\n"
+ + str(self.goodNormalisation)
+ + "\n\n}"
+ )
+ f.write("\n\n{}\n\nEND".format(str(badSampleBackground)))
+ self.assertRaises(
+ ParserException, gudpy.loadFromGudrunFile,
+ gudpy.testFilePath)
def testZeroExitGudrun(self):
with GudPyContext() as gudpy:
- gudpy.loadFromFile(
- loadFile=gudpy.gudrunFile.loadFile, format=Format.TXT)
+ gudpy.loadFromGudrunFile(loadFile=gudpy.io.loadFile)
gudpy.setSaveLocation(os.path.splitext(
- gudpy.gudrunFile.loadFile)[0])
+ gudpy.io.loadFile)[0])
gudpy.runGudrun()
self.assertEqual(gudpy.gudrun.exitcode, 0)
diff --git a/gudpy/test/test_gudpy_workflows.py b/gudpy/test/test_gudpy_workflows.py
index 5e8e2f10..b7fa5258 100644
--- a/gudpy/test/test_gudpy_workflows.py
+++ b/gudpy/test/test_gudpy_workflows.py
@@ -42,9 +42,8 @@ def __exit__(self, exc_type, exc_value, tb):
class TestGudPyWorkflows(TestCase):
def getGudFile(self, gudpy, sampleIndex) -> GudFile:
- return gudpy.gudrunOutput.sampleOutputs[
- gudpy.gudrunFile.sampleBackgrounds[0].samples[
- sampleIndex].name].gudFile
+ return gudpy.gudrunFile.sampleBackgrounds[0].samples[
+ sampleIndex].dataFiles[0].gudFile
def testGudPyDCS(self):
with GudPyContext() as gudpy:
@@ -70,16 +69,13 @@ def testGudPyDCS(self):
self.assertAlmostEqual(dcsLevelPercentage, 13.0, 0)
for sample in gudpy.gudrunFile.sampleBackgrounds[0].samples:
- mintFilename = (
- os.path.splitext(sample.dataFiles[0])[0]
- )
+ mintFilename = sample.dataFiles[0].name
actualMintFile = ("test/TestData/water-ref/plain/"
f"{mintFilename}.mint01")
- actualData = open(gudpy.gudrunOutput.sampleOutputs[
- sample.name].outputs[sample.dataFiles[0]][".mint01"],
- "r", encoding="utf-8"
- ).readlines()[10:]
+ actualData = open(sample.dataFiles[0].mintFile,
+ "r", encoding="utf-8"
+ ).readlines()[10:]
expectedData = open(
actualMintFile, "r", encoding="utf-8"
).readlines()[10:]
@@ -203,6 +199,7 @@ def testIterateByComposition(self):
gudpy.gudrunFile = g
gudpy.runPurge()
+
iterator = iterators.Composition(
gudrunFile=g,
nTotal=10,
@@ -231,21 +228,17 @@ def testGudPyIterateBySubtractingWavelength(self):
for sample in [
x
- for x in gudpy.gudrunFile.sampleBackgrounds[0].samples
+ for x in self.g.sampleBackgrounds[0].samples
if x.runThisSample
]:
- dataFilename = (
- os.path.splitext(sample.dataFiles[0])[0]
- )
-
+ dataFilename = sample.dataFiles[0].name
actualMintFile = (
f'test/TestData/water-ref/wavelength{i}/'
f'{dataFilename}.mint01'
)
actualData = open(
- gudpy.gudrunIterator.iterator.gudrunOutputs[-1].output(
- sample.name, sample.dataFiles[0], ".mint01"),
+ sample.dataFiles[0].mintFile,
"r", encoding="utf-8"
).readlines()[10:]
expectedData = open(
@@ -273,12 +266,15 @@ def testGudPyIterateBySubtractingWavelength(self):
)
actualData = open(
- gudpy.gudrunIterator.iterator.gudrunOutputs[
- len(
- gudpy.gudrunIterator.iterator.gudrunOutputs
- ) - 2
- ].output(
- sample.name, sample.dataFiles[0], ".msubw01"),
+ os.path.join(
+ gudpy.projectDir,
+ "Gudrun",
+ "WavelengthIteration",
+ f"WavelengthIteration_{i}",
+ sample.name,
+ sample.dataFiles[0].name,
+ "Diagnostics",
+ f"{sample.dataFiles[0].name}.msubw01"),
"r", encoding="utf-8"
).readlines()[10:]
expectedData = open(
diff --git a/gudpy/test/test_gudpy_yaml.py b/gudpy/test/test_gudpy_yaml.py
index c68a6380..6be08f13 100644
--- a/gudpy/test/test_gudpy_yaml.py
+++ b/gudpy/test/test_gudpy_yaml.py
@@ -1,150 +1,166 @@
from unittest import TestCase
+import tempfile
import os
from core import gudpy
-from core.enums import Format
class TestYAML(TestCase):
def testYAML(self):
- gudpy1 = gudpy.GudPy()
- gudpy1.loadFromFile(
- loadFile="test/TestData/NIMROD-water/water.txt",
- format=Format.TXT
- )
- gudpy1.save(path="test/TestData/NIMROD-water/water.yaml")
- gf1 = gudpy1.gudrunFile
-
- gudpy2 = gudpy.GudPy()
- gudpy2.loadFromFile(
- loadFile="test/TestData/NIMROD-water/water.yaml",
- format=Format.YAML
- )
- gf2 = gudpy2.gudrunFile
-
- gf1.instrument.GudrunInputFileDir = os.path.abspath(
- gf1.instrument.GudrunInputFileDir)
- gf2.instrument.GudrunInputFileDir = os.path.abspath(
- gf2.instrument.GudrunInputFileDir)
-
- self.assertDictEqual(
- gf1.instrument.__dict__, gf2.instrument.__dict__)
- self.assertDictEqual(gf2.beam.__dict__, gf2.beam.__dict__)
-
- normalisationDataFilesA = gf1.normalisation.__dict__.pop("dataFiles")
- normalisationDataFilesBgA = gf1.normalisation.__dict__.pop(
- "dataFilesBg"
- )
- normalisationCompositionA = gf1.normalisation.__dict__.pop(
- "composition"
- )
- normalisationElementsA = normalisationCompositionA.__dict__.pop(
- "elements"
- )
-
- normalisationDataFilesB = gf2.normalisation.__dict__.pop("dataFiles")
- normalisationDataFilesBgB = gf2.normalisation.__dict__.pop(
- "dataFilesBg"
- )
- normalisationCompositionB = gf2.normalisation.__dict__.pop(
- "composition"
- )
- normalisationElementsB = normalisationCompositionB.__dict__.pop(
- "elements"
- )
-
- self.assertDictEqual(
- normalisationDataFilesA.__dict__, normalisationDataFilesB.__dict__
- )
- self.assertDictEqual(
- normalisationDataFilesBgA.__dict__,
- normalisationDataFilesBgB.__dict__,
- )
- self.assertDictEqual(
- normalisationCompositionA.__dict__,
- normalisationCompositionB.__dict__,
- )
- self.assertDictEqual(
- gf1.normalisation.__dict__, gf2.normalisation.__dict__
- )
-
- for elementA, elementB in zip(
- normalisationElementsA, normalisationElementsB
- ):
- self.assertDictEqual(elementA.__dict__, elementB.__dict__)
-
- sampleBackgroundDataFilesA = gf1.sampleBackgrounds[0].__dict__.pop(
- "dataFiles"
- )
- sampleBackgroundSamplesA = gf1.sampleBackgrounds[0].__dict__.pop(
- "samples"
- )
-
- sampleBackgroundDataFilesB = gf2.sampleBackgrounds[0].__dict__.pop(
- "dataFiles"
- )
- sampleBackgroundSamplesB = gf2.sampleBackgrounds[0].__dict__.pop(
- "samples"
- )
-
- self.assertDictEqual(
- sampleBackgroundDataFilesA.__dict__,
- sampleBackgroundDataFilesB.__dict__,
- )
- self.assertDictEqual(
- gf1.sampleBackgrounds[0].__dict__,
- gf2.sampleBackgrounds[0].__dict__,
- )
-
- for sampleA, sampleB in zip(
- sampleBackgroundSamplesA, sampleBackgroundSamplesB
- ):
- sampleDataFilesA = sampleA.__dict__.pop("dataFiles")
- sampleCompositionA = sampleA.__dict__.pop("composition")
- sampleElementsA = sampleCompositionA.__dict__.pop("elements")
- sampleContainersA = sampleA.__dict__.pop("containers")
-
- sampleDataFilesB = sampleB.__dict__.pop("dataFiles")
- sampleCompositionB = sampleB.__dict__.pop("composition")
- sampleElementsB = sampleCompositionB.__dict__.pop("elements")
- sampleContainersB = sampleB.__dict__.pop("containers")
+ with tempfile.TemporaryDirectory() as tmp:
+ gudpy1 = gudpy.GudPy()
+ gudpy1.loadFromGudrunFile(
+ loadFile="test/TestData/NIMROD-water/water.txt",
+ )
+ gudpy1.saveAs(os.path.join(tmp, "water"))
+ gf1 = gudpy1.gudrunFile
+
+ gudpy2 = gudpy.GudPy()
+ gudpy2.loadFromProject(os.path.join(tmp, "water"))
+ gf2 = gudpy2.gudrunFile
+
+ gf1.instrument.GudrunInputFileDir = os.path.abspath(
+ gf1.instrument.GudrunInputFileDir)
+ gf2.instrument.GudrunInputFileDir = os.path.abspath(
+ gf2.instrument.GudrunInputFileDir)
self.assertDictEqual(
- sampleDataFilesA.__dict__, sampleDataFilesB.__dict__
+ gf1.instrument.__dict__, gf2.instrument.__dict__)
+ self.assertDictEqual(gf2.beam.__dict__, gf2.beam.__dict__)
+
+ normalisationDataFilesA = gf1.normalisation.__dict__.pop(
+ "dataFiles")
+ normalisationDataFilesA.__dict__.pop("_dataFiles")
+ normalisationDataFilesBgA = gf1.normalisation.__dict__.pop(
+ "dataFilesBg"
+ )
+ normalisationDataFilesBgA.__dict__.pop("_dataFiles")
+ normalisationCompositionA = gf1.normalisation.__dict__.pop(
+ "composition"
+ )
+ normalisationElementsA = normalisationCompositionA.__dict__.pop(
+ "elements"
+ )
+
+ normalisationDataFilesB = gf2.normalisation.__dict__.pop(
+ "dataFiles")
+ normalisationDataFilesB.__dict__.pop("_dataFiles")
+ normalisationDataFilesBgB = gf2.normalisation.__dict__.pop(
+ "dataFilesBg"
+ )
+ normalisationDataFilesBgB.__dict__.pop("_dataFiles")
+ normalisationCompositionB = gf2.normalisation.__dict__.pop(
+ "composition"
+ )
+ normalisationElementsB = normalisationCompositionB.__dict__.pop(
+ "elements"
+ )
+
+ self.assertDictEqual(
+ normalisationDataFilesA.__dict__,
+ normalisationDataFilesB.__dict__
+ )
+ self.assertDictEqual(
+ normalisationDataFilesBgA.__dict__,
+ normalisationDataFilesBgB.__dict__,
+ )
+ self.assertDictEqual(
+ normalisationCompositionA.__dict__,
+ normalisationCompositionB.__dict__,
)
self.assertDictEqual(
- sampleCompositionA.__dict__, sampleCompositionB.__dict__
+ gf1.normalisation.__dict__, gf2.normalisation.__dict__
)
- for elementA, elementB in zip(sampleElementsA, sampleElementsB):
+
+ for elementA, elementB in zip(
+ normalisationElementsA, normalisationElementsB
+ ):
self.assertDictEqual(elementA.__dict__, elementB.__dict__)
- self.assertDictEqual(sampleA.__dict__, sampleB.__dict__)
+ sampleBackgroundDataFilesA = gf1.sampleBackgrounds[0].__dict__.pop(
+ "dataFiles"
+ )
+ sampleBackgroundDataFilesA.__dict__.pop("_dataFiles")
+ sampleBackgroundSamplesA = gf1.sampleBackgrounds[0].__dict__.pop(
+ "samples"
+ )
- for containerA, containerB in zip(
- sampleContainersA, sampleContainersB
- ):
- containerDataFilesA = containerA.__dict__.pop("dataFiles")
- containerCompositionA = containerA.__dict__.pop("composition")
- containerElementsA = containerCompositionA.__dict__.pop(
- "elements"
- )
+ sampleBackgroundDataFilesB = gf2.sampleBackgrounds[0].__dict__.pop(
+ "dataFiles"
+ )
+ sampleBackgroundDataFilesB.__dict__.pop("_dataFiles")
+ sampleBackgroundSamplesB = gf2.sampleBackgrounds[0].__dict__.pop(
+ "samples"
+ )
- containerDataFilesB = containerB.__dict__.pop("dataFiles")
- containerCompositionB = containerB.__dict__.pop("composition")
- containerElementsB = containerCompositionB.__dict__.pop(
- "elements"
- )
+ self.assertDictEqual(
+ sampleBackgroundDataFilesA.__dict__,
+ sampleBackgroundDataFilesB.__dict__,
+ )
+ self.assertDictEqual(
+ gf1.sampleBackgrounds[0].__dict__,
+ gf2.sampleBackgrounds[0].__dict__,
+ )
+
+ for sampleA, sampleB in zip(
+ sampleBackgroundSamplesA, sampleBackgroundSamplesB
+ ):
+ sampleDataFilesA = sampleA.__dict__.pop("dataFiles")
+ sampleDataFilesA.__dict__.pop("_dataFiles")
+ sampleCompositionA = sampleA.__dict__.pop("composition")
+ sampleElementsA = sampleCompositionA.__dict__.pop("elements")
+ sampleContainersA = sampleA.__dict__.pop("containers")
+
+ sampleDataFilesB = sampleB.__dict__.pop("dataFiles")
+ sampleDataFilesB.__dict__.pop("_dataFiles")
+ sampleCompositionB = sampleB.__dict__.pop("composition")
+ sampleElementsB = sampleCompositionB.__dict__.pop("elements")
+ sampleContainersB = sampleB.__dict__.pop("containers")
self.assertDictEqual(
- containerDataFilesA.__dict__, containerDataFilesB.__dict__
+ sampleDataFilesA.__dict__, sampleDataFilesB.__dict__
)
self.assertDictEqual(
- containerCompositionA.__dict__,
- containerCompositionB.__dict__,
+ sampleCompositionA.__dict__, sampleCompositionB.__dict__
)
for elementA, elementB in zip(
- containerElementsA, containerElementsB
- ):
+ sampleElementsA, sampleElementsB):
self.assertDictEqual(elementA.__dict__, elementB.__dict__)
- self.assertDictEqual(containerA.__dict__, containerB.__dict__)
+ self.assertDictEqual(sampleA.__dict__, sampleB.__dict__)
+
+ for containerA, containerB in zip(
+ sampleContainersA, sampleContainersB
+ ):
+ containerDataFilesA = containerA.__dict__.pop("dataFiles")
+ containerDataFilesA.__dict__.pop("_dataFiles")
+ containerCompositionA = containerA.__dict__.pop(
+ "composition")
+ containerElementsA = containerCompositionA.__dict__.pop(
+ "elements"
+ )
+
+ containerDataFilesB = containerB.__dict__.pop("dataFiles")
+ containerDataFilesB.__dict__.pop("_dataFiles")
+ containerCompositionB = containerB.__dict__.pop(
+ "composition")
+ containerElementsB = containerCompositionB.__dict__.pop(
+ "elements"
+ )
+
+ self.assertDictEqual(
+ containerDataFilesA.__dict__,
+ containerDataFilesB.__dict__
+ )
+ self.assertDictEqual(
+ containerCompositionA.__dict__,
+ containerCompositionB.__dict__,
+ )
+ for elementA, elementB in zip(
+ containerElementsA, containerElementsB
+ ):
+ self.assertDictEqual(
+ elementA.__dict__, elementB.__dict__)
+
+ self.assertDictEqual(
+ containerA.__dict__, containerB.__dict__)
diff --git a/gudpy/test/test_gudrun_classes.py b/gudpy/test/test_gudrun_classes.py
index a783058d..7e69a585 100644
--- a/gudpy/test/test_gudrun_classes.py
+++ b/gudpy/test/test_gudrun_classes.py
@@ -1,7 +1,6 @@
from unittest import TestCase
from core.exception import ParserException
-from core.gudrun_file import GudrunFile
from core.beam import Beam
from core.composition import Composition
from core.container import Container
@@ -15,17 +14,21 @@
FTModes, Instruments, Scales, UnitsOfDensity, MergeWeights,
NormalisationType, OutputUnits, Geometry, CrossSectionSource
)
+from core.io.gudpy_io import GudPyIO
class TestGudrunClasses(TestCase):
def testEmptyPath(self):
-
emptyPath = ""
- self.assertRaises(RuntimeError, GudrunFile, loadFile=emptyPath)
+ gudpyIO = GudPyIO()
+ self.assertRaises(
+ ParserException, gudpyIO.importGudrunFile, emptyPath)
def testInvalidPath(self):
invalidPath = "invalid_path"
- self.assertRaises(ParserException, GudrunFile, loadFile=invalidPath)
+ gudpyIO = GudPyIO()
+ self.assertRaises(
+ ParserException, gudpyIO.importGudrunFile, invalidPath)
def testInstrumentInitDataTypes(self):
diff --git a/gudpy/test/test_purge_file.py b/gudpy/test/test_purge_file.py
index cf138845..bd69ae7a 100644
--- a/gudpy/test/test_purge_file.py
+++ b/gudpy/test/test_purge_file.py
@@ -1,37 +1,21 @@
import os
from unittest import TestCase
-from shutil import copyfile
+import tempfile
from core.purge_file import PurgeFile
-from core.enums import Format
from core import gudpy
+from core.io.gudpy_io import GudPyIO
class TestPurgeFile(TestCase):
def setUp(self) -> None:
- path = "TestData/NIMROD-water/water.txt"
-
- if os.name == "nt":
- from pathlib import Path
- dirpath = Path().resolve() / "test/" / Path(path)
- else:
- dirpath = (
- "/".join(os.path.realpath(__file__).split("/")[:-1])
- + "/"
- + path
- )
+ path = f"TestData{os.path.sep}NIMROD-water{os.path.sep}water.txt"
+ dirpath = (os.path.dirname(__file__))
self.gudpy = gudpy.GudPy()
self.keepsakes = os.listdir()
- copyfile(dirpath, "test/TestData/NIMROD-water/good_water.txt")
- self.gudpy.loadFromFile(
- loadFile="test/TestData/NIMROD-water/good_water.txt",
- format=Format.TXT)
-
- self.gudpy.gudrunFile.write_out(
- path="test/TestData/NIMROD-water/good_water.txt",
- overwrite=True
- )
+ refFile = os.path.join(dirpath, path)
+ self.gudpy.loadFromGudrunFile(loadFile=refFile)
self.g = self.gudpy.gudrunFile
self.expectedPurgeFile = {
"standardDeviation": (10, 10),
@@ -42,7 +26,6 @@ def setUp(self) -> None:
return super().setUp()
def tearDown(self) -> None:
-
[os.remove(f) for f in os.listdir() if f not in self.keepsakes]
return super().tearDown()
@@ -58,9 +41,11 @@ def testCreatePurgeClass(self):
)
def testWritePurgeFile(self):
-
- purge = PurgeFile(self.g)
- purge.write_out()
- with open("purge_det.dat", encoding="utf-8") as f:
- outlines = f.read()
- self.assertEqual(outlines, str(purge))
+ with tempfile.TemporaryDirectory() as tmp:
+ path = os.path.join(tmp, "purge_det.dat")
+ purge = PurgeFile(self.g)
+ GudPyIO.writeObject(purge, path)
+ with open(path, encoding="utf-8") as f:
+ outlines = f.read()
+ self.assertEqual(outlines, str(purge))
+ f.close()
diff --git a/requirements.txt b/requirements.txt
index b314d292..53c4ad8d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,4 +2,5 @@ PySide6==6.6.0
chardet
ruamel.yaml
h5py
-click
\ No newline at end of file
+click
+scikit-optimize
\ No newline at end of file