repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
CristianContrera95/strict_df | tests/test_utils.py | import pytest
from src.strictdf.utils.dtypes import str_check_float, str_check_bool, str_check_int
@pytest.mark.parametrize(
    'string, expected',
    [
        ("", False),
        (".", False),
        ("2.0", False),
        ("1.5", False),
        ("a", False),
        ("adsada", False),
        ("0.5", False),
        ("2", False),
        ("1.0", True),
        ("1.000000000", True),
        ("0", True),
        (".0", True),
        ("True", True),
        ("true", True),
        # Duplicate ("False", True) case removed -- it added no coverage.
        ("False", True),
        ("t", True),
        ("1", True),
        ("f", True),
        ("t.", True),
    ]
)
def test_str_check_bool(string, expected):
    """str_check_bool must be truthy exactly for boolean-like strings."""
    assert bool(str_check_bool(string)) == expected
# Integer-likeness cases: (input string, expected truthiness of the check).
INT_CASES = [
    ("", False),
    (".", False),
    ("212.", True),
    ("2.0", True),
    ("1.5", False),
    ("a", False),
    ("adsada", False),
    (".5", False),
    ("2", True),
    ("1.0", True),
    ("1.000000000", True),
    ("0", True),
    (".0", True),
    ("True", False),
    ("true", False),
]


@pytest.mark.parametrize('string, expected', INT_CASES)
def test_str_check_int(string, expected):
    """str_check_int must be truthy exactly for integer-like strings."""
    result = str_check_int(string)
    assert bool(result) is expected
# Float-likeness cases: (input string, expected truthiness of the check).
FLOAT_CASES = [
    ("", False),
    (".", False),
    ("212.", True),
    ("2.0", True),
    ("1.5", True),
    ("a", False),
    ("adsada", False),
    (".5", True),
    ("2", False),
    ("1.0", True),
    ("1.000000000", True),
    ("0", False),
    (".0", True),
    ("True", False),
    ("true", False),
]


@pytest.mark.parametrize('string, expected', FLOAT_CASES)
def test_str_check_float(string, expected):
    """str_check_float must be truthy exactly for float-like strings."""
    result = str_check_float(string)
    assert bool(result) is expected
|
CristianContrera95/strict_df | setup.py | <reponame>CristianContrera95/strict_df
"""
Module setup for installation
"""
import os
from setuptools import setup
def load_requirements(fname):
    """Turn a requirements file into a list of requirement specifiers.

    Blank lines and ``#`` comment lines are skipped so the result can be
    passed directly to setuptools' ``install_requires`` (empty specifiers
    make setuptools fail).  Returns an empty list when *fname* is missing.
    """
    requirements = []
    if os.path.exists(fname):
        with open(fname, "r") as fp:
            for line in fp:
                line = line.strip()
                # Skip blanks and comments: the original kept them, which
                # produces invalid "" entries in install_requires.
                if line and not line.startswith("#"):
                    requirements.append(line)
    return requirements
# Package metadata for strictdf; requirement lists are read from the
# requirements files at build time via load_requirements().
setup(
    name="strictdf",
    version="0.1.1",
    install_requires=load_requirements('requirements.txt'),
    tests_require=["pytest"],
    test_suite="tests",
    extras_require={'dev': load_requirements('requirements-dev.txt')},
    python_requires=">=3.8",
)
|
CristianContrera95/strict_df | tests/test_strict_df.py | <reponame>CristianContrera95/strict_df
import pytest
import pandas as pd
import numpy as np
from src.strictdf import StrictDataFrame
@pytest.mark.parametrize(
    'df, expected',
    [
        (pd.DataFrame({}), pd.DataFrame({})),
        (pd.DataFrame({
            'col1': np.arange(10),
            'col2': list(range(9)) + ['none'],
            'col3': list(map(float, range(9))) + ['none'],
            'col4': list(map(bool, range(9))) + ['none'],
            'col5': map(bool, range(10)),
            'col6': list(map(str, range(9))) + ['none'],
        }),
         pd.DataFrame({
             'col1': np.arange(10),
             'col2': list(range(9)) + ['none'],
             'col3': list(map(float, range(9))) + ['none'],
             'col4': list(map(bool, range(9))) + ['none'],
             'col5': map(bool, range(10)),
             'col6': list(map(str, range(9))) + ['none'],
         })
         ),
    ]
)
def test_old_df(df, expected):
    """StrictDataFrame.old_df must keep the original rows untouched."""
    sdf = StrictDataFrame(df)
    assert sdf.old_df.shape[0] == df.shape[0]
    # Equality check via symmetric difference: concatenating two equal
    # frames and dropping every duplicated row must leave nothing behind.
    assert pd.concat([sdf.old_df, expected]).drop_duplicates(keep=False).shape[0] == 0
@pytest.mark.parametrize(
    'df, expected',
    [
        (pd.DataFrame({}), pd.DataFrame({})),
        (pd.DataFrame({
            'col1': np.arange(10),
            'col2': list(range(9)) + ['none'],
            'col3': list(map(float, range(9))) + ['none'],
            'col4': list(map(bool, range(9))) + ['none'],
            'col5': map(bool, range(10)),
            'col6': list(map(str, range(9))) + ['none'],
        }),
         # Expected: the trailing 'none' row is dropped, leaving 9 clean rows.
         pd.DataFrame({
             'col1': np.arange(9),
             'col2': np.arange(9),
             'col3': np.arange(9),
             'col4': map(bool, range(9)),
             'col5': map(bool, range(9)),
             'col6': np.arange(9),
         }),
         ),
    ]
)
def test_new_df(df, expected):
    """new_df drops rows whose values do not fit the inferred strict dtypes."""
    sdf = StrictDataFrame(df)
    assert sdf.new_df.shape[0] <= df.shape[0]
    # Symmetric-difference equality check (see test_old_df).
    assert pd.concat([sdf.new_df, expected]).drop_duplicates(keep=False).shape[0] == 0
@pytest.mark.parametrize(
    'df, expected',
    [
        (pd.DataFrame({
            'col1': np.arange(10),
            'col2': list(range(9)) + ['none'],
            'col3': list(map(float, range(9))) + ['none'],
            'col4': list(map(bool, range(9))) + ['none'],
            'col5': map(bool, range(10)),
            'col6': list(map(str, range(9))) + ['none'],
        }),
         # After dropping the 'none' row, every remaining column coerces to
         # a strict dtype.
         {'col1': 'int64',
          'col2': 'int64',
          'col3': 'int64',
          'col4': 'bool',
          'col5': 'bool',
          'col6': 'int64',
          }
         ),
        (pd.DataFrame({
            'col1': ['1.5' for _ in range(9)] + ['none'],
            'col2': ['1.5' for _ in range(5)] + ['1.0' for _ in range(5)],
            'col3': ['1.5' for _ in range(9)] + [np.nan],
            'col4': ['a' for _ in range(10)],
        }),
         {'col1': 'float64',
          'col2': 'float64',
          'col3': 'float64',
          'col4': 'str',
          }
         ),
    ]
)
def test_new_df_dtypes(df, expected):
    """dtypes must report the strict dtype inferred for each column."""
    sdf = StrictDataFrame(df)
    assert sdf.dtypes == expected
@pytest.mark.parametrize(
    'df, expected',
    [
        (pd.DataFrame({
            'col1': np.arange(10),
            'col2': list(range(9)) + ['none'],
            'col3': list(map(float, range(9))) + ['none'],
            'col4': list(map(bool, range(9))) + ['none'],
            'col5': map(bool, range(10)),
            'col6': list(map(str, range(9))) + ['none'],
        }),
         # One row ('none') is removed, leaving a (9, 6) frame.
         "DataFrame having shape '(9, 6)' (1 rows removed from original)"
         ),
        (pd.DataFrame({
            'col1': ['1.5' for _ in range(9)] + ['none'],
            'col2': ['1.5' for _ in range(5)] + ['1.0' for _ in range(5)],
            'col3': ['1.5' for _ in range(9)] + [np.nan],
            'col4': ['a' for _ in range(10)],
        }),
         "DataFrame having shape '(9, 4)' (1 rows removed from original)"
         ),
    ]
)
def test_report(df, expected):
    """report() must describe the cleaned shape and the number of dropped rows."""
    sdf = StrictDataFrame(df)
    assert sdf.report() == expected
|
CristianContrera95/strict_df | src/strictdf/__main__.py | print("strictdf version 0.1.0")
|
pexip/os-python-treq | setup.py | <gh_stars>0
from setuptools import find_packages, setup
# Trove classifiers: PyPI metadata describing project maturity and the
# interpreters/frameworks treq supports.
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Framework :: Twisted",
    "Programming Language :: Python",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: Implementation :: CPython",
    "Programming Language :: Python :: Implementation :: PyPy",
]
if __name__ == "__main__":
    # README.rst doubles as the PyPI long description.
    with open('README.rst') as f:
        readme = f.read()
    setup(
        name="treq",
        packages=find_packages('src'),
        package_dir={"": "src"},
        # Version numbers are managed by `incremental`, not a literal here.
        setup_requires=["incremental"],
        use_incremental=True,
        install_requires=[
            "incremental",
            "requests >= 2.1.0",
            "six",
            "Twisted[tls] >= 16.4.0",
            "attrs",
        ],
        extras_require={
            "dev": [
                "mock",
                "pep8",
                "pyflakes",
                "sphinx",
                "httpbin==0.5.0",
            ],
        },
        package_data={"treq": ["_version"]},
        author="<NAME>",
        author_email="<EMAIL>",
        maintainer="<NAME>",
        maintainer_email="<EMAIL>",
        classifiers=classifiers,
        description="A requests-like API built on top of twisted.web's Agent",
        license="MIT/X",
        url="https://github.com/twisted/treq",
        long_description=readme
    )
|
narayananmtrs/College-Project | main.py | <filename>main.py
import os
import cv2
import uuid
import time
import click
import pickle
import face_recognition
def prepare_image(cameraImage):
    """Return the first face encoding extracted from *cameraImage*.

    Prints an error and returns None when no face can be found.
    """
    encodings = face_recognition.face_encodings(cameraImage)
    if not encodings:
        print("Error : Face features cannot be extracted from this file.")
        return None
    return encodings[0]
def store_image_for_reference(image, username):
    """Persist an encoded face *image* under a fresh ``<uuid>_<username>`` file.

    The ``'Default: '`` prompt prefix is stripped and spaces in *username*
    are stored as ``%``.  Returns the generated filename (inside ``persist/``).
    """
    # generate a uuid for the new image
    uid = uuid.uuid4().hex
    # `uuid_username` format
    filename = "{0}_{1}".format(uid, username.replace(
        'Default: ', '').replace(' ', '%'))
    # The uuid makes the file name unique, so plain write mode is equivalent
    # to the old append mode; the context manager closes the handle (the
    # original leaked it, leaving the pickle potentially unflushed).
    with open('persist/' + filename, 'wb') as out_file:
        pickle.dump(image, out_file)
    return filename
def create_out_dir():
    """Create the ``persist`` storage directory; a no-op if it already exists.

    Uses exist_ok so concurrent/repeated calls do not raise FileExistsError
    (os.mkdir in the original crashed when the directory was already there).
    """
    os.makedirs('persist', exist_ok=True)
# returns boolean
def out_dir_exist():
return os.path.exists('persist')
def get_image_identifier(image_to_match):
    """Return the stored filename whose face encoding matches *image_to_match*.

    Returns None when the storage directory is missing (it is created as a
    side effect) or when no stored encoding matches.
    """
    if not out_dir_exist():
        create_out_dir()
        return None
    images = os.listdir('persist')
    """
    iterate through all the files and try matching
    If no matches, store as new
    """
    for image in images:
        # open image in binary format; the context manager closes the handle
        # (the original leaked one file descriptor per stored image).
        with open('persist/' + image, 'rb') as image_meta_file:
            existing_encoded_image = pickle.load(image_meta_file)
        # compare with existing encoded
        result = face_recognition.compare_faces(
            [image_to_match], existing_encoded_image)
        # match is found
        if result[0]:
            return image
    return None
def get_image_from_cam():
    """Capture one frame from the default webcam.

    Shows a preview window and returns the frame captured when the user
    presses Space; returns None when the capture device cannot be read.
    """
    cam = cv2.VideoCapture(0)
    cv2.namedWindow("Press Space to capture")
    try:
        while True:
            ret, frame = cam.read()
            if not ret:
                print("Unable to open video capture.")
                return None
            cv2.imshow("Press Space to capture", frame)
            k = cv2.waitKey(1)
            if k % 256 == 32:  # Space pressed
                print("Image Captured...")
                return frame
    finally:
        # Release the camera and close the preview on every exit path; the
        # original leaked both when the read failed.
        cam.release()
        cv2.destroyAllWindows()
def show_user_info(user):
    """Print the uuid and username encoded in a ``<uuid>_<username>`` filename."""
    # Split on the FIRST underscore only: the uuid hex never contains '_',
    # but a username might, and an unbounded split truncated such names.
    # Also avoids shadowing the imported `uuid` module with a local name.
    uid, raw_name = user.split('_', 1)
    username = raw_name.replace('%', ' ')
    print("Username: {}".format(username))
    print("Unique Identifier: {}".format(uid))
def authenticate(autosave, showid, image=None):
    """Match *image* against the stored users and report access.

    When no match is found and *autosave* is set, prompts for a username and
    enrolls the new encoding.  *showid* additionally prints the matched (or
    newly created) user's identifier.  Does nothing when *image* is None.
    """
    if image is not None:
        # get the encoded version
        image_to_be_matched_encoded = prepare_image(image)
        # encoding successful
        if image_to_be_matched_encoded is not None:
            user = get_image_identifier(image_to_be_matched_encoded)
            if not user:
                print('---- Access Denied! ----')
                if autosave:
                    print('Saving new user...')
                    # prompt to get a username
                    username = click.prompt(
                        'Enter username: ', default='Default: No username', type=str)
                    user = store_image_for_reference(
                        image_to_be_matched_encoded, username)
                    if showid:
                        show_user_info(user)
            else:
                print('---- Access Granted ----')
                if showid:
                    show_user_info(user)
    # Blank line separates output when authenticating multiple images.
    print()
@click.command()
@click.option("--autosave", default=False, help="Auto save new user.")
@click.option("--listdir", default=False, help="List all existing users.")
@click.option("--showid", default=False, help="Show ID of authenticated user on success.")
@click.option("--image", default=None, help="Authenticate from an image instead of webcam")
@click.option("--multiple", default=False, help="Try with multiple images in folder")
def main(autosave, listdir, showid, image, multiple):
    """CLI entry point: list stored users, or authenticate from webcam/image(s).

    NOTE(review): the boolean options are not declared with is_flag=True, so
    any non-empty value passed on the command line (even "--autosave False")
    is truthy -- confirm the intended click semantics.
    """
    time_start = time.time()
    if listdir:
        if not out_dir_exist():
            create_out_dir()
        images = os.listdir('persist')
        if len(images) <= 0:
            print('Empty Directory!')
        else:
            for _image in images:
                # Trim only id, ignore `Name` part
                print(_image.split('_')[0])
    else:
        if image is not None:
            # get image from path
            frame = face_recognition.load_image_file("images/{}".format(image))
            authenticate(autosave, showid, frame)
        else:
            if not multiple:
                # get single image from webcam
                frame = get_image_from_cam()
                authenticate(autosave, showid, frame)
            else:
                images = os.listdir('images')
                if len(images) <= 0:
                    print('Empty Directory!')
                else:
                    for _image in images:
                        # load each image and perform authentication
                        print("Using image: {}".format(_image))
                        frame = face_recognition.load_image_file(
                            "images/{}".format(_image))
                        authenticate(autosave, showid, frame)
    print('Took %s seconds' % (int(time.time() - time_start)))
|
bcbernardo/Zenity | setup.py | <reponame>bcbernardo/Zenity<gh_stars>0
# -*- coding: utf-8 -*-
"""
Zenity setup script.
"""
# NOTE(review): this reassigns the module docstring, and setup() below reuses
# it as the PyPI long_description.  The text contains user-visible typos
# ("zanity", "eitherin", "infor mation") -- it is a runtime string, so it is
# left byte-identical here; fix the wording in a deliberate content change.
__doc__="""zenity - display dialogs with python
DESCRIPTION
zenity is a Python library that will display GTK+ dialogs using zanity
tool, and return (eitherin the return code, or on standard output) the
users input. This allows you to present information, and ask for infor
mation from the user.
For example, zenity.show(zenity.question) will return either 0, 1 or 5, depending
on whether the user pressed OK, Cancel or timeout has been reached.
zenity.show(zenity.entry) will output on standard output what the user typed into
the text entry field.
Comprehensive documentation is coming soon.
ENVIRONMENT
Normally, zenity detects the window from which it was launched
and keeps itself above that window. This behavior can be disabled by
unsetting the WINDOWID environment variable.
AUTHOR
Original Zenity was written by <NAME> <<EMAIL>>.
This tool is written by <NAME> <<EMAIL>>
"""
try:
from setuptools import setup # noqa, analysis:ignore
except ImportError:
print("please install setuptools\npython -m pip install setuptools\nor\npython -m pip install setuptools")
raise ImportError()
# Define name and description
name = 'pyzenity'
description = "lightweight and full featured library to display dialogs with python."

## Setup
setup(
    name=name,
    version='2.0.0',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    url='https://github.com/Ksengine/Zenity',
    download_url='https://pypi.python.org/pypi/Zenity',
    keywords="GUI, dialog, lightweight, full, featured, full-featured,"
    +"library, to,display dialogs, display dialog, dialogs, python",
    description=description,
    # The module docstring (reassigned above) doubles as the long description.
    long_description=__doc__,
    platforms='any',
    provides=[name],
    install_requires=[],
    py_modules=['zenity'],
    scripts=['zenity.py'],
    # Console entry point: `pyzenity` forwards to zenity.cli().
    entry_points='''
[console_scripts]
pyzenity=zenity:cli
''',
    zip_safe=True,
    classifiers=[
        'Intended Audience :: Developers',
        'Development Status :: 4 - Beta',
        "Operating System :: OS Independent",
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3',
    ],
)
|
bcbernardo/Zenity | zenity.py | """Zenity - display dialogs with python
DESCRIPTION
zenity is a Python library that will display GTK+ dialogs using zanity
tool, and return (eitherin the return code, or on standard output) the
users input. This allows you to present information, and ask for infor
mation from the user.
For example, zenity.show(zenity.question) will return either 0, 1 or 5, depending
on whether the user pressed OK, Cancel or timeout has been reached.
zenity.show(zenity.entry) will output on standard output what the user typed into
the text entry field.
Comprehensive documentation is coming soon.
ENVIRONMENT
Normally, zenity detects the window from which it was launched
and keeps itself above that window. This behavior can be disabled by
unsetting the WINDOWID environment variable.
AUTHOR
Original Zenity was written by <NAME> <<EMAIL>>.
This tool is written by <NAME> <<EMAIL>>
"""
from __future__ import absolute_import, division
import subprocess
import os
import time
from sys import version_info
__version__ = '2.0.0'

# Application Options:
# Each constant names a zenity dialog type; show() turns it into the matching
# ``--<type>`` flag on the zenity command line.
calendar ="calendar" #Display calendar dialog
entry ="entry" #Display text entry dialog
error ="error" #Display error dialog
info ="info" #Display info dialog
file_selection ="file-selection" #Display file selection dialog
list ="list" #Display list dialog  # NOTE(review): shadows the builtin ``list``
notification ="notification" #Display notification
progress ="progress" #Display progress indication dialog
question ="question" #Display question dialog
warning ="warning" #Display warning dialog
scale ="scale" #Display scale dialog
text_info ="text-info" #Display text information dialog
color_selection="color-selection" #Display color selection dialog
password ="password" #Display password dialog
forms ="forms" #Display forms dialog
class ZenityException(Exception):
    """Raised when the zenity binary is unavailable or cannot be run."""
    pass
def test_call(*args, **kwargs):
    """Test whether a subprocess call succeeds.

    Returns True when the command exits cleanly within 10 seconds,
    False on any failure (missing binary, non-zero exit, timeout).
    """
    try:
        subprocess.check_output(*args, timeout=10, **kwargs)
    except Exception:
        return False
    return True
def _message(args, writeable=False):
    """Run ``zenity`` with the given command-line *args*.

    When *writeable* is True, return a ``write(message)`` callable that feeds
    lines to the dialog's stdin (used for e.g. --progress).  Otherwise block
    until the dialog closes and return ``(success, stdout_text)``.
    """
    def write(message=''):
        try:
            # Popen pipes are binary by default: encode before writing (the
            # original wrote str and would raise an uncaught TypeError), and
            # flush so zenity sees each line immediately.
            p.stdin.write((str(message) + '\n').encode('utf-8'))
            p.stdin.flush()
        except IOError as e:
            print(e)
            exit()
        return p.returncode

    # Unset WINDOWID so zenity does not pin itself above the calling window.
    env = os.environ.copy()
    env['WINDOWID'] = ''
    p = subprocess.Popen(['zenity'] + args, stdin=subprocess.PIPE,
                         stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                         env=env)
    if writeable:
        return write
    try:
        # Poll until the dialog closes; success is a zero return code.
        while p.poll() is None:
            time.sleep(0.002)
        return not p.poll(), p.stdout.read().decode('utf-8', 'ignore')
    finally:
        if p.poll() is None:  # pragma: no cover
            p.kill()
def works():
    """Return True when the ``zenity`` binary is runnable.

    If zenity is missing, makes a best-effort attempt to install it via
    apt-get and re-checks.  Raises ZenityException when the attempt fails.
    """
    if test_call(['zenity', '--version']):
        return True
    try:
        from os import system
        # NOTE(review): silently shelling out to sudo is surprising for a
        # library; consider asking the user before installing packages.
        system("sudo apt-get install zenity")
    except Exception as e:
        # str(e): the original concatenated the exception object itself,
        # which raises TypeError instead of the intended ZenityException.
        raise ZenityException("Zenity Not Working\nlog:\n" + str(e))
    return test_call(['zenity', '--version'])
def show(*args, **kwargs):
    """show dialog

    Positional args become bare ``--flag`` switches; keyword args become
    ``--key value`` pairs (values repr()-quoted).  Passing ``writeable=...``
    returns a stdin writer instead of blocking.  Returns False when the
    zenity binary is unavailable.
    """
    if not works():
        return False
    # The mere presence of the key enables writeable mode, matching the
    # original's ``in``/``del`` handling regardless of the value.
    writeable = "writeable" in kwargs
    if writeable:
        del kwargs["writeable"]
    flags_list = []
    for key in kwargs:
        flags_list.append("--{}".format(key))
        flags_list.append(repr(kwargs[key]))
    for arg in args:
        flags_list.append("--{}".format(arg))
    if writeable:
        return _message(flags_list, writeable=True)
    return _message(flags_list)
def cli():
    """Forward this process' command-line arguments straight to zenity."""
    import sys
    from os import system
    if works():
        # NOTE(review): os.system re-parses the joined argv through the
        # shell, so arguments containing spaces or shell metacharacters are
        # mangled -- consider subprocess.run(['zenity'] + sys.argv[1:]).
        system('zenity '+' '.join(sys.argv[1:]))

if __name__=='__main__':
    cli()
|
bcbernardo/Zenity | test.py | <reponame>bcbernardo/Zenity
import zenity
import time
# Open a zenity progress dialog; with writeable=True, show() returns a
# write() callback that pushes new values/messages to the dialog's stdin.
t = zenity.show("progress",writeable=True)
|
russetrob/nail | examples/dns_cpp/dns/bench/visualize.py | <reponame>russetrob/nail<filename>examples/dns_cpp/dns/bench/visualize.py<gh_stars>100-1000
#!/usr/bin/python
import sys
import os
import numpy as np
from pylab import *
def labelstr(path):
    """Return *path* stripped of its file extension, for use as a plot label."""
    base, _ext = os.path.splitext(path)
    return base
# Benchmark result files are passed on the command line; drop argv[0].
files = sys.argv
del files[0]

data = []
labels = []
for file in files:
    print(file)
    data.append(loadtxt(file))
    labels.append(labelstr(file))

figure(1)
boxplot(data, vert=False)
# With vert=False the boxes sit at positions 1..len(data) along the y-axis,
# so the labels belong on the y-ticks; the original put them on x-ticks
# starting at 0, leaving every label off by one and on the wrong axis.
yticks(range(1, len(labels) + 1), labels)
show()
|
fstrnad/pyunicorn | examples/tutorials/spatial_networks.py | <reponame>fstrnad/pyunicorn
import numpy as np
import matplotlib.pyplot as plt
from pyunicorn.core import spatial_network as sp
from pyunicorn.core.grid import Grid
"""
This example code offers an overview of the spatial network code for GeoModel1
and GeoModel2 can be called. Furthermore, with the python versions GeoModel1_py
and GeoModel2_py further analysis can be done, to visualize the working of the
network operations.
"""
# Create Random Grids
rect_grid_num=20 # For larger network sizes this might take a while to compute!
grid=Grid.RegularGrid( time_seq=np.arange(2), lat_grid=np.random.randint(low=0, high=40, size=rect_grid_num),
                       lon_grid=np.random.randint(low=0, high=40, size=rect_grid_num) )
# Random Erdos-Renyi network on the grid, then wrap it as a SpatialNetwork.
erdos_renyi=sp.SpatialNetwork.ErdosRenyi(grid=grid, n_nodes=int(rect_grid_num**2),link_probability=0.1 )
geo_model=sp.SpatialNetwork(grid=erdos_renyi.grid, adjacency=erdos_renyi.adjacency )

# Apply geoModel code (fast C implementation).
new_link_list=geo_model.GeoModel1(n_steps=int(5e4), tolerance=1, grid_type='euclidean', verbose=False)
print("New link list", new_link_list)

# Here the python version is used for visualizing the Hamming-distance
# Attention higher tolerance might significantly slow done performance!
erdos_renyi=sp.SpatialNetwork.ErdosRenyi(grid=grid, n_nodes=int(rect_grid_num**2),link_probability=0.1 )
geo_model=sp.SpatialNetwork(grid=erdos_renyi.grid, adjacency=erdos_renyi.adjacency )
link_list1, dic1=geo_model.GeoModel1_py(n_steps=int(5e4), tolerance=0.1, grid_type='euclidean', verbose=False)
print("New link list" , link_list1)

# A fresh random network per tolerance so the runs are independent.
erdos_renyi=sp.SpatialNetwork.ErdosRenyi(grid=grid, n_nodes=int(rect_grid_num**2),link_probability=0.1 )
geo_model=sp.SpatialNetwork(grid=erdos_renyi.grid, adjacency=erdos_renyi.adjacency)
link_list2, dic2=geo_model.GeoModel1_py(n_steps=int(5e4), tolerance=0.5, grid_type='euclidean', verbose=False)
erdos_renyi=sp.SpatialNetwork.ErdosRenyi(grid=grid, n_nodes=int(rect_grid_num**2),link_probability=0.1 )
geo_model=sp.SpatialNetwork(grid=erdos_renyi.grid, adjacency=erdos_renyi.adjacency)
link_list3, dic3=geo_model.GeoModel1_py(n_steps=int(5e4), tolerance=1, grid_type='euclidean', verbose=False)
#link_list2, dic2 = geo_model.GeoModel2(n_steps=100, grid_type='spherical', verbose=True)
#print(dic1['x'])

# Plot the Hamming-distance trajectories for the three tolerances.
plt.figure(figsize=(6,4))
plt.xlabel('Steps')
plt.ylabel('Values')
plt.plot(dic1['x'], dic1['H'], label='H-Distance, GeoModel1, tolerance=0.1')
plt.plot(dic2['x'], dic2['H'], label='H-Distance, GeoModel1, tolerance=0.5')
plt.plot(dic3['x'], dic3['H'], label='H-Distance, GeoModel1, tolerance=1')
plt.legend()
plt.tight_layout()
plt.show()
fstrnad/pyunicorn | pyunicorn/core/spatial_network.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of pyunicorn.
# Copyright (C) 2008--2019 <NAME> and pyunicorn authors
# URL: <http://www.pik-potsdam.de/members/donges/software>
# License: BSD (3-clause)
import igraph
import sys
from .geo_network import GeoNetwork
from .network import NetworkError
from pyunicorn.core._ext.numerics import _geo_model1, _geo_model2
#from ._ext.numerics import _geo_model1
"""
Provides classes for analyzing spatial complex networks, that allow to generate
random surrogates from a given spatially embedded network which can preserve
global and local statistics associated with the nodes' embedding in a metric
space.
This code is based on the work of Wiedermann et al. 2016 .
"""
# array object and fast numerics
import numpy as np
from numpy.random import choice # random number generation
class SpatialNetwork(GeoNetwork):
    """Spatially embedded network with link-surrogate generators.

    Implements the GeoModel1/GeoModel2 surrogates of Wiedermann et al. 2016,
    which rewire links while (approximately, within a tolerance) preserving
    the global link-length distribution P(l).
    """

    def __init__(self, grid, adjacency=None, edge_list=None, directed=False,
                 node_weight_type="surface", silence_level=0):
        GeoNetwork.__init__(self, grid=grid, adjacency=adjacency,
                            edge_list=edge_list, directed=directed,
                            node_weight_type=node_weight_type,
                            silence_level=silence_level)

    def distance(self, D, node, list_of_neighbors=None):
        """Gives distances in spherical/euclidian networks from ``node`` to
        all nodes in ``list_of_neighbors`` (to every node when None).

        ``D`` is a full pairwise distance matrix; returns a 1d numpy array.
        """
        d_i_all = []
        if list_of_neighbors is None:
            list_of_neighbors = range(len(D[node]))
        for i in list_of_neighbors:
            d_i_all.append(D[node][i])
        return np.array(d_i_all)

    def link_id(self, node1, node2, N):
        """Return a unique scalar id for the undirected link (node1, node2)
        in an N-node network, together with the two node indices."""
        id = -0.5 * node1 * (node1 - 2 * N + 1)
        id += node2 - node1 - 1
        return id, node1, node2

    def _Hamming_Distance(self, A, B):
        """Normalized Hamming distance between two equal-length link-id arrays."""
        # Check if the graphs have the same number of vertices
        if len(A) == len(B):
            # Calculate the hamming distance
            hamming = (A != B).sum()
            # Return the normalized hamming distance
            return hamming / float(len(A) * (len(A) - 1))
        else:
            raise NetworkError("Only defined for networks with same number of nodes.")

    def GeoModel1(self, n_steps, tolerance, grid_type="spherical", verbose=False):
        """GeoModel1 surrogate (fast C implementation).

        GeoModel1 preserves the global link-length distribution P(l): the
        newly established links are of (approximately, within ``tolerance``)
        the same length as those removed, i.e. the four drawn nodes i, j, k,
        l form a kite with one link on each of the two equal-length sides.
        This tolerance-mask variant of Wiedermann et al. 2016 is a very good
        approximation of the original scheme at much lower cost.

        Updates ``self.adjacency`` in place and returns the rewired link list.
        """
        if grid_type == "spherical":
            D = self.grid.angular_distance()
        elif grid_type == "euclidean":
            D = self.grid.euclidean_distance()
        else:
            print("ERROR, This grid type is not recognized: ", grid_type)
            sys.exit(1)
        # Get number of nodes
        N = self.N
        # Get number of links
        E = self.n_links
        # C-contiguous copies are required by the C extension.  ``int``
        # replaces the ``np.int`` alias removed in NumPy 1.24 (same dtype).
        link_list = np.array(self.graph.get_edgelist(), int).copy(order='c')
        A = self.adjacency.copy(order='c')
        _geo_model1(n_steps, tolerance, A, D, link_list, N, E)  # run as fast c - code!
        # Update adjacency matrix
        self.adjacency = A
        return link_list

    def GeoModel1_py(self, n_steps, tolerance, grid_type="spherical", verbose=False):
        """
        This implementation in python can be used for further analysis, as e.g. plotting data
        for Hamming Distance, it does the same operation as the c-implemented version.
        The here implemented version offers however the possibility of further investigation
        of the inner working of the GeoModel1.
        """
        if grid_type == "spherical":
            D = self.grid.angular_distance()
        elif grid_type == "euclidean":
            D = self.grid.euclidean_distance()
        else:
            print("ERROR, This grid type is not recognized: ", grid_type)
            sys.exit(1)
        # Get an array of all links between all nodes in terms of [...,[i,j],...] for all Nodes
        link_list = np.array(self.graph.get_edgelist(), np.int32).copy(order='c')
        A = self.adjacency.copy(order='c')
        n_sampling_points = self.n_links
        # Get number of nodes
        N = self.N
        # Get number of links
        E = self.n_links
        # Scalar ids of the original links, used for the Hamming distance.
        original_link_ids = -0.5 * link_list[:, 0] * (link_list[:, 0] - 2 * N + 1)
        original_link_ids += link_list[:, 1] - link_list[:, 0] - 1
        sur_link_ids = original_link_ids.copy()
        # Track network statistics along the rewiring trajectory.
        g = igraph.Graph(link_list.tolist())
        T = [g.transitivity_avglocal_undirected()]
        x = [0]
        H = [0]
        L = [g.average_path_length()]
        ass = [g.assortativity_degree()]
        c = 0
        q = 0
        for u in range(n_steps):
            if verbose:
                print("Step {0}/{1}".format(u + 1, n_steps))
            cond = True
            while cond:
                q += 1
                # Draw a random link (i, j) in random orientation.
                first_link_index = np.random.randint(E)
                active_link = link_list[first_link_index]
                active_link = np.random.permutation(active_link)
                i, j = active_link
                # If second argument is None, distance to any neighbor node is calculated
                d_i_all = self.distance(D, i, None)
                D_tot = d_i_all - d_i_all[j]
                # Candidate nodes k whose distance to i matches |i-j| within tolerance.
                mask = np.abs(D_tot) < tolerance * d_i_all[j]
                mask[i] = False
                mask[j] = False
                possible_nbs = np.arange(N)[mask]
                possible_nbs = np.random.permutation(possible_nbs)
                l = None
                for k in possible_nbs:
                    nbs_of_k = np.fliplr(link_list == k)
                    nbs_of_k = link_list[nbs_of_k]
                    nbs_of_k = np.array(list(set(nbs_of_k) - set([k])))
                    if i in nbs_of_k or len(nbs_of_k) == 0:
                        continue
                    d_k_all = self.distance(D, k, nbs_of_k)
                    d_j_all = self.distance(D, j, nbs_of_k)
                    D_tot = d_k_all - d_j_all
                    # Neighbor l of k such that |k-l| matches |j-l| within tolerance.
                    mask = np.abs(D_tot) < tolerance * d_k_all
                    if mask.any():
                        l_candidate = choice(nbs_of_k[mask])
                        nbs_of_l = np.fliplr(link_list == l_candidate)
                        nbs_of_l = link_list[nbs_of_l]
                        if j not in nbs_of_l:
                            l = l_candidate
                            break
                if l is None:
                    continue
                cond = False
                second_link_index = ((link_list == k) | (link_list == l))
                second_link_index = second_link_index.sum(axis=1) == 2
                second_link_index = np.arange(E)[second_link_index]
                A[i, j] = A[j, i] = 0  # Delete link i<->j
                A[k, l] = A[l, k] = 0  # Delete link k<->l
                A[i, k] = A[k, i] = 1  # Add link i<->k
                A[j, l] = A[l, j] = 1  # Add link j<->l
                # gives id for link i<->k resp. j<->l in original_link_ids
                id1, i, k = self.link_id(i, k, N)
                id2, j, l = self.link_id(j, l, N)
                link_list[first_link_index] = [i, k]
                link_list[second_link_index] = [j, l]
                sur_link_ids[first_link_index] = id1
                sur_link_ids[second_link_index] = id2
                c += 1
                # Sample statistics every n_sampling_points accepted swaps.
                if c % n_sampling_points == 0:
                    g = igraph.Graph(link_list.tolist())
                    x.append(u)
                    T.append(g.transitivity_avglocal_undirected())
                    H.append(self._Hamming_Distance(original_link_ids, sur_link_ids))
                    L.append(g.average_path_length())
                    ass.append(g.assortativity_degree())
                    print(c)  # NOTE(review): debug output; consider gating on ``verbose``
        print("# Total steps:", q)
        dic = {"x": x, "T": T, "L": L, "H": H, "links": link_list,
               "assortativity": ass}
        # Update adjacency matrix
        self.adjacency = A
        return link_list, dic

    def GeoModel2(self, n_steps, tolerance, grid_type="spherical", verbose=False):
        """GeoModel2 surrogate (fast C implementation).

        Like GeoModel1, preserves the global link-length distribution P(l);
        in addition the four drawn nodes i, j, k, l must form a square with
        exactly one link on each of the two equal-length sides.
        """
        if grid_type == "spherical":
            D = self.grid.angular_distance()
        elif grid_type == "euclidean":
            D = self.grid.euclidean_distance()
        else:
            print("ERROR, This grid type is not recognized: ", grid_type)
            sys.exit(1)
        # Get number of nodes
        N = self.N
        # Get number of links
        E = self.n_links
        # ``int`` replaces the ``np.int`` alias removed in NumPy 1.24.
        link_list = np.array(self.graph.get_edgelist(), int).copy(order='c')
        A = self.adjacency.copy(order='c')
        n_sampling_points = self.n_links
        original_link_ids = -0.5 * link_list[:, 0] * (link_list[:, 0] - 2 * N + 1)
        original_link_ids += link_list[:, 1] - link_list[:, 0] - 1
        compute_at = int(n_steps / n_sampling_points)
        print(compute_at)
        # NOTE(review): the statistics below are computed only for the initial
        # network; the C routine does not fill them in.
        g = igraph.Graph(link_list.tolist())
        T = [g.transitivity_avglocal_undirected()]
        x = [0]
        H = [0]
        L = [g.average_path_length()]
        ass = [g.assortativity_degree()]
        c = 0
        # NOTE(review): debug dumps into the current working directory;
        # consider removing or gating these on ``verbose``.
        np.savetxt('link_list.txt', link_list, delimiter=' ', fmt='%d')
        np.savetxt('A.txt', A, delimiter=' ', fmt='%d')
        np.savetxt('D.txt', D, delimiter=' ', fmt='%1f')
        _geo_model2(n_steps, tolerance, A, D, link_list, N, E)  # run as fast c - code!
        dic = {"x": x, "T": T, "L": L, "H": H, "links": link_list,
               "assortativity": ass}
        # Update adjacency matrix
        self.adjacency = A
        return link_list, dic

    def GeoModel2_py(self, n_steps, tolerance, grid_type="spherical", verbose=False):
        """
        This implementation in python can be used for further analysis, as e.g. plotting data
        for Hamming Distance, it does the same operation as the c-implemented version.
        The here implemented version offers however the possibility of further investigation
        of the inner working of the GeoModel2.
        """
        if grid_type == "spherical":
            D = self.grid.angular_distance()
        elif grid_type == "euclidean":
            D = self.grid.euclidean_distance()
        else:
            print("ERROR, This grid type is not recognized: ", grid_type)
            sys.exit(1)
        # Get an array of all links between all nodes in terms of [...,[i,j],...] for all Nodes
        link_list = np.array(self.graph.get_edgelist(), np.int32).copy(order='c')
        A = self.adjacency.copy(order='c')
        n_sampling_points = self.n_links
        # Get number of nodes
        N = self.N
        # Get number of links
        E = self.n_links
        original_link_ids = -0.5 * link_list[:, 0] * (link_list[:, 0] - 2 * N + 1)
        original_link_ids += link_list[:, 1] - link_list[:, 0] - 1
        sur_link_ids = original_link_ids.copy()
        g = igraph.Graph(link_list.tolist())
        T = [g.transitivity_avglocal_undirected()]
        x = [0]
        H = [0]
        L = [g.average_path_length()]
        ass = [g.assortativity_degree()]
        c = 0
        q = 0
        for u in range(n_steps):
            if verbose:
                print("Step {0}/{1}".format(u + 1, n_steps))
            cond = True
            while cond:
                q += 1
                first_link_index = np.random.randint(E)
                active_link = link_list[first_link_index]
                active_link = np.random.permutation(active_link)
                i, j = active_link
                d_i_all = self.distance(D, i, None)
                D_tot = d_i_all - d_i_all[j]
                mask = np.abs(D_tot) < tolerance * d_i_all[j]
                mask[i] = False
                mask[j] = False
                possible_nbs = np.arange(N)[mask]
                possible_nbs = np.random.permutation(possible_nbs)
                l = None
                for k in possible_nbs:
                    nbs_of_k = np.fliplr(link_list == k)
                    nbs_of_k = link_list[nbs_of_k]
                    nbs_of_k = np.array(list(set(nbs_of_k) - set([k])))
                    if i in nbs_of_k or len(nbs_of_k) == 0:
                        continue
                    d_k_all = self.distance(D, k, nbs_of_k)
                    d_j_all = self.distance(D, j, nbs_of_k)
                    D_tot = d_k_all - d_j_all
                    # This is the same as in GeoModel1
                    mask1 = np.abs(D_tot) < tolerance * d_k_all
                    # This mask is applied furthermore
                    mask2 = np.abs(d_k_all - d_i_all[j]) < tolerance * d_i_all[j]
                    # Only intersection of mask1 and mask2 are valid nodes
                    mask = mask1 & mask2
                    if mask.any():
                        l_candidate = choice(nbs_of_k[mask])
                        nbs_of_l = np.fliplr(link_list == l_candidate)
                        nbs_of_l = link_list[nbs_of_l]
                        if j not in nbs_of_l:
                            l = l_candidate
                            break
                if l is None:
                    continue
                cond = False
                second_link_index = ((link_list == k) | (link_list == l))
                second_link_index = second_link_index.sum(axis=1) == 2
                second_link_index = np.arange(E)[second_link_index]
                A[i, j] = A[j, i] = 0  # Delete link i<->j
                A[k, l] = A[l, k] = 0  # Delete link k<->l
                A[i, k] = A[k, i] = 1  # Add link i<->k
                A[j, l] = A[l, j] = 1  # Add link j<->l
                # gives id for link i<->k resp. j<->l in original_link_ids
                id1, i, k = self.link_id(i, k, N)
                id2, j, l = self.link_id(j, l, N)
                link_list[first_link_index] = [i, k]
                link_list[second_link_index] = [j, l]
                sur_link_ids[first_link_index] = id1
                sur_link_ids[second_link_index] = id2
                c += 1
                # Sample statistics every n_sampling_points accepted swaps.
                if c % n_sampling_points == 0:
                    g = igraph.Graph(link_list.tolist())
                    x.append(u)
                    T.append(g.transitivity_avglocal_undirected())
                    H.append(self._Hamming_Distance(original_link_ids, sur_link_ids))
                    L.append(g.average_path_length())
                    ass.append(g.assortativity_degree())
                    print(c)  # NOTE(review): debug output; consider gating on ``verbose``
        print("# Total steps:", q)
        dic = {"x": x, "T": T, "L": L, "H": H, "links": link_list,
               "assortativity": ass}
        # Update adjacency matrix
        self.adjacency = A
        return link_list, dic
|
wiredlab/orbit | orbit/utilities.py | def calc_year(year):
twentieth = ('6', '7', '8', '9')
twenty_first = ('0', '1', '2', '3', '4', '5')
if year.startswith(twentieth):
return "19%s" % year
elif year.startswith(twenty_first):
return "20%s" % year
else:
return year |
wiredlab/orbit | orbit/__init__.py | <reponame>wiredlab/orbit
# Public submodules of the package; ``satellite`` is also imported eagerly so
# users can write ``from orbit import satellite`` directly.
# NOTE(review): __all__ lists submodule names while the import below rebinds
# ``satellite`` to the class -- confirm the intended package API.
__all__ = ["satellite", "tle", "utilities"]
from .satellite import satellite
wiredlab/orbit | setup.py | from setuptools import setup
setup(name='orbit',
version='0.2',
description='Gives current information about orbital objects of interest',
url='http://github.com/seanherron/orbit',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
long_description=open('README.txt').read(),
packages=['orbit'],
install_requires=[
'lxml',
'requests',
'requests_cache',
'pyephem',
],
) |
wiredlab/orbit | orbit/tle.py | <filename>orbit/tle.py<gh_stars>1-10
import math
from lxml import html
import requests
import requests_cache
import ephem
from . import utilities
requests_cache.install_cache(expire_after=86400)
def get(catnr):
    """Fetch the TLE for NORAD catalog number *catnr* from CelesTrak.

    Returns a (name, line1, line2) tuple of stripped strings.
    """
    response = requests.get('http://www.celestrak.com/cgi-bin/TLE.pl?CATNR=%s' % catnr)
    page = html.fromstring(response.text)
    lines = page.xpath('//pre/text()')[0].split('\n')
    # Element 0 is the leading blank before the first newline; 1-3 are the TLE.
    name, line1, line2 = (lines[i].strip() for i in (1, 2, 3))
    return name, line1, line2
def parse(name, line1, line2):
    """Build an ephem body from raw TLE lines and compute its current state."""
    tle_rec = ephem.readtle(name, line1, line2)
    tle_rec.compute()  # compute position/attributes for "now" (no explicit epoch)
    return tle_rec |
wiredlab/orbit | orbit/satellite.py | <filename>orbit/satellite.py
from math import degrees
from . import tle, utilities
class satellite:
    """Fetches and parses the TLE for one orbital object by catalog number."""
    def __init__(self,catnr):
        # tle_raw is the (name, line1, line2) tuple as fetched from CelesTrak.
        self.tle_raw = tle.get(catnr)
        self.tle_parsed = tle.parse(self.tle_raw[0], self.tle_raw[1], self.tle_raw[2])
    def name(self):
        """Satellite name (TLE line 0)."""
        return self.tle_raw[0].strip()
    def catalog_number(self):
        """NORAD catalog number, columns 3-7 of line 1."""
        return self.tle_raw[1][2:7]
    def elsat_classification(self):
        """Classification character (column 8 of line 1), e.g. 'U'."""
        return self.tle_raw[1][7]
    def launch_year(self):
        """Four-digit launch year expanded from the two-digit field."""
        return utilities.calc_year(self.tle_raw[1][9:11])
    def tle(self):
        """The raw TLE as a three-element list."""
        return [self.tle_raw[0], self.tle_raw[1], self.tle_raw[2]]
    def lat(self):
        """Sub-satellite latitude in degrees."""
        return degrees(self.tle_parsed.sublat)
    def long(self):
        """Sub-satellite longitude in degrees."""
        return degrees(self.tle_parsed.sublong)
    def elevation(self):
        """Elevation above sea level (meters, per pyephem)."""
        return self.tle_parsed.elevation
    def eclipsed(self):
        """True when the satellite is in Earth's shadow."""
        return self.tle_parsed.eclipsed |
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0017_auto_20201118_2308.py | <filename>reddit_dashboard/migrations/0017_auto_20201118_2308.py
# Generated by Django 3.1.2 on 2020-11-18 20:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0016_auto_20201118_2255'),
]
operations = [
migrations.AlterField(
model_name='posts',
name='submission_id',
field=models.CharField(max_length=250, unique=True),
),
]
|
tarikyayla/reddit_dashboard | reddit_dashboard/redis_connection.py | import redis
from reddit_dashboard.settings import REDIS_DATABASE, REDIS_HOST, REDIS_PORT
REDIS_CONNECTION = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DATABASE)
class RedisConsts(object):
    """Names of the Redis lists shared between Django and the Discord bot."""

    # Posts queued for the Discord bot to deliver.
    DISCORD_PUSH = 'discord:push'
    # Guild/channel payloads queued for Django to persist.
    SERVER_PUSH = 'django:model'
|
tarikyayla/reddit_dashboard | reddit_dashboard/utils/user_utils.py | from rest_framework.authtoken.models import Token
from reddit_dashboard.models import DashboardUser
from django.conf import settings
def get_default_user_token():
user = DashboardUser.objects.filter(username=settings.USERNAME).first()
token = None
if not user:
user = DashboardUser.objects.create_user(username=settings.USERNAME, password=<PASSWORD>)
user.is_active = True
user.is_staff = True
user.is_superuser = True
user.save()
token = Token.objects.get_or_create(user=user)
return "Token " + str(token[0]) |
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0006_auto_20201029_1917.py | # Generated by Django 3.1.2 on 2020-10-29 16:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0005_dashboarduser_subreddits'),
]
operations = [
migrations.AlterField(
model_name='dashboarduser',
name='is_staff',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='dashboarduser',
name='is_superuser',
field=models.BooleanField(default=True),
),
]
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0003_dashboarduser_reddit_username.py | <reponame>tarikyayla/reddit_dashboard
# Generated by Django 3.1.2 on 2020-10-17 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0002_auto_20201017_1650'),
]
operations = [
migrations.AddField(
model_name='dashboarduser',
name='reddit_username',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0016_auto_20201118_2255.py | <gh_stars>0
# Generated by Django 3.1.2 on 2020-11-18 19:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0015_auto_20201118_2250'),
]
operations = [
migrations.RenameField(
model_name='sentposts',
old_name='textchannel',
new_name='text_channel',
),
]
|
tarikyayla/reddit_dashboard | reddit_dashboard/celery_loader.py | <filename>reddit_dashboard/celery_loader.py<gh_stars>0
import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "reddit_dashboard.settings")
app = Celery("reddit_dashboard",
include=['reddit_dashboard.tasks'])
app.config_from_object('django.conf:settings', namespace='CELERY')
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0002_auto_20201017_1650.py | # Generated by Django 3.1.2 on 2020-10-17 13:50
import django.contrib.auth.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='dashboarduser',
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.RenameField(
model_name='dashboarduser',
old_name='active',
new_name='is_active',
),
migrations.RenameField(
model_name='dashboarduser',
old_name='admin',
new_name='is_staff',
),
migrations.RenameField(
model_name='dashboarduser',
old_name='staff',
new_name='is_superuser',
),
]
|
tarikyayla/reddit_dashboard | api/views/consts.py | <filename>api/views/consts.py
GET_FROM_REDDIT = 1
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0015_auto_20201118_2250.py | # Generated by Django 3.1.2 on 2020-11-18 19:50
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0014_posts_subreddit'),
]
operations = [
migrations.AddField(
model_name='posts',
name='create_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='sentposts',
name='SentDate',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='sentposts',
name='textchannel',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sent_posts_text_channel', to='reddit_dashboard.textchannel'),
),
]
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0009_auto_20201114_2009.py | # Generated by Django 3.1.2 on 2020-11-14 17:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0008_auto_20201030_0021'),
]
operations = [
migrations.RemoveField(
model_name='discordserver',
name='subreddits',
),
migrations.RemoveField(
model_name='discordserver',
name='text_channel',
),
migrations.AlterField(
model_name='dashboarduser',
name='subreddits',
field=models.ManyToManyField(blank=True, null=True, to='reddit_dashboard.Subreddit'),
),
migrations.AlterField(
model_name='discordserver',
name='create_date',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='discordserver',
name='server_id',
field=models.CharField(max_length=250, unique=True),
),
migrations.CreateModel(
name='TextChannel',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('slug', models.CharField(max_length=255)),
('channel_id', models.CharField(max_length=250, unique=True)),
('server', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reddit_dashboard.discordserver')),
],
),
]
|
tarikyayla/reddit_dashboard | api/serializers/discord_serializers.py | <filename>api/serializers/discord_serializers.py
from rest_framework import serializers
from reddit_dashboard.models import DiscordServer, TextChannel
from api.serializers.reddit_serializers import SubredditSerializer
class TextChannelSerializer(serializers.ModelSerializer):
    """Read serializer for TextChannel, inlining its followed subreddits."""
    following_subreddits = SubredditSerializer(many=True)
    class Meta:
        model = TextChannel
        fields = '__all__'
class TextChannelFollowSerializer(serializers.Serializer):
    """Input payload for following a subreddit from a text channel."""
    subreddit_id = serializers.IntegerField(required=True)
    # NOTE(review): `method` is accepted but not read by TextChannels.update.
    method = serializers.IntegerField(default=1, required=False)
class TextChannelCreateSerializer(serializers.Serializer):
    """Input payload for registering a text channel under a Discord server."""
    discord_id = serializers.IntegerField(required=True)
    name = serializers.CharField(required=True)
    channel_id = serializers.CharField(required=True)
class DiscordSerializer(serializers.ModelSerializer):
    """Serializes a DiscordServer together with its text channels."""
    text_channels = serializers.SerializerMethodField()
    def get_text_channels(self, obj):
        # Channels are fetched by FK lookup (no reverse related_name used).
        return TextChannelSerializer(TextChannel.objects.filter(server_id=obj.id), many=True).data
    class Meta:
        model = DiscordServer
        fields = ('id', 'name', 'text_channels',)
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0004_discordserver_subreddit.py | # Generated by Django 3.1.2 on 2020-10-18 14:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0003_dashboarduser_reddit_username'),
]
operations = [
migrations.CreateModel(
name='Subreddit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('url', models.CharField(max_length=100)),
('type', models.CharField(max_length=100)),
('subscribers', models.BigIntegerField(default=0)),
('description', models.TextField(blank=True, null=True)),
('added_date', models.DateField(auto_now=True)),
('last_checked_date', models.DateTimeField()),
('added_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='DiscordServer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('server_id', models.CharField(max_length=1000)),
('text_channel', models.CharField(max_length=255)),
('create_date', models.DateTimeField(auto_now=True)),
('added_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
('subreddits', models.ManyToManyField(to='reddit_dashboard.Subreddit')),
],
),
]
|
tarikyayla/reddit_dashboard | api/urls.py | from django.urls import path, include
from api.views.public import api_check, get_api_token
from api.views import user, discord
from rest_framework.routers import DefaultRouter
router = DefaultRouter(trailing_slash=False)
router.register(r'subreddits', user.Subreddits, basename="subreddits")
router.register(r'text-channels', discord.TextChannels, basename='text-channels')
urlpatterns = [
path("hello/", api_check),
path("get-api-token", get_api_token, name="get_api_token"),
path("", include(router.urls)),
path("refresh-subreddits", user.RefreshSubreddits.as_view(), name="refresh_subreddits"),
path("reddit-auth", user.RedditAuth.as_view(), name="reddit_auth"),
path('search-subreddits', user.SearchSubreddit.as_view(), name='search-auth'),
path('discord', discord.DiscordChannels.as_view(), name='discord-channels'),
]
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0018_auto_20201122_1313.py | # Generated by Django 3.1.2 on 2020-11-22 10:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0017_auto_20201118_2308'),
]
operations = [
migrations.AlterModelOptions(
name='posts',
options={'verbose_name': 'Post', 'verbose_name_plural': 'Posts'},
),
migrations.AlterUniqueTogether(
name='sentposts',
unique_together={('post', 'text_channel')},
),
]
|
tarikyayla/reddit_dashboard | api/serializers/reddit_serializers.py | <reponame>tarikyayla/reddit_dashboard
from rest_framework import serializers
from reddit_dashboard.models import Subreddit
class SubredditSerializer(serializers.ModelSerializer):
    """Subreddit representation omitting the (potentially large) description."""
    class Meta:
        model = Subreddit
        exclude = ('description',)
|
tarikyayla/reddit_dashboard | api/migrations/0001_initial.py | <reponame>tarikyayla/reddit_dashboard
# Generated by Django 3.1.2 on 2020-10-17 11:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Subreddit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('url', models.CharField(max_length=100)),
('type', models.CharField(max_length=100)),
('subscribers', models.BigIntegerField(default=0)),
('description', models.TextField(blank=True, null=True)),
('added_date', models.DateField(auto_now=True)),
('last_checked_date', models.DateTimeField()),
('added_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='DiscordServer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('server_id', models.CharField(max_length=1000)),
('text_channel', models.CharField(max_length=255)),
('create_date', models.DateTimeField(auto_now=True)),
('added_by', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
('subreddits', models.ManyToManyField(to='api.Subreddit')),
],
),
]
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0010_auto_20201114_2015.py | <gh_stars>0
# Generated by Django 3.1.2 on 2020-11-14 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0009_auto_20201114_2009'),
]
operations = [
migrations.AlterField(
model_name='dashboarduser',
name='subreddits',
field=models.ManyToManyField(blank=True, to='reddit_dashboard.Subreddit'),
),
]
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0007_subreddit_banner_img.py | <filename>reddit_dashboard/migrations/0007_subreddit_banner_img.py
# Generated by Django 3.1.2 on 2020-10-29 21:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0006_auto_20201029_1917'),
]
operations = [
migrations.AddField(
model_name='subreddit',
name='banner_img',
field=models.CharField(blank=True, max_length=500, null=True),
),
]
|
tarikyayla/reddit_dashboard | api/reddit/manager.py | <reponame>tarikyayla/reddit_dashboard
import praw
from praw.exceptions import PRAWException
from django.conf import settings
from reddit_dashboard.models import Subreddit
from reddit_dashboard.models import DashboardUser
import json
class RedditManager:
    """Wrapper around praw exposing an app-level client plus per-user clients."""

    instance = None  # application-level praw.Reddit client, set in __init__

    def __init__(self):
        self.instance = self.get_instance()

    def get_instance(self):
        """Create the app-level (non-user) praw client from Django settings."""
        return praw.Reddit(client_id=settings.PRAW_CLIENT_ID,
                           client_secret=settings.PRAW_SECRET,
                           user_agent=settings.PRAW_USER_AGENT,
                           redirect_uri=settings.PRAW_REDIRECT_URL)

    def search_by_subreddit(self, search_text):
        """Search reddit for subreddit names matching *search_text*."""
        return self.instance.subreddits.search_by_name(search_text)

    def get_subreddit(self, display_name):
        """Return praw's (lazy) Subreddit handle for *display_name*."""
        return self.instance.subreddit(display_name)

    def add_subreddit(self, display_name, user):
        """Fetch *display_name* from reddit and persist it as a Subreddit row.

        Raises PRAWException when the subreddit cannot be resolved.
        """
        from django.utils import timezone  # local import; module imports untouched

        subreddit = self.get_subreddit(display_name)
        if not subreddit:
            raise PRAWException
        subreddit_object = Subreddit(
            name=subreddit.display_name,
            type=subreddit.subreddit_type,
            added_by=user,
            # Bug fix: the URL used to be assigned to last_checked_date
            # (a DateTimeField), leaving the `url` field unset.
            url=subreddit.url,
            last_checked_date=timezone.now(),
            description=subreddit.description,
            subscribers=subreddit.subscribers,
        )
        subreddit_object.save()
        return subreddit_object

    def get_refresh_token(self, code):
        """Exchange an OAuth authorization *code* for a refresh token."""
        return self.instance.auth.authorize(code)

    def get_auth_link(self, username):
        """Build the reddit OAuth authorize URL (state = *username*)."""
        return self.instance.auth.url(["identity", "mysubreddits"], username, "permanent")

    @staticmethod
    def get_user_instance(user=None, username=None, refresh_token=None):
        """Return a praw client authenticated via a user's refresh token."""
        if not user and not refresh_token:
            user = DashboardUser.objects.get(username=username)
        if not refresh_token:
            refresh_token = user.reddit_user_id
        return praw.Reddit(
            client_id=settings.PRAW_CLIENT_ID,
            client_secret=settings.PRAW_SECRET,
            user_agent=settings.PRAW_USER_AGENT,
            refresh_token=refresh_token
        )

    def get_user_data(self, user=None, username=None, instance=None):
        """Refresh and persist the user's reddit profile blob; return it."""
        if not instance:
            if not user:
                user = DashboardUser.objects.get(username=username)
            instance = self.get_user_instance(user=user)
        # Fetch the profile once instead of calling instance.user.me() twice.
        me = instance.user.me()
        user.reddit_user_data = json.dumps(me.subreddit)
        user.reddit_username = me.name
        user.save()  # persist the refreshed reddit fields
        return user.reddit_user_data

    def get_user_subreddits(self, user=None, username=None):
        """Return the subreddits the linked reddit account subscribes to."""
        if not user:
            user = DashboardUser.objects.get(username=username)
        return self.get_user_instance(user=user).user.subreddits()
reddit_manager = RedditManager() # singleton object
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0005_dashboarduser_subreddits.py | # Generated by Django 3.1.2 on 2020-10-18 14:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0004_discordserver_subreddit'),
]
operations = [
migrations.AddField(
model_name='dashboarduser',
name='subreddits',
field=models.ManyToManyField(to='reddit_dashboard.Subreddit'),
),
]
|
tarikyayla/reddit_dashboard | reddit_dashboard/redis_serializers.py | <reponame>tarikyayla/reddit_dashboard
from reddit_dashboard.models import DashboardUser, TextChannel, DiscordServer, REDIS_CONNECTION, RedisConsts
import json
class RedisModelSerializer:
    """Carries Discord guild/channel descriptions through Redis into Django."""

    def __init__(self, server_id=None, server_name=None):
        # Accumulated payload; channels are appended via add_channel().
        self.payload = {
            "server_id": server_id,
            "server_name": server_name,
            "channels": []
        }

    def add_channel(self, channel_name, channel_id):
        """Queue one text channel for this server."""
        entry = {"channel_id": channel_id, "slug": channel_name}
        self.payload["channels"].append(entry)

    def push(self):
        """Publish the accumulated payload onto the Django-side Redis list."""
        REDIS_CONNECTION.lpush(RedisConsts.SERVER_PUSH, json.dumps(self.payload))

    @classmethod
    def serialize(cls, payload):
        """Consume one pushed payload: upsert the server, then store channels."""
        data = json.loads(payload)
        server = DiscordServer.objects.filter(server_id=data["server_id"]).first()
        if server is None:
            server = DiscordServer()
            server.name = data["server_name"]
            server.server_id = data["server_id"]
            server.added_by = DashboardUser.get_default_user()
            server.save()
        for channel in data["channels"]:
            text_channel = TextChannel(**channel)
            text_channel.server = server
            text_channel.save()
|
tarikyayla/reddit_dashboard | reddit_dashboard/migrations/0011_discordserver_code.py | <reponame>tarikyayla/reddit_dashboard
# Generated by Django 3.1.2 on 2020-11-14 18:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit_dashboard', '0010_auto_20201114_2015'),
]
operations = [
migrations.AddField(
model_name='discordserver',
name='code',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
|
tarikyayla/reddit_dashboard | discord_bot/bot.py | <gh_stars>0
import discord
from discord.ext import tasks
import redis
import json
from asgiref.sync import sync_to_async
from reddit_dashboard.redis_connection import REDIS_CONNECTION, RedisConsts
from reddit_dashboard.redis_serializers import RedisModelSerializer
REDIS_KEY = RedisConsts.DISCORD_PUSH
client = discord.Client()
class PushPayload:
    """Value object for one reddit post popped off the Discord Redis queue."""

    def __init__(self, pid, post_id, text_channel_id, server_id, url, text, title):
        attrs = {
            "id": pid,
            "post_id": post_id,
            "text_channel_id": text_channel_id,
            "server_id": server_id,
            "url": url,
            "text": text,
            "title": title,
        }
        for attr, value in attrs.items():
            setattr(self, attr, value)

    @classmethod
    def serialize(cls, payload):
        """Build a PushPayload from a decoded JSON dict."""
        return cls(
            payload["id"],
            payload["post_id"],
            payload["text_channel_id"],
            payload["server_id"],
            payload["url"],
            payload["text"],
            payload["title"],
        )
@tasks.loop(seconds=1)
async def redis_listener():
    """Poll the Redis queue once per second and forward queued posts to Discord."""
    await client.wait_until_ready()
    value = REDIS_CONNECTION.lpop(REDIS_KEY)
    if not value:
        return
    data = PushPayload.serialize(json.loads(value))
    url = 'https://reddit.com' + data.url
    embed = discord.Embed(
        title=data.title,
        url=url,
        description=data.text[:100])
    # data.url looks like /r/<subreddit>/... -> segment 2 is the subreddit name.
    subreddit = "r/" + data.url.split("/")[2]
    embed.set_author(url="https://reddit.com/" + subreddit, name=subreddit)
    guild = client.get_guild(int(data.server_id))
    if guild is None:
        return
    channel = guild.get_channel(int(data.text_channel_id))
    if channel is None:
        return
    try:
        # Reuse the channel we already resolved (was re-fetched for the send).
        await channel.send(embed=embed)
    except Exception:
        # Was a bare `except:` which also swallowed task cancellation.
        print(f"Submission sent error : {data.id}")
@client.event
async def on_ready():
    """Start polling Redis only once the gateway connection is live."""
    print("Bot connected")
    redis_listener.start()
@client.event
async def on_guild_join(guild):
    """
    Push the new guild's id, name and text channels to Django via Redis.
    """
    # Not saving models in async functions bc of mysql gone away error
    # django.db.utils.OperationalError: (2006, 'MySQL server has gone away')
    model = RedisModelSerializer(guild.id, guild.name)
    for channel in guild.channels:
        # Only plain text channels are mirrored (voice/category are skipped).
        if str(channel.type) == 'text':
            model.add_channel(channel.name, channel.id)
    model.push()
|
tarikyayla/reddit_dashboard | reddit_dashboard/settings.py | <gh_stars>0
"""
Django settings for reddit_dashboard project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import environ
import os
import logging
logging.Logger(name="reddit_dashboard")
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
env = environ.Env()
environ.Env.read_env()
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'api',
'reddit_dashboard',
'django_celery_results'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'reddit_dashboard.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'frontend', 'build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'reddit_dashboard.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASE_ENGINE = {
"MYSQL": {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': env("DB_NAME"),
"HOST": env("DB_HOST"),
"USER": env("DB_USER"),
"PASSWORD": env("DB_PASSWORD")
}
},
"SQLITE": {
"default": {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dashboard.db'
}
}
}
DATABASES = DATABASE_ENGINE["SQLITE"]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
'DEFAULT_PAGINATION_CLASS': 'api.serializers.StandardResultsSetPagination'
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'reddit_dashboard.DashboardUser'
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'frontend', 'build'),
os.path.join(BASE_DIR, 'frontend', 'build', 'static')
]
# PRAW CONFIGURATION
PRAW_CLIENT_ID = env("PRAW_CLIENT_ID")
PRAW_SECRET = env("PRAW_SECRET")
PRAW_USER_AGENT = env("PRAW_USER_AGENT")
PRAW_REDIRECT_URL = "http://localhost:8000/reddit_redict_url"
# USER LOGIN INFORMATION
USERNAME = env("LOGIN_USERNAME")
PASSWORD = env("LOGIN_PASSWORD")
# REDIS CONNECTIONS
REDIS_HOST = "redis"
REDIS_PORT = 6379
REDIS_DATABASE = 0
# Celery Settings
CELERY_BROKER_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DATABASE}'
CELERY_RESULT_BACKEND = 'django-db'
CELERY_CACHE_BACKEND = 'default'
# MAIL SETTINGS
SEND_MAILS = True # As celery report
EMAIL_HOST = env("HOST", default=None)
EMAIL_PORT = env("PORT", default=None)
EMAIL_HOST_USER = env("EMAIL_ADDRESS", default=None)
EMAIL_HOST_PASSWORD = env("EMAIL_PASSWORD", default=None)
# DISCORD SETTINGS
DISCORD_CLIENT_ID = env("DISCORD_CLIENT_ID")
DISCORD_BOT_TOKEN = env("DISCORD_TOKEN")
DISCORD_REDIRECT_URL = env("DISCORD_REDIRECT_ID", default="http://localhost:8000/discord_redirect")
|
tarikyayla/reddit_dashboard | api/migrations/0002_auto_20201018_1714.py | <filename>api/migrations/0002_auto_20201018_1714.py
# Generated by Django 3.1.2 on 2020-10-18 14:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='subreddit',
name='added_by',
),
migrations.DeleteModel(
name='DiscordServer',
),
migrations.DeleteModel(
name='Subreddit',
),
]
|
tarikyayla/reddit_dashboard | api/views/discord.py | <gh_stars>0
from reddit_dashboard.models import DiscordServer, Subreddit, TextChannel, DashboardUser
from api.serializers.discord_serializers import TextChannelSerializer, TextChannelCreateSerializer, \
TextChannelFollowSerializer
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from reddit_dashboard import settings
from api.serializers.discord_serializers import DiscordSerializer
from rest_framework.response import Response
from api.responses import FAIL_RESPONSE, SUCCESS_RESPONSE
class DiscordChannels(APIView):
    """GET: the caller's Discord servers plus the bot-invite OAuth URL."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    def get(self, request):
        user = request.user
        # NOTE(review): IsAuthenticated should make this branch unreachable;
        # it falls back to the default dashboard user — confirm intent.
        if not user.is_authenticated:
            user = DashboardUser.objects.get(username=settings.USERNAME)
        discord_channels = DiscordSerializer(DiscordServer.objects.filter(added_by=user), many=True).data
        # OAuth2 invite link used by the frontend to add the bot to a server.
        add_discord_url = f"https://discord.com/api/oauth2/authorize?client_id={settings.DISCORD_CLIENT_ID}" \
                          f"&permissions=0&redirect_uri={settings.DISCORD_REDIRECT_URL}&response_type=code" \
                          f"&scope=bot%20identify"
        return Response({
            "discord_channels": discord_channels,
            "add_url": add_discord_url
        })
class TextChannels(ModelViewSet):
    """CRUD for Discord text channels and their followed subreddits."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    def list(self, request, pk=None):
        """List channels belonging to one server (?discord_id=...)."""
        discord_id = request.GET.get("discord_id")
        channels = TextChannel.objects.filter(server_id=discord_id)
        return Response(TextChannelSerializer(instance=channels, many=True).data)
    def retrieve(self, request, pk=None):
        # NOTE(review): .get() raises DoesNotExist (HTTP 500) for an unknown pk.
        return Response(TextChannelSerializer(TextChannel.objects.get(pk=pk)).data)
    def create(self, request):
        """Register a text channel under an existing DiscordServer."""
        serializer = TextChannelCreateSerializer(data=request.data)
        if serializer.is_valid():
            serializer = serializer.data
            dc_server = DiscordServer.objects.get(pk=serializer["discord_id"])
            text_channel = TextChannel(
                slug=serializer["name"],
                channel_id=serializer["channel_id"],
                server=dc_server
            )
            text_channel.save()
            return SUCCESS_RESPONSE
        return FAIL_RESPONSE()
    def update(self, request, pk=None):
        """Follow a subreddit from this channel (body carries subreddit_id)."""
        channel = TextChannel.objects.get(pk=pk)
        serializer = TextChannelFollowSerializer(data=request.data)
        if serializer.is_valid():
            subreddit = Subreddit.objects.get(pk=serializer.data["subreddit_id"])
            channel.following_subreddits.add(subreddit)
            channel.save()
            return SUCCESS_RESPONSE
        return FAIL_RESPONSE()
    def destroy(self, request, pk=None):
        """Delete a text channel row (does not touch Discord itself)."""
        text_channel = TextChannel.objects.get(pk=pk)
        text_channel.delete()
        return SUCCESS_RESPONSE
|
tarikyayla/reddit_dashboard | api/exceptions/validation_exception.py | from rest_framework.exceptions import APIException
class ValidationException(APIException):
    """API error whose detail payload names the failing key and a reason."""

    status_code = 500
    validation_key = ""
    reason = ""

    def __init__(self, key, reason, status_code=500):
        # Intentionally bypasses APIException.__init__: detail is set directly
        # so DRF renders it verbatim.
        self.status_code = status_code
        payload = {"success": False, "reason": reason, "key": key}
        self.detail = payload
|
tarikyayla/reddit_dashboard | reddit_dashboard/management/commands/get_token.py | <gh_stars>0
from django.core.management import BaseCommand
from reddit_dashboard.utils.user_utils import get_default_user_token
from reddit_dashboard.tasks import get_hot_posts
from discord_bot.bot import client
from discord import TextChannel
from reddit_dashboard.settings import DISCORD_BOT_TOKEN
class Command(BaseCommand):
    help = "Get authentication token for api and create user if not exist "

    def handle(self, *args, **options):
        """Debug helper: list the bot's text channels per guild.

        Fixes: discord.Client exposes ``guilds`` (a list of Guild objects,
        each with its own ``channels``) — the original iterated a nonexistent
        ``client.servers.channels`` — and ``isinstance`` takes the object
        first, the class second (arguments were reversed).
        """
        #token = get_default_user_token()
        #print(token)
        for guild in client.guilds:
            for channel in guild.channels:
                print(isinstance(channel, TextChannel), channel.id, channel.name)
|
tarikyayla/reddit_dashboard | reddit_dashboard/__init__.py | import logging
from .celery_loader import app as celery_app
__all__ = ('celery_app',)
logger = logging.getLogger(name="reddit_dashboard")
|
tarikyayla/reddit_dashboard | api/views/user.py | from rest_framework.views import APIView
from rest_framework import generics, viewsets
from api.responses import SUCCESS_RESPONSE, FAIL_RESPONSE
from api.reddit.manager import reddit_manager, RedditManager
from api.serializers import StandardResultsSetPagination
from reddit_dashboard.models import Subreddit
from reddit_dashboard import logger
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from api.serializers.reddit_serializers import SubredditSerializer
from django.http.response import HttpResponseNotFound
from rest_framework.authentication import TokenAuthentication
class RedditAuth(APIView):
    """Report the caller's reddit OAuth status, or hand back an authorize URL."""

    permission_classes = [IsAuthenticated]

    def get(self, request):
        user = request.user
        if not user.reddit_user_id:
            # Account not linked yet: the frontend should redirect here.
            return Response({
                "active": False,
                "redirect_link": reddit_manager.get_auth_link(user.username)
            })
        return Response({
            "active": True,
            "user_id": user.reddit_user_id,
            "user_data": user.reddit_user_data,
            "username": user.reddit_username
        })
class RefreshSubreddits(APIView):
    """Re-sync the authenticated user's subreddit subscriptions from reddit."""

    permission_classes = [IsAuthenticated]

    def get(self, request):
        try:
            subreddits = reddit_manager.get_user_subreddits(user=request.user)
            request.user.add_subreddits_to_user(subreddit_list=subreddits)
        except Exception:
            # Fix: logger.error(str(ex)) discarded the traceback;
            # logger.exception records it, and bare `raise` re-raises cleanly.
            logger.exception("Failed to refresh subreddits")
            raise
        return SUCCESS_RESPONSE
class Subreddits(viewsets.ModelViewSet):
    """CRUD over the subreddits followed by the authenticated user."""

    permission_classes = [IsAuthenticated]
    pagination_class = StandardResultsSetPagination
    serializer_class = SubredditSerializer

    def list(self, requests):
        """Paginated list; ?name=... returns an unpaginated filtered list."""
        name = requests.GET.get("name")
        if name:
            # Fix: removed debug print leaking query params to stdout, and the
            # unconditional queryset fetch that the filtered branch discarded.
            query = requests.user.subreddits.filter(name__contains=name)
            return Response(SubredditSerializer(query, many=True).data)
        queryset = requests.user.subreddits.all()
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        return Response(self.get_serializer(queryset, many=True).data)

    def retrieve(self, request, pk=None):
        """Fetch one subreddit by pk; 404 when missing."""
        if not pk:
            return HttpResponseNotFound()
        obj = Subreddit.objects.filter(pk=pk).first()
        if not obj:
            return HttpResponseNotFound()
        return Response(self.get_serializer(obj).data)

    def create(self, request):
        """Follow an existing subreddit (body carries subreddit_id)."""
        subreddit = Subreddit.objects.filter(pk=request.data["subreddit_id"]).first()
        if not subreddit:
            return FAIL_RESPONSE("Subreddit not exist")
        request.user.add_subreddit_obj_to_user(subreddit)
        return SUCCESS_RESPONSE

    def destroy(self, request, pk=None):
        """Unfollow a subreddit; the Subreddit row itself is kept."""
        subreddit = Subreddit.objects.filter(pk=pk).first()
        if not subreddit:
            return FAIL_RESPONSE("Subreddit not exist")
        request.user.unfollow_subreddit(subreddit)
        return SUCCESS_RESPONSE
class SearchSubreddit(generics.ListAPIView):
    """Search reddit for subreddits and cache the hits locally.

    Queries the reddit API with ?text=..., persists each result via
    Subreddit.create, then serves the cached rows whose name starts with
    the search text.
    """
    permission_classes = [IsAuthenticated]
    pagination_class = StandardResultsSetPagination
    serializer_class = SubredditSerializer
    def get_queryset(self):
        search_text = self.request.query_params.get("text")
        if not search_text:
            # No search text: nothing to list (plain list, not a queryset).
            return []
        manager = RedditManager()
        results = manager.search_by_subreddit(search_text)
        for result in results:
            # Cache each remote hit, attributing the row to the requester.
            Subreddit.create(result, self.request.user)
        return Subreddit.objects.filter(name__startswith=search_text)
|
tarikyayla/reddit_dashboard | reddit_dashboard/management/commands/run_bot.py | from django.core.management import BaseCommand
from discord_bot.bot import client
from reddit_dashboard.settings import DISCORD_BOT_TOKEN
class Command(BaseCommand):
    # Management command: `manage.py run_bot` starts the Discord bot client.
    help = "To run discord bot"
    def handle(self, *args, **options):
        # Blocks until the bot disconnects.
        client.run(DISCORD_BOT_TOKEN)
|
tarikyayla/reddit_dashboard | reddit_dashboard/models.py | <filename>reddit_dashboard/models.py
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.db import models
from django.utils.timezone import now
from django.conf import settings
from reddit_dashboard.redis_connection import REDIS_CONNECTION, RedisConsts
import json
class DashboardUser(AbstractBaseUser):
    """Dashboard account that also carries the linked reddit identity.

    The ``subreddits`` many-to-many holds the subreddits this user follows.
    """
    email = models.EmailField(unique=True)
    username = models.CharField(unique=True, max_length=255)
    is_active = models.BooleanField(default=True)
    # NOTE(review): staff/superuser default to True — every account is an
    # admin; confirm this is intended for this single-user app.
    is_staff = models.BooleanField(default=True)
    is_superuser = models.BooleanField(default=True)
    reddit_user_data = models.TextField(blank=True, null=True)
    # Stores the reddit refresh token (see views.reddit_callback).
    reddit_user_id = models.CharField(blank=True, null=True, max_length=255)
    reddit_username = models.CharField(blank=True, null=True, max_length=255)
    subreddits = models.ManyToManyField('Subreddit', blank=True)
    USERNAME_FIELD = 'username'
    objects = UserManager()
    def __str__(self):
        return self.username
    def add_subreddits_to_user(self, subreddit_list):
        """Follow every praw subreddit object in the list."""
        for subreddit in subreddit_list:
            self.add_subreddit_to_user(subreddit)
    def add_subreddit_to_user(self, subreddit):
        """Get-or-create the Subreddit row for a praw object and follow it."""
        sub = Subreddit.objects.filter(name=subreddit.display_name).first()
        if not sub:
            sub = Subreddit(
                name=subreddit.display_name,
                url=subreddit.url,
                description=subreddit.description_html,
                added_by=self,
                subscribers=subreddit.subscribers,
                type=subreddit.subreddit_type,
                last_checked_date=now(),
                banner_img=subreddit.banner_img,
                icon_img=subreddit.icon_img,
                over18=subreddit.over18
            )
            sub.save()
        exist = self.subreddits.filter(name=sub.name).first()
        if not exist:
            self.subreddits.add(sub)
            self.save()
    def add_subreddit_obj_to_user(self, subreddit):
        """Follow an already-persisted Subreddit row (no-op if followed)."""
        exist = self.subreddits.filter(pk=subreddit.id).first()
        if not exist:
            self.subreddits.add(subreddit)
    def subreddit_exist(self, subreddit=None, subreddit_id=None):
        """Return the followed row for this subreddit, or None.

        Raises when neither a subreddit nor a resolvable id is supplied.
        """
        if not subreddit:
            subreddit = Subreddit.objects.filter(pk=subreddit_id).first()
        if not subreddit:
            raise Exception("Subreddit not exist!")
        return self.subreddits.filter(pk=subreddit.id).first()
    def unfollow_subreddit(self, subreddit):
        """Remove the subreddit from this user's follow list if present."""
        exist = self.subreddit_exist(subreddit)
        if exist:
            self.subreddits.remove(exist)
    def has_perm(self, perm, obj=None):
        # All object permissions are tied to admin status.
        return self.is_admin
    def has_module_perms(self, app_label):
        return self.is_admin
    @property
    def is_admin(self):
        return self.is_superuser
    @classmethod
    def get_default_user(cls):
        """Return (creating if needed) the single configured service user."""
        user = cls.objects.filter(username=settings.USERNAME).first()
        if not user:
            user = cls.objects.create_user(
                username=settings.USERNAME,
                # NOTE(review): "<PASSWORD>" is a redaction artifact in the
                # source and is not valid Python — restore the intended
                # settings-based password before running.
                password=<PASSWORD>
            )
        return user
class Subreddit(models.Model):
    """A subreddit known to the dashboard, cached from praw data."""
    name = models.CharField(max_length=100)
    url = models.CharField(max_length=100)
    # reddit's subreddit_type (e.g. public/private).
    type = models.CharField(max_length=100)
    subscribers = models.BigIntegerField(default=0)
    description = models.TextField(null=True, blank=True)
    added_date = models.DateField(auto_now=True)
    added_by = models.ForeignKey(DashboardUser, on_delete=models.DO_NOTHING)
    last_checked_date = models.DateTimeField()
    banner_img = models.CharField(max_length=500, blank=True, null=True)
    icon_img = models.CharField(max_length=500, blank=True, null=True)
    over18 = models.BooleanField(default=False)
    def __str__(self):
        return self.name
    @classmethod
    def create(cls, subreddit, user=None):
        """Get-or-create a row from a praw subreddit object.

        Falls back to the default settings.USERNAME user when no user is
        given. Returns the existing or newly created row.
        """
        if not user:
            user = DashboardUser.objects.filter(username=settings.USERNAME).first()
        exist = cls.objects.filter(name=subreddit.display_name).first()
        if not exist:
            exist = cls(
                name=subreddit.display_name,
                url=subreddit.url,
                description=subreddit.description_html,
                added_by=user,
                subscribers=subreddit.subscribers,
                type=subreddit.subreddit_type,
                last_checked_date=now(),
                banner_img=subreddit.banner_img,
                icon_img=subreddit.icon_img,
                over18=subreddit.over18
            )
            exist.save()
        return exist
class DiscordServer(models.Model):
    """A Discord server (guild) registered with the dashboard."""
    name = models.CharField(max_length=255, blank=False, null=False)
    server_id = models.CharField(max_length=250, blank=False, null=False, unique=True)
    code = models.CharField(max_length=255)
    added_by = models.ForeignKey(DashboardUser, on_delete=models.DO_NOTHING)
    create_date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.name
class TextChannel(models.Model):
    """A Discord text channel that relays posts from the subreddits it follows."""
    id = models.AutoField(primary_key=True)
    slug = models.CharField(max_length=255)
    channel_id = models.CharField(unique=True, max_length=250)
    server = models.ForeignKey(DiscordServer, on_delete=models.CASCADE)
    following_subreddits = models.ManyToManyField(Subreddit)
    def __str__(self):
        # Fixed: the original assigned self.slug = "" as a side effect of
        # __str__; a repr method must not mutate (and potentially persist)
        # instance state. The returned value is unchanged.
        return self.server.name + "." + (self.slug or "")
class Posts(models.Model):
    """A reddit submission mirrored into the dashboard.

    save() fans the post out: one SentPosts row per text channel following
    the post's subreddit.
    """
    id = models.AutoField(primary_key=True)
    # reddit's submission id; uniqueness makes duplicate imports raise.
    submission_id = models.CharField(max_length=250, unique=True)
    title = models.CharField(max_length=255, null=False, blank=False)
    url = models.CharField(max_length=255, null=False, blank=False)
    over_18 = models.BooleanField(default=False)
    text = models.TextField()
    subreddit = models.ForeignKey(Subreddit, on_delete=models.CASCADE)
    create_date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = "Post"
        verbose_name_plural = "Posts"
    @classmethod
    def create(cls, submission, subreddit):
        """Persist a praw submission under the given Subreddit row."""
        cls(
            submission_id=submission.id,
            title=submission.title,
            url=submission.permalink,
            over_18=submission.over_18,
            text=submission.selftext,
            subreddit=subreddit
        ).save()
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs) # save
        # Fan out to every channel following this subreddit; SentPosts.save
        # also pushes the payload onto the Redis delivery queue.
        for text_channel in TextChannel.objects.filter(following_subreddits=self.subreddit):
            SentPosts(post=self, text_channel=text_channel).save()
class SentPosts(models.Model):
    """Join row recording that a post was queued for a text channel.

    Saving a row also pushes a JSON payload onto the Redis DISCORD_PUSH
    list for the bot worker to deliver.
    """
    id = models.AutoField(primary_key=True)
    post = models.ForeignKey(Posts, on_delete=models.CASCADE, related_name='sent_posts_posts')
    text_channel = models.ForeignKey(TextChannel, on_delete=models.CASCADE, related_name="sent_posts_text_channel")
    # NOTE(review): non-PEP8 field name; kept for schema compatibility.
    SentDate = models.DateTimeField(auto_now_add=True)
    class Meta:
        unique_together = (('post', 'text_channel'),)
    def serialize(self):
        """JSON payload consumed by the Discord bot."""
        return json.dumps(
            {
                "id": self.id,
                "post_id": self.post.id,
                "text_channel_id": self.text_channel.channel_id,
                "server_id": self.text_channel.server.server_id,
                "url": self.post.url,
                "text": self.post.text,
                "title": self.post.title,
            })
    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields)
        # Enqueue for delivery by the bot worker.
        REDIS_CONNECTION.lpush(RedisConsts.DISCORD_PUSH, self.serialize())
|
tarikyayla/reddit_dashboard | api/views/public.py | from django.shortcuts import render
from django.http.response import JsonResponse
from django.contrib.auth.decorators import login_required
from reddit_dashboard.utils.user_utils import get_default_user_token
# Create your views here.
def api_check(request):
    # Unauthenticated liveness probe for the API.
    return JsonResponse({"status": True})
def get_api_token(request):
    # Return the default (service) user's API token.
    # NOTE(review): no @login_required despite the import above — confirm
    # this token endpoint is meant to be public.
    return JsonResponse({"token": get_default_user_token()})
|
tarikyayla/reddit_dashboard | api/permissions.py | <reponame>tarikyayla/reddit_dashboard
from rest_framework import permissions
class OnlyAnon(permissions.BasePermission):
    """Permission that admits only anonymous (not-logged-in) requests."""
    message = "Already logged in!"
    def has_permission(self, request, view):
        # Grant access exactly when no authenticated user is attached.
        is_logged_in = request.user.is_authenticated
        return not is_logged_in
tarikyayla/reddit_dashboard | reddit_dashboard/views.py | <reponame>tarikyayla/reddit_dashboard<gh_stars>0
from django.shortcuts import render, redirect
from django.views.decorators.csrf import ensure_csrf_cookie
from api.reddit.manager import reddit_manager
from django.http.response import HttpResponseNotFound, HttpResponse, HttpResponseRedirect
from reddit_dashboard.models import DashboardUser, DiscordServer
from django.conf import settings
@ensure_csrf_cookie
def dashboard(requests):
    # Serve the SPA shell; ensure_csrf_cookie guarantees the CSRF cookie is set.
    return render(requests, "index.html")
def reddit_callback(request):
    """OAuth callback from reddit: store the refresh token on the user.

    NOTE(review): missing 'state'/'code' query params raise KeyError
    (HTTP 500) — presumably acceptable for this single-user app; confirm.
    """
    state = request.GET["state"]
    code = request.GET["code"]
    if settings.USERNAME == state:
        user = DashboardUser.objects.get(username=state)
        # The exchanged refresh token is stored in reddit_user_id.
        user.reddit_user_id = reddit_manager.get_refresh_token(code)
        user.save()
        reddit_manager.get_user_data(user=user)
        return HttpResponseRedirect("/")
    else:
        return HttpResponseNotFound()
def discord_callback(request):
    # Discord OAuth callback is currently a no-op redirect home.
    return HttpResponseRedirect("/")
|
tarikyayla/reddit_dashboard | api/responses.py | <reponame>tarikyayla/reddit_dashboard
from rest_framework import status
from rest_framework.response import Response
from django.conf import settings
SUCCESS_RESPONSE = Response({
"success": True
}, status=status.HTTP_200_OK)
def FAIL_RESPONSE(detail=None):
    """Build a 400 failure response; the detail string is exposed only in DEBUG."""
    body = {"success": False}
    if settings.DEBUG and detail:
        body["detail"] = str(detail)
    return Response(body, status=status.HTTP_400_BAD_REQUEST)
|
tarikyayla/reddit_dashboard | reddit_dashboard/tasks.py | <reponame>tarikyayla/reddit_dashboard
from celery.task import periodic_task, task
from celery.schedules import crontab
from datetime import timedelta
from reddit_dashboard.models import TextChannel, Posts
from api.reddit.manager import reddit_manager
from reddit_dashboard.redis_connection import RedisConsts, REDIS_CONNECTION
from reddit_dashboard.redis_serializers import RedisModelSerializer
from celery.signals import celeryd_init
from discord_bot.bot import client
from reddit_dashboard.settings import DISCORD_BOT_TOKEN
from reddit_dashboard.celery_loader import app
import asyncio
# celery -A reddit_dashboard beat -l info
# celery -A reddit_dashboard worker --pool=solo -l info // for windows
#@periodic_task(run_every=crontab(minute=0, hour='10, 22'))
@periodic_task(run_every=timedelta(seconds=30))
def get_hot_posts():
    """Periodic task: fetch hot submissions for every followed subreddit.

    Posts.create persists each submission; duplicates raise (unique
    submission_id) and are printed and skipped.
    """
    print("GETTING HOT POSTS!")
    # Collect the distinct subreddits followed by at least one channel.
    # Deduplicate via a set of primary keys: O(1) per row instead of the
    # original O(n) list-membership scan.
    following_subreddits = []
    seen_ids = set()
    for rel_data in TextChannel.following_subreddits.through.objects.all():
        if rel_data.subreddit_id not in seen_ids:
            seen_ids.add(rel_data.subreddit_id)
            following_subreddits.append(rel_data.subreddit)
    for following_subreddit in following_subreddits:
        subreddit = reddit_manager.get_subreddit(display_name=following_subreddit.name)
        for submission in subreddit.hot(limit=15):
            try:
                Posts.create(submission, subreddit=following_subreddit)
            except Exception as ex:
                # Expected for already-imported submissions (unique constraint).
                print(ex)
@periodic_task(name="discord_server_pushes", run_every=timedelta(seconds=5))
def discord_server_pushes():
    # Drain one payload per run (every 5 s) from the Redis SERVER_PUSH
    # queue and hand it to the model serializer.
    payload = REDIS_CONNECTION.lpop(RedisConsts.SERVER_PUSH)
    if payload:
        RedisModelSerializer.serialize(payload)
|
tarikyayla/reddit_dashboard | reddit_dashboard/admin.py | from django.contrib import admin
from reddit_dashboard.models import DashboardUser, Subreddit, SentPosts, Posts, DiscordServer, TextChannel
admin.site.register(DashboardUser)
admin.site.register(Subreddit)
admin.site.register(SentPosts)
admin.site.register(Posts)
admin.site.register(DiscordServer)
admin.site.register(TextChannel)
|
tarikyayla/reddit_dashboard | api/serializers/user_serializers.py | from rest_framework import serializers
from api.exceptions.validation_exception import ValidationException
from django.contrib.auth import login, authenticate, checks
from reddit_dashboard.models import DashboardUser
|
Alexis-ba6/Z-WaterHeater | RPi.py | <reponame>Alexis-ba6/Z-WaterHeater<gh_stars>1-10
class gpio:
    """Stand-in for RPi.GPIO on non-Raspberry-Pi hosts.

    Any attribute access yields a no-op callable that accepts any
    arguments and returns None, so GPIO calls become harmless when the
    real library is unavailable.
    """
    def __getattr__(self, name):
        def noop(*args, **kwargs):
            # Generalized to also swallow keyword arguments; real RPi.GPIO
            # calls use them (e.g. GPIO.setup(pin, GPIO.OUT, initial=...)).
            return None
        return noop
GPIO = gpio()
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/control/controlSet.py | <gh_stars>1-10
from flask import request
from Z_WH.api.middlewares import response, authentification
from Z_WH.services import outputManager
@response.json
@authentification.checkUserKey
def setControlState(**kwargs):
    """Switch the output controller mode: AUTO, OFF, or ON.

    ON takes an optional ?groupId= query parameter naming the group to
    energize. Unrecognized modes fall through and still answer Ok —
    presumably 'mode' is constrained by the route; TODO confirm.
    """
    mode = kwargs['mode']
    groupId = request.args.get('groupId')
    if mode == 'AUTO':
        outputManager.switchAUTO()
    elif mode == 'OFF':
        outputManager.switchOFF()
    elif mode == 'ON':
        outputManager.switchON(groupId)
    return {
        'message': 'Ok'
    }, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/outGroup/groupAdd.py |
from Z_WH.api.middlewares import authentification, response, schema
from Z_WH.services import groupManager
from Z_WH.services.output import GroupManagerError
from Z_WH.config.output import AVAILABLE_OUTPUTS
AVAILABLE_OUTPUTS_ID = [availableOutput[1] for availableOutput in AVAILABLE_OUTPUTS]
@response.json
@authentification.checkUserKey
@schema.schemaValidator({
'type': 'object',
'properties': {
'name': {
'type': 'string'
},
'outId': {
'type': 'array',
'maxItems': len(AVAILABLE_OUTPUTS_ID)**2,
'items': {
'enum': AVAILABLE_OUTPUTS_ID,
}
}
},
'required': ['name', 'outId']
})
def addGroupCtrl(**kwargs):
json = kwargs['json']
try:
group = groupManager.addGroup(json['outId'], json['name'])
except GroupManagerError as error:
return {
'error': error.message
}, 400
return {
'message': 'Group added !',
'group': {
'id': group.id,
'name': group.name,
'outId': [output.id for output in group.outputs]
}
}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/token/code.py | from Z_WH.services import verificationCodeManager
from Z_WH.api.middlewares import response, schema, authentification
@response.json
def generateCodeCtrl():
    """Generate a verification code and report its validity window."""
    # Fixed: the code was generated with a 60 s lifetime but the response
    # claimed validTime 6; one constant now drives both.
    valid_time = 60
    verificationCodeManager.generateCode(valid_time)
    return {
        'message': 'Code generated !',
        'validTime': valid_time
    }, 200
@response.json
@schema.schemaValidator({
    'type': 'object',
    'properties': {
        'code': {
            'type': 'string'
        }
    },
    'required': ['code']
})
def checkCodeCtrl(**kwargs):
    """Verify a submitted code and, on success, issue a 20-minute token."""
    json = kwargs.get('json')
    # verifyCode raises on an invalid/expired code (handled upstream).
    verificationCodeManager.verifyCode(json['code'])
    expirationTime = 60*20
    token = authentification.createToken(expirationTime)
    return {
        'message': 'Verification ok',
        'token': token,
        'validTime': expirationTime
    }, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/timeSlot/timeSlotUpdate.py | <gh_stars>1-10
from typing import List
from Z_WH.api.middlewares import authentification, response, schema
from Z_WH.tools.time import ISO_TIME_PATTERN
from Z_WH.services import autoTimeSlotManager, groupManager
from Z_WH.services.autoTimeSlot import TimeSlot
@response.json
@authentification.checkUserKey
@schema.schemaValidator({
    'type': 'array',
    'minItems': 1,
    'items': [
        {
            'type': 'object',
            'properties': {
                'id': {
                    'type': 'string'
                },
                'groupId': {
                    'type': 'string'
                },
                'start': {
                    'type': 'string',
                    'pattern': ISO_TIME_PATTERN
                },
                'end': {
                    'type': 'string',
                    'pattern': ISO_TIME_PATTERN
                }
            },
            'required': ['id', 'start', 'end', 'groupId']
        }
    ]
})
def updateTimeSlotCtrl(**kwargs):
    """Update existing auto time slots from the validated JSON payload."""
    json = kwargs['json']
    timeSlots: List[TimeSlot] = []
    for dataSlot in json:
        timeSlot = TimeSlot()
        timeSlot.id = dataSlot.get('id')
        timeSlot.groupId = dataSlot.get('groupId')
        # Raises when the group does not exist (validation side effect).
        groupManager.getGroup(timeSlot.groupId)
        try:
            # Both time fields parse inside the try so a malformed start is
            # reported the same way as a malformed end (the original only
            # guarded endISO). Also removed a duplicate groupId assignment.
            timeSlot.startISO = dataSlot.get('start')
            timeSlot.endISO = dataSlot.get('end')
        except ValueError:
            return {
                'error': 'Invalid time format !'
            }, 400
        timeSlots.append(timeSlot)
    autoTimeSlotManager.addUpdateTimeSlot(timeSlots)
    return {
        'message': 'Slot updated !'
    }, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/settings/settingsTempSaver.py | <reponame>Alexis-ba6/Z-WaterHeater<gh_stars>1-10
from Z_WH.api.middlewares import response, schema, authentification
from Z_WH.services import tempSaverManager
@response.json
@authentification.checkUserKey
def getTempSaverSettings(**kwargs):
return tempSaverManager.getSettings(), 200
@response.json
@authentification.checkUserKey
@schema.schemaValidator(tempSaverManager.getSettingsSchema())
def updateTempSaverSettings(**kwargs):
json = kwargs.get('json')
tempSaverManager.updateSettings(**json)
return {
'message': 'Temp saver setting updated !'
}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/settings/settingsMail.py |
from Z_WH.api.middlewares import response, schema, authentification
from Z_WH.services import mailManager
@response.json
@authentification.checkUserKey
def getMailSettings(**kwargs):
return mailManager.getSettings(), 200
@response.json
@authentification.checkUserKey
@schema.schemaValidator(mailManager.getSettingsSchema())
def updateMailSettings(**kwargs):
mailManager.updateSettings(**kwargs.get('json'))
return {
'message': 'Mail settings updated !'
}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/auth/key.py | from Z_WH.api.middlewares import response, authentification
from Z_WH.services import userManager
@response.json
@authentification.checkUserKey
def regenerateKeyCtrl(**kwargs):
logKey = userManager.regenerateKey(kwargs['keyId'])
return {
'keyId': logKey.id,
'newKey': logKey.key
}, 200
@response.json
@authentification.checkUserKey
def checkKeyCtrl(**kwargs):
return {
'message': 'VALID'
}, 200
@response.json
@authentification.checkUserKey
def getUserLogKeyInfoCtrl(**kwargs):
return {
'logKeys': userManager.getLogKeysInfo()
}, 200
@response.json
@authentification.checkUserKey
def deleteKeyCtrl(**kwargs):
userManager.deleteKey(kwargs['id'])
return {
'message': 'Key deleted successfully !'
}, 200
@response.json
@authentification.checkUserKey
def deleteAllKeyCtrl(**kwargs):
userManager.deleteAllKey()
return {
'message': 'Keys deleted successfully !'
}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/settings/settingsOutput.py | <filename>Z_WH/api/controllers/settings/settingsOutput.py
from Z_WH.api.middlewares import response, schema, authentification
from Z_WH.services import outputManager
@response.json
@authentification.checkUserKey
def getTempSaverSettings(**kwargs):
return outputManager.getSettings(), 200
@response.json
@authentification.checkUserKey
@schema.schemaValidator(outputManager.getSettingsSchema())
def updateTempSaverSettings(**kwargs):
outputManager.updateSettings(**kwargs.get('json'))
return {
'message': 'Output setting updated !'
}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/control/controlGet.py |
from Z_WH.api.middlewares import response, authentification
from Z_WH.services import outputManager
@response.json
@authentification.checkUserKey
def getControlOutputInfo(**kwargs):
return outputManager.getInfoControl(), 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/timeSlot/timeSlotGet.py | from Z_WH.api.middlewares import authentification, response
from Z_WH.services import autoTimeSlotManager
@response.json
@authentification.checkUserKey
def getTimeSlotCtrl(**kwargs):
    """Return all configured auto time slots."""
    return {
        'timeSlots': [{
            'id': timeSlot.id,
            # Fixed: key was misspelled 'groupIp' and carried the slot id
            # instead of the group id ('groupId' matches the add/update
            # endpoints' payloads).
            'groupId': timeSlot.groupId,
            'start': timeSlot.startISO,
            'end': timeSlot.endISO
        } for timeSlot in autoTimeSlotManager.getTimeSlot()]
    }, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/__init__.py | from .router.auth import authRouter
from .router.token import tokenRouter
from .router.user import userRouter
from .router.timeSlots import timeSlotRouter
from .router.outGroup import outGroupRouter
from .router.temperature import temperatureRouter
from .router.tempSensor import tempSensorRouter
from .router.settings import settingsRouter
from .router.control import controlRouter
from .app import app
from .responces import error
app.register_blueprint(authRouter)
app.register_blueprint(tokenRouter)
app.register_blueprint(userRouter)
app.register_blueprint(timeSlotRouter)
app.register_blueprint(outGroupRouter)
app.register_blueprint(temperatureRouter)
app.register_blueprint(tempSensorRouter)
app.register_blueprint(settingsRouter)
app.register_blueprint(controlRouter)
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/temperature/tempGetDay.py | import datetime
from flask import request
from Z_WH.api.middlewares import response, authentification
from Z_WH.services import tempSaverManager
@response.json
@authentification.checkUserKey
def getTempDayCtrl(**kwargs):
    """Return the recorded temperatures for one day.

    Optional ?date=YYYY-MM-DD selects the day (default: today); 400 on a
    malformed date, 404 when no samples exist for that day.
    """
    date = datetime.date.today()
    dateArg = request.args.get('date')
    if dateArg:
        try:
            date = datetime.date.fromisoformat(dateArg)
        except ValueError as error:
            return {
                'error': error.args
            }, 400
    tempData = tempSaverManager.read(date)
    if len(tempData) == 0:
        return {
            'error': 'No data found !'
        }, 404
    return {
        'date': date.isoformat(),
        'data': tempData
    }, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/tempSensors/tempSensorUpdate.py | <reponame>Alexis-ba6/Z-WaterHeater<gh_stars>1-10
from Z_WH.api.middlewares import authentification, response, schema
from Z_WH.services import tempSensorManager
from Z_WH.services.tempSensor import TempSensorManagerError
@response.json
@authentification.checkUserKey
@schema.schemaValidator({
'type': 'object',
'properties': {
'name': {
'type': 'string',
'minLength': 2,
'maxLength': 6
},
'color': {
'type': 'string',
'pattern': '^#([a-fA-F0-9]{6}|[a-fA-F0-9]{3})$'
},
'displayOnScreen': {
'type': 'boolean'
}
}
})
def updateTempSensorCtrl(**kwargs):
json = kwargs['json']
try:
tempSensorManager.sensorUpdate(
kwargs['sensorId'],
name=json.get('name'),
color=json.get('color'),
displayOnScreen=json.get('displayOnScreen')
)
except TempSensorManagerError as e:
return {
'error': e.error
}, 400
return {
'message': 'Sensor updated !'
}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/settings/settingsTempLimit.py |
from Z_WH.api.middlewares import response, authentification, schema
from Z_WH.services import tempLimitManager
from Z_WH.services.tempSensor import TempSensorManagerError
@response.json
@authentification.checkUserKey
def getTempLimitSettings(**kwargs):
return tempLimitManager.getSettings(), 200
@response.json
@authentification.checkUserKey
@schema.schemaValidator(tempLimitManager.getSettingsSchema())
def updateTempLimitSettings(**kwargs):
    """Update the temperature-limit settings from the validated JSON body."""
    try:
        tempLimitManager.updateSettings(**kwargs.get('json'))
    except TempSensorManagerError as error:
        # Fixed: error payloads elsewhere in this API use the 'error' key
        # (e.g. tempSensorUpdate); this handler used 'message'.
        return {
            'error': error.error
        }, 400
    return {
        'message': 'Temp limit settings updated !'
    }, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/auth/logout.py | from Z_WH.api.middlewares import response, authentification
from Z_WH.services import userManager
@response.json
@authentification.checkUserKey
def logoutCtrl(**kwargs):
    """Invalidate the key used for this request, logging the caller out."""
    key_id = kwargs['keyId']
    userManager.deleteKey(key_id)
    return {'message': 'Logout successfully !'}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/timeSlot/timeSlotAdd.py | <gh_stars>1-10
from typing import List
from Z_WH.api.middlewares import authentification, response, schema
from Z_WH.tools.time import ISO_TIME_PATTERN
from Z_WH.services import autoTimeSlotManager
from Z_WH.services.autoTimeSlot import TimeSlot
@response.json
@authentification.checkUserKey
@schema.schemaValidator({
'type': 'array',
'minItems': 1,
'items': [
{
'type': 'object',
'properties': {
'groupId': {
'type': 'string'
},
'start': {
'type': 'string',
'pattern': ISO_TIME_PATTERN
},
'end': {
'type': 'string',
'pattern': ISO_TIME_PATTERN
}
},
'required': ['start', 'end', 'groupId']
}
]
})
def addTimeSlotCtrl(**kwargs):
json = kwargs['json']
timeSlots: List[TimeSlot] = []
for dataSlot in json:
timeSlot = TimeSlot()
timeSlot.startISO = dataSlot.get('start')
timeSlot.endISO = dataSlot.get('end')
timeSlot.groupId = dataSlot.get('groupId')
timeSlots.append(timeSlot)
autoTimeSlotManager.addUpdateTimeSlot(timeSlots)
return {
'message': 'Time slot added !',
'timeSlotsAdded': [
{
'id': timeSlot.id,
'start': timeSlot.startISO,
'end': timeSlot.endISO,
'groupId': timeSlot.groupId
} for timeSlot in timeSlots
]
}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/tempSensors/tempSensorGet.py |
from Z_WH.api.middlewares import authentification, response
from Z_WH.services import tempSensorManager
@response.json
@authentification.checkUserKey
def getSensorsCtrl(**kwargs):
    # Return metadata for all configured temperature sensors.
    return {
        'sensors': tempSensorManager.getSensorsInfo()
    }, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/__init__.py | from os import getenv
from dotenv import load_dotenv
from .api import app
from .config import DEBUG
from .services import initAllServices, notificationManager
from .services.notification import Notification
from .tools.schedule import run_continuously
load_dotenv()
def setup():
    """Start the background scheduler, init services, then run the API.

    Blocking: app.run() does not return until the server stops.
    """
    run_continuously()
    initAllServices()
    app.run(port=getenv('API_PORT'), host=getenv('API_HOST'), debug=DEBUG)
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/app.py | <gh_stars>1-10
from flask import Flask
from flask_cors import CORS
app = Flask(__name__)
app.config['debug'] = True
app.config['CORS_HEADERS'] = True
CORS(app)
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/auth/login.py | from flask import request
from Z_WH.api.middlewares import response, schema
from Z_WH.services import userManager
@response.json
@schema.schemaValidator({
'type': 'object',
'properties': {
'email': {
'type': 'string'
},
'password': {
'type': 'string'
}
},
'required': ['email', 'password']
})
def loginCtrl(**kwargs):
json = kwargs.get('json')
logKey = userManager.login(json['email'], json['password'], request.user_agent.string, request.remote_addr)
return {
'message': 'Connected !',
'keyId': logKey.id,
'key': logKey.key
}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/outGroup/groupDelete.py | <filename>Z_WH/api/controllers/outGroup/groupDelete.py<gh_stars>1-10
from Z_WH.api.middlewares import authentification, response
from Z_WH.services import groupManager
from Z_WH.services.output import GroupManagerError
@response.json
@authentification.checkUserKey
def deleteGroupCtrl(**kwargs):
    """Delete an output group; 400 with the manager's message on failure."""
    group_id = kwargs['groupId']
    try:
        groupManager.deleteGroup(group_id)
    except GroupManagerError as error:
        return {'error': error.message}, 400
    return {'message': 'Group deleted !'}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/timeSlot/timeSlotDelete.py | from Z_WH.api.middlewares import authentification, response
from Z_WH.services import autoTimeSlotManager
@response.json
@authentification.checkUserKey
def deleteTimeSlotCtrl(**kwargs):
    """Remove one auto time slot by its id."""
    slot_id = kwargs['slotId']
    autoTimeSlotManager.deleteTimeSlot(slot_id)
    return {'message': 'Time slot deleted !'}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/outGroup/groupGet.py | <filename>Z_WH/api/controllers/outGroup/groupGet.py
from Z_WH.services import groupManager
from Z_WH.api.middlewares import response, authentification
@response.json
@authentification.checkUserKey
def getGroupCtrl(**kwargs):
    """List every output group with its member output ids."""
    groups = []
    for group in groupManager.getGroups():
        groups.append({
            'id': group.id,
            'outId': [out.id for out in group.outputs],
            'name': group.name,
        })
    return {'outGroups': groups}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/outGroup/groupUpdate.py | <reponame>Alexis-ba6/Z-WaterHeater
from Z_WH.api.middlewares import authentification, response, schema
from Z_WH.services import groupManager
from Z_WH.services.output import GroupManagerError
from Z_WH.config.output import AVAILABLE_OUTPUTS
AVAILABLE_OUTPUTS_ID = [availableOutput[1] for availableOutput in AVAILABLE_OUTPUTS]
@response.json
@authentification.checkUserKey
@schema.schemaValidator({
'type': 'object',
'properties': {
'name': {
'type': 'string'
},
'outId': {
'type': 'array',
'maxItems': len(AVAILABLE_OUTPUTS_ID)**2,
'items': {
'enum': AVAILABLE_OUTPUTS_ID,
}
}
}
})
def updateGroupCtrl(**kwargs):
json = kwargs['json']
try:
groupManager.updateGroup(kwargs['groupId'], json.get('outId'), json.get('name'))
except GroupManagerError as error:
return {
'error': error.message
}, 400
return {
'message': 'Group updated !'
}, 200
|
Alexis-ba6/Z-WaterHeater | Z_WH/api/controllers/users/userGet.py | <filename>Z_WH/api/controllers/users/userGet.py
from Z_WH.api.middlewares import authentification, response
from Z_WH.services import userManager
@response.json
@authentification.checkUserKey
def getUser(**kwargs):
    # NOTE(review): reads userManager.email — a manager-level attribute,
    # not the authenticated request's user; presumably fine for this
    # single-user design, but confirm.
    return {
        'email': userManager.email
    }, 200
|
ScottSoren/EC_MS | src/EC_MS/utils/kinetic_models.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 23 16:03:33 2020
@author: scott
"""
import numpy as np
from scipy.optimize import minimize
from scipy.integrate import odeint
from scipy.interpolate import interp1d
def carbonic_ode(S, t, pars):
    """Right-hand side of the carbonic-acid isotope-exchange ODE.

    Equations from Scott's Thesis page 53.

    Parameters
    ----------
    S : sequence of 3 floats
        Signals at M44, M46 and M48.
    t : float
        Time (unused; required by odeint's callback signature).
    pars : sequence
        pars[0] = k, rate constant / s^-1;
        pars[1] = alpha, the H2(16)O / H2(18)O ratio.

    Returns
    -------
    list of 3 floats : time derivatives [dS44, dS46, dS48].
    """
    k = pars[0]  # rate constant / s^-1
    alpha = pars[1]  # H2(16)O / H2(18)O ratio
    S44, S46, S48 = S[0], S[1], S[2]
    dS44 = k * (-2 / 3 * (1 - alpha) * S44 + 1 / 3 * alpha * S46)
    dS46 = k * (2 / 3 * (1 - alpha) * S44 - 1 / 3 * S46 + 2 / 3 * alpha * S48)
    dS48 = k * (1 / 3 * (1 - alpha) * S46 - 2 / 3 * alpha * S48)
    # The derivatives sum to zero, so total signal is conserved.
    return [dS44, dS46, dS48]
def solve_carbonic_burst(k=0.026, alpha=0.27, tspan=(0, 60)):
    """Solve the isotope-scrambling ODE after a burst of CO(16) oxidation.

    Parameters
    ----------
    k : float
        Rate constant for H2O + CO2 --> H2CO3 in s^-1.
    alpha : float
        H2(16)O / H2(18)O ratio; initial condition is [alpha, 1-alpha, 0].
    tspan : sequence of floats
        Either [t0, t1] (expanded to a 200-point grid) or an explicit
        time grid. (Fixed: the default was a mutable list shared between
        calls; a tuple is safe and backward compatible.)

    Returns
    -------
    numpy array of shape (len(t), 3) with the M44/M46/M48 signals.
    """
    print("k = " + str(k))
    print("alpha = " + str(alpha))  # fixed "alhpa" typo in the log line
    S0 = np.array([alpha, 1 - alpha, 0])
    pars = [k, alpha]
    if len(tspan) == 2:
        tspan = np.linspace(tspan[0], tspan[-1], 200)
    SS = odeint(carbonic_ode, S0, tspan, args=(pars,))
    return SS
|
ScottSoren/EC_MS | src/EC_MS/Chips.py | <filename>src/EC_MS/Chips.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 22 15:26:33 2017
@author: scott
"""
from __future__ import print_function, division
import os
import numpy as np
from matplotlib import pyplot as plt
from . import Chem
from .Molecules import Molecule
from .Object_Files import structure_to_lines, lines_to_dictionary
from .Object_Files import lines_to_structure, date_scott, update_lines
data_directory = (
os.path.dirname(os.path.realpath(__file__)) + os.sep + "data" + os.sep + "chips"
)
cwd = os.getcwd()
# for python2:
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
design_parameters = {}
design_parameters["membrane_chip_02"] = dict(
w_cap=6e-6, # capillary width / [m]
h_cap=6e-6, # capillary height / [m]
l_cap=1e-3, # capillary length / [m]
p=1e5, # chip pressure /
T=298,
)
class Chip:
    def __init__(
        self, name="unknown", chip_type="membrane_chip_02", updatefile=False, **kwargs
    ):
        """Initialize a Chip, loading saved parameters when available.

        Tries self.reset() (defined elsewhere) to load an existing data
        file for `name`; on failure falls back to the design defaults for
        `chip_type` (warning when unknown), otherwise writes the file.
        Extra **kwargs override individual parameters; each parameter
        becomes an instance attribute, and with updatefile=True each one
        is also persisted via self.write().

        Chip parameters taken from Daniel's thesis.
        """
        self.parameters = {}
        self.name = name
        if not self.reset():  # reset returns false if there is no existing data file
            try:
                self.parameters = design_parameters[chip_type]
            except KeyError:
                print("WARNING!!! no parameters available for chip '" + name + "'.")
        else:
            self.write(name=name)
        parameters = self.parameters
        parameters.update(kwargs)
        # Mirror every parameter as an instance attribute for convenient access.
        for key, value in parameters.items():
            setattr(self, key, value)
            if updatefile:
                self.write((key, value))
def capillary_flow(
self, gas="He", w_cap=None, h_cap=None, l_cap=None, T=None, p=None
):
"""
adapted from Membrane_chip.py,
equations from <NAME>'s PhD Thesis
Returns the flux in molecules/s of a carrier gas through the
chip capillary.
As the flow starts out viscous, at low analyte production rates,
an analyte flux is simply the analyte's mol fraction in the chip times
this flux.
We assume that flow is governed by the bulk properties (viscosity)
of the carrier gas and the molecular properties (diameter, mass)
of the analyte.
"""
if type(gas) is str:
gas = Molecule(gas)
""" #This does not work! I do not know why.
print(self.w_cap)
for var in 'w_cap', 'h_cap', 'l_cap', 'T', 'p':
if locals()[var] is None:
locals()[var] = getattr(self, var)
"""
# This is actually the most compact way I can figure out to do this.
w = next(x for x in [w_cap, self.w_cap] if x is not None)
h = next(x for x in [h_cap, self.h_cap] if x is not None)
l = next(x for x in [l_cap, self.l_cap] if x is not None)
T = next(x for x in [T, self.T] if x is not None)
p = next(x for x in [p, self.p] if x is not None)
s = gas.molecule_diameter # molecular diameter in m
m = gas.molecule_mass # molecular mass in kg
eta = gas.dynamic_viscosity # viscosity in Pa*s
d = ((w * h) / np.pi) ** 0.5 * 2 # hydraulic diameter
# d=4.4e-6 #used in Henriksen2009
a = d / 2 # hydraulic radius
p_1 = p # pressure at start of capillary (chip pressure)
lam = d # mean free path of transition from visc. to mol. flow
p_t = Chem.kB * T / (2 ** 0.5 * np.pi * s ** 2 * lam) # transition pressure
p_2 = 0 # pressure at end of capillary (vacuum)
p_m = (p_1 + p_t) / 2 # average pressure in the visc. + trans. flow region
v_m = (8 * Chem.kB * T / (np.pi * m)) ** 0.5 # mean molecular velocity
nu = (m / (Chem.kB * T)) ** 0.5 # a resiprocal velocity used for short-hand
# dumb, but inherited straight from Henrikson2009 through all of
# Daniel's work up to his thesis, where it was finally dropped, which
# unfortunatly makes that term of the equation a bit silly-looking.
N_dot = (
1
/ (Chem.kB * T)
* 1
/ l
* (
(
a ** 4 * np.pi / (8 * eta) * p_m
+ a ** 3
* 2
* np.pi
/ 3
* v_m
* (1 + 2 * a * nu * p_m / eta)
/ (1 + 2.48 * a * nu * p_m / eta)
)
* (p_1 - p_t)
+ a ** 3 * 2 * np.pi / 3 * v_m * (p_t - p_2)
)
)
return N_dot
def write(self, a=None, attr=None, name=None, *args, **kwargs):
"""
Identical to the function in class Molecule of same name
... could move this to Object_Files, but a bit tricky
this is supposed to be a versitile tool for writing to the Molecule's
data file. Whether the added intricacy will be worth the lines of code
it saves, I can't tell yet.
Writes in one of the following ways:
1. If the name of an attribute is given, that attribute is written.
2. If a is a string, simply write a to the molecule's datafile.
3. If a is a function, then it's a function that writes what you want
to the data file given in the first argument
4. If a is a dictionary, list, or tuple, convert it to lines according to
the system encoded in Object_Files.
5. If a is not given but keyword arguments are, write **kwargs to the
data file according to the system encoded in Object_Files.
"""
if name is None:
name = self.name
if attr is not None:
a = (attr, getattr(self, attr))
elif a is None:
if len(kwargs) == 0:
print("nothing to write.")
return
else:
a = kwargs
cwd = os.getcwd()
os.chdir(data_directory)
file_name = name + ".txt"
with open(file_name, "a") as f:
if callable(a):
a(f, *args, **kwargs)
elif type(a) is str:
if a[-1] != "\n":
a += "\n"
f.write(a)
elif type(a) in (list, dict):
if "key" in kwargs.keys():
lines = structure_to_lines(a, preamble=kwargs["key"])
else:
lines = structure_to_lines(a)
f.writelines(lines)
elif type(a) is tuple:
lines = structure_to_lines(a[1], preamble=a[0])
f.writelines(lines)
else:
print("Couldn" "t write " + str(a))
os.chdir(cwd)
def save(self, name):
"""
Save the important stuff in the chip, namely
"""
if name is None:
name = self.name
else:
self.name = name
print("Saving info on Chip " + name + " to file")
file_name = name + ".txt"
cwd = os.getcwd()
os.chdir(data_directory)
with open(file_name, "w") as f:
f.write("# created " + date_scott() + "\n")
os.chdir(cwd)
for key, value in self.parameters.items():
setattr(self, key, value)
self.write((key, value))
def reset(self):
file_name = self.name + ".txt"
try:
cwd = os.getcwd()
os.chdir(data_directory)
f = open(file_name, "r")
except FileNotFoundError:
print("no file for " + self.name)
os.chdir(cwd)
return False
print("loaded info on Chip " + self.name + " from file.")
lines = f.readlines()
f.close()
os.chdir(cwd)
dictionary = lines_to_dictionary(lines)
self.parameters.update(dictionary)
for key, value in dictionary.items():
setattr(self, key, value)
return True
if __name__ == "__main__":
    # Quick sanity check: report the capillary flux for a few gases.
    demo_chip = Chip(name="SI-3iv1-1-C5")
    for gas_name in ("He", "CO", "CH4"):
        flux = demo_chip.capillary_flow(gas_name)
        print(gas_name + " flow is " + str(flux / Chem.NA * 1e9) + " nmol/s")
|
ScottSoren/EC_MS | _tests_/29C10_capillary_equation/testing_cap.py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 15:34:09 2020
@author: scott
"""
import numpy as np
from matplotlib import pyplot as plt
from EC_MS import Chip, Chem
# Plot the capillary flux of air vs temperature for a few chip pressures.
chip = Chip()
print("design length = " + str(chip.l_cap * 1e3) + " mm")

T0 = 273.15  # 0 deg C in K
T_vec = np.linspace(0, 100, 101) + T0

fig, ax = plt.subplots()
ax.set_xlabel("Temperature / [deg C]")
ax.set_ylabel("capilarry flux / [nmol/s]")

for pressure, line_color in [(1e5, "k"), (2e5, "b"), (2.5e5, "g"), (3e5, "r")]:
    fluxes = np.array(
        [chip.capillary_flow("air", p=pressure, T=T) / Chem.NA for T in T_vec]
    )
    ax.plot(T_vec - T0, fluxes * 1e9, color=line_color, label=str(pressure))

ax.legend()
fig.savefig("air.png")
|
ScottSoren/EC_MS | src/EC_MS/converters.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 5 11:39:18 2021
@author: scott
"""
import json, re
from .Molecules import Molecule
def mdict_from_SI2020_calibration_file(calibration_file):
    """Return a molecule dict from an old Spectro Inlets calibration file.

    The file is JSON with keys "mol_list", "real_names", and "F" (per-name
    mass-to-sensitivity mappings). For each name a Molecule is built, its
    primary mass is parsed from the name (e.g. "O2_M32" -> "M32"), and the
    nonzero sensitivity factors are stored in F_mat (plus F / F_cal for the
    primary mass).
    """
    with open(calibration_file, "r") as f:
        cal = json.load(f)
    mdict = {}
    for name in cal["mol_list"]:
        molecule = Molecule(cal["real_names"].get(name, name))
        molecule.F_mat = {}
        mass_match = re.search(r"M[0-9]+", name)
        if mass_match:
            molecule.primary = mass_match.group(0)
        for mass, sensitivity in cal["F"].get(name, {}).items():
            if sensitivity == 0:
                continue  # EC_MS can't handle sensitivity=0
            molecule.F_mat[mass] = sensitivity
            if mass == molecule.primary:
                molecule.F = sensitivity
                molecule.F_cal = sensitivity
        mdict[name] = molecule
    return mdict
|
ScottSoren/EC_MS | src/EC_MS/Plotting.py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 28 19:07:45 2016
Most recently edited: 17B21
@author: scott
"""
from matplotlib import pyplot as plt
from matplotlib import gridspec
import numpy as np
import os
# from mpl_toolkits.axes_grid1 import make_axes_locatable
from .EC import sync_metadata, select_cycles
from .Quantification import get_flux, get_signal
from .Object_Files import lines_to_dictionary
from .Molecules import Molecule
# Load the module-wide mass -> plot color mapping from the preferences file.
preferencedir = os.path.dirname(os.path.realpath(__file__)) + os.sep + "preferences"
with open(preferencedir + os.sep + "standard_colors.txt", "r") as f:
    lines = f.readlines()
    # lines_to_dictionary parses the Object_Files line format; we keep only
    # the "standard colors" section.
    standard_colors = lines_to_dictionary(lines, removecomments=False)[
        "standard colors"
    ]
def get_standard_colors():
    """Return the module-level mass-to-color mapping loaded from standard_colors.txt."""
    return standard_colors
def colorax(ax, color, lr="right", xy="y"):
    """Color one spine of *ax* plus the ticks, tick labels and axis label.

    lr: which spine to color ("left" or "right")
    xy: which axis the ticks and label belong to ("x" or "y")
    """
    ax.spines[lr].set_color(color)
    ax.tick_params(axis=xy, color=color)
    ax.tick_params(axis=xy, labelcolor=color)
    if xy == "y":
        target_axis = ax.yaxis
    elif xy == "x":
        target_axis = ax.xaxis
    else:
        target_axis = None
    if target_axis is not None:
        target_axis.label.set_color(color)
def align_zero(ax, ax_ref, xy="y"):
    """Rescale the y-limits of *ax* so that zero aligns with *ax_ref*'s zero.

    Keeps the span of ax's current y-limits but chooses new limits whose
    top/bottom ratio matches ax_ref's, so zero falls at the same axes
    fraction on both. Returns the new [bottom, top] limits.
    (xy is currently unused, kept for interface compatibility.)
    """
    bottom, top = ax.get_ylim()
    ref_bottom, ref_top = ax_ref.get_ylim()
    ratio = ref_top / ref_bottom  # target top/bottom ratio
    span = top - bottom  # span to preserve
    new_limits = [span / (ratio - 1), ratio * span / (ratio - 1)]
    ax.set_ylim(new_limits)
    return new_limits
def smooth(y, n_points):
    """Moving-average smoothing of *y* with a flat n_points-wide kernel.

    Uses numpy.convolve with mode="same", so the result has the same length
    as y (endpoints are averaged against implicit zeros).
    """
    kernel = np.full(n_points, 1.0 / n_points)
    return np.convolve(y, kernel, mode="same")
def plot_EC_vs_t(
    data,
    t_str="time/s",
    J_str=None,
    V_str=None,
    V_color="k",
    J_color="r",
    ax="new",
    verbose=True,
    **kwargs,
):
    """Plot potential and current vs time on a pair of twinned axes.

    J_str / V_str left as None are filled in from sync_metadata(data).
    ax is "new" (make a fresh figure) or a pair [potential_ax, current_ax].
    Extra **kwargs are forwarded to both plot calls.
    Returns [potential_ax, current_ax].
    """
    if J_str is None or V_str is None:
        default_V, default_J = sync_metadata(data)
        if J_str is None:
            J_str = default_J
        if V_str is None:
            V_str = default_V
    t, V, J = data[t_str], data[V_str], data[J_str]
    if ax == "new":
        fig, V_ax = plt.subplots()
        J_ax = V_ax.twinx()
    else:
        V_ax, J_ax = ax[0], ax[1]
    V_ax.plot(t, V, color=V_color, **kwargs)
    J_ax.plot(t, J, color=J_color, **kwargs)
    V_ax.set_xlabel(t_str)
    V_ax.set_ylabel(V_str)
    J_ax.set_ylabel(J_str)
    return [V_ax, J_ax]
def plot_vs_potential(
    CV_and_MS_0,
    colors=None,
    tspan=None,
    RE_vs_RHE=None,
    A_el=None,
    cycles="all",
    ax1="new",
    ax2="new",
    ax=None,  # spec='k-',
    overlay=0,
    logplot=True,
    leg=False,
    verbose=True,
    removebackground=None,
    background=None,
    t_bg=None,
    endpoints=3,
    masses="all",
    masses_left=None,
    masses_right=None,
    mols=None,
    mols_left=None,
    mols_right=None,
    unit=None,
    smooth_points=0,
    emphasis="ms",
    hspace=0.1,
    left_space=0.15,
    right_space=0.95,  # for gridspec
    J_str=None,
    V_str=None,
    fig=None,
    spec={},
    t_str=None,
    **kwargs,
):
    """
    Plot current and selected MS signals (or calibrated fluxes) vs E_we, as
    is the convention for cyclic voltammagrams. added 16I29

    CV_and_MS_0: the combined EC-MS dataset (a copy is used; the interpolated
        signals are added to the copy only).
    mols (or mols_left / mols_right) select calibrated fluxes; masses select
        raw signals; both override colors. A two-element list/tuple of lists
        for mols or masses is split into left and right axes.
    ax: [MS_axis, EC_axis] (optionally with a third, right-hand MS axis),
        or "new" to create axes according to overlay / emphasis.
    Returns the list of axes used.

    #there's a lot of code here that's identical to plot_experiment. Consider
    #having another function for e.g. processing these inputs.
    """
    if verbose:
        print("\n\nfunction 'plot_vs_potential' at your service!\n")
    if type(logplot) is not list:
        logplot = [logplot, False]
    if removebackground is None:
        # background subtraction defaults to on iff a background tspan is given
        removebackground = t_bg is not None
    spec.update(kwargs)  # extra arguments are passed to plt.plot
    # prepare axes. This is ridiculous, by the way.
    CV_and_MS = CV_and_MS_0.copy()  # 17C01
    # EC-only datasets are delegated to the simpler EC-only plotter.
    if "data_type" in CV_and_MS and CV_and_MS["data_type"][0:2] == "EC":
        ax = plot_vs_potential_EC(
            data=CV_and_MS,
            tspan=tspan,
            RE_vs_RHE=RE_vs_RHE,
            A_el=A_el,
            cycles="all",
            ax=ax,  # spec='k-',
            J_str=J_str,
            V_str=V_str,
            t_str=t_str,
            fig=fig,
            spec=spec,
            verbose=verbose,
        )
        return ax
    # resolve ax / ax1 / ax2 into an MS axis (ax1) and an EC axis (ax2)
    if ax == "new":
        ax1 = "new"
        ax2 = "new"
    elif ax is not None:
        ax1 = ax[0]
        ax2 = ax[1]
    if ax1 != "new":
        figure1 = ax1.figure
    elif ax2 != "new":
        figure1 = ax2.figure
    else:
        if fig is None:
            figure1 = plt.figure()
        else:
            figure1 = fig
    if overlay:
        # both panels share the same x-axis area, MS on left y, EC on right y
        if ax1 == "new":
            ax1 = figure1.add_subplot(111)
        if ax2 == "new":
            ax2 = ax1.twinx()
    else:
        # stacked panels; emphasis controls the MS:EC height ratio
        if ax1 == "new":
            if emphasis == "MS":
                gs = gridspec.GridSpec(3, 1)
                # gs.update(hspace=0.025)
                ax1 = plt.subplot(gs[0:2, 0])
                ax2 = plt.subplot(gs[2:3, 0])
            elif emphasis == "ms":
                gs = gridspec.GridSpec(5, 1)
                # gs.update(hspace=0.025)
                ax1 = plt.subplot(gs[0:3, 0])
                ax2 = plt.subplot(gs[3:5, 0])
            elif emphasis == "EC":
                gs = gridspec.GridSpec(3, 1)
                # gs.update(hspace=0.025)
                ax1 = plt.subplot(gs[0, 0])
                ax2 = plt.subplot(gs[1:3, 0])
            else:
                gs = gridspec.GridSpec(8, 1)
                # gs.update(hspace=0.025)
                ax1 = plt.subplot(gs[0:4, 0])
                ax2 = plt.subplot(gs[4:8, 0])
            gs.update(hspace=hspace, left=left_space, right=right_space)
    if ax is None or len(ax) < 2:
        ax = [ax1, ax2]
    print(f"ax = {ax}")  # debugging
    # NOTE(review): logplot was normalized to a list above, so this int check
    # looks unreachable — confirm before removing.
    if type(logplot) is int:
        logplot = [logplot, logplot]
    if logplot[0]:
        ax1.set_yscale("log")
    if logplot[1]:
        ax2.set_yscale("log")
    # get EC data
    V_str, J_str = sync_metadata(
        CV_and_MS, RE_vs_RHE=RE_vs_RHE, A_el=A_el, V_str=V_str, J_str=J_str
    )
    V = CV_and_MS[V_str]
    J = CV_and_MS[J_str]
    if t_str is None:
        if "t_str" in CV_and_MS:
            t_str = CV_and_MS["t_str"]
        else:
            t_str = "time/s"
    # get time variable and plotting indexes
    t = CV_and_MS[t_str]
    if tspan is None:  # then use the whole range of overlap
        try:
            tspan = CV_and_MS["tspan"]
        except KeyError:
            tspan = [min(t), max(t)]
    mask = np.logical_and(tspan[0] < t, t < tspan[-1])
    if ax2 is not None:
        # plot EC-lab data
        ec_spec = spec.copy()
        if "color" not in ec_spec.keys():
            ec_spec["color"] = "k"
        # print('len(data[' + V_str + '] = ' + str(len(V))) # debugging
        # print('len(data[' + J_str + '] = ' + str(len(J))) # debugging
        # print('len(mask) = ' + str(len(mask))) # debugging
        # NOTE(review): t_plot/V_plot are also used by the MS branch below, so
        # that branch assumes ax2 is not None — confirm callers never pass
        # ax=[ax1, None] together with MS plotting.
        t_plot, V_plot, J_plot = t[mask], V[mask], J[mask]
        ax2.plot(V_plot, J_plot, **ec_spec)
        # maybe I should use EC.plot_cycles to have different cycles be different colors. Or rewrite that code here.
        ax2.set_xlabel(V_str)
        ax2.set_ylabel(J_str)
    axes = [ax1, ax2]
    # ---- parse inputs for (evt. calibrated) mass spectrometer to plot ------
    # a 2-element nesting of mols/masses means (left axis, right axis)
    try:
        if type(mols[0]) in (list, tuple):
            mols_left = mols[0]
            mols_right = mols[1]
            mols = None
    except (IndexError, TypeError):
        pass
    try:
        if type(masses[0]) in (list, tuple):
            masses_left = masses[0]
            masses_right = masses[1]
            masses = None
    except (IndexError, TypeError):
        pass
    # print(masses)
    colors_right = None
    colors_left = None
    if ax1 is not None:  # option of skipping an axis added 17C01
        # check if we're going to plot signals or fluxes:
        quantified = False  # added 16L15
        if mols is not None:
            quantified = True
            colors_left = mols  # added 17H11
        elif mols_left or mols_right is not None:
            quantified = True
            colors_left = mols_left
            colors_right = mols_right
        elif masses is not None:
            # print('masses specified')
            quantified = False
            colors_left = masses
        elif masses_left or masses_right is not None:
            quantified = False
            colors_left = masses_left
            colors_right = masses_right
        elif (
            (type(colors) is dict and list(colors.keys())[0][0] == "M")
            or (type(colors) is list and type(colors[0]) is str and colors[0][0] == "M")
            or (type(colors) is str and colors[0] == "M")
        ):
            # colors looks like mass names ("M..") -> treat as raw signals
            if verbose:
                print("uncalibrated data to be plotted.")
            masses = colors
            colors_left = masses
        else:
            quantified = True
            mols = colors
        if not quantified and masses == "all":  # this is now the default!
            # collect every mass present in the dataset ("M<x>-y" columns)
            masses = [
                key[:-2]
                for key in CV_and_MS.keys()
                if key[0] == "M" and key[-2:] == "-y"
            ]
            colors_left = masses
        # normalize single selections to one-element lists
        if (
            colors_left is not None
            and type(colors_left) is not list
            and type(colors_left) is not dict
        ):
            colors_left = [colors_left]
        if (
            colors_right is not None
            and type(colors_right) is not list
            and type(colors_right) is not dict
        ):
            colors_right = [colors_right]
        # print(type(colors))
        if unit is None:
            if quantified:
                unit = "pmol/s"
            else:
                unit = "pA"
        # then do it.
        if colors_right is not None:
            if ax is None or len(ax) < 3:  # so I can reuse right axis
                axes += [axes[0].twinx()]
            else:
                axes += [ax[-1]]
        for colors, ax in [(colors_left, axes[0]), (colors_right, axes[-1])]:
            if colors is None:
                continue
            # a list of masses/mols is turned into a {key: color} dict using
            # the standard colors
            if type(colors) is list:
                c = colors.copy()
                colors = {}
                for m in c:
                    print(str(m))
                    if quantified:
                        if type(m) is str:
                            mol = Molecule(m, verbose=False)
                        else:
                            mol = m
                        color = standard_colors[mol.primary]
                        colors[mol] = color
                    else:
                        color = standard_colors[m]
                        colors[m] = color
            for (key, color) in colors.items():
                if quantified:
                    x, y = get_flux(
                        CV_and_MS,
                        mol=key,
                        tspan=tspan,
                        removebackground=removebackground,
                        background=background,
                        endpoints=endpoints,
                        t_bg=t_bg,
                        unit=unit,
                        verbose=verbose,
                    )
                    if type(key) is not str:
                        key = str(key)  # in case key had been a Molecule object
                    Y_str = key + "_" + unit
                else:
                    Y_str = key + "_" + unit  #
                    x, y = get_signal(
                        CV_and_MS,
                        mass=key,
                        tspan=tspan,
                        removebackground=removebackground,
                        background=background,
                        endpoints=endpoints,
                        t_bg=t_bg,
                        unit=unit,
                        verbose=verbose,
                    )
                # interpolate the MS series onto the EC time vector so it can
                # be plotted against potential
                try:
                    y_plot = np.interp(
                        t_plot, x, y
                    )  # obs! np.interp has a different argument order than Matlab's interp1
                except ValueError:
                    print("x " + str(x) + "\ny " + str(y) + "\nt " + str(t))
                CV_and_MS[
                    Y_str
                ] = y_plot  # add the interpolated value to the dictionary for future use
                # 17C01: but not outside of this function.
                ms_spec = spec.copy()
                if "color" not in ms_spec.keys():
                    ms_spec["color"] = color
                ax.plot(V_plot, y_plot, label=Y_str, **ms_spec)
            if quantified:
                M_str = "cal. signal / [" + unit + "]"
            else:
                M_str = "MS signal / [" + unit + "]"
            ax.set_xlabel(V_str)
            ax.xaxis.set_label_position("top")
            ax.xaxis.tick_top()
            ax.set_ylabel(M_str)
        if leg:
            ax1.legend()
    # if colors_right is not None:
    #    align_zero(axes[0], axes[-1])
    if verbose:
        print("\nfunction 'plot_vs_potential' finished!\n\n")
    for ax in axes:
        if ax is not None:
            ax.tick_params(axis="both", direction="in")  # 17K28
    # parameter order of np.interp is different than Matlab's interp1
    return axes
def plot_vs_potential_EC(
    data,
    tspan=None,
    RE_vs_RHE=None,
    A_el=None,
    cycles="all",
    ax="new",  # spec='k-',
    verbose=True,
    J_str=None,
    V_str=None,
    fig=None,
    spec={},
    t_str=None,
):
    """Plot current vs potential (a cyclic voltammogram) from an EC dataset.

    V_str / J_str left as None are resolved by sync_metadata (which also
    applies RE_vs_RHE and A_el). tspan defaults to data["tspan"] or the
    full time range. Returns the axis plotted on.
    """
    if ax is None or ax == "new":
        fig, ax = plt.subplots()

    V_str, J_str = sync_metadata(
        data, RE_vs_RHE=RE_vs_RHE, A_el=A_el, V_str=V_str, J_str=J_str
    )
    V = data[V_str]
    J = data[J_str]

    if t_str is None:
        t_str = data["t_str"] if "t_str" in data else "time/s"
    t = data[t_str]

    if tspan is None:  # default to the dataset's stated span, else everything
        tspan = data["tspan"] if "tspan" in data else [min(t), max(t)]
    in_range = np.logical_and(tspan[0] < t, t < tspan[-1])

    # plot EC-lab data, defaulting to black unless spec says otherwise
    plot_spec = spec.copy()
    plot_spec.setdefault("color", "k")
    ax.plot(V[in_range], J[in_range], **plot_spec)
    ax.set_xlabel(V_str)
    ax.set_ylabel(J_str)
    if verbose:
        print("\nfunction 'plot_vs_potential_EC' finished!\n\n")
    return ax
def plot_vs_time(Dataset, cols_1="input", cols_2="input", verbose=1):
    """
    Superceded by the more convenient plot_masses and plot_masses_and_I.

    Plots log10 of selected MS signals (top panel) and current (bottom panel)
    vs time. cols_1 / cols_2 are lists of [time_col, value_col] pairs for the
    two panels, or "input" to choose interactively via indeces_from_input.
    """
    if verbose:
        print("\n\nfunction 'plot_vs_time' at your service!")
    if cols_1 == "input":
        data_cols = Dataset["data_cols"]
        prompt = (
            "Choose combinations of time and non-time variables for axis 1, \n"
            + "with every other choice a time variable."
        )
        I_axis_1 = indeces_from_input(data_cols, prompt)
        # pair every chosen time column with the following value column
        cols_1 = [
            [data_cols[i], data_cols[j]] for i, j in zip(I_axis_1[::2], I_axis_1[1::2])
        ]
    figure1 = plt.figure()
    axes_1 = figure1.add_subplot(211)
    for pltpair in cols_1:
        # Label a column like "M32-y" as "M32" by stripping the 2-char suffix.
        # BUG FIX: the old code called .group() on a string slice (it treated
        # the slice as a regex match), raising AttributeError whenever the
        # slice was non-empty.
        label_string = pltpair[1][0:-2]
        if not label_string:
            label_string = pltpair[1]
        x = Dataset[pltpair[0]]
        y = np.log10(Dataset[pltpair[1]])  # same as np.log(...)/np.log(10)
        axes_1.plot(x, y, label=label_string)
    axes_1.set_xlabel("time / [s]")
    axes_1.set_ylabel("log(MS signal / [a.u.])")
    axes_1.legend()
    if cols_2 == "input":
        data_cols = Dataset["data_cols"]
        prompt = (
            "Choose combinations of time and non-time variables for axis 2, \n"
            + "with every other choice a time variable."
        )
        I_axis_2 = indeces_from_input(data_cols, prompt)
        cols_2 = [
            [data_cols[i], data_cols[j]] for i, j in zip(I_axis_2[::2], I_axis_2[1::2])
        ]
    axes_2 = figure1.add_subplot(212)
    for pltpair in cols_2:
        label_string = pltpair[1]
        # prepend a (0, 0) point so the trace starts at the origin
        x = np.insert(Dataset[pltpair[0]], 0, 0)
        y = np.insert(Dataset[pltpair[1]], 0, 0)
        axes_2.plot(x, y, "k--", label=label_string)
    axes_2.set_ylabel("current / [mA]")
    axes_2.set_xlabel("time / [s]")
    axes_2.legend()
    # so capacitance doesn't blow it up:
    I_plt_top = np.where(x > 2)[0][0]
    y_max = np.max(y[I_plt_top:])
    axes_2.set_ylim(np.min(y), y_max)
    if verbose:
        print("function 'plot_vs_time' finished!\n\n")
def indeces_from_input(options, prompt):
    """Prompt the user to pick options by index; return the chosen indices.

    Prints the prompt and a numbered menu of options, then parses the user's
    space-separated reply into a list of ints.
    (something I used all the time back in the (Matlab) days.
    not sure I'll ever actually use it again though)
    """
    print(
        prompt
        + "\n... enter the indeces you're interested in, in order,"
        + "seperated by spaces, for example:\n>>>1 4 3"
    )
    for index, option in enumerate(options):
        print(str(index) + "\t\t " + option)
    reply = input("\n")
    return [int(token) for token in reply.split(" ")]
def smooth_data(data, points=3, cols=None, verbose=True):
    """
    Moving-average smooth selected columns of a dataset, in place.

    cols defaults to data["data_cols"]. Each column is convolved with a flat
    points-wide kernel; since np.convolve doesn't get the endpoints quite
    right, they are patched with partial-window averages afterwards.
    Operates on the original data set and returns it.
    """
    if cols is None:
        cols = data["data_cols"]
    kernel = np.array([1] * points) / points  # flat moving-average kernel
    for col in cols:
        if verbose:
            print(
                "smoothening '"
                + col
                + "' with a "
                + str(points)
                + "-point moving average"
            )
        raw = data[col].copy()  # copy in case it's linked to another column
        smoothed = np.convolve(raw, kernel, mode="same")
        # patch the convolution endpoints with partial-window means
        for i in range(points):
            smoothed[i] = np.mean(raw[0 : i + 1])
            if i > 0:
                smoothed[-i] = np.mean(raw[-i:])
        data[col] = smoothed
    return data
def plot_signal(
    MS_data,
    masses="all",
    tspan=None,
    ax="new",
    unit="nA",
    removebackground=None,
    background=None,
    t_bg=None,
    logplot=True,
    saveit=False,
    leg=False,
    name=None,
    spec=None,
    override=False,
    verbose=True,
):
    """
    plots selected masses for a selected time range from MS data or EC_MS data

    masses: "all" (every "M<x>-y" column in the dataset), a single mass
        string, a list of masses, or a {mass: color} dict.
    ax: "new" to create a figure, or an existing axis to plot on.
    removebackground / background / t_bg are forwarded to get_signal.
    Returns the axis plotted on.
    """
    if name is None:
        try:
            name = MS_data["name"]
        except KeyError:
            try:
                name = MS_data["title"]
            except KeyError:
                name = ""
    if spec is None:
        spec = {}
    if verbose:
        print("\n\nfunction 'plot_signal' at your service! \n Plotting from: " + name)
    if ax == "new":
        fig1 = plt.figure()
        ax = fig1.add_subplot(111)
    lines = {}
    if masses == "all":  # this is now the default!
        masses = [
            key[:-2] for key in MS_data.keys() if key[0] == "M" and key[-2:] == "-y"
        ]
    elif type(masses) is str:
        masses = [masses]
    # a list of masses becomes a {mass: color} dict via the standard colors
    if type(masses) is list:
        c = masses
        masses = {}
        for m in c:
            try:
                color = standard_colors[m]
            except KeyError:
                print("Waring: no standard color for " + m + ". Using black.")
                color = "k"
            masses[m] = color
    for mass, color in masses.items():
        try:
            x, y = get_signal(
                MS_data,
                mass,
                unit=unit,
                tspan=tspan,
                override=override,
                verbose=verbose,
                removebackground=removebackground,
                t_bg=t_bg,
                background=background,
            )
            if len(x) == 0:
                print("WARNING: no data for " + mass)
                continue
        except KeyError:
            print("WARNING: Can't get signal for " + str(mass))
            continue
        if len(x) == 0:
            print(
                "WARNING: get_signal returned vector of zero length for "
                + mass
                + ". plot_signal is skipping that mass."
            )
            continue
        if verbose:
            print("plotting: " + mass)
        # print(spec) # debugging
        # note: color is passed positionally, as a matplotlib format string
        lines[mass] = ax.plot(x, y, color, label=mass, **spec)
        # as it is, lines is not actually used for anything
    if leg:
        if type(leg) is not str:
            leg = "lower right"
        ax.legend(loc=leg)
    ax.set_xlabel("time / [s]")
    ax.set_ylabel("MS signal / [" + unit + "]")
    if logplot:
        ax.set_yscale("log")
    ax.tick_params(axis="both", direction="in")  # 17K28
    if verbose:
        print("function 'plot_signal' finsihed! \n\n")
    return ax
def plot_masses(*args, **kwargs):
    """Deprecated alias for plot_signal, kept for backwards compatibility."""
    print("plot_masses renamed plot_signal. Remember that next time!")
    return plot_signal(*args, **kwargs)
def plot_flux(
    MS_data,
    mols={"H2": "b", "CH4": "r", "C2H4": "g", "O2": "k"},
    tspan=None,
    ax="new",
    removebackground=False,
    background="constant",
    endpoints=5,
    t_bg=None,
    A_el=None,
    unit=None,
    smooth_points=0,
    logplot=True,
    leg=False,
    spec={},
    alpha_under=None,
    override=False,
    verbose=True,
):
    """
    Plots the molecular flux to QMS in nmol/s for each of the molecules in
    'mols', using the primary mass and the F_cal value read from
    the molecule's text file.

    mols: a {mol: color} dict, a list of molecule names or Molecule objects
        (colors then come from Molecule.get_color()), or a single molecule.
    tspan / removebackground / background / t_bg / endpoints are forwarded
    to get_flux. Returns the axis plotted on.
    """
    if verbose:
        print("\n\nfunction 'plot_flux' at your service!\n")
    if ax == "new":
        fig1 = plt.figure()
        ax = fig1.add_subplot(111)
    # note, tspan is processed in get_flux, and not here!
    if type(mols) is not list and type(mols) is not dict:
        # then it's probably just one molecule object.
        mols = [mols]
    unit = unit if unit else "nmol/s"
    if type(mols) is list:
        c = mols
        mols = {}
        for m in c:
            if type(m) is str:
                mol = Molecule(m, verbose=False)
            else:
                mol = m  # this function should accept a list of Molecule instances!
            # color = standard_colors[mol.primary]
            # BUG FIX: ask the Molecule object for its color. The old code
            # called m.get_color(), which raised AttributeError whenever m
            # was a string.
            color = mol.get_color()
            mols[mol] = color
            print("mol={}, primary={}, color={}".format(mol.name, mol.primary, color))
    for (mol, color) in mols.items():
        try:
            [x, y] = get_flux(
                MS_data,
                mol,
                unit=unit,
                tspan=tspan,
                removebackground=removebackground,
                background=background,
                t_bg=t_bg,
                endpoints=endpoints,
                override=override,
                verbose=verbose,
            )
            if smooth_points:
                y = smooth(y, smooth_points)
        except KeyError as e:
            print("Can't get signal for " + str(mol) + f".\nError: {e}")
            continue
        if type(mol) is str:
            l = mol
        else:
            l = mol.name
        print("color={}".format(color))  # debugging
        ax.plot(x, y, color=color, label=l, **spec)
        if alpha_under:
            # shade the area between the trace and zero
            ax.fill_between(x, np.zeros(x.shape), y, color=color, alpha=alpha_under)
    if leg:
        if type(leg) is not str:
            leg = "lower right"
        ax.legend(loc=leg)
    ax.set_xlabel("time / [s]")
    ylabel = "cal. signal / [" + unit + "]"
    ax.set_ylabel(ylabel)
    if logplot:
        ax.set_yscale("log")
    ax.tick_params(axis="both", direction="in")  # 17K28
    if verbose:
        print("\nfunction 'plot_flux' finished!\n\n")
    return ax
def plot_experiment_EC(
    data,
    tspan=None,
    verbose=True,
    RE_vs_RHE=None,
    A_el=None,
    ax="new",
    # mols will overide masses will overide colors
    V_color=None,
    J_color=None,
    V_label=None,
    J_label=None,
    t_str=None,
    J_str=None,
    V_str=None,
    fig=None,
    spec={},
):
    """Plot potential and current vs time for an EC-only dataset.

    ax is "new" (a figure with twinned axes is created) or a pair of axes
    [potential_ax, current_ax]. V_str / J_str left as None are resolved via
    sync_metadata. Missing columns are reported and skipped rather than
    raising. Returns the list of axes.
    """
    if ax == "new":
        fig, ax1 = plt.subplots()
        ax2 = ax1.twinx()
        ax = [ax1, ax2]
    # --------- get tspan, V_str, and J_str from input and/or dataset -------- #
    if tspan is None:  # then use the range of overlap
        try:
            tspan = data["tspan"]  # changed from 'tspan_2' 17H09
        except KeyError:
            tspan = "all"
    if type(tspan) is str and not tspan == "all":
        tspan = data[tspan]
    if t_str is None:
        if "t_str" in data:
            t_str = data["t_str"]
        else:
            t_str = "time/s"
    if V_str is None or J_str is None or RE_vs_RHE is not None or A_el is not None:
        V_str_0, J_str_0 = sync_metadata(
            data, RE_vs_RHE=RE_vs_RHE, A_el=A_el, verbose=verbose
        )
        # added 16J27... problem caught 17G26, fixed in sync_metadata
    if V_str is None:  # this way I can get it to plot something other than V and J.
        V_str = V_str_0
    if J_str is None:
        J_str = J_str_0
    # BUG FIX: test for the key "A_el". The old code tested `A_el in data`,
    # i.e. whether the argument's *value* was a dict key, which always fell
    # through to 1.
    if "A_el" in data:
        A_el = data["A_el"]
    else:
        A_el = 1
    # ---------- make sure I can plot the electrochemistry data --------- #
    plotpotential = True
    plotcurrent = True
    try:
        t = data[t_str]
    except KeyError:
        print(
            "data doesn't contain '" + str(t_str) + "', i.e. t_str. Can't plot EC data."
        )
        plotpotential = False
        plotcurrent = False
    try:
        V = data[V_str]
    except KeyError:
        print(
            "data doesn't contain '"
            + str(V_str)
            + "', i.e. V_str. Can't plot that data."
        )
        plotpotential = False
    try:
        J = data[J_str]
    except KeyError:
        print(
            "data doesn't contain '"
            + str(J_str)
            + "', i.e. J_str. Can't plot that data."
        )
        plotcurrent = False
    # -------- cut the electrochemistry data according to tspan ------ #
    if type(tspan) is not str and (plotcurrent or plotpotential):
        mask = np.logical_and(tspan[0] < t, t < tspan[-1])
        t = t[mask]
        # print(np.where(mask)) #debugging
        if plotpotential:
            V = V[mask]
        if plotcurrent:
            J = J[mask]
    # ---------- and plot the electrochemistry data! --------------- #
    if plotcurrent:
        # current goes on the second axis if potential is also plotted
        if plotpotential:
            i_ax = 1
        else:
            i_ax = 0
        ax[i_ax].plot(t, J, color=J_color, label=J_label, **spec)
        ax[i_ax].set_ylabel(J_str)
        ax[i_ax].set_xlabel("time / [s]")
        xlim = ax[i_ax - 1].get_xlim()
        ax[i_ax].set_xlim(xlim)
        # NOTE(review): i_ax is 0 or 1 here, so the == 2 branch looks
        # unreachable — confirm before removing.
        if i_ax == 2:
            colorax(ax[i_ax], J_color, "right")
        else:
            colorax(ax[i_ax], J_color, "left")
        ax[i_ax].tick_params(axis="both", direction="in")  # 17K28
    if plotpotential:
        i_ax = 0
        ax[i_ax].plot(t, V, color=V_color, label=V_label, **spec)
        ax[i_ax].set_ylabel(V_str)
        xlim = ax[i_ax - 1].get_xlim()
        ax[i_ax].set_xlim(xlim)
        colorax(ax[i_ax], V_color, "left")
        ax[i_ax].tick_params(axis="both", direction="in")  # 17K28
    ax[0].set_xlabel(t_str)
    if plotcurrent or plotpotential:
        ax[0].xaxis.set_label_position("top")
        ax[1].set_xlabel(t_str)
    if tspan is not None and not type(tspan) is str:
        ax[1].set_xlim(tspan)
    # -------- finishing up -------- #
    print("\nfunction 'plot_experiment_EC' finished!\n\n")
    return ax
def plot_experiment(
    EC_and_MS,
    colors=None,
    tspan=None,
    tspan_EC=None,
    overlay=False,
    logplot=[True, False],  # NOTE(review): mutable default, shared across calls
    verbose=True,
    plotpotential=True,
    plotcurrent=True,
    ax="new",
    emphasis="ms",
    RE_vs_RHE=None,
    A_el=None,
    removebackground=None,
    background=None,
    endpoints=5,
    t_bg=None,
    saveit=False,
    name=None,
    leg=False,
    unit=None,
    unit_left=None,
    unit_right=None,
    masses="all",
    masses_left=None,
    masses_right=None,
    mols=None,
    mols_left=None,
    mols_right=None,
    # mols will overide masses will overide colors
    V_color="k",
    J_color="0.5",
    V_label=None,
    J_label=None,
    t_str=None,
    J_str=None,
    V_str=None,
    fig=None,
    return_fig=False,
    smooth_points=0,
    override=False,
    spec={},  # NOTE(review): mutable default, shared across calls
    hspace=0.1,
    left_space=0.15,
    right_space=0.85,  # for gridspec
):
    """Plot an EC-MS experiment: MS signals or fluxes on a top axis and
    electrochemistry (potential and current) on the lower axis/axes.

    EC_and_MS is the combined dataset dictionary. mols overrides masses, which
    overrides colors, for selecting which MS channels to plot. *_left/*_right
    variants put channels on the left/right y-axis of the MS panel.

    background can be:
        'constant' - subtracts minimum value or given value
        'linear' - linear background extrapolated between endpoints
        'preset' - finds backgrounds in molecule objects.
    By default, no background is subtracted.

    Returns the list of axes, or (fig, axes) if return_fig=True.
    """
    # ---- resolve a display name for the dataset (used in prints / saveit) ----
    if name is None:
        try:
            name = EC_and_MS["name"]
        except KeyError:
            try:
                name = EC_and_MS["title"]
            except KeyError:
                name = ""
    if verbose:
        print(
            "\n\nfunction 'plot_experiment' at your service!\n Plotting from: " + name
        )
    # ---- EC-only datasets are delegated to plot_experiment_EC ----
    if "data_type" in EC_and_MS and EC_and_MS["data_type"][0:2] == "EC":
        print(set(EC_and_MS["col_types"].values()))
        if "col_types" in EC_and_MS and set(EC_and_MS["col_types"].values()) == {"EC"}:
            ax = plot_experiment_EC(
                EC_and_MS,
                tspan=tspan,
                verbose=verbose,
                RE_vs_RHE=RE_vs_RHE,
                A_el=A_el,
                ax=ax,
                # mols will overide masses will overide colors
                V_color=V_color,
                J_color=J_color,
                V_label=V_label,
                J_label=J_label,
                t_str=t_str,
                J_str=J_str,
                V_str=V_str,
                fig=None,
                spec={},
            )
            return ax
    # ----------- prepare the axes on which to plot ------------ #
    if ax == "new":
        if fig is None:
            figure1 = plt.figure()
        else:
            figure1 = fig
            plt.figure(figure1.number)
            print("plot_expeiriment using " + str(fig))
        if overlay:
            # everything on one x-axis, MS and EC sharing the canvas
            ax = [figure1.add_subplot(111)]
            ax += [ax[0].twinx()]
        else:
            # stacked panels; 'emphasis' controls the MS:EC height ratio
            if emphasis == "MS":
                gs = gridspec.GridSpec(12, 1)
                # gs.update(hspace=0.025)
                ax = [plt.subplot(gs[0:8, 0])]
                ax += [plt.subplot(gs[8:12, 0])]
            elif emphasis == "ms":
                gs = gridspec.GridSpec(5, 1)
                # gs.update(hspace=0.025)
                ax = [plt.subplot(gs[0:3, 0])]
                ax += [plt.subplot(gs[3:5, 0])]
            elif emphasis == "EC":
                gs = gridspec.GridSpec(3, 1)
                # gs.update(hspace=0.025)
                ax = [plt.subplot(gs[0, 0])]
                ax += [plt.subplot(gs[1:3, 0])]
            else:
                gs = gridspec.GridSpec(8, 1)
                # gs.update(hspace=0.025)
                ax = [plt.subplot(gs[0:4, 0])]
                ax += [plt.subplot(gs[4:8, 0])]
            if plotcurrent and plotpotential:
                # third axis: current on the right y-axis of the EC panel
                ax += [ax[1].twinx()]
                ax[1].set_zorder(ax[2].get_zorder() + 1)  # doesn't work
                ax[1].patch.set_visible(False)  # hide the 'canvas'
            gs.update(hspace=hspace, left=left_space, right=right_space)
    # --------- get tspan, V_str, and J_str from input and/or dataset -------- #
    if tspan is None:  # then use the range of overlap
        try:
            tspan = EC_and_MS["tspan"]  # changed from 'tspan_2' 17H09
        except KeyError:
            tspan = "all"
    if type(tspan) is str and not tspan == "all":
        tspan = EC_and_MS[tspan]
    if type(logplot) is not list:
        logplot = [logplot, False]
    if t_str is None:
        if "t_str" in EC_and_MS:
            t_str = EC_and_MS["t_str"]
        else:
            t_str = "time/s"
    if V_str is None or J_str is None or RE_vs_RHE is not None or A_el is not None:
        V_str_0, J_str_0 = sync_metadata(
            EC_and_MS, RE_vs_RHE=RE_vs_RHE, A_el=A_el, verbose=verbose
        )
        # added 16J27... problem caught 17G26, fixed in sync_metadata
    if V_str is None:  # this way I can get it to plot something other than V and J.
        V_str = V_str_0
    if J_str is None:
        J_str = J_str_0
    # NOTE(review): this tests the *value* of A_el for membership, not the key
    # "A_el", so it is almost always False and A_el ends up as 1. A passed-in
    # A_el is clobbered here. Looks like it should be `if "A_el" in EC_and_MS`
    # or `if A_el is None` — confirm intent before changing.
    if A_el in EC_and_MS:
        A_el = EC_and_MS["A_el"]
    else:
        A_el = 1
    # ----------- parse input on which masses / fluxes to plot ------- #
    # print(masses)
    quantified = False
    # print(type(colors))
    # if type(colors) is list and type(colors[0]) is not str:
    #     print(type(colors[0]))
    if mols is not None:
        quantified = True
    elif (
        (type(colors) is dict and list(colors.keys())[0][0] == "M")
        or (type(colors) is list and type(colors[0]) is str and colors[0][0] == "M")
        or (type(colors) is str and colors[0] == "M")
        or colors is None
    ):
        # colors names raw mass channels ("M..."), so plot uncalibrated signals
        if verbose:
            print("uncalibrated data to be plotted.")
        if masses is None:
            masses = colors
    else:
        # colors names molecules, so plot calibrated fluxes
        quantified = True
        mols = colors
    if not quantified and masses == "all":  # this is now the default!
        # collect every "M<x>-y" signal column in the dataset
        masses = [
            key[:-2] for key in EC_and_MS.keys() if key[0] == "M" and key[-2:] == "-y"
        ]
    print("quantified = " + str(quantified))  # debugging
    if removebackground is None and background is None and t_bg is None:
        removebackground = False
    if type(mols) is dict:
        mols = list(mols.values())
    # a 2-element nested mols/masses means (left axis, right axis)
    try:
        if type(mols[0]) in (list, tuple):
            mols_left = mols[0]
            mols_right = mols[1]
            mols = mols_left
    except (IndexError, TypeError):
        pass
    if mols_left is not None and mols is None:
        mols = mols_left
    try:
        if type(masses[0]) in (list, tuple):
            masses_left = masses[0]
            masses_right = masses[1]
            masses = masses_left
    except (IndexError, TypeError):
        pass
    if masses_left is not None and masses is None:
        masses = masses_left
    if removebackground == "right":
        removebackground_right = True
        removebackground_left = False
    elif removebackground == "left":
        removebackground_left = True
        removebackground_right = False
    else:
        removebackground_left = removebackground
        removebackground_right = removebackground
    # ----------- Plot the MS signals! ------------- #
    if quantified:
        if unit_left is None:
            if unit is not None:
                unit_left = unit
            else:
                unit_left = "pmol/s"
        print("removebackground = " + str(removebackground))  # debugging
        plot_flux(
            EC_and_MS,
            mols=mols,
            tspan=tspan,
            A_el=A_el,
            spec=spec,
            ax=ax[0],
            leg=leg,
            logplot=logplot[0],
            unit=unit_left,
            removebackground=removebackground_left,
            background=background,
            endpoints=endpoints,
            t_bg=t_bg,
            override=override,
            smooth_points=smooth_points,
            verbose=verbose,
        )
        if mols_right is not None:
            if unit_right is None:
                if unit is not None:
                    unit_right = unit
                else:
                    unit_right = "pmol/s"
            ax += [ax[0].twinx()]
            plot_flux(
                EC_and_MS,
                mols=mols_right,
                tspan=tspan,
                A_el=A_el,
                spec=spec,
                ax=ax[-1],
                leg=leg,
                logplot=logplot[0],
                unit=unit_right,
                removebackground=removebackground_right,
                background=background,
                endpoints=endpoints,
                t_bg=t_bg,
                override=override,
                smooth_points=smooth_points,
                verbose=verbose,
            )
    else:
        if unit is None:
            unit = "pA"
        plot_signal(
            EC_and_MS,
            masses=masses,
            tspan=tspan,
            spec=spec,
            ax=ax[0],
            leg=leg,
            logplot=logplot[0],
            unit=unit,
            override=override,
            verbose=verbose,
            removebackground=removebackground_left,
            background=background,
            t_bg=t_bg,
        )
        if masses_right is not None:
            ax += [ax[0].twinx()]
            plot_signal(
                EC_and_MS,
                masses=masses_right,
                tspan=tspan,
                spec=spec,
                ax=ax[-1],
                leg=leg,
                logplot=logplot[0],
                unit=unit,
                override=override,
                verbose=verbose,
                removebackground=removebackground_right,
                background=background,
                t_bg=t_bg,
            )
    if not overlay:
        # MS panel sits on top: ticks up, no x label on the shared time axis
        ax[0].set_xlabel("")
        ax[0].xaxis.tick_top()
        if tspan is not None and not type(tspan) is str:
            ax[0].set_xlim(tspan)
    # ---------- make sure I can plot the electrochemistry data --------- #
    try:
        t = EC_and_MS[t_str]
    except KeyError:
        print(
            "data doesn't contain '" + str(t_str) + "', i.e. t_str. Can't plot EC data."
        )
        plotpotential = False
        plotcurrent = False
    try:
        V = EC_and_MS[V_str]
    except KeyError:
        print(
            "data doesn't contain '"
            + str(V_str)
            + "', i.e. V_str. Can't plot that data."
        )
        plotpotential = False
    try:
        J = EC_and_MS[J_str]
    except KeyError:
        print(
            "data doesn't contain '"
            + str(J_str)
            + "', i.e. J_str. Can't plot that data."
        )
        plotcurrent = False
    # to check if I have problems in my dataset
    # print('len(t) = ' + str(len(t)) +
    #       '\nlen(V) = ' + str(len(V)) +
    #       '\nlen(J) = ' + str(len(J)))
    # print(tspan)  # debugging
    # -------- cut the electrochemistry data according to tspan ------ #
    if tspan_EC is None:
        tspan_EC = tspan
    if type(tspan_EC) is not str and (plotcurrent or plotpotential):
        mask = np.logical_and(tspan_EC[0] < t, t < tspan_EC[-1])
        t = t[mask]
        # print(np.where(mask))  # debugging
        if plotpotential:
            V = V[mask]
        if plotcurrent:
            J = J[mask]
    # ---------- and plot the electrochemistry data! --------------- #
    if plotcurrent:
        # current goes on ax[2] (right y-axis) if potential is also plotted
        if plotpotential:
            i_ax = 2
        else:
            i_ax = 1
        ax[i_ax].plot(t, J, color=J_color, label=J_label, **spec)
        ax[i_ax].set_ylabel(J_str)
        ax[i_ax].set_xlabel("time / [s]")
        xlim = ax[i_ax - 1].get_xlim()
        ax[i_ax].set_xlim(xlim)
        if logplot[1]:
            ax[i_ax].set_yscale("log")
        if i_ax == 2:
            colorax(ax[i_ax], J_color, "right")
        else:
            colorax(ax[i_ax], J_color, "left")
        ax[i_ax].tick_params(axis="both", direction="in")  # 17K28
    if plotpotential:
        i_ax = 1
        ax[i_ax].plot(t, V, color=V_color, label=V_label, **spec)
        ax[i_ax].set_ylabel(V_str)
        if len(logplot) > 2:
            if logplot[2]:
                ax[i_ax].set_yscale("log")
        xlim = ax[i_ax - 1].get_xlim()
        ax[i_ax].set_xlim(xlim)
        colorax(ax[i_ax], V_color, "left")
        ax[i_ax].tick_params(axis="both", direction="in")  # 17K28
    ax[0].set_xlabel(t_str)
    if plotcurrent or plotpotential:
        # time label moves to the top axis; EC panel gets its own label
        ax[0].xaxis.set_label_position("top")
        ax[1].set_xlabel(t_str)
        if tspan is not None and not type(tspan) is str:
            ax[1].set_xlim(tspan)
    # -------- finishing up -------- #
    if saveit:
        figure1.savefig(name + ".png")
    # if colors_right is not None:
    #     ax[0].set_xlim(ax[1].get_xlim())  # probably not necessary
    if return_fig and (fig is None):
        fig = ax[0].get_figure()
    if verbose:
        print("function 'plot_experiment' finished!\n\n")
    if return_fig:
        return fig, ax
    else:
        return ax
def plot_masses_and_I(*args, **kwargs):
    """Deprecated alias for plot_experiment(); kept so old scripts keep working."""
    print(
        "\n\n'plot_masses_and_I' has been renamed 'plot_experiment'. Remember that next time!"
    )
    return plot_experiment(*args, **kwargs)
def plot_folder(
    folder_name,
    colors={"M2": "b", "M4": "r", "M18": "0.5", "M28": "g", "M32": "k"},
    RE_vs_RHE=None,
    A_el=None,
):
    """Overview plot of EC and MS data from an entire folder.

    Imports every data file in the folder (typically one day of measurements
    on one sample), synchronizes them onto a common time axis, applies the
    electrode metadata, and hands the combined dataset to plot_experiment.
    """
    from .Data_Importing import import_folder
    from .Combining import synchronize

    datasets = import_folder(folder_name)
    combined = synchronize(datasets, t_zero="first", append=True)
    sync_metadata(combined, RE_vs_RHE, A_el)
    return plot_experiment(combined, colors=colors)
def plot_lines_x(values, ax="new", ylims=None, **kwargs):
    """Draw a full-height vertical line at each x-position in *values*.

    ylims defaults to the axis' current y-limits; kwargs are passed to plot.
    Returns the axis.
    """
    if ax == "new":
        fig, ax = plt.subplots()
    ylims = ax.get_ylim() if ylims is None else ylims
    ax.set_ylim(ylims)
    for x_position in values:
        ax.plot([x_position, x_position], ylims, **kwargs)
    return ax
def plot_datapoints(
    integrals,
    colors,
    ax="new",
    label="",
    X=None,
    X_str="V",
    logplot=True,
    specs={},  # NOTE(review): mutable default; 'markersize' written below persists across calls
    Xrange=None,
):
    """Scatter-plot datapoints (e.g. integrated signals) against X.

    integrals will most often come from function 'get_datapoints' in module
    Integrate_Signals. colors maps quantity name -> color spec, or to a nested
    dict, in which case this function recurses with the label extended.
    A color given as a 2-tuple is (marker spec, color). Returns the axis.
    """
    if ax == "new":
        fig1 = plt.figure()
        ax = fig1.add_subplot(111)
    if X is None:
        X = integrals[X_str]
    for (quantity, color) in colors.items():
        # Here I just assume they've organized the stuff right to start with.
        # I could alternately use the more intricate checks demonstrated in
        # DataPoints.plot_errorbars_y
        value = integrals[quantity]
        # per-quantity X window, or one window for everything
        if type(Xrange) is dict:
            Xrange_val = Xrange[quantity]
        else:
            Xrange_val = Xrange
        if type(color) is dict:
            # nested color spec: recurse one level deeper
            plot_datapoints(
                value,
                color,
                ax=ax,
                logplot=logplot,
                label=label + quantity + "_",
                X=X,
                Xrange=Xrange_val,
                specs=specs,
            )
        else:
            if type(color) is tuple:  # note a list can be a color in rgb
                spec = color[0]
                color = color[1]
                if "markersize" not in specs:
                    specs["markersize"] = 5
            else:
                spec = "."
                if "markersize" not in specs:
                    specs["markersize"] = 15
            # print(quantity + '\n\tvalue=' + str(value) +
            #       '\n\tcolor=' + str(color) + '\n\tV=' + str(V))
            # print(quantity + ' ' + str(color))
            if Xrange is not None:
                # keep only points whose (rounded) X falls inside the window
                I_keep = np.array(
                    [
                        I
                        for (I, X_I) in enumerate(X)
                        if Xrange_val[0] <= float(np.round(X_I, 2)) <= Xrange_val[1]
                    ]
                )
                X_plot = np.array(X)[I_keep]
                value_plot = np.array(value)[I_keep]
                # there was a mindnumbing case of linking here.
                # tried fix it with .copy(), but new variable names needed.
            else:
                X_plot = X
                value_plot = value
            ax.plot(
                X_plot, value_plot, spec, color=color, label=label + quantity, **specs,
            )
    if logplot:
        ax.set_yscale("log")
    return ax
def plot_operation(
    cc=None,
    t=None,
    j=None,
    z=None,
    tspan=None,
    results=None,
    plot_type="heat",
    ax="new",
    colormap="inferno",
    aspect="auto",
    unit="pmol/s",
    color="g",
    colorbar=True,
    dimensions=None,
    verbose=True,
):
    """Plot the output of a mass-transport simulation.

    cc is the 2D concentration array (first axis: t or x; second axis: z),
    j the flux trace, t and z the coordinate vectors. Alternatively pass
    everything in *results* (a dict with keys 'cc', 't'/'x', 'z'/'y', 'j',
    'dimensions'); passing the dict as the first argument also works.

    plot_type: 'flux' (line plot), 'heat' (concentration image), or 'both'.
    Returns the axis, [axis, colorbar], or [ax1, ax2, ax3] for 'both' with
    a colorbar.
    """
    if verbose:
        print("\n\nfunction 'plot_operation' at your service!\n")
    # allow the results dict to be passed as the first positional argument
    if type(cc) is dict and results is None:
        results = cc
        cc = None
    if results is None:
        results = {}  # just so I don't get an error later
    if cc is None:
        cc = results["cc"]
    if t is None:
        if "t" in results:
            t = results["t"]
        elif "x" in results:
            t = results["x"]
        else:
            t = np.linspace(0, 1, np.size(cc, axis=0))
    if z is None:
        if "z" in results:
            z = results["z"]
        elif "y" in results:
            z = results["y"]
        else:
            z = np.linspace(0, 1, np.size(cc, axis=1))
    if j is None:
        # fixed: was `if j in results`, i.e. `None in results`, which was
        # always False so the stored flux was never used
        if "j" in results:
            j = results["j"]
        else:
            j = cc[0, :]
    if dimensions is None:
        if "dimensions" in results:
            dimensions = results["dimensions"]
        else:
            dimensions = "tz"
    if tspan is None:
        tspan = [t[0], t[-1]]
    if plot_type == "flux":
        if ax == "new":
            fig1 = plt.figure()
            ax1 = fig1.add_subplot(111)
        else:
            ax1 = ax  # making a heat map will only work with a new axis.
        ax1.plot(t, j, label="simulated flux")
        ax1.set_xlabel("time / [s]")
        ax1.set_ylabel("flux / [" + unit + "]")
        axes = ax1
    elif plot_type == "heat" or plot_type == "both":
        if ax == "new":
            fig1 = plt.figure()
            ax1 = fig1.add_subplot(111)
        elif type(ax) is list:
            ax1 = ax[0]
        else:
            ax1 = ax
        # imshow objects seem more versatile than contourf for some reason.
        trange = [min(t), max(t)]
        if dimensions[0] == "x":
            trange = [tr * 1e3 for tr in trange]  # m to mm
        zrange = [min(z * 1e6), max(z * 1e6)]  # m to um
        img = ax1.imshow(
            np.transpose(cc, [1, 0]),
            extent=trange[:] + zrange[:],  # have to be lists here!
            aspect=aspect,
            origin="lower",
            cmap=colormap,
        )
        if colorbar:
            cbar = plt.colorbar(img, ax=ax1)
            cbar.set_label("concentration / [mM]")
        if dimensions[0] == "t":
            ax1.set_xlabel("time / [s]")
        elif dimensions[0] == "x":
            ax1.set_xlabel("position / [mm]")
        ax1.set_ylabel("position / [um]")
        if plot_type == "both":
            if type(ax) is list:
                ax2 = ax[1]
            else:
                ax2 = ax1.twinx()
            ax2.set_ylabel("flux / [" + unit + "]")
            ax2.plot(t, j, "-", color=color)
            if colorbar:
                # fixed: cbar/ax3 were referenced unconditionally, raising
                # NameError when colorbar=False; move the colorbar aside only
                # when it exists
                cbar.remove()
                ax3 = img.figure.add_axes([0.85, 0.1, 0.03, 0.8])
                cbar = plt.colorbar(img, cax=ax3)
                cbar.set_label("concentration / [mM]")
                print("returning three axes!")
                axes = [ax1, ax2, ax3]
            else:
                axes = [ax1, ax2]
            ax1.set_xlim(tspan)
        elif colorbar:
            axes = [ax1, cbar]
        else:
            axes = ax1
    if verbose:
        print("\nfunction 'plot_operation' finished!\n\n")
    return axes
def set_figparams(
    figwidth=8, aspect=4 / 3, fontsize=7, figpad=0.15, figgap=0.08, style=None
):
    """Configure matplotlib rc parameters for consistent paper/thesis figures.

    figwidth: nominal figure width in cm (8 cm suits a thesis text width).
    aspect: width/height ratio (4/3 is standard).
    fontsize: in pt; 7 pt is a good compromise for thesis-sized figures.
    figpad/figgap: subplot margin and spacing as fractions of figure size.
    style: optional matplotlib style sheet applied first.
    """
    import matplotlib as mpl

    if style is not None:
        mpl.style.use(style)
    # Work with a "real" width of ~20 cm scaled by font size; the extra
    # factor 1.2 keeps all lines looking equally thick when shrunk.
    realfigwidth = 20 * (fontsize / 12) * 1.2
    figsize = [realfigwidth, realfigwidth / aspect]
    scale = realfigwidth / figwidth
    cm_per_inch = 2.54
    mpl.rc("font", size=fontsize * scale)
    mpl.rc(
        "mathtext",
        fontset="custom",
        rm="Helvetica",
    )
    mpl.rc(
        "figure",
        figsize=[figsize[0] / cm_per_inch, figsize[1] / cm_per_inch],
        dpi=100 * cm_per_inch / scale,
    )
    mpl.rc(
        "figure.subplot",
        left=figpad,
        right=1 - figpad,
        bottom=figpad,
        top=1 - figpad,
        hspace=figgap,
    )
    mpl.rc("xtick", labelsize="small")
    mpl.rc("ytick", labelsize="small")
    mpl.rc("savefig", dpi=250 * cm_per_inch / scale)
|
ScottSoren/EC_MS | src/EC_MS/preferences/colors.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 16 10:07:44 2017
@author: scott
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import colors
ColorConverter = colors.ColorConverter
plt.close("all")  # start from a clean slate of figures when this module runs
colorlist = list(colors.cnames.items())  # all named matplotlib colors
# Standard plotting color for each m/z channel, keyed as "M<m/z>".
# Values are any matplotlib color spec: one-letter codes, grayscale strings,
# named colors, or hex.
ourcolors_0 = {
    "M2": "b",
    "M4": "m",
    "M18": "y",
    "M28": "0.5",
    "M32": "k",
    "M40": "c",
    "M44": "brown",
    "M15": "r",
    "M26": "g",
    "M27": "limegreen",
    "M30": "darkorange",
    "M31": "yellowgreen",
    "M43": "tan",
    "M45": "darkgreen",
    "M34": "r",
    "M36": "g",
    "M46": "purple",
    "M48": "darkslategray",
    "M20": "slateblue",
    "M16": "steelblue",
    "M19": "teal",
    "M17": "chocolate",
    "M41": "#FF2E2E",
    "M42": "olive",
    "M29": "#001146",
    "M70": "purple",
    "M3": "orange",
    "M73": "crimson",
    "M74": "r",
    "M60": "g",
    "M58": "darkcyan",
    "M88": "darkred",
    "M89": "darkmagenta",
    "M130": "purple",
    "M132": "purple",
}
if __name__ == "__main__":
    # Script mode: render a bar chart of the standard colors, write a
    # human-readable color table, and export the mapping in Object_Files format.
    ourcolors = {}
    fig1 = plt.figure()
    ax1 = fig1.add_subplot(111)
    header = "{:8s},\t{:20s},\t{:21s},\t{:10s}\n".format(
        "mass", "python name", "rgb", "hex"
    )
    line_0 = "{:8s},\t{:20s},\t[{:5.3f}, {:5.3f}, {:5.3f}],\t{:10s}\n"
    f = open("color_table.txt", "w")
    f.write(header)
    for (mass, color) in ourcolors_0.items():
        print(mass + " : " + color)
        m = int(mass[1:])  # numeric m/z from the "M<m/z>" key
        ax1.barh(m, 1, align="center", color=color)
        rgb = ColorConverter.to_rgb(color)  # why do I have to do this?
        html = colors.rgb2hex(rgb)
        ourcolors[mass] = (color, rgb, html)
        rgb = [np.round(a, 3) for a in rgb]
        f.write(line_0.format(mass, color, rgb[0], rgb[1], rgb[2], str(html)))
    f.close()
    plt.savefig("colors_for_cinfdata.png")
    makeECMSfile = True
    if makeECMSfile:
        import os

        # NOTE(review): chdir dance to import Object_Files from the parent
        # directory; presumably predates proper packaging — verify still needed
        cwd = os.getcwd()
        os.chdir("..")
        from Object_Files import structure_to_lines

        os.chdir(cwd)
        lines = structure_to_lines(ourcolors_0, preamble="standard colors")
        g = open("standard_colors.txt", "w")
        g.writelines(lines)
        g.close()
|
ScottSoren/EC_MS | src/EC_MS/Object_Files.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 1 22:28 2016
@author: scott
[
20B14:
This module basically implements my own version of JSON, which I did not
know about when I wrote it. It is in ToDo.txt to change uses of Object_Files
to uses of JSON.
Because:
Simple is better than complex.
There should be one-- and preferably only one --obvious way to do it.
Special cases aren't special enough to break the rules.
If the implementation is hard to explain, it's a bad idea.
]
This module provides badass functions for coordinating between complex
objects and easily readable files. The compromise for so awesome a toolbox
is that the tools themselves aren't easily readable. Good luck!
"""
from __future__ import print_function, division
import os, re
import datetime
float_match = r"\s[-]?\d+[\.]?\d*(e[-]?\d+)?\s" # matches floats like -3.57e4
def group_lines(lines, indent="\t", removecomments=True):
    """
    Group indentation blocks into nested list elements. The line preceding an
    indentation block is included as the first element of its group.

    lines: raw file lines, each ending in '\\n'.
    indent: the indentation unit (one tab by default).
    removecomments: strip '#' comments first via remove_comments().
    Returns a nested list of strings mirroring the indentation hierarchy.
    """
    if removecomments:
        lines = remove_comments(lines)
    nest = 0  # current depth in the indentation hierarchy
    grouped_lines = []
    whitespace = re.compile(r"\s+")
    for line in lines:
        if len(re.sub(whitespace, "", line)) == 0:
            # skip lines that are entirely whitespace (protection added 17C23)
            continue
        line = line[:-1]  # drop the trailing '\n'
        # pop back out until the line's indentation matches the current depth
        while line[0:nest] != indent * nest:
            nest -= 1
        # walk down to the currently open group
        # (replaces the old eval("grouped_lines" + "[-1]" * nest) hack)
        group = grouped_lines
        for _ in range(nest):
            group = group[-1]
        if line[0 : nest + 1] == indent * (nest + 1):
            # line is indented one level deeper: open a new subgroup headed
            # by the previous line
            nest += 1
            group[-1] = [group[-1], line[nest:]]
        elif len(line) > nest:  # to drop empty lines
            group += [line[nest:]]
    return grouped_lines
def remove_comments(lines):
    """Strip '#' comments from a list of lines.

    A commented line keeps everything up to (and excluding) its *last* '#';
    lines left with no word characters are dropped entirely. Lines without
    a '#' pass through unchanged (including empty lines).
    """
    kept = []
    for raw_line in lines:
        if "#" not in raw_line:
            kept.append(raw_line)  # keep as-is, empty lines included
            continue
        stripped = re.search(r"^.*#", raw_line).group()[:-1]
        if re.search(r"\w", stripped):  # drop lines that only held a comment
            kept.append(stripped)
    return kept
def structure_to_lines(
    structure, nest=0, indent="\t", toplevel=False, preamble=None, title_key=None
):
    """
    Serialize a structure (dict/list, possibly nested, with str or other leaf
    values) into properly indented text lines ready to write to file.

    nest: current indentation depth; toplevel suppresses the header line for
    the outermost container. preamble becomes the key/label on the header
    line; title_key names a dict entry rendered inline on the header.
    """
    lines = []
    intro = "" if preamble is None else preamble
    if type(structure) is dict:
        if title_key in structure.keys():  # changed 'intro' to 'title_key' 16L14
            intro += indent + "-" + indent + structure[title_key]
        if not toplevel:
            lines.append(nest * indent + (intro or "<Dictionary>") + "\n")
            nest += 1
        for key, value in structure.items():
            if key == title_key:
                continue  # already rendered on the header line
            lines += structure_to_lines(
                value, nest, indent, preamble=key, title_key="title"
            )
    elif type(structure) is list:
        if not toplevel:
            lines.append(nest * indent + (intro or "<List>") + ":\n")
            nest += 1
        for value in structure:
            if type(value) is tuple and len(value) == 2:
                # a (key, value) pair inside a list, enabled 16L14
                lines += structure_to_lines(value[1], nest, indent, preamble=value[0])
            else:
                lines += structure_to_lines(value, nest, indent)
    elif type(structure) is str:
        separator = ": " if len(intro) > 0 else ""
        lines.append(nest * indent + intro + separator + structure + "\n")
    else:
        separator = indent + "=" + indent if len(intro) > 0 else ""
        lines.append(nest * indent + intro + separator + str(structure) + "\n")
    return lines
def dictionary_to_lines(dictionary, indent="\t"):
    """Serialize *dictionary* to file lines; the top level gets no header line.

    Fixed: the *indent* argument is now actually forwarded to
    structure_to_lines (it used to be silently ignored).
    """
    return structure_to_lines(dictionary, indent=indent, toplevel=True)
def grouped_lines_to_structure(lines, indent="\t"):
    """
    The inverse of structure_to_lines, operating on the nested-list output of
    group_lines. A string becomes a (key, value) pair or a bare string; a list
    becomes a list (header ending in ':') or a dict (otherwise).
    # as of 16L14, '\n' is removed by group_lines and not here.
    """
    if type(lines) is str:
        line = lines.strip()
        if ":" in line:  # then we've got a key and string value separated by a ': '
            key = re.search(r"^.+:", line).group()[:-1]  # don't want the ':'
            try:
                value = re.search(r":.+$", line).group()[2:]  # don't want the ': '
                # note: use of '$' means '\n' isn't in group()!
            except AttributeError:
                value = None  # nothing after the ':'
            structure = (key, value)
        elif (
            "=" in line
        ):  # then we've got a key and numerical value separated by a '\t=\t'
            key = re.search(r"^.+=", line).group()[:-2]  # don't want the '\t='
            try:
                value = re.search(r"=.+$", line).group()[2:]  # don't want the '=\t'
            except AttributeError:
                value = None
            try:
                # NOTE(review): eval() on file content — fine for trusted data
                # files, unsafe for untrusted input
                value = eval(value)
            except (SyntaxError, NameError):
                print("wasn" "t able to evaluate '" + value + "'")
            structure = (key, value)
        else:  # then we've got just a string
            structure = line
    elif type(lines) is list:
        title_line = lines[0]
        if ":" in title_line:  # then we want to make it into a list
            key = re.search(r"^.+:", title_line).group()[:-1]
            value = []
            for line in lines[1:]:
                value += [grouped_lines_to_structure(line)]
        else:  # then we want to make it into a dictionary
            value = {}
            if (indent + "-" + indent) in title_line:
                key = re.search(r"^.+" + indent + "-", title_line).group()[:-2]
                # don't want the '\t-'
                title = re.search(r"-" + indent + ".+$", title_line).group()[2:]
                # don't want the '-\t'
                value["title"] = title
            else:
                key = title_line
            for line in lines[1:]:
                item = grouped_lines_to_structure(line)
                try:
                    value[item[0]] = item[1]
                except IndexError:
                    print("missing something. line = " + str(line))
        # NOTE(review): these sentinels are lowercase while structure_to_lines
        # writes '<Dictionary>' / '<List>' — a round trip through both functions
        # may therefore wrap nested anonymous containers in a (key, value)
        # tuple instead of returning them bare. Confirm before relying on it.
        if key == "<list>:" or key == "<dictionary>":
            structure = value
        else:
            structure = (key, value)
    return structure
def lines_to_structure(lines, indent="\t", removecomments=True):
    """
    Convert raw file lines into a nested structure.

    Grouping must happen in a separate pass so that the recursion in
    grouped_lines_to_structure sees the whole file as a single structure;
    this function chains both steps.
    """
    grouped = group_lines(lines, indent, removecomments=removecomments)
    return grouped_lines_to_structure(grouped, indent)
def lines_to_dictionary(lines, indent="\t", removecomments=True):
    """Parse raw file lines into a dictionary.

    A '<dictionary>' header is prepended so the whole file is treated as one
    dictionary; grouped_lines_to_structure recognizes that header and returns
    the dict directly.

    Fixed: the *indent* argument is now forwarded (it used to be ignored).
    """
    lines = ["<dictionary>\n"] + lines
    return lines_to_structure(lines, indent=indent, removecomments=removecomments)
def lines_to_attributes(lines, obj, verbose=1, indent="\t"):
    """Parse file lines and set each top-level entry as an attribute on *obj*."""
    if verbose:
        print("function 'lines_to_attributes' at your service!")
    lines = ["<dictionary>\n"] + lines
    # gets lines_to_structure to treat it as one big dictionary.
    # Fixed: dropped the stale trailing [1] — grouped_lines_to_structure now
    # returns the dictionary itself for the '<dictionary>' header (it used to
    # return a (key, value) tuple), so indexing [1] raised a KeyError.
    attributes = lines_to_structure(lines, indent)
    for (key, value) in attributes.items():
        setattr(obj, key, value)
    if verbose:
        print("function 'lines_to_attributes' finished!")
def file_to_attributes(f, obj, verbose=1, indent="\t"):
    """Read the open file *f* and set its contents as attributes on *obj*."""
    return lines_to_attributes(f.readlines(), obj, verbose, indent)
def attributes_to_file(f, obj, verbose=1, indent="\t"):
    """Serialize *obj*'s attributes in Object_Files format and write them to *f*."""
    if verbose:
        print("function 'attributes_to_file' at your service!")
    attributes = obj.__dict__.copy()
    # drop bookkeeping attributes so the file isn't written into itself
    for unwanted_key in ["file_lines", "attr_status", "__str__"]:
        attributes.pop(unwanted_key, None)
    serialized = structure_to_lines(attributes, indent=indent)
    # drop the leading '<Dictionary>\n' header and one level of indentation
    f.writelines(line[1:] for line in serialized[1:])
    if verbose:
        print("function 'attributes_to_file' finished!")
def advanced_update(
    dict1, dict2, newstuff=True, oldstuff=False, newkeys=None, oldkeys=None, mask=None
):
    """
    Update dict1 with dict2 in place (and return dict1), with options about
    which keys to add or overwrite. Default values give a normal update.

    newstuff=False: don't add keys missing from dict1, except those in newkeys.
    oldstuff=True (or oldkeys): don't overwrite (those) existing keys.
    mask: callable; mask(key) -> True means the key is neither added nor updated.

    Fixed: dict2 is no longer mutated (a shallow copy is filtered instead),
    and the mask branch no longer raises KeyError for keys an earlier filter
    already removed. Mutable default arguments replaced with None sentinels;
    behavior is unchanged.
    """
    newkeys = [] if newkeys is None else newkeys
    oldkeys = [] if oldkeys is None else oldkeys
    dict2 = dict(dict2)  # filter a copy so the caller's dict2 stays intact
    # snapshot the keys so the dict isn't changed during iteration
    keys2 = list(dict2.keys())
    if not newstuff:
        # then don't add new keys
        for key in keys2:
            if key not in dict1.keys() and key not in newkeys:
                dict2.pop(key, None)
    if oldstuff or len(oldkeys) > 0:
        # then don't replace values of (evt. select) existing keys
        for key in keys2:
            if (oldstuff and key in dict1.keys()) or key in oldkeys:
                dict2.pop(key, None)
    if mask is not None:
        # then mask is a function evaluating to True if
        # a key shouldn't be added or updated.
        for key in keys2:
            if mask(key):
                dict2.pop(key, None)
    dict1.update(dict2)
    return dict1
def update_lines(lines, dictionary, **kwargs):
    """Parse *lines* into a dict, merge *dictionary* into it (kwargs are
    passed to advanced_update), and re-serialize the result to lines."""
    merged = advanced_update(lines_to_dictionary(lines), dictionary, **kwargs)
    return dictionary_to_lines(merged)
def date_scott(date="today"):
    """
    Return the date, default today's, as Scott writes it: two-digit year,
    letter month (A=January ... L=December), two-digit day, e.g. '18B03'
    for Feb 3rd 2018. A 6-digit string like '180203' is converted; any other
    input is returned as str(date) unchanged.
    """
    if date == "today":
        a = datetime.date.today()
        year = a.year
        month = a.month
        day = a.day
    elif type(date) is str:
        if len(date) == 6:  # 6-digit-integer date format, e.g. '180203'
            # fixed: this branch used to keep the slices as strings and
            # overwrite 'year' with the day slice (day was never set), so
            # the formatting below always raised
            year = int(date[0:2])
            month = int(date[2:4])
            day = int(date[4:6])
        else:  # if you insist
            return str(date)
    else:
        return str(date)
    date_string = "{0:2d}{1:1s}{2:2d}".format(
        year % 100, chr(ord("A") + month - 1), day
    )
    # zero-pad: '{:2d}' space-pads single digits
    date_string = date_string.replace(" ", "0")
    return date_string
def write_to_file(self, a=None, attr=None, data_directory="./data", *args, **kwargs):
    """
    Versatile tool for appending to the object's data file
    (<data_directory>/<self.name>.txt). Writes in one of the following ways:
    1. If the name of an attribute is given, that attribute is written.
    2. If a is a string, simply write a to the data file.
    3. If a is a function, it's a function that writes what you want
       to the data file given in the first argument.
    4. If a is a dictionary, list, or tuple, convert it to lines according to
       the system encoded in Object_Files.
    5. If a is not given but keyword arguments are, write **kwargs to the
       data file according to the system encoded in Object_Files.
    """
    if attr is not None:
        # case 1: serialize the named attribute as a (name, value) pair
        a = (attr, getattr(self, attr))
    elif a is None:
        if len(kwargs) == 0:
            print("nothing to write.")
            return
        else:
            a = kwargs  # case 5
    # NOTE(review): chdir is process-global; if open() or a() raises, the cwd
    # is never restored — consider a try/finally or building the full path
    cwd = os.getcwd()
    os.chdir(data_directory)
    file_name = self.name + ".txt"
    with open(file_name, "a") as f:
        if callable(a):
            a(f, *args, **kwargs)  # case 3: delegate the writing to a
        elif type(a) is str:
            if a[-1] != "\n":
                a += "\n"  # ensure the entry ends its line
            f.write(a)
        elif type(a) in (list, dict):
            if "key" in kwargs.keys():
                lines = structure_to_lines(a, preamble=kwargs["key"])
            else:
                lines = structure_to_lines(a)
            f.writelines(lines)
        elif type(a) is tuple:
            # (key, value) pair
            lines = structure_to_lines(a[1], preamble=a[0])
            f.writelines(lines)
        else:
            # adjacent string literals: prints "Couldnt write ..."
            print("Couldn" "t write " + str(a))
    os.chdir(cwd)
|
ScottSoren/EC_MS | src/EC_MS/utils/icpms.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 15:33:05 2019
@author: scott
"""
import re
import numpy as np
from matplotlib import pyplot as plt
unit_to_ppb = {
"ppt": 1e-3,
"ng/l": 1e-3,
"ppb": 1,
"ug/l": 1,
"ppm": 1000,
"mg/l": 1000,
}
float_match = "[-]?\d+[\.]?\d*(e[-]?\d+)?" # matches floats like '-3.5e4' or '7' or '245.13' or '1e-15'
def get_calibration_function(
    ICPMS_data,
    mass,
    unit="ug/l",
    units=[],
    ax=None,
    wash_str=None,
    plot_unit=None,
    factor=1,
):
    """
    Build a counts -> concentration calibration from ICP-MS standards.

    Given ICPMS_data, a dictionary of the structure returned by
    load_ICPMS_data, this finds the calibration standards (samples whose name
    contains one of *units*, 'ug/l' by default), parses the concentration out
    of each name, fits log(amount) vs log(counts) with a line, and optionally
    plots the calibration curve on *ax* ('new' makes a fresh figure).
    wash_str names a blank/wash sample used to draw the detection-limit lines.
    Returns the calibration function counts_to_amount(counts) in *unit*.
    """
    if len(units) == 0:
        units = [unit]
    # samples whose name mentions a concentration unit are standards
    calibration_keys = [
        key for key in ICPMS_data.keys() if len([u for u in units if u in key]) > 0
    ]
    # print(calibration_keys)  # debugging
    cal_amounts = np.array([])
    cal_counts = np.array([])
    for key in calibration_keys:
        unit_i = next(u for u in units if u in key)
        try:
            number = re.search(float_match, key).group()
            amount_i = float(number)
        except (AttributeError, ValueError):
            print("WARNING: could't match a float in '" + key + "'. skipping.")
            continue
        # convert the parsed concentration into the requested unit
        amount = amount_i * unit_to_ppb[unit_i] / unit_to_ppb[unit]
        cal_amounts = np.append(cal_amounts, amount)
        cal_counts = np.append(cal_counts, ICPMS_data[key][mass])
    # print(str(cal_amounts) + '\n' + str(cal_counts))  # debugging
    # linear fit in log-log space
    ln_cal_amounts = np.log(cal_amounts)
    ln_cal_counts = np.log(cal_counts)
    p = np.polyfit(ln_cal_counts, ln_cal_amounts, deg=1)
    print(p)  # debugging

    def counts_to_amount(counts):
        # evaluate the log-log fit and transform back to concentration
        ln_counts = np.log(counts)
        ln_ppb = p[0] * ln_counts + p[1]
        ppb = np.exp(ln_ppb)
        return ppb

    if ax == "new":
        fig, ax = plt.subplots()
    if ax is not None:
        ax.plot(cal_amounts, cal_counts, "ks", markersize=7)
        x_fit, y_fit = counts_to_amount(cal_counts), cal_counts
        if wash_str is not None:
            # extend the fit line down to the wash (blank) level
            wash = ICPMS_data[wash_str][mass]
            mean = np.mean(wash)
            std = np.std(wash)
            y_fit = np.append(mean, y_fit)
            x_fit = np.append(counts_to_amount(mean), x_fit)
        ax.plot(x_fit, y_fit, "r--")
        # fixed: capture the x-limits unconditionally. They were only assigned
        # inside the wash_str branch, so ax.set_xlim(xlim) below raised a
        # NameError whenever wash_str was None.
        xlim = ax.get_xlim()
        if wash_str is not None:
            # background level and 3-sigma detection-limit guide lines
            ax.plot(xlim, [mean, mean], "k--")
            ax.plot(xlim, [mean + 3 * std, mean + 3 * std], "k:")
            # ax.plot(xlim, [mean-3*std, mean-3*std], 'k:')
        ax.set_xlabel("amount / [" + unit + "]")
        ax.set_ylabel("counts")
        ax.set_xscale("log")
        ax.set_yscale("log")
        ax.set_xlim(xlim)  # restore limits distorted by the guide lines
        if plot_unit is not None:
            # secondary x-axis in an alternative unit, scaled by factor
            ax2 = ax.twiny()
            ax2.set_xlim([l * factor for l in ax.get_xlim()])
            ax2.set_xscale("log")
            ax2.set_xlabel("amount / [" + plot_unit + "]")
    return counts_to_amount
def load_ICPMS_data(ICPMS_file, sep="\t"):
    """
    Parse a simple ICP-MS export containing only the averaged intensities.
    If it doesn't work, you probably included extra stuff when exporting
    from the ICP-MS software!

    The header row containing '(KED)' defines the mass columns; data rows
    start after the row containing 'Y (cps)'. Results are returned as a
    nested dictionary with the sample name as the outer key and the mass as
    the inner key. If a sample name is repeated, the values are appended
    into a numpy array. data['masses'] maps column index -> mass label.
    """
    data = {"file": ICPMS_file}
    data_start = False
    header = ""  # accumulated header text (kept for debugging/inspection)
    with open(ICPMS_file, "r") as f:
        while True:
            line = f.readline().rstrip()
            if len(line) == 0:
                # end of file (readline() returns ''); note a blank line
                # mid-file also terminates parsing
                break
            if not data_start:
                header += line
                if "(KED)" in line:
                    cs = line.split(sep)
                    masses = {}
                    # two blank columns precede the mass columns
                    for i, c in enumerate(cs[2:]):
                        masses[i] = c.split(" ")[0]
                    data["masses"] = masses
                if "Y (cps)" in line:
                    data_start = True
                    continue
            if data_start:
                cs = line.split(sep)
                # the first column is a running number that isn't needed
                try:
                    name = cs[1]
                except IndexError:
                    print("WARNING! error on line : \n" + line)
                    # fixed: previously fell through and attributed this
                    # row's values to the previous row's sample name
                    continue
                if name not in data:
                    data[name] = {}
                for i, c in enumerate(cs[2:]):
                    try:
                        n = float(c)
                    except ValueError:  # narrowed from a bare except
                        print("error on line:\n\t" + line)
                        print("can't convert " + c + " to float.")
                        raise
                    mass = masses[i]
                    if mass in data[name]:
                        # repeated sample: accumulate into an array
                        data[name][mass] = np.append(data[name][mass], n)
                    else:
                        data[name][mass] = n
    return data
|
ScottSoren/EC_MS | src/EC_MS/Combining.py | <filename>src/EC_MS/Combining.py
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 19 10:40:46 2016
@author: Scott
This is the core file of the package. Includes functions for combining datasets.
"""
# make python2-compatible:
from __future__ import print_function
from __future__ import division
import time
import re
# import os #, sys
import numpy as np
from .parsing_tools import get_timecol, is_time, get_type, timestamp_to_seconds
from .patches import fix_timecols
# import warnings # actually not that useful, as you loose information about
# when in the running of the code the problem appeared.
def synchronize(
    data_objects,
    t_zero=None,
    append=None,
    file_number_type=None,
    cutit=None,
    cut_buffer=None,
    override=None,
    update=True,
    tz=None,
    name=None,
    verbose=True,
    vverbose=False,
):
    """
    'synchronize' is the most important function of electropy/ECpy/EC_MS/EC_Xray
    It combines numpy array data from multiple dictionaries into a single
    dictionary with all time variables aligned according to absolute time.
    If cutit=True, data will be retained in the interval of overlap. Otherwise,
    all data will be retained, but with t=0 at the start of the overlap,
    unless t_zero is specified (details below).
    If append=True, data columns of the same name will be joined and filled with
    zeros for sets that don't have them so that all columns remain the same length
    as their time columns, and a data column 'file number' will be added to
    keep track of where the data comes from.
    ---- inputs -----
        data_objects: traditionally a list of dictionaries, each of which has
    a key ['data_cols'] pointing to a list of keys for columns of data to be
    synchronized. data_objects can also contain objects with attribute data, in
    which data_objects[i].data is used in the same way as data_objects normally is,
    and then, if update is True, replaced by the combined data set.
        t_zero: a string or number representing the moment that is considered t=0
    in the synchronized dataset. If t_zero is a number, it is interpreted as a
    unix epoch time. t_zero='start' means it starts at the start of the overlap.
    'first' means t=0 at the earliest timestamp.
        append: True if identically named data columns should be appended. False
    if the data from the individual sets should be kept in separate columns. By
    default (append=None), append will be set inside the function to True if all of
    the data sets have the same 'data type'.
        file_number_type: When appending data, a column file_number is added
    storing the file numbers corresponding to the data from datasets of type
    file_number_type. combined_data['file number'] will thus have the same length
    as combined_data[timecol] where timecol is the time variable for that data
    type, i.e. 'time/s' for file_number_type='EC', as is the default.
        cutit: True if data from outside the timespan where all input datasets
    overlap should be removed. This can make things a bit cleaner to work with
    in the front-panel scripts.
        override: True if you don't want the function to pause and ask for your
    consent to continue in the case that there is no range of overlap in the datasets.
    override = False helps you catch errors if you're importing the wrong datasets.
    By default, override gets set to True if append is True.
        update: True if you want object.data to be replaced with the synchronized
    dataset for any non-dictionary objects in data_objects
        tz: timezone for generating timestamp, as pytz.timezone() instance or string
    to be read by pytz.timezone(). Local timezone assumed by default.
        name: put in the combined dataset with key 'name', referenced by some
    other functions. By default it combines the names of the original datasets.
        verbose: True if you want the function to talk to you. Recommended, as it
    helps catch your mistakes and my bugs. False if you want a clean terminal or stdout
    ---- output ----
        the combined and synchronized data set, as a dictionary
    It's really quite nice...
    But it's a monster function.
    There's lots of comments in the code to explain the reasoning.
    I'm happy for suggestions on how to improve! <EMAIL>
    """
    if verbose:
        print("\n\nfunction 'synchronize' at your service!")
    from .Data_Importing import timestring_to_epoch_time, epoch_time_to_timestamp

    if type(data_objects) is not list:
        print(
            """The first argument to synchronize should be a list of datasets!
                You have instead input a dictionary as the first argument.
                I will assume that the first two arguments are the datasets you
                would like to synchronize with standard settings."""
        )
        data_objects = [data_objects, t_zero]
        t_zero = "start"
    # figure out which of the inputs, if any, are objects with attribute 'data' vs simply datasets:
    datasets = []
    objects_with_data = []
    for i, dataset in enumerate(data_objects):
        if type(dataset) is dict:
            datasets += [dataset]
        else:
            try:
                data = dataset.data
            except AttributeError:  # just ignore objects that don't have attribute 'data'.
                print("can't get data from data_object number " + str(i))
                continue
            objects_with_data += [dataset]
            datasets += [data]
    if append is None:  # figure out if we're appending by looking at the datasets
        # simplest is if they have one time column each:
        if len(set([d["t_str"] for d in datasets if "t_str" in d])) == 1:
            append = True
        else:  # Otherwise check if they're the same type and share at least some data columns names
            data_cols_list = [d["data_cols"] for d in datasets]
            append = (len({d["data_type"] for d in datasets}) == 1) and (
                len(set.intersection(*data_cols_list)) > 0
            )
    if t_zero is None:
        if append:
            t_zero = "begin"  # t=0 at start of earliest dataset (beginning of union)
        else:
            t_zero = "start"  # t=0 at start of intersect
    if override is None:
        override = append  # Without override, it checks for overlap.
        # So, I should override when I expect no overlap.
        # I expect no overlap when appending datasets
        # Thus, override should be True when append is True.
    if cutit is None:
        cutit = cut_buffer is not None
    elif cutit is True and cut_buffer is None:
        cut_buffer = 60
    if append and (file_number_type is None):  # this is a tricky one.
        if (
            len({d["data_type"] for d in datasets}) == 1
            and not datasets[0]["data_type"] == "combined"
        ):
            file_number_type = datasets[0]["data_type"]
        else:
            file_number_type = "EC"
    # check a few of the inputs proving tricky
    if verbose:
        print("append is " + str(append))
        if append:
            print("file_number_type = " + file_number_type)
    now = time.time()  # now in unix epoch time,
    # ^ which is necessarily larger than the acquisition epochtime of any of the data.
    # prepare to collect some data in the first loop:
    tomorrow = now + 60 * 60 * 24
    # ^ necessarily larger than the APPARANT acquisition epoch time of any data
    # (sometimes, dataset importing assumes date='today', in which case
    # we can have now<t_0, messing up determination of t_start)
    recstarts = []  # first recorded time in unix epoch time
    # first and last timestamps in unix time
    t_first = tomorrow  # earliest timestamp in unix epoch time
    t_last = 0  # latest timestamp in unix epoch time
    # intersection timespan in unix time
    t_start = 0  # latest start time (start of overlap) in unix epoch time
    t_finish = tomorrow  # earliest finish time (finish of overlap) in unix epoch time
    # union timespan in unix time
    t_begin = tomorrow  # first time recorded in data
    t_end = 0  # last time recorded in data
    hasdata = (
        {}
    )  #'combining number' of dataset with False if its empty or True if it has data
    combined_data = {
        "data_cols": set(),
        "col_types": {},
        "timecols": {},
    }
    if append:
        combined_data["data_type"] = datasets[0]["data_type"]
    else:
        combined_data["data_type"] = "combined"
    # ^ data_cols is a SET, col_types is a DICTIONAIRY
    title_combined = ""
    # go through once to generate the title and get the start and end times of the files and of the overlap
    if verbose:
        print("---------- synchronize entering first loop -----------")
    for nd, dataset in enumerate(datasets):
        dataset["combining_number"] = nd
        fix_timecols(dataset)  # patch
        if "data_cols" not in dataset or len(dataset["data_cols"]) == 0:
            print(dataset["title"] + " is empty")
            hasdata[nd] = False
            recstarts += [now]  # don't want dataset list to be shortened
            # when sorted later according to recstarts!
            continue  # any dataset making it to the next line is not empty, i.e. has data.
        hasdata[nd] = True
        if len(title_combined) > 0:
            title_combined += ", "
            if nd == len(datasets) - 1:
                title_combined += "and "
        title_combined += "(" + dataset["title"] + ") as " + str(nd)
        if verbose:
            print("working on " + dataset["title"])
        try:
            t_0 = dataset[
                "tstamp"
            ]  # UNIX epoch time !!! The t=0 for the present dataset
        except KeyError:
            print("No tstamp in dataset. Trying to read it from date and timestamp.")
            date = None
            timestamp = None
            if "date" in dataset:
                date = dataset["date"]
            if "timestamp" in dataset:
                timestamp = dataset["timestamp"]
            t_0 = timestring_to_epoch_time(timestamp, date, tz=tz, verbose=verbose)
        if verbose:
            print("\ttstamp is " + str(t_0) + " seconds since Epoch")
        t_s = (
            tomorrow  # will decrease to the earliest start of time data in the dataset
        )
        t_f = 0  # will increase to the latest finish of time data in the dataset
        hasdata[nd] = False
        data_cols = dataset["data_cols"].copy()
        for col in data_cols:
            # print('col = ' + str(col)) # debugging
            try:
                istime = is_time(col, dataset)
            except TypeError:
                print(f"WARNING! can't tell if {col} is time. Removing from data_cols.")
                dataset["data_cols"].remove(col)
                continue
            if istime:
                try:
                    t_s = min(t_s, t_0 + dataset[col][0])
                    # ^ earliest start of time data in dataset in epoch time
                    t_f = max(t_f, t_0 + dataset[col][-1])
                    # ^ latest finish of time data in dataset in epoch time
                    hasdata[nd] = True
                except IndexError:  # if dataset['data_cols'] points to nonexisting data, something went wrong.
                    print("Time column " + col + " seems to have no data")
        if not hasdata[nd]:
            print(
                "WARNING: "
                + dataset["title"]
                + " seems to be empty, as no time columns have data!"
            )
        if not hasdata[nd]:  # move on from empty files
            recstarts += [
                -1
            ]  # otherwise we end up skipping something we didn't mean to skip
            continue
        recstarts += [t_s]  # first recorded time
        t_first = min([t_first, t_0])  # earliest timestamp
        t_last = max([t_last, t_0])  # latest timestamp
        t_start = max([t_start, t_s])  # latest start of time variable overall
        t_finish = min([t_finish, t_f])  # earliest finish of time variable overall
        t_begin = min([t_begin, t_s])  # earliest start of time variable overall
        t_end = max([t_end, t_f])  # latest finish of time variable overal
    # out of the first loop. We've got the title to be given to the new combined dataset,
    # the tspan of all the data in unix epoch time, info on which sets if any are empty,
    # and the start of data recording for each data set. Now, we save that info
    # and use it to get ready for the second loop.
    # t_zero is the UNIX epoch time corresponding to t=0 in the returned data set.
    # It can be 'first', 'last', 'start', or 'finish', which work as illustrated here:
    # | = timestamp, *--* = data acquisition
    # dataset1 |   *----------------------------*
    # dataset2     |       *-----------------------------------------*
    # dataset3  |                  *----------------------*
    # t =      first        last  start       finish
    if verbose:
        print("--- done with first loop.")
        print(
            "\n\nfirst: "
            + str(t_first)
            + ", last: "
            + str(t_last)
            + "\n, intersection start: "
            + str(t_start)
            + ", intersection finish: "
            + str(t_finish)
            + "\n, union start: "
            + str(t_begin)
            + ", union finish: "
            + str(t_end)
        )
    if t_start > t_finish and not override:
        print("No overlap. Check your files.\n")
        offerquit()
    if t_zero == "start":
        t_zero = t_start
    elif t_zero == "finish":
        t_zero = t_finish
    elif t_zero == "first":
        t_zero = t_first
    elif t_zero == "last":
        t_zero = t_last
    elif t_zero == "begin":
        t_zero = t_begin
    elif t_zero == "end":
        t_zero = t_end
    # some stuff is now ready to put into combined_data:
    if append:  # tspan is the UNION
        tspan_0 = np.array([t_begin, t_end])  # tspan in unix time
    else:  # tspan is the INTERSECTION
        tspan_0 = np.array([t_start, t_finish])  # tspan in unix time
    combined_data["tspan_0"] = tspan_0
    # ^ start and finish times of union/intersection as unix epoch times
    combined_data["tspan"] = tspan_0 - t_zero
    # ^ start and finish times of dataset union/intersection as seconds since t=0
    combined_data["tspan_1"] = tspan_0 - t_first
    # ^ overlap start and finish times as seconds since earliest file start
    combined_data["tspan_2"] = combined_data["tspan"]  # old code calls this tspan_2.
    combined_data["tstamp"] = t_zero
    combined_data["timestamp"] = epoch_time_to_timestamp(t_zero, tz=tz, verbose=verbose)
    # ^ we want that timestamp refers to t=0
    combined_data["title"] = title_combined
    combined_data["first"] = t_first - t_zero
    combined_data["last"] = t_last - t_zero
    combined_data["start"] = t_start - t_zero
    combined_data["finish"] = t_finish - t_zero
    combined_data["begin"] = t_begin - t_zero
    combined_data["end"] = t_end - t_zero
    # Deal with the cases that all or all but one dataset is empty:
    N_notempty = len([1 for v in hasdata.values() if v == 1])
    if N_notempty == 0:
        print(
            "First loop indicates that no files have data!!! "
            + "synchronize will return an empty dataset!!!"
        )
    elif N_notempty == 1:
        print(
            "First loop indicates that only one dataset has data! "
            + "Synchronize will just return that dataset!"
        )
        combined_data = next(datasets[nd] for nd, v in hasdata.items() if v == 1)
        print("\nfunction 'synchronize' finished!\n\n")
        return combined_data
    # Sort datasets by start of recording so that in the second loop they are combined in the right order
    I_sort = np.argsort(recstarts)
    datasets = [datasets[I] for I in I_sort]
    # note: EC lab techniques started together have same tstamp but different recstart
    # It's very nice when appending data from multiple files (EC data especially,
    # from experience), to be able to select data later based on file number
    if append:
        file_number = np.array([])
    combined_data_keys_0 = list(combined_data.keys())
    # used to avoid overwriting metadata at end of second loop
    # ... And loop again to synchronize the data and put it into the combined dictionary.
    if verbose:
        print("\n\n---------- synchronize entering second loop -----------\n")
    for i, dataset in enumerate(datasets):
        nd = dataset["combining_number"]
        # ^ note, nd is the number of the dataset from the first loop, and is
        # not scrambled by the sorting of the datasets according to recstart done above.
        if verbose:
            print(
                "working on dataset "
                + dataset["title"]
                + ", which has combining number = "
                + str(nd)
            )
        if not hasdata[nd]:
            if verbose:
                # fix: was a plain string, so {nd} was never interpolated
                print(
                    f"skipping this dataset, because its combining number, {nd}, is in the empty files list"
                )
            continue
        # the synchronization is based on the offset of the individual dataset
        # with respect to t_zero, both in unix time.
        t_0 = dataset["tstamp"]
        offset = t_0 - t_zero
        # Prepare to cut based on the absolute (unix epoch) time interval.
        if cutit:
            masks = (
                {}
            )  # will store a mask for each timecol to cut the corresponding cols with
            for col in dataset["data_cols"]:
                if is_time(col, dataset):
                    if verbose:
                        print("preparing mask to cut according to timecol " + col)
                    t = dataset[col]
                    masks[col] = np.logical_and(
                        (t_start - t_0 - cut_buffer) < t,
                        t < (t_finish - t_0 + cut_buffer),
                    )
        # Check how many rows will be needed for relevant data types when appending data,
        # and append to in the 'file number' column
        if append:
            # sometimes, a certain data col is absent from some files of the same
            # data type, but we still want it to match up with the corresponding
            # time variable for the rest of the files in combined_data. The most
            # common example is OCV between other methods in EC data, which has no
            # current. When that happens, we want the filler values 0 to make sure
            # all the collumns line up. oldcollength and collength will help with that:
            oldcollength = {}  # will store the existing length of each data col
            for col in combined_data["data_cols"]:
                if is_time(col, combined_data):
                    oldcollength[col] = len(combined_data[col])
            collength = {}  # will store the length to be appended to each data col
            for col in dataset["data_cols"]:
                if is_time(col, dataset):
                    collength[col] = len(dataset[col])
                    if col not in oldcollength.keys():
                        oldcollength[col] = 0
                    else:  # the case that the timecol is in both combined_data and dataset
                        if vverbose:
                            print("prepared to append data according to timecol " + col)
            if "file number" in dataset["data_cols"]:
                dataset["data_cols"].remove("file number")  # so that file numbers from
                # previous synchronizations get stored as metadata, i.e., as '_file number'
            got_file_number = False  # we only want to get the file number once
        # now we're ready to go through the columns and actually process the data
        # for smooth entry into combined_data
        for col in dataset["data_cols"]:
            data = dataset[col]
            # processing: cutting
            if cutit:  # cut data to only return where it overlaps
                try:
                    data = data[masks[get_timecol(col, dataset)]]
                except KeyError:
                    # fix: typo in warning message ("coulsn't")
                    print(
                        f"WARNING: couldn't cut {col} because no mask for its timecol"
                    )
            # processing: offsetting
            if is_time(col, dataset):
                data = data + offset
            # processing: for appended data
            if append:
                # get data from old column for appending
                if col in combined_data:
                    olddata = combined_data[col]
                else:
                    olddata = np.array([])
                # but first...
                # proccessing: ensure elignment with timecol for appended data
                l1 = len(data) + len(olddata)
                timecol = get_timecol(col, dataset)
                coltype = get_type(col, dataset)
                istime = col == timecol
                try:
                    l0 = oldcollength[timecol] + collength[timecol]
                    # ^ I had to get these lengths before because I'm not sure whether
                    # timecol will have been processed first or not...
                except KeyError:
                    print(
                        f"WARNING: {col} should have timecol {timecol} but this is "
                        + f"not in dataset. Not adding {col} to the combined dataset."
                    )
                    continue
                if (
                    l0 > l1
                ):  # this is the case if the previous dataset was missing col but not timecol
                    if col in ["Ewe/V", "<Ewe>/V"]:
                        if (
                            False
                        ):  # debugging. This particular problem (0's for Ewe/V in CP datasets)
                            # turns out to be an EC-Lab text export bug, not my bug!
                            print(
                                f"col = {col}. \ndataset['data_cols'] = {dataset['data_cols']}."
                                + f"\ncombined_data['data_cols'] = {combined_data['data_cols']}"
                                + f"\nl1 = {l1}\nl0 = {l0}"
                            )  # debugging DEBUGGING!
                            # fix: was 'from matplotlibc import ...' (typo)
                            from matplotlib import pyplot as plt

                            fig, ax = plt.subplots()
                            ax.plot(dataset[timecol], dataset[col], "k")
                            if col in combined_data:
                                ax.plot(combined_data[timecol], combined_data[col], "r")
                            ax.set_ylabel(col)
                    if (
                        col == "Ewe/V" and "<Ewe>/V" in combined_data
                    ):  # we don't want to extend 'Ewe/V' with zeros!
                        filler = dataset["<Ewe>/V"][l1:]
                        print(
                            "copying <Ewe>/V to Ewe/V to keep the latter the right length."
                        )
                    elif (
                        col == "<Ewe>/V" and "Ewe/V" in combined_data
                    ):  # we don't want to extend 'Ewe/V' with zeros!
                        filler = dataset["Ewe/V"][l1:]
                        print(
                            "copying Ewe/V to <Ewe>/V to keep the latter the right length."
                        )
                    else:
                        filler = np.array([0] * (l0 - l1))
                        print(
                            "Filling column "
                            + col
                            + " to keep in line with timecol "
                            + timecol
                            + "!!!"
                        )  # debugging
                    olddata = np.append(olddata, filler)
                    # ^ and now len(olddata) = len(combined_data[timecol])
                # now, fill in file number according to datasets of type file_number_type
                if not got_file_number and istime and coltype == file_number_type:
                    fn = np.array([i] * collength[col])
                    file_number = np.append(file_number, fn)
                    got_file_number = True
                # APPEND!
                data = np.append(olddata, data)
            # processing: ensuring unique column names for non-appended data
            else:
                if col in combined_data:
                    if len(combined_data[col]) == 0:
                        print(col + " in multiple datasets. Overwriting an empty one!")
                    else:
                        print("conflicting versions of " + col + ". adding subscripts.")
                        col = col + "_" + str(nd)
            # ---- put the processed data into combined_data! ----
            combined_data[col] = data
            # And make sure it's in data_cols
            if col not in combined_data["data_cols"]:
                combined_data["data_cols"].add(col)
            # And make sure the dataset knows what type it is:
            combined_data["col_types"][col] = get_type(col, dataset)
            # and what the timecol is:
            if "timecols" in dataset and col in dataset["timecols"]:
                # fix: compare the timecol for THIS col, not the whole timecols
                # dicts; also, 'timecol' was unbound here in the non-append path.
                new_timecol = dataset["timecols"][col]
                if (
                    col in combined_data["timecols"]
                    and not combined_data["timecols"][col] == new_timecol
                ):
                    print(
                        f"WARNING!!! datasets don't agree on timecol for {col}. Using {new_timecol}."
                    )
                combined_data["timecols"][col] = new_timecol
        # keep the metadata from the original datasets
        for col, value in dataset.items():
            if col in combined_data["data_cols"]:
                continue  # Otherwise I duplicate all the data.
            if (
                col[0] == "_"
            ):  # let's keep it to one level of '_', and nest dictionaries
                if col in combined_data:
                    if nd in combined_data[col] and vverbose:
                        print(
                            "overwriting "
                            + col
                            + "["
                            + str(nd)
                            + "] with nested metadata."
                        )
                    combined_data[col][nd] = value
                else:
                    combined_data[col] = {nd: value}
                    if verbose:
                        print("nesting metadata for " + col + ", nd = " + str(nd))
                continue
            if col not in combined_data_keys_0:  # avoid overwriting essentials
                combined_data[col] = value  # this will store that from the
                # latest dataset as top-level
            col = "_" + col  # new name so that I don't overwrite
            # essential combined metadata like tstamp
            if col in combined_data:
                if nd in combined_data[col]:
                    if verbose:
                        print(
                            col
                            + "["
                            + str(nd)
                            + "] has nested metadata, skipping unnested."
                        )
                else:
                    combined_data[col][nd] = value
            else:
                # I expect to arrive here only once, so good place for output
                if vverbose:
                    print("metadata from original files stored as " + col)
                combined_data[col] = {nd: value}
    # ----- And now we're out of the loop! --------
    if verbose:
        print("--- done with second loop.\n")
    # There's still a column length problem if the last dataset is missing
    # columns! Fixing that here.
    for col in combined_data["data_cols"]:
        l1 = len(combined_data[col])
        timecol = get_timecol(col, combined_data)
        try:
            l0 = len(combined_data[timecol])
        except (KeyError, TypeError):
            print("can't find timecol for {}. skipping.".format(col))
            continue
        if l0 > l1:
            if (
                col == "Ewe/V" and "<Ewe>/V" in dataset
            ):  # we don't want to extend 'Ewe/V' with zeros!
                filler = dataset["<Ewe>/V"][l1:]
                print("copying <Ewe>/V to Ewe/V to keep the latter the right length.")
            elif (
                col == "<Ewe>/V" and "Ewe/V" in dataset
                # fix: was '<Ewe>/V>' (trailing '>'), making this branch unreachable
            ):  # we don't want to extend 'Ewe/V' with zeros!
                filler = dataset["Ewe/V"][l1:]
                print("copying Ewe/V to <Ewe>/V to keep the latter the right length.")
            else:
                filler = np.array([0] * (l0 - l1))
            combined_data[col] = np.append(combined_data[col], filler)
    # store the name
    if name is None:
        name = combined_data["title"]
    combined_data["name"] = name
    if append:
        combined_data["file number"] = file_number
        combined_data["col_types"]["file number"] = file_number_type
        if "file number" not in combined_data["data_cols"]:  # how could it be?
            combined_data["data_cols"].add("file number")
    # add 't_str' to the data set (don't know where else to do it)
    if "time/s" in combined_data.keys():
        combined_data["t_str"] = "time/s"
    # check that there's actually data in the result of all this
    if len(combined_data["data_cols"]) == 0:
        print("WARNING: Synchronize is returning an empty dataset!")
    # update the objects (e.g. ScanImages object in EC_Xray). This is nice!
    if update:
        for instance in objects_with_data:
            instance.data = combined_data
    # and, we're done!
    if verbose:
        print("function 'synchronize' finished!\n\n")
    return combined_data
# -------------------------------------------------------------------- #
def cut(x, y, tspan=None, returnindeces=False, override=False):
    """Return the parts of x and y where x falls within tspan.

    Vectorized 17L09 for EC_Xray. Should be copied back into EC_MS
    """
    if tspan is None:
        return x, y
    if np.size(x) == 0:
        print("\nfunction 'cut' received an empty input\n")
        offerquit()
    mask = (tspan[0] <= x) & (x <= tspan[-1])
    if not mask.any() and not override:
        print(
            "\nWarning! cutting like this leaves an empty dataset!\n"
            + f"x goes from {x[0]} to {x[-1]} and tspan = {tspan}\n"
        )
        offerquit()
    x_cut = x.copy()[mask]
    y_cut = y.copy()[mask]
    if returnindeces:
        return x_cut, y_cut, mask  # new 17H09
    return x_cut, y_cut
def timeshift(dataset, t_zero="start"):
    """Shift all time columns of dataset so that t=0 falls at t_zero (in place).

    t_zero may be None (no shift), 'start' (use dataset['tspan'][0]), or a
    number (the time to subtract). Returns the same dataset.
    """
    if t_zero is None:
        offset = 0
    elif t_zero == "start":
        offset = dataset["tspan"][0]
    else:
        offset = t_zero
    for column in dataset["data_cols"]:
        if is_time(column, dataset):
            dataset[column] -= offset
    try:
        old_tspan = dataset["tspan"]
    except KeyError:
        print("no tspan for timeshift to shift")
    else:
        dataset["tspan"] = [old_tspan[0] - offset, old_tspan[-1] - offset]
    return dataset
def purge_column(dataset, col, purge=True, verbose=True):
    """Remove col from dataset and from its 'data_cols' set, if purge is True."""
    if not purge:
        return
    print("removing " + col + " entirely.")
    if col in dataset:
        dataset.pop(col)
    else:
        print("hmmm... it may have already been removed.")
    if col in dataset["data_cols"]:
        dataset["data_cols"].remove(col)
def cut_dataset(
    dataset_0,
    tspan=None,
    t_edge=None,
    tspan_0=None,
    time_masks=None,
    # if I directly write time_masks = {} here, it saves old values...
    t_zero=None,
    purge=True,
    verbose=True,
):
    """
    Makes a time-cut of a dataset. Written 17H09.
    Unlike time_cut, does not ensure all MS data columns are the same length.
    if tspan_0 is used, it is interpreted as unix time.
    If some time_masks are already known, as is the case for EC data when
    using EC_MS.EC.select_cycles(), these can be given as input.

    Arguments (as read from the code below):
        dataset_0: the dataset dictionary; shallow-copied, so the top-level
            dict and its 'data_cols' set are not mutated, but array contents
            are shared with the copy until replaced.
        tspan: the time interval to keep, relative to the dataset's t=0.
            Defaults to dataset['tspan'] (in which case t_edge defaults to 120).
        t_edge: if given (or defaulted), the cut interval is WIDENED by t_edge
            on each side before cutting, keeping extra points at the edges.
        tspan_0: alternative to tspan, as absolute unix time; converted using
            dataset['tstamp'].
        time_masks: optional {timecol: boolean mask} dict of precomputed masks;
            masks computed here are added to it.
        t_zero: passed to timeshift() at the end to re-reference time.
        purge: if True, columns that cannot be cut are removed entirely
            via purge_column().
        verbose: True for talkative output.

    Returns:
        the cut (and possibly time-shifted) copy of the dataset.
    """
    if verbose:
        print("\n\nfunction 'cut dataset' at your service!\n")
    dataset = dataset_0.copy()
    dataset["data_cols"] = dataset["data_cols"].copy()
    if tspan is None and tspan_0 is None:
        tspan = dataset["tspan"]
        if t_edge is None:
            t_edge = 120
    if time_masks is None:
        time_masks = {}
    if tspan is None and tspan_0 is not None:
        # convert the absolute (unix epoch) interval to dataset-relative time
        t0 = dataset["tstamp"]
        tspan = [tspan_0[0] - t0, tspan_0[-1] - t0]
    if t_edge is not None:
        # widen the interval by t_edge on both sides
        tspan = [tspan[0] - t_edge, tspan[-1] + t_edge]
    if verbose:
        print("cutting according to tspan = " + str(tspan))
    data_cols = dataset["data_cols"].copy()
    # ^ use this in loop to avoid a "Set changed size during iteration" RuntimeError
    for col in data_cols:
        timecol = get_timecol(col, dataset)
        if timecol not in dataset:
            print(
                f"Warning!!! can't cut {col} because dataset doesn't have timecol {timecol}"
            )
            purge_column(dataset, col, purge=purge, verbose=verbose)
            continue
        if timecol in time_masks.keys():
            # reuse the mask already computed (or supplied) for this timecol
            mask = time_masks[timecol]
        else:
            t = dataset[timecol]
            mask = np.logical_and(tspan[0] < t, t < tspan[-1])
            # Don't cut off outer endpoints before evt interpolation (if used by plot_vs_potential)
            extra_left = np.append(mask[1:], False)
            extra_right = np.append(False, mask[:-1])
            mask = np.logical_or(extra_left, extra_right)
            time_masks[timecol] = mask
        try:
            dataset[col] = dataset[col].copy()[mask]
        except (IndexError, TypeError) as e:
            # column length doesn't match its timecol, or it isn't an array;
            # optionally drop it rather than leave it inconsistent
            print("WARNING: couldn't cut " + col + " because " + str(e))
            purge_column(dataset, col, purge=purge, verbose=verbose)
    dataset["tspan"] = tspan
    timeshift(dataset, t_zero)
    if verbose:
        print("\nfunction 'cut dataset' finsihed!\n\n")
    return dataset
def deep_append(d1, d2):
    """Return a dict joining the values of keys common to d1 and d2.

    Nested dicts are combined recursively; other values are joined with
    np.append. Keys present in only one of the inputs are dropped.
    """
    merged = {}
    shared_keys = set(d1.keys()).intersection(set(d2.keys()))
    for k in shared_keys:
        a, b = d1[k], d2[k]
        if type(a) is dict and type(b) is dict:
            merged[k] = deep_append(a, b)
            continue
        try:
            merged[k] = np.append(a, b)
        except TypeError:
            print("couldn't append " + k)
    return merged
def offerquit():
    """Ask the user whether to continue; raise SystemExit if they answer 'n'."""
    answer = input("continue? y/n\n")
    if answer == "n":
        raise SystemExit
def remove_nans(data):
    """Drop rows containing NaN from all data columns of data (in place)."""
    return remove_filtered_values(data, np.isnan)
def remove_negatives(data):
    """Drop rows with negative values from all data columns of data (in place)."""
    return remove_filtered_values(data, lambda x: x < 0)
def remove_filtered_values(data, filter_fun):
    """Remove rows where filter_fun is True, from all data columns (in place).

    The filter function has to take a np array and return a boolean numpy array.
    Rows are removed from every column sharing a timecol if ANY column on that
    timecol is filtered out at that row.
    """
    masks = {}
    # First loop will find all the filtered-out values, combining per timecol
    for col in data["data_cols"]:
        timecol = get_timecol(col, data)
        x = data[col]
        try:
            mask = np.logical_not(filter_fun(x))
        except TypeError:
            print("Warning!!! Couldn't filter for col " + col)
            continue
        if timecol in masks:
            masks[timecol] = np.logical_and(masks[timecol], mask)
        else:
            masks[timecol] = mask
    # Second loop will remove them, as well as corresponding data points from other cols with same timecol
    for col in data["data_cols"]:
        timecol = get_timecol(col, data)
        if timecol not in masks:
            # fix: previously a KeyError if the filter failed for every column
            # of this timecol, leaving no mask behind. Skip instead.
            print("Warning!!! no mask for timecol " + str(timecol) + "; not filtering " + col)
            continue
        mask = masks[timecol]
        data[col] = data[col][mask]
    return data  # not necessary
def rename_SI_cols(dataset, removenans=True):
    """
    names columns of Spectro Inlets data like EC-Lab and PyExpLabSys+cinfdata name them.

    MS columns ('C<n>...M<m>...') are copied to 'M<m>-x'/'M<m>-y'; potentiostat
    columns ('pot...') are copied to EC-Lab names ('time/s', 'Ewe/V', 'I/mA',
    'cycle number'). The dataset's 'data_type' is set to 'combined' at the end.
    If removenans is True, NaN-containing rows are dropped via remove_nans().
    """
    if isinstance(dataset, dict):
        print("WARNING!!! The use of dataset dictionaries is no longer suported!!!")
        # fix: warning message was misspelled ("WARNGING")
        data = dataset
    else:
        data = dataset.data
    data_cols = data["data_cols"].copy()
    # ^ to avoid changing the size of a set during iteration
    for col_0 in data_cols:
        # fix: removed dead placeholder assignment (col = "test"); every path
        # below either sets col or continues.
        if not get_type(col_0, dataset=dataset) == "SI":
            continue
        if re.search("^C[0-9]+", col_0):
            try:
                mass = re.search(r"M[0-9]+", col_0).group()
            except AttributeError:
                print("Can't rename SI col " + col_0)
                continue
            if "Time" in col_0:
                col = mass + "-x"
            else:
                col = mass + "-y"
            col_type = "MS"
        elif re.search("^pot", col_0):
            for c0, c in [
                ("Time", "time/s"),
                ("Voltage", "Ewe/V"),
                ("Current", "I/mA"),
                ("Cycle", "cycle number"),
            ]:
                if c0 in col_0:
                    col = c
                    break
            else:
                print("Can't rename SI col " + col_0)
                continue
            col_type = "EC"
        else:
            continue
        data[col] = data[col_0].copy()
        data["col_types"][col] = col_type
        data["data_cols"].add(col)
        print(col_0 + " copied to " + col)
    if removenans:
        remove_nans(data)
    data["data_type"] = "combined"  # since now it has data columns of various types
def rename_ULM_cols(data):
    """
    Add SurfCat-style column names to a ULM dataset so Scott's functions work.

    MS channels named 'ion_current_M<x>_sBG_ALS_sub' are aliased as
    '<mass>-x'/'<mass>-y', and the EC columns get EC-Lab names
    ('Ewe/V', 'I/mA', 'time/s'). The new names refer to the same underlying
    arrays, so no extra memory is used.
    """
    # every column in this data type shares the single 'time' column
    data["timecols"] = {column: "time" for column in data["data_cols"]}
    # collect the masses present among the ion-current columns
    detected_masses = set()
    for column in data["data_cols"]:
        if "ion_current" not in column:
            continue
        match = re.search("M[0-9]+", column)
        if match is not None:
            detected_masses.add(match.group())
    # alias each mass's time and signal under the SurfCat MS names
    for mass in detected_masses:
        source_col = "ion_current_" + mass + "_sBG_ALS_sub"
        xcol = mass + "-x"
        ycol = mass + "-y"
        data[xcol] = data["time"]
        data[ycol] = data[source_col]
        data["data_cols"] = data["data_cols"].union({xcol, ycol})
    # alias the EC columns under their EC-Lab names
    data["Ewe/V"] = data["potential"]
    data["I/mA"] = data["current1_mA"]
    data["time/s"] = data["time"]  # SurfCat EC time is called 'time/s'.
    data["data_cols"] = data["data_cols"].union({"Ewe/V", "I/mA", "time/s"})
def rename_RGA_cols(data):
    """
    Copy RGA channel columns to the mass-based names ('M<x>-x'/'M<x>-y')
    that PyExpLabSys+cinfdata would have used.

    Skips (with a warning) any mass for which data already exists, e.g. if
    this function was already called or two channels share a mass.
    """
    for channel, mass in data["channels"].items():
        xcol = mass + "-x"
        ycol = mass + "-y"
        if xcol in data:
            # don't clobber data that is already there for this mass
            print(
                "WARNING: rename_RGA_cols() not copying "
                + channel
                + " to "
                + mass
                + " because there is existing data. Maybe rename_RGA_cols"
                + " was already called, or maybe the same mass appears in multiple channels."
            )
            continue
        data[xcol] = data["Time(s)"].copy()
        data[ycol] = data[channel].copy()
        data["col_types"][xcol] = "MS"
        data["col_types"][ycol] = "MS"
        data["data_cols"].add(xcol)
        data["data_cols"].add(ycol)
        print(channel + " copied to " + mass)
def rename_MKS_cols(data):
    """
    Copy MKS 'Mass <x>' columns to the standard 'M<x>-x'/'M<x>-y' names.

    Each mass channel gets a copy of the shared 'Time' column as its x data
    and is registered as an 'MS' column with its own timecols entry.
    """
    for original in data["data_cols"].copy():
        if not original.startswith("Mass"):
            continue
        mass = "M" + original[5:]  # 'Mass 28' -> 'M28' (skips the space)
        xcol = mass + "-x"
        ycol = mass + "-y"
        data[xcol] = data["Time"].copy()
        data[ycol] = data[original].copy()
        print("copied " + original + " to " + mass)
        data["timecols"][ycol] = xcol
        for new_col in (xcol, ycol):
            data["col_types"][new_col] = "MS"
            data["data_cols"].add(new_col)
def rename_CHI_cols(data):
    """
    Copy CHI potentiostat columns to the EC-Lab-style names used elsewhere
    in EC_MS ('time/s', 'Ewe/V', 'I/A', '(Q-Qo)/C'), and derive 'I/mA' from
    'I/A' if it isn't already present.
    """
    translations = [
        ("Time/s", "time/s"),
        ("Potential/V", "Ewe/V"),
        ("Current/A", "I/A"),
        ("Charge/C", "(Q-Qo)/C"),
    ]
    # iterate over a copy so we can add to the set as we go
    for original in data["data_cols"].copy():
        for chi_name, ec_name in translations:
            if chi_name in original:
                new_name = ec_name
                break
        else:  # not a recognized CHI column
            continue
        data[new_name] = data[original].copy()
        data["col_types"][new_name] = "EC"
        data["data_cols"].add(new_name)
        print(original + " copied to " + new_name)
    if "I/A" in data and "I/mA" not in data:
        data["I/mA"] = data["I/A"] * 1e3  # convert A to mA
        data["col_types"]["I/mA"] = "EC"
        data["data_cols"].add("I/mA")
def parse_CHI_header(data):
    """
    Reconstruct step columns ('Current/A', 'Potential/V') and a section
    counter ('Ns') from the step program listed in a CHI file's header.

    The header lists steps as pairs of lines like::

        E = 1.0
        T1 (s) = 2

    i.e. a setpoint line followed by a duration line. For each step, the
    setpoint value is filled into the corresponding column over that step's
    timespan, and 'Ns' marks the step number.
    """
    header = data["header"]
    lines = header.split("\n")
    # find the time column (fall back to reconstructing it from the scan rate)
    for tcol in ["Time/sec", "Time/s", "time/s"]:
        if tcol in data:
            t = data[tcol]
            break
    else:
        print("EC_MS.Combining.parse_CHI_header() can't find timecol in CHI data!")
        print("trying to get it from scan rate.")
        from .EC import time_from_scanrate

        t = time_from_scanrate(data)
    N = len(t)
    ti = 2 * t[0] - t[1]  # one tstep back from the first recorded t
    cols = {"i": "Current/A", "E": "Potential/V"}
    data["Ns"] = np.zeros(N)
    data["data_cols"].add("Ns")
    data["col_types"]["Ns"] = "CHI"
    i = 1  # step (section) counter
    for nl, line in enumerate(lines):
        # a duration line like 'T1 (s) = 2' marks the end of one step.
        # fixed: raw string, so '\(' is a literal '(' rather than an
        # invalid escape sequence (a SyntaxWarning on modern Python).
        if not re.search(r"T[0-9]+ \(s\) = ", line):
            continue
        duration_line = line.strip()
        # fixed: float() instead of eval() -- the value is numeric, and this
        # avoids executing arbitrary text read from the file header.
        dt = float(duration_line.split(" = ")[-1])
        tspan = [ti, ti + dt]
        mask = np.logical_and(tspan[0] <= t, t <= tspan[-1])
        setpoint_line = lines[nl - 1].strip()  # the line above gives the setpoint
        col = cols[setpoint_line[0]]  # 'i ...' -> Current/A, 'E ...' -> Potential/V
        val = float(setpoint_line.split(" = ")[-1])
        if col not in data:
            print("adding " + col + " to data based on CHI header.")
            data[col] = np.zeros(N)
            data["data_cols"].add(col)
            data["col_types"][col] = "CHI"
        data[col][mask] = val
        data["Ns"][mask] = i
        ti = ti + dt
        i = i + 1
def dayshift(dataset, days=1):
    """
    Shift the dataset's epoch timestamp (dataset['tstamp']) forward by the
    given number of days (default one) and return the dataset.
    """
    seconds_per_day = 24 * 60 * 60
    dataset["tstamp"] = dataset["tstamp"] + days * seconds_per_day
    return dataset
def sort_time(dataset, verbose=True, vverbose=False):
    """
    Sort each data column of a dataset according to its time column.

    Sort indeces are computed once per time column (via np.argsort) and
    reused for every column sharing it. Columns whose length doesn't match
    their time column are dropped from dataset['data_cols'] with a warning,
    since they can't be sorted consistently. Columns with no identifiable
    time column are kept, unsorted.

    dataset: the data dictionary to sort in place
    verbose / vverbose: normal and extra-chatty progress printing
    """
    if verbose:
        print("\nfunction 'sort_time' at your service!\n\n")
    if "NOTES" in dataset.keys():
        dataset["NOTES"] += "\nTime-Sorted\n"
    else:
        dataset["NOTES"] = "Time-Sorted\n"
    sort_indeces = {}  # will store sort indeces of the time variables
    data_cols = dataset["data_cols"].copy()
    dataset["data_cols"] = set()
    for col in data_cols:
        if vverbose:
            print("working on " + col)
        x = dataset[col]
        timecol = get_timecol(col, dataset, verbose=vverbose)
        if timecol in sort_indeces:
            indeces = sort_indeces[timecol]
        elif timecol:
            indeces = np.argsort(dataset[timecol])
            sort_indeces[timecol] = indeces
        else:
            # fixed: previously a falsy timecol either hit a NameError
            # (indeces undefined) or silently reused the previous column's
            # indeces. Now the column is kept as-is, unsorted.
            if verbose:
                print("no timecol found for " + col + "; leaving it unsorted.")
            dataset["data_cols"].add(col)
            continue
        if len(x) != len(indeces):
            if verbose:
                print(
                    col
                    + " is not the same length as its time variable!\n"
                    + col
                    + " will not be included in the time-sorted dataset."
                )
        else:
            dataset[col] = x[indeces]
            dataset["data_cols"].add(col)
            if verbose:
                print("sorted " + col + " based on time in " + timecol)
    if verbose:
        print("\nfunction 'sort_time' finished!\n\n")
def time_cal(
    data,
    ref_data=None,
    points=[(0, 0)],
    point_type=["time", "time"],
    timecol="t",
    pseudotimecol=None,
    reftimecol="time/s",
    verbose=True,
):
    """
    Calibrates the time column of a dataset according to sync points with a
    reference dataset. tstamps are never changed.
    ------- inputs ---------
    data: the dataset for which to calibrate a timecolumn
    ref_data: the reference dataset. Alternately, 'absolute'/'epoch'/None to
    reference against absolute time, in which case point_type[1] must be 'time'.
    points: pairs of corresponding times or indeces in data and ref_data.
    If only one point is given, time is calibrated just by a linear shift. If
    two or more are given, time is calibrated by the linear transformation best
    fitting the the calibration points (exact for two).
    point_type: Tuple specifying the mode of reference used in points, first
    for data and then for ref_data. Options are 'time', in which case the reference
    is to the actual value of the uncalibrated/reference time; 'index', in
    which case it is to the datapoint number of the uncalibrated/reference time
    vector; and 'timestamp', a string parsed by timestamp_to_seconds().
    timecol: the name of the column of data into which to save the calibrated time
    pseudotimecol: the name of the column of data containing the uncalibrated time.
    By default pseudotime is taken to be the same as time, i.e. the uncalibrated
    time is overwritten by the calibrated time.
    reftimecol: the name of the column of ref_data containing the reference time.
    verbose: True if you want the function to talk to you, useful for catching
    your mistakes and my bugs. False if you want a clean terminal or stdout.
    ------- output --------
    data: same as the input data, but with the calibrated time saved in the
    specified column.
    """
    if verbose:
        print("\n\nfunction 'time_cal' at your service!\n")
    if pseudotimecol is None:
        # fixed: the documented default (uncalibrated time is the same column
        # as the calibrated time) was never implemented, so data[None] raised.
        pseudotimecol = timecol
    if type(points[0]) not in [list, tuple]:
        points = [points]
    if type(point_type) is str:
        point_type = (point_type, point_type)
    point_type = list(point_type)
    # ^ work on a copy: the guessing fallback below assigns to point_type[i],
    # and must not mutate the caller's list (or the shared default argument).
    t_vecs = np.zeros([2, len(points)])  # this is easiest if I pre-allocate an array
    mask = np.array([True for point in points])  # to record if a point has problems
    if ref_data in ["absolute", "epoch", None]:
        ref_data = {reftimecol: None, "tstamp": 0}
        # this is enough to keep the loop from crashing
        print(
            "time calbration referenced to absolute time! point_type[1] must be 'time'."
        )
    if "tstamp" not in ref_data:
        offset_0 = 0
        print(
            "No ref_data given or no ref_data['tstamp']. "
            + "Assuming reference times are relative to the same tstamp!"
        )
    else:
        offset_0 = ref_data["tstamp"] - data["tstamp"]
    if verbose:
        print("tstamp offset = " + str(offset_0))
    for i, t in enumerate([data[pseudotimecol], ref_data[reftimecol]]):
        # this loop will go through twice. First for time from data, then
        # for the reftime from refdata. point_type is a vector of two
        # corresponding to these two iterations, as is each point of points.
        # check the input
        if not point_type[i] in ["time", "index", "timestamp"]:
            print(
                "WARNING: Don't know what you mean, dude, when you say "
                + str(point_type[i])
                + ". Options for point_type["
                + str(i)
                + "] are 'index', 'time', and 'timestamp'."
                + " Gonna try and guess from the first point of points."
            )
            if type(points[0][i]) is int:
                point_type[i] = "index"
            elif type(points[0][i]) is float:
                point_type[i] = "time"
            elif type(points[0][i]) is str:
                point_type[i] = "timestamp"
        # get the times corresponding to the syncpoints into the array
        for j, point in enumerate(points):
            try:
                if point_type[i] == "index":
                    t_vecs[i][j] = t[point[i]]
                elif point_type[i] == "time":
                    t_vecs[i][j] = point[i]
                elif point_type[i] == "timestamp":
                    t_vecs[i][j] = timestamp_to_seconds(point[i])
            except (IndexError, TypeError) as e:
                print(
                    str(e)
                    + " at point "
                    + str(point)
                    + " of "
                    + str(i)
                    + " (0=data, 1=refdata)"
                )
                mask[j] = False
    N_good = len(mask[mask])
    # len(mask[mask]) is an easy way to count the True entries in mask
    if verbose:
        print("Got " + str(N_good) + " out of " + str(len(points)) + " points!")
    if N_good == 1:
        # a single sync point: calibrate by a simple shift
        offset = t_vecs[:, mask][1][0] - t_vecs[:, mask][0][0]
        if verbose:
            print("offset with respect to ref tstamp = " + str(offset))
            print("total offset = " + str(offset + offset_0))
        data[timecol] = data[pseudotimecol] + offset + offset_0
    else:
        # two or more sync points: calibrate by the best-fit linear transform
        t, t_ref = t_vecs[:, mask]  # this drops any point that had a problem
        pf = np.polyfit(t, t_ref, 1)
        if verbose:
            print("with respect to ref tstamp:")
            print("time = " + str(pf[0]) + " * pseudotime + " + str(pf[1]))
            print("with respect to tstamp:")
            print("time = " + str(pf[0]) + " * pseudotime + " + str(pf[1] + offset_0))
        data[timecol] = pf[0] * data[pseudotimecol] + pf[1] + offset_0
    if timecol not in data["data_cols"]:
        # otherwise synchronizing later can give an error.
        # fixed: this previously checked the undefined name 'time' and used
        # '+= [timecol]', which fails because data_cols is a set.
        data["data_cols"].add(timecol)
    if verbose:
        print("\nfunction 'time_cal' finished!\n\n")
    return data
def trigger_times(x, y, threshhold=2.5, triggergap=True):
    """
    Return the times (values of x) at which the trigger signal y rises above
    threshhold.

    If triggergap is True, the start of any gap of more than 2 time units in
    x also counts as a trigger: the MS stops logging while the trigger is on
    (noticed September 2018), which leaves such a gap in the time vector.
    """
    above = y > threshhold
    # was the previous sample already above threshold? (first sample: no)
    previously_above = np.append(False, above[:-1])
    rising_edge = np.logical_and(above, np.logical_not(previously_above))
    if triggergap:
        next_x = np.append(x[1:], x[-1])
        starts_gap = x < next_x - 2
        rising_edge = np.logical_or(rising_edge, starts_gap)
    return x[rising_edge]
def get_trigger_times(data, xcol=None, ycol=None, label="Analog In", threshhold=2.5):
    """
    Find the trigger times in a dataset's analog-in signal, store them as
    data['triggers'], and return them.
    """
    from .Data_Importing import get_xy

    t_vec, sig = get_xy(data, xcol, ycol, label)
    data["triggers"] = trigger_times(t_vec, sig, threshhold=threshhold)
    return data["triggers"]
def trigger_cal(
    data,
    triggers=None,
    pseudotimecol=None,
    pt_str=None,
    t_str=None,
    timecol=None,
    shiftcol="selector",
    edge=5,
    verbose=True,
):
    """
    data is the only required argument, given that the trigger times are
    stored in data['triggers'], as they are after a call to get_trigger_times(data).
    By default data['time/s'] is adjusted linearly to line up triggers with
    changes in data['loop number'] and data['file number'] and stored as
    data['time/s*'], which is subsequently used by other functions.
    More generally, this function:
    Calibrates a time column in the dataset given by pt_str or pseudotimecol
    according to a list of triggers, which are assumed to come at times where
    another column, given by shiftcol, changes.
    There does not need to be a trigger for every change in data[shiftcol], but
    there should be a change in data[shiftcol] at every trigger time. The
    function shouldn't trip up so long as this is satisfied and the time between
    adjacent triggers is much larger than the needed correction in the time.
    The calibrated time is stored in the column given by t_str or timecol. The
    name of the calibrated time column is stored as data['t_str'] for plotting
    functions to refer to by default. The name of the uncalibrated time column
    is stored as data['pt_str'].
    By default shiftcol points to a custom 'selector' column which, if not
    defined prior to calling this function, is defined here as a linear
    combination of file number, loop number, and cycle number weighted oddly
    to keep values unique. Triggers are assumed to correspond to changes in
    this column.
    Returns the name of the calibrated time column (or of the uncalibrated
    one if calibration wasn't possible).
    """
    if verbose:
        print("\n\nfunction 'trigger cal' at your service!\n")
    # --- parse inputs: locate the uncalibrated time (pt_str/pseudotimecol)
    # and decide where the calibrated time goes (t_str/timecol). t_str is
    # synonymous with timecol, pt_str with pseudotimecol; this mixes the old
    # notation (sync_metadata: V_str etc.) with the new (time_cal: timecol).
    if pt_str is None and "pt_str" in data:
        pt_str = data["pt_str"]
    if t_str is None and "t_str" in data:
        t_str = data["t_str"]
    if pseudotimecol is None:
        if pt_str is not None:
            pseudotimecol = pt_str
        elif t_str is not None:
            pseudotimecol = t_str
        else:
            pseudotimecol = "time/s"
    if timecol is None:
        if pseudotimecol == t_str:
            timecol = pseudotimecol + "*"
            t_str = timecol
        elif t_str is not None:
            timecol = t_str
        else:
            timecol = pseudotimecol + "*"
    if triggers is None:
        if "triggers" not in data:
            get_trigger_times(data)
        triggers = data["triggers"]
    if type(triggers) is not np.ndarray:
        if isinstance(triggers, (list, tuple)):
            # fixed: this was 'triggers in [list, tuple]', which compared the
            # object to the type objects themselves and was never True, so a
            # list of triggers got wrapped in an extra dimension.
            triggers = np.array(triggers)
        else:
            triggers = np.array([triggers])
    pt = data[pseudotimecol].copy()
    # set up the selector column to use for shift data
    if shiftcol == "selector" and "selector" not in data:
        from .EC import make_selector

        make_selector(data)
    # ---- check if there are triggers in the set and in the time interval ----
    if len(triggers) == 0:
        if verbose:
            print("There are no triggers in this data set! No calibration. Finished!")
        return pseudotimecol
    if verbose:
        print(
            str(len(triggers))
            + " triggers spanning t= "
            + str(triggers[0])
            + " to "
            + str(triggers[-1])
        )
    triggermask = np.logical_and(pt[0] - edge < triggers, triggers < pt[-1] + edge)
    triggers = triggers[triggermask]
    if len(triggers) == 0:
        print("no triggers in time range. can't calibrate.")
        data["t_str"] = pseudotimecol
        return pseudotimecol
    if verbose:
        print(
            "... of which "
            + str(len(triggers))
            + " are between t= "
            + str(pt[0] - edge)
            + " and "
            + str(pt[-1] + edge)
            + "."
        )
    if shiftcol not in data:
        # without a shift column, all we can do is line the file start up
        # with the first trigger
        if verbose:
            print("no column '" + shiftcol + "' in dataset.")
            if len(triggers) == 1:
                print("Assuming the trigger is the start of the file.")
            else:
                print(
                    "Assuming the first trigger is the start of the file, \n"
                    + "... and ignoring subsequent triggers."
                )
        offset = triggers[0] - pt[0]
        data[timecol] = pt + offset
        data["pt_str"] = pseudotimecol
        data["t_str"] = timecol
        try:
            data["col_types"][timecol] = data["col_types"][pseudotimecol]
        except KeyError:
            print("WARNING! Missing data['col_types'][pseudotimecol]")
        return timecol
    # ----- find the times at which data[shiftcol] changes value ----
    shiftvalue = data[shiftcol]
    shiftvalue_down = np.append(shiftvalue[0] - 1, shiftvalue[:-1])
    shiftmask = np.logical_not(shiftvalue == shiftvalue_down)
    shifttimes = pt[shiftmask]
    # ----- get vectors of trigger times and corresponding shift times -----
    pt_points = np.array(
        []
    )  # pseudotimes, corresponding to shifts in data[shiftcolumn]
    pt_indeces = np.array([])  # indeces of the pseudotimes (to check for duplicates)
    t_points = np.array([])  # times, corresponding to trigger times
    for trigger in triggers:
        # find the shift_time closest to it:
        err = abs(shifttimes - trigger)
        index = np.argmin(err)
        pt_point = shifttimes[index]
        if err[index] > edge:
            # fixed: the 'continue' was previously guarded by 'and verbose',
            # so bad matches were only rejected when verbose=True.
            if verbose:
                print(
                    "Large offset (dt>"
                    + str(edge)
                    + ") between matched trigger at "
                    + str(trigger)
                    + " and "
                    + shiftcol
                    + " change at "
                    + str(pt_point)
                    + "!"
                )
                print("I'll ignore that match.")
            continue
        # Check if that shift_time has already been linked to a trigger
        if index in pt_indeces:
            if verbose:
                print(
                    "Multiple triggers seem to correspond to the file start at "
                    + str(pt_point)
                    + ". I'll assume the last ones "
                    + "were noise and just keep the first one."
                )
            # leave it, just use the old shift time
        else:  # otherwise add it to the vectors.
            pt_points = np.append(pt_points, pt_point)
            pt_indeces = np.append(pt_indeces, index)
            t_points = np.append(t_points, trigger)
    if len(pt_points) == 0:
        # guard: without any matched triggers there is nothing to line up
        print("no triggers could be matched to shifts in " + shiftcol + ". can't calibrate.")
        data["t_str"] = pseudotimecol
        return pseudotimecol
    if verbose:
        print(
            "matched " + str(len(pt_points)) + " triggers to shifts in " + shiftcol + "."
        )
    t = pt.copy()  # get ready to build the new time variable
    # points before the first trigger are shifted so the first trigger matches
    startmask = pt <= pt_points[0]
    if np.any(startmask):
        startoffset = t_points[0] - pt_points[0]
        if verbose:
            print("shifting start point(s) by " + str(startoffset) + ".")
        t[startmask] = pt[startmask] + startoffset
    # points between the first and last trigger are interpolated to the triggers
    middlemask = np.logical_and(pt_points[0] < pt, pt < pt_points[-1])
    if np.any(middlemask):
        if verbose:
            print(
                "interpolating to trigger times between t= "
                + str(t_points[0])
                + " and "
                + str(t_points[-1])
                + "."
            )
        t[middlemask] = np.interp(pt[middlemask], pt_points, t_points)
    # points after the last trigger are shifted so the last trigger matches
    endmask = pt_points[-1] <= pt
    if np.any(endmask):
        endoffset = t_points[-1] - pt_points[-1]
        if verbose:
            print("shifting end points by " + str(endoffset) + ".")
        t[endmask] = pt[endmask] + endoffset
    data[timecol] = t
    data["t_str"] = timecol
    data["pt_str"] = pseudotimecol
    if timecol not in data["data_cols"]:
        # fixed: previously added t_str, which can be None on the default path
        data["data_cols"].add(timecol)
    if pseudotimecol not in data["data_cols"]:
        # fixed: previously used '+= [pt_str]', which fails on a set
        data["data_cols"].add(pseudotimecol)
    if verbose:
        print("\nfunction 'trigger cal' finished!\n\n")
    return timecol
|
ScottSoren/EC_MS | _tests_/20C02_Spectrum_demo/Spectrum_class_demo.py | <reponame>ScottSoren/EC_MS
# -*- coding: utf-8 -*-
# Demo script: walks through the EC_MS Spectrum class methods interactively.
# Expects the data file '1_SEM_5E-6.dat' in the working directory.
"""
Created on Thu Feb 20 19:08:46 2020
@author: scott
"""
import os
import numpy as np
from matplotlib import pyplot as plt
from EC_MS import Spectrum
plt.close("all")
"""
This script demonstrates the Spectrum class. All its methods are quite simple,
except for the initialization method whcih parses the file. This script
shows you what the methods do. There are usually more than one way of doing
things, so I sometimes show both, and you can choose what's most intuitive for you.
"""
folder = os.path.abspath(".")  # for this demo, I just copied the data files into here.
file = folder + os.sep + "1_SEM_5E-6.dat"
# the data is naturally imported as a Spectrum object.
spectrum = Spectrum(file, data_type="PVMS")
# ^ it complains because it can't find the timestamp in the file name. You can just ignore that.
print("\n" + "-" * 20 + "\n")
# the spectrum knows its file and folder.
print("spectrum.file = " + spectrum.file + "\nspectrum.folder = " + spectrum.folder)
ax = spectrum.plot(color="k")  # kwargs like 'color' are fed to plt.plot()
ax.set_yscale("log")
# Manually get the max and the integral:
x, y = spectrum.get_signal(
    Mspan=[27.5, 28.5]
)  # spectrum.get_signal selects the part of the spectrum in Mspan
int_M28_naive = np.trapz(y, x)  # "naive": no background subtraction yet
print(
    "The integrated intensity of the primary N2 peak is " + str(int_M28_naive) + " C."
)
print("... maximum = " + str(max(y)))
# but this is not completely right as it ignores the background, as seen here:
ax.fill_between(x, y, np.zeros(x.shape), color="r")
# the Spectrum.get_integral() function does so automatically. It plots if given an axis
int_M28 = spectrum.get_integral(Mspan=[27.5, 28.5], M_bg=[70, 80], ax=ax, color="b")
# ^ you can see from the blue fill where it set the background.
print(
    "The background-corrected integrated intensity of the primary N2 peak is "
    + str(int_M28)
    + " C."
)
# we can also do a global background correction:
spectrum.set_background(M_bg=[70, 80])
# now the max is a tiny bit lower. We can also automatically get the max, like this:
y_max = spectrum.get_max(Mspan=[27.5, 28.5])
print("... background-corrected maximum = " + str(y_max) + " A.")
# we can plot it as before, or manually (spectrum.x / spectrum.y hold the data):
x = spectrum.x
y = spectrum.y
fig, ax = plt.subplots()
ax.plot(x, y, "k")
ax.set_yscale("log")
# ah, but it looks like shit because we're plotting it on a log scale. Luckily, we can undo the background subtraction!
spectrum.reset()  # undoes background subtraction
spectrum.plot(ax=ax, Mspan=[10, 80], color="b", linestyle="--")
plt.show()
# that concludes this little demo.
|
ScottSoren/EC_MS | src/EC_MS/Calibration.py | <filename>src/EC_MS/Calibration.py
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 14 16:50:10 2016
Most recently edited: 16J27
@author: scott
"""
from __future__ import division, print_function
import os, sys, pickle
import numpy as np
from matplotlib import pyplot as plt
# then we use relative import
from . import Chem
from .EC import plot_CV_cycles, CV_difference, sync_metadata, select_cycles
from .Molecules import Molecule
from .Combining import cut_dataset
from .Chips import Chip
from .Quantification import get_signal, get_current, get_potential
from .Plotting import colorax, align_zero, plot_experiment, plot_signal
def ML_strip_cal(
    CV_and_MS,
    cycles=[1, 2],
    t_int=200,
    cycle_str="cycle number",
    mol="CO2",
    mass="primary",
    n_el=None,
    Vspan=[0.5, 1.0],
    redox=1,
    ax="two",
    title="default",
    verbose=True,
    plot_instantaneous=False,
):
    """Determine F_cal and return partially populated calibration dictionary
    Determines F_cal = Q_QMS / n_electrode by integrating a QMS signal over
    tspan, assuming the starting value is background; and
    integrating over vspan the difference between two CV cycles and converting
    that to a molar amount.
    The calibration factor is calibration['F_cal']
    Args:
        CV_and_MS (dict): CV_and_MS dict
        cycles (list): List of the two CV cycles to calculate difference between
        t_int (int): MS integration time in s; if a single number, integration
            starts when the EC data enters the Vspan
        cycle_str (str): name of the column used to select cycles
        mol (str): Molecule name
        mass (str): mass at which to integrate the MS signal; "primary" uses
            the molecule's primary mass
        n_el (int): number of electrons per molecule (default: mol.n_el)
        Vspan (list): List of start and end of Vspan
        redox (int): anodic (1) or cathodic (0) scan for CV_difference
        ax: 'two' for a new two-panel figure, a list of two axes, a single
            axis, or None for no plotting
        title (str): The plot title
        verbose (bool): Whether to output verbose procedure information
        plot_instantaneous (bool): Whether to also plot the ideal (no mass
            transport) MS signal predicted from the EC current
    Return:
        (dict, axes): Partially populated calibration dictionary, and the axes
    """
    if verbose:
        print("\n\ncalibration function 'ML_strip_cal' at your service!\n")
    # --- sort out which axes to plot on ---
    if ax == "two":
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(211)
        ax2 = fig1.add_subplot(212)
    elif isinstance(ax, list):
        # fixed: this was 'ax is list', which is True only for the type
        # object itself and never for an actual list of two axes.
        ax1 = ax[0]
        ax2 = ax[1]
    elif ax is None:
        ax1 = None
        ax2 = None
    else:
        ax1 = ax
        ax2 = None
    if type(mol) is str:
        mol = Molecule(mol, writenew=False)
    name = mol.name
    if n_el is None:
        n_el = mol.n_el
    if mass == "primary":
        mass = mol.primary
    if np.size(t_int) == 1:
        # a scalar t_int is resolved below (17C22), once the EC data in the
        # Vspan is known, so that integration starts when that data starts.
        pass
    if title == "default":
        title = name + "_" + mass
    cycles_data, ax1 = plot_CV_cycles(
        CV_and_MS, cycles, ax=ax1, title=title, cycle_str=cycle_str
    )
    ax1.xaxis.tick_top()
    ax1.xaxis.set_label_position("top")
    Q_diff, diff = CV_difference(cycles_data, Vspan=Vspan, redox=redox, ax=ax1)
    n_mol = Q_diff / (Chem.Far * n_el)  # moles stripped, assuming 100% FE
    t = diff[0]
    J_diff = diff[2]
    A_el = CV_and_MS["A_el"]
    if np.size(t_int) == 1:  # 17C22
        t_int = t[0] + np.array([0, t_int])
        # now it starts at the same time as EC data in the vrange starts
    x = CV_and_MS[mass + "-x"]
    y = CV_and_MS[mass + "-y"]
    I_keep = [I for (I, x_I) in enumerate(x) if t_int[0] < x_I < t_int[1]]
    x = x[I_keep]
    y = y[I_keep]
    background = min(y)  # very simple background subtraction
    Q_QMS = np.trapz(y - background, x)  # integrated MS signal in C
    F_cal = Q_QMS / n_mol
    y_el = J_diff * A_el / (Chem.Far * n_el) * F_cal * 1e-3
    # factor 1e-3 converts mA to A
    if ax2 is not None:
        ax2.plot(x, y * 1e9, "k-")
        ax2.fill_between(
            x,
            background * 1e9,
            y * 1e9,
            facecolor="g",
            interpolate=True,
        )
        if plot_instantaneous:
            ax2.plot(t, y_el * 1e9, "r--")  # Without mass transport
        ax2.set_xlabel("time / s")
        ax2.set_ylabel("signal / nA")
    print(
        (
            "QMS measured {0:5.2e} C of charge at M44 for {1:5.2e} mol "
            + name
            + ".\n"
            + "Calibration factor for CO2 at M44 is {2:5.2e} C / mol."
        ).format(Q_QMS, n_mol, F_cal)
    )
    calibration = {"type": "ML_strip"}
    calibration["raw_data"] = CV_and_MS["title"]
    calibration["mass"] = mass
    calibration["n_mol"] = n_mol
    calibration["Q_el"] = Q_diff
    calibration["Q_QMS"] = Q_QMS
    calibration["F_cal"] = F_cal
    calibration["title"] = title
    if verbose:
        print("\ncalibration function 'ML_strip_cal' finished!\n\n")
    if ax2 is None:
        ax = ax1
    else:
        ax = [ax1, ax2]
    return calibration, ax
def steady_state_cal(
    CA_and_MS,
    t_int="half",
    mol="CO2",
    mass="primary",
    n_el=None,
    ax="new",
    title="default",
    verbose=True,
    background="min",
):
    """
    Determine F_cal = Q_QMS / n_mol for a molecule produced at steady state
    during constant-current/potential electrolysis, assuming 100% faradaic
    efficiency, and return a partially populated calibration dictionary.

    CA_and_MS: dataset containing both the EC and MS data
    t_int: integration window. 'half' (default) uses the second half of
        CA_and_MS['tspan_2']; 'all' uses all of it; a single number
        integrates that many seconds up to the end of tspan_2; a pair of
        times is used directly.
    mol: Molecule object or name of the molecule being calibrated
    mass: mass at which to calibrate ('primary' uses mol.primary)
    n_el: number of electrons per molecule (default: mol.n_el)
    ax: axis to plot on; 'new' for a new figure, None for no plot
    title: plot title ('default' uses '<mol>_<mass>')
    verbose: whether to narrate the procedure
    background: 'min' subtracts min(y) from the MS signal; None subtracts 0
    """
    if verbose:
        print("\n\ncalibration function 'steady_state_cal' at your service!\n")
    if type(mol) is str:
        mol = Molecule(mol, writenew=False)
    name = mol.name
    if n_el is None:
        n_el = mol.n_el
    if mass == "primary":
        mass = mol.primary
    # resolve t_int into an actual [start, end] pair
    if t_int == "half":
        t_int = (CA_and_MS["tspan_2"][1] + np.array(CA_and_MS["tspan_2"])) / 2
    elif t_int == "all":
        t_int = np.array(CA_and_MS["tspan_2"])
    elif np.size(t_int) == 1:
        t_int = CA_and_MS["tspan_2"][1] + np.array([-t_int, 0])
        # by default integrate for time t_int up to end of interval
    if title == "default":
        title = name + "_" + mass
    # --- integrate the MS signal over t_int ---
    x = CA_and_MS[mass + "-x"]
    y = CA_and_MS[mass + "-y"]
    if background == "min":
        background = min(y)  # very simple background estimate
    elif background is None:
        background = 0
    I_keep = [I for (I, x_I) in enumerate(x) if t_int[0] < x_I < t_int[1]]
    x_r = x[I_keep]
    y_r = y[I_keep]
    Q_QMS = np.trapz(y_r - background, x_r)  # integrated signal in C
    # --- integrate the electrode current over the same window ---
    V_str, J_str = sync_metadata(CA_and_MS)
    t = CA_and_MS["time/s"]
    J = CA_and_MS[J_str]
    A_el = CA_and_MS["A_el"]
    I_keep = [I for (I, t_I) in enumerate(t) if t_int[0] < t_I < t_int[1]]
    t_r = t[I_keep]
    J_r = J[I_keep]
    Q_el = A_el * np.trapz(J_r, t_r) * 1e-3  # total electrode charge passed in C
    n_mol = Q_el / (Chem.Far * n_el)  # moles produced, assuming 100% FE
    F_cal = Q_QMS / n_mol
    y_el = J * A_el / (Chem.Far * n_el) * F_cal * 1e-3
    # expected QMS signal without mass transport etc
    print(
        (
            "QMS measured {0:5.2e} C of charge at M44 for {1:5.2e} mol "
            + name
            + ".\n"
            + "Calibration factor for CO2 at M44 is {2:5.2e} C / mol."
        ).format(Q_QMS, n_mol, F_cal)
    )
    if ax == "new":
        fig1 = plt.figure()
        ax = fig1.add_subplot(111)
    if ax is not None:
        # measured signal vs the ideal signal predicted from the current
        ax.plot(x, y, "k-")
        ax.plot(t, y_el + background, "r--")
        ax.set_title(title)
    calibration = {"type": "steady_state"}
    calibration["raw_data"] = CA_and_MS["title"]
    calibration["mass"] = mass
    calibration["n_mol"] = n_mol
    calibration["Q_el"] = Q_el
    calibration["Q_QMS"] = Q_QMS
    calibration["F_cal"] = F_cal
    if verbose:
        print("\ncalibration function 'steady_state_cal' finished!\n\n")
    return calibration
def carrier_gas_cal(
    dataset=None,  # if signal not given, reads average from dataset
    signal=None,  # steady-state signal from in-flux of carrier gas
    mol="He",  # calibration Molecule object or name of calibration molecule
    carrier=None,
    viscosity=None,
    mass="primary",  # mass at which to calibrate
    composition=1,  # mol fraction calibration molecule in carrier gas
    chip="SI-3iv1-1-C5",  # chip object or name of chip
    tspan=None,
):
    """
    Return a calibration dict (F_cal = signal / n_dot_i) based on the known
    concentration of the calibration molecule in the carrier gas.

    The molar flux n_dot_i of the calibration molecule through the chip
    capillary is calculated from the chip and carrier gas. If signal is not
    given, it is read as the average signal at the given mass over tspan
    (default dataset['tspan']) in dataset.
    """
    calibration = {"type": "carrier gas"}
    if type(chip) is str:
        chip = Chip(chip)
    if type(mol) is str:
        mol = Molecule(mol)
    if carrier is None:
        carrier = mol
    elif type(carrier) is str:
        carrier = Molecule(carrier)
    if mass == "primary":
        mass = mol.primary
    if type(composition) in (float, int):
        fraction = composition
    elif type(composition) is dict:
        fraction = composition[mol.name]
    n_dot = chip.capillary_flow(gas=carrier)
    n_dot_i = fraction * n_dot  # molar flux of the calibration molecule
    if signal is None:
        # read the average signal over tspan from the dataset
        if tspan is None:
            tspan = dataset["tspan"]  # fixed: was dataset["txpan"] (typo)
        x, y = get_signal(dataset, mass=mass, tspan=tspan)
        calibration["Q_QMS"] = np.trapz(y, x)
        signal = calibration["Q_QMS"] / (tspan[-1] - tspan[0])
        calibration["n_mol"] = n_dot_i * (tspan[-1] - tspan[0])
    F_cal = signal / n_dot_i
    # ^ fixed: this was previously computed before the block above resolved
    # signal, so calling without an explicit signal raised a TypeError.
    calibration["mass"] = mass
    calibration["n_dot_i"] = n_dot_i
    calibration["signal"] = signal
    calibration["F_cal"] = F_cal
    return calibration
# mole fractions of the main components of dry air, used for "external"
# calibrations with air as the carrier gas
air_composition = {"N2": 0.7808, "O2": 0.2095, "Ar": 0.0093, "CO2": 0.000412}
def chip_calibration(
    data,
    mol="O2",
    F_cal=None,
    primary=None,
    tspan=None,
    tspan_bg=None,
    t_bg=None,
    gas="air",
    composition=None,
    chip="SI-3iv1",
):
    """
    Return an EC_MS.Chip object whose effective capillary length (l_cap) is
    fit to data for a given gas (typically air) in which one component
    (typically O2 at M32) has a trusted calibration: l_cap is scaled so that
    the calculated capillary flux matches the measured flux of the
    calibrated molecule.
    """
    if type(mol) is str:
        molecule = Molecule(mol)
    else:
        molecule = mol
        mol = mol.name
    if F_cal is not None:
        molecule.F_cal = F_cal
    if primary is not None:
        molecule.primary = primary
    if gas == "air" and composition is None:
        composition = air_composition[mol]
    # measured molar flux of the calibration molecule, background-subtracted
    x, y = molecule.get_flux(data, tspan=tspan, unit="mol/s")
    if tspan_bg is None and t_bg is not None:
        tspan_bg = t_bg
    if tspan_bg is None:
        y_background = 0
    else:
        x_bg, y_bg = molecule.get_flux(data, tspan=tspan_bg, unit="mol/s")
        y_background = np.mean(y_bg)
    n_dot = np.mean(y) - y_background
    if type(chip) is str:
        chip = Chip(chip)
    # flux predicted with the chip's nominal capillary length
    n_dot_0 = chip.capillary_flow(gas=gas) / Chem.NA * composition
    # scale l_cap so the predicted flux matches the measured flux
    l_eff = chip.l_cap * n_dot_0 / n_dot
    chip.l_cap = l_eff
    chip.parameters["l_cap"] = l_eff
    return chip
def point_calibration(
    data,
    mol,
    mass="primary",
    cal_type=None,
    tspan=None,
    n_el=None,
    tail=0,
    t_bg=None,
    tspan_bg=None,
    chip=None,
    composition=None,
    gas=None,
    carrier=None,
    verbose=True,
):
    """
    Return a Molecule object calibrated in one of the following ways:
    internally: cal_type='internal', needs n_el. The molar flux is
        calculated from the electrode current assuming 100% faradaic
        efficiency.
    externally: cal_type='external', needs chip, carrier, and composition.
        The molar flux is calculated from the capillary flux of the
        calibration molecule in the carrier gas.
    The signal is taken as an average over the tspan, or at a linearly
    interpolated point if tspan is a single number. Same for the current for
    internal calibration. A background, averaged over tspan_bg (or t_bg),
    is subtracted from the signal if given.
    """
    if verbose:
        print("\n\nfunction point_calibration at your service!\n")
    if type(cal_type) is str and "semi" in cal_type:
        # to avoid confusion with siQuant,
        # which calls calibrations of gases throught the chip "semi-internal"
        cal_type = "external"
    m = Molecule(mol)
    if mass == "primary":
        mass = m.primary
    if carrier is None and gas is not None:
        carrier = gas  # 'gas' is an alias for 'carrier'
    if composition is None:
        if carrier == mol or carrier is None:
            composition = 1
        elif carrier == "air":
            composition = air_composition[mol]
    if cal_type is None:
        # guess the calibration type from whether n_el was given
        if n_el is None:
            print("n_el not given! assuming calibration is semi or external.")
            cal_type = "external"
        else:
            print("n_el given! assuming calibration is internal.")
            cal_type = "internal"
    # ---- get the average (background-subtracted) signal ----
    if tspan is None:
        tspan = data["tspan"]
    if tspan_bg is None and t_bg is not None:
        tspan_bg = t_bg
    if tspan_bg is not None:
        x_bg, y_bg = get_signal(data, mass, tspan=tspan_bg)
        y0 = np.mean(y_bg)
    else:
        y0 = 0
    if type(tspan) in [int, float]:
        # a single time: interpolate the signal at that point
        x, y = get_signal(data, mass, [tspan - 10, tspan + 10])
        S = np.interp(tspan, x, y - y0)
    else:
        x, y = get_signal(data, mass, tspan=[tspan[0], tspan[1] + tail])
        S = np.mean(y) - y0  # more accurate for few, evenly spaced datapoints
    # ---- get the corresponding molar flux ----
    if cal_type == "internal":
        if type(tspan) in [int, float]:
            t, i = get_current(data, tspan=[tspan - 10, tspan + 10], unit="A")
            I = np.interp(tspan, t, i)
        else:
            t, i = get_current(data, tspan=tspan, unit="A")
            I = np.mean(i)
        n = I / (n_el * Chem.Far)  # molar flux, assuming 100% faradaic efficiency
    elif cal_type == "external":
        if not chip:
            chip = Chip()
        elif type(chip) is str:
            chip = Chip(chip)
        if carrier is None:  # fixed idiom: was 'carrier == None'
            carrier = mol
        n = chip.capillary_flow(gas=carrier) / Chem.NA * composition
    else:
        print(
            "not sure what you mean, dude, when you say cal_type = '" + cal_type + "'"
        )
    F_cal = S / n
    m.F_cal = F_cal
    print(
        "point_calibration() results: S = "
        + str(S)
        + " , n = "
        + str(n)
        + ", F_cal = "
        + str(F_cal)
    )
    if verbose:
        print("\nfunction point_calibration finished!\n\n")
    return m
def calibration_curve(
    data,
    mol,
    mass="primary",
    n_el=-2,
    name=None,
    cycles=None,
    cycle_str="selector",
    mode="average",
    t_int=15,
    t_tail=30,
    t_pre=15,
    t_i=None,
    t_f=None,
    find_max=False,
    t_max_buffer=5,
    V_max_buffer=5,
    find_min=False,
    t_min_buffer=5,
    V_min_buffer=5,
    tspans=None,
    background=None,
    t_bg=None,
    tspan_plot=None,
    remove_EC_bg=False,
    color=None,
    force_through_zero=False,
    tail_on_EC=False,
    ax="new",
    J_color="0.5",
    unit=None,
    plot_unit="nA",
    out="Molecule",
    verbose=True,
):
    """
    Powerful function for integrating a molecule when the assumption of
    100% faradaic efficiency can be made.
    Requires a dataset, and indication of where the calibration points are.
    This can be either:
    (A) tspans, a list of tuples of (<start>, <finish>) times of the calibration points
    (B) cycle numbers, which by default refer to data['selector']
    If mode='average', it integrates over the last t_int of each cycle. If
    mode='integral', it integrates from t_pre before the start until t_tail
    after the end of each cycle.
    If find_max=True, rather than using the full timespan of the cycle, it
    finds the timespan at which the potential is within V_max_buffer mV of its
    maximum value, and cuts of t_max_buffer, and then uses this timespan as above.
    Correspondingly for find_min, V_min_buffer, and t_min_buffer.
    A timespan for which to get the background signals at each of the masses
    can be given as t_bg. Alternately, background can be set to 'linear' in
    which case it draws a line connecting the signals just past the endpoints
    of the timespan for each cycle.
    If ax is not None, it highlights the area under the signals and EC currents
    that are integrated/averaged, and also makes the calibration curve.
    The function can return any or multiple of the following:
        'Qs': the integrated charges or averaged currents for each cycle
        'ns': the corresponding amount or flux for each cycle
        'Ys': the integrated or averaged signal for each cycle
        'Vs': the average potential for each cycle
        'F_cal': calibration factor in C/mol
        'Molecule': Molecule object with the calibration factor
        'ax': the axes on which the function plotted.
    out specifies what the function returns.
    By default, it returns the molecule.
    """
    if verbose:
        print("\n\nfunction 'calibration_curve' at your service!\n")
    # ----- parse inputs -------- #
    m = Molecule(mol)
    if mass == "primary":
        mass = m.primary
    else:
        m.primary = mass
    if mode in ["average", "averaging", "mean"]:
        mode = "average"
    elif mode in ["integral", "integrate", "integrating"]:
        mode = "integral"
    use_bg_fun = False
    if t_bg is not None:
        x_bg, y_bg = get_signal(data, mass=mass, tspan=t_bg, unit="A")
        bg = np.mean(y_bg)
    elif callable(background):
        use_bg_fun = True
    elif background is not None and type(background) is not str:
        bg = background
    else:
        bg = 0
    if unit is None:
        if mode == "average":
            unit = "p"  # pmol/s and pA
        elif mode == "integral":
            unit = "n"  # nmol and nC
    elif unit[0] in ["p", "n", "u"]:
        unit = unit[0]  # I'm only going to look at the prefix
    else:
        print(
            "WARNING! unit="
            + str(unit)
            + " not recognized. calibration_curve() using raw SI."
        )
        unit = ""
    # ---------- shit, lots of plotting options... ---------#
    ax1, ax2, ax2a, ax2b, ax2c = None, None, None, None, None
    fig1, fig2 = None, None
    if ax == "new":
        ax1 = "new"
        ax2 = "new"
    else:
        try:
            iter(ax)
        except TypeError:
            # a single axes was given: use it for the calibration curve only
            ax2c = ax
        else:
            try:
                ax1, ax2 = ax
            except (TypeError, IndexError):
                print("WARNING: calibration_curve couldn't use the given axes")
    if ax1 == "new":
        ax1 = plot_experiment(
            data,
            masses=[mass],
            tspan=tspan_plot,
            emphasis=None,
            removebackground=False,
            unit=plot_unit,
        )
        fig1 = ax1[0].get_figure()
    elif ax1 is not None:
        try:
            ax1a = ax1[0]
        except TypeError:
            ax1a = ax1
        plot_signal(
            data,
            masses=[mass],
            tspan=tspan_plot,
            removebackground=False,
            unit=plot_unit,
            ax=ax1a,
        )
    if ax2 == "new":
        fig2, ax2a = plt.subplots()
        fig2c, ax2c = plt.subplots()
        # fig2, [ax2a, ax2c] = plt.subplots(ncols=2)
        ax2b = ax2a.twinx()
        # fig2.set_figwidth(fig1.get_figheight()*3)
    elif ax2 is not None:
        try:
            iter(ax2)
        except TypeError:
            ax2c = ax2
        else:
            if len(ax2) == 1:
                ax2c = ax2[0]
            else:
                try:
                    ax2a, ax2b, ax2c = ax2
                except (TypeError, IndexError, ValueError):
                    print("WARNING: calibration_curve couldn't use the given ax2")
    print(str([ax1, [ax2a, ax2b, ax2c]]))  # debugging
    # ----- cycle through and calculate integrals/averages -------- #
    Ys, ns, Vs, Is, Qs = [], [], [], [], []
    if tspans is not None:
        cycles = tspans
    for cycle in cycles:
        if tspans is None:
            c = select_cycles(data, [cycle], cycle_str=cycle_str, verbose=verbose)
        else:
            c = cut_dataset(data, tspan=cycle)
        if t_i is not None or t_f is not None:
            # trim t_i from the start and/or t_f from the end of the cycle
            tspan_cut = [c["time/s"][0], c["time/s"][-1]]
            if t_i is not None:
                tspan_cut[0] += t_i
            if t_f is not None:
                tspan_cut[-1] -= t_f
            c = cut_dataset(c, tspan=tspan_cut)
        if find_max:
            t_v, v = get_potential(c)
            v_max = max(v)
            mask = v_max - V_max_buffer * 1e-3 < v
            t_max = t_v[mask]
            t_start = t_max[0] + t_max_buffer
            t_end = t_max[-1] - t_max_buffer
            print("v_max = " + str(v_max))  # debugging
        elif find_min:
            t_v, v = get_potential(c)
            v_min = min(v)
            mask = v < v_min + V_min_buffer * 1e-3
            t_min = t_v[mask]
            t_start = t_min[0] + t_min_buffer
            t_end = t_min[-1] - t_min_buffer
        else:
            t_start = c["time/s"][0]
            t_end = c["time/s"][-1]
        print("[t_start, t_end] = " + str([t_start, t_end]) + "\n\n")  # debugging
        if mode == "average":
            tspan = [t_end - t_int, t_end]
        elif mode == "integral":
            # include the neighboring cycles so the MS tail is not cut off
            c = select_cycles(
                data,
                [cycle - 1, cycle, cycle + 1],
                cycle_str=cycle_str,
                verbose=verbose,
            )
            tspan = [t_start - t_pre, t_end + t_tail]
        if (
            mode == "integral" and not tail_on_EC
        ):  # in general, we just want the primary cycle for current
            t, I = get_current(c, tspan=[t_start, t_end])
        else:
            t, I = get_current(c, tspan=tspan, verbose=verbose)
        t_v, v = get_potential(c, tspan=tspan, verbose=verbose)
        x, y = get_signal(c, mass=mass, tspan=tspan, verbose=verbose, unit="A")
        if use_bg_fun:  # has to work on x.
            bg = background(x)
        elif type(background) is str and background in ["linear", "endpoints"]:
            # draw a straight background line between the signal just before
            # and just after this cycle's integration window
            if t_bg is None:
                t_bg = 5
            tspan_before = [t_start - t_pre - t_bg, t_start - t_pre]
            tspan_after = [t_end + t_tail, t_end + t_tail + t_bg]
            x_before, y_before = get_signal(data, mass=mass, tspan=tspan_before)
            x_after, y_after = get_signal(data, mass=mass, tspan=tspan_after)
            x0, y0 = np.mean(x_before), np.mean(y_before)
            x1, y1 = np.mean(x_after), np.mean(y_after)
            bg = y0 + (x - x0) * (y1 - y0) / (x1 - x0)
        V = np.mean(v)
        if mode == "average":
            I_av = np.mean(I)
            n = I_av / (n_el * Chem.Far)
            Y = np.mean(y - bg)
            Is += [I_av]
        elif mode == "integral":
            Q = np.trapz(I, t)
            n = Q / (n_el * Chem.Far)
            Y = np.trapz(y - bg, x)
            Qs += [Q]
        if ax1 is not None:
            if color is None:
                color = m.get_color()
            try:
                iter(bg)
            except TypeError:
                y_bg = bg * np.ones(y.shape)
            else:
                y_bg = bg
            unit_factor = {"u": 1e6, "n": 1e9, "p": 1e12}.get(plot_unit[0], 1)
            ax1[0].fill_between(
                x,
                y * unit_factor,
                y_bg * unit_factor,
                color=color,
                alpha=0.5,  # where=y>y_bg,
            )
            try:
                J = I * 1e3 / data["A_el"]
            except KeyError:
                J = I * 1e3
            J_bg = np.zeros(J.shape)
            ax1[2].fill_between(t, J, J_bg, color=J_color, alpha=0.5)
            if name is not None:
                ax1[0].set_title(name)
        ns += [n]
        Ys += [Y]
        Vs += [V]
    # ----- evaluate the calibration factor -------- #
    ns, Ys, Vs = np.array(ns), np.array(Ys), np.array(Vs)
    Is, Qs = np.array(Is), np.array(Qs)
    if remove_EC_bg:
        ns = ns - min(ns)
    if force_through_zero:
        F_cal = sum(Ys) / sum(ns)  # I'd actually be surprised if any fitting beat this
    else:
        pfit = np.polyfit(ns, Ys, deg=1)
        F_cal = pfit[0]
    m.F_cal = F_cal
    # ----- plot the results -------- #
    if color is None:
        color = m.get_color()
    if ax is not None:
        ax2 = []
        if unit == "p":
            ns_plot, Ys_plot = ns * 1e12, Ys * 1e12
        elif unit == "n":
            ns_plot, Ys_plot = ns * 1e9, Ys * 1e9
        elif unit == "u":
            ns_plot, Ys_plot = ns * 1e6, Ys * 1e6
        else:
            ns_plot, Ys_plot = ns, Ys
        if ax2a is not None:  # plot the internal H2 calibration
            V_str, J_str = sync_metadata(data, verbose=False)
            if n_el < 0:
                ax2a.invert_xaxis()
            ax2a.plot(Vs, ns_plot, ".-", color=J_color, markersize=10)
            ax2b.plot(Vs, Ys_plot, "s", color=color)
            ax2a.set_xlabel(V_str)
            if mode == "average":
                ax2a.set_ylabel(
                    "<I>/(" + str(n_el) + "$\mathcal{F}$) / [" + unit + "mol s$^{-1}$]"
                )
                ax2b.set_ylabel("<" + mass + " signal> / " + unit + "A")
            else:
                ax2a.set_ylabel(
                    "$\Delta$Q/(" + str(n_el) + "$\mathcal{F}$) / " + unit + "mol"
                )
                ax2b.set_ylabel(mass + "signal / " + unit + "C")
            if name is not None:
                ax2a.set_title(name)
                ax2b.set_title(name)
            colorax(ax2b, color)
            colorax(ax2a, J_color)
            # align_zero(ax2a, ax2b)
            ax2 += [ax2a, ax2b]
        if ax2c is not None:
            ax2c.plot(ns_plot, Ys_plot, ".", color=color, markersize=10)
            # plot the best-fit line
            if force_through_zero:
                ns_pred_plot = np.sort(np.append(0, ns_plot))
                Y_pred_plot = F_cal * ns_pred_plot
            else:
                ns_pred_plot = np.sort(np.append(0, ns_plot))
                Y_pred_plot = (
                    F_cal * ns_pred_plot
                )  # + pfit[1] # it's actually best to use this line.
            # print('ns_pred_plot = ' + str(ns_pred_plot)) # debugging
            # print('Y_pred_plot = ' + str(Y_pred_plot)) # debugging
            ax2c.plot(ns_pred_plot, Y_pred_plot, "--", color=color)
            if mode == "average":
                ax2c.set_xlabel(
                    "<I>/(" + str(n_el) + "$\mathcal{F}$) / [" + unit + "mol s$^{-1}$]"
                )
                ax2c.set_ylabel("<" + mass + " signal> / " + unit + "A")
            else:
                ax2c.set_xlabel(
                    "$\Delta$Q/(" + str(n_el) + "$\mathcal{F}$) / " + unit + "mol"
                )
                ax2c.set_ylabel(mass + " signal / " + unit + "C")
            if name is not None:
                ax2c.set_title(name)
            ax2 += [ax2c]
    # ------- parse 'out' and return -------- #
    # NOTE: the original conditions here were inverted ("if fig1 and not ax1"),
    # which could never recover a missing figure and would have crashed
    # (indexing None) had they ever been true. Fixed to fetch the figure from
    # the axes only when the figure is actually missing.
    if fig1 is None and ax1 is not None:
        try:
            fig1 = ax1[0].get_figure()
        except TypeError:
            fig1 = ax1.get_figure()
    if fig2 is None and ax2:
        fig2 = ax2[0].get_figure()
    possible_outs = {
        "ax": [ax1, ax2],
        "fig": [fig1, fig2],
        "Molecule": m,
        "Is": Is,
        "Qs": Qs,
        "F_cal": F_cal,
        "Vs": Vs,
        "ns": ns,
        "Ys": Ys,
    }
    if type(out) is str:
        outs = possible_outs[out]
    else:
        outs = [possible_outs[o] for o in out]
    if verbose:
        print("\nfunction 'calibration_curve' finished!\n\n")
    return outs
def save_calibration_results(mdict, f):
    """Pickle the calibration-relevant attributes of each molecule in mdict.

    mdict maps molecule names to Molecule-like objects. For each one, any of
    the attributes 'primary', 'F_cal', 'cal_mat', 'formula' and 'real_name'
    that are present are collected into a plain dict, and the resulting
    {name: {attr: value}} structure is pickled. f may be a file path (str)
    or an already-open binary file object.
    """
    attrs_of_interest = ("primary", "F_cal", "cal_mat", "formula", "real_name")
    calibration_results = {
        name: {
            attr: getattr(molecule, attr)
            for attr in attrs_of_interest
            if hasattr(molecule, attr)
        }
        for name, molecule in mdict.items()
    }
    if isinstance(f, str):
        with open(f, "wb") as outfile:
            pickle.dump(calibration_results, outfile)  # save it
    else:  # then it must be a file
        pickle.dump(calibration_results, f)  # save it
def load_calibration_results(f, verbose=True):
    """
    Given the name of a pickle file saved by save_calibration_results(),
    opens the dictionary stored in the pickle file and does its best to
    convert the information stored in its values into molecule objects.

    f may be a file path (str) or an open binary file object.
    Returns a dict mapping molecule name -> Molecule (or, as a last resort,
    the raw attribute dict from the pickle).
    """
    if verbose:
        print("\n\nfunction 'load_calibration_results' at your service!\n")
    if type(f) is str:
        with open(f, "rb") as f:  # load the calibrations!
            calibration_results = pickle.load(f)
    else:
        calibration_results = pickle.load(f)
    mdict = {}  # turn the calibration results back into Molecule objects
    for mol, result in calibration_results.items():
        try:
            m = Molecule(mol, verbose=verbose)
        except FileNotFoundError:  # this happens if any molecule names were changed
            try:
                # fall back to the name the molecule had when it was saved
                m = Molecule(result["real_name"], verbose=verbose)
            except (KeyError, FileNotFoundError):
                print(
                    "... neither anything for the molecule's 'real_name'."
                    + " Just returning what was in the pickle."
                )
                m = result
            else:
                print(
                    "awesome! Managed to load Molecule from result['real_name'] = "
                    + result["real_name"]
                    + " instead"
                )
        # NOTE(review): if both lookups failed above, m is the raw dict and the
        # attribute assignments below would raise AttributeError — confirm that
        # this double-failure path is never hit in practice.
        m.name = mol
        for attr, value in result.items():
            setattr(m, attr, value)
        mdict[mol] = m
    if verbose:
        print("\nfunction 'load_calibration_results' finished!\n\n")
    return mdict
|
ScottSoren/EC_MS | _tests_/20C02_Spectrum_demo/Spectra_class_demo.py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 20 19:08:46 2020
@author: scott
This example is pending more writing in the Zilien.py module and its integration
with the Spectra class. You will have to wait.
"""
import os
import numpy as np
from matplotlib import pyplot as plt
from EC_MS import Spectra
plt.close("all")  # close any figures left over from a previous run
print(__doc__)  # echo the module docstring describing this (pending) demo
|
ScottSoren/EC_MS | src/EC_MS/Quantification.py | <reponame>ScottSoren/EC_MS
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 14 13:44:31 2016
@author: scott
"""
from __future__ import print_function, division
import os
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
from .Molecules import Molecule
from .Combining import cut
from .parsing_tools import get_cols_for_mass
from .Object_Files import lines_to_dictionary, date_scott
from .EC import sync_metadata
from . import Chem
# Locations of the packaged data and preference files, resolved relative to
# this module so they work regardless of the current working directory.
molecule_directory = (
    os.path.dirname(os.path.realpath(__file__)) + os.sep + "data" + os.sep + "molecules"
)
data_directory = os.path.dirname(os.path.realpath(__file__)) + os.sep + "data"
preferencedir = os.path.dirname(os.path.realpath(__file__)) + os.sep + "preferences"
# Load the standard plotting colors once at import time; entries are looked up
# below as standard_colors[mass] (e.g. standard_colors["M44"]).
with open(preferencedir + os.sep + "standard_colors.txt", "r") as f:
    lines = f.readlines()
standard_colors = lines_to_dictionary(lines)["standard colors"]
def rewrite_spectra(
    NIST_file="default",
    RSF_file="default",
    mols="all",
    writesigma=False,
    overwrite=False,
    writespectra=False,
    writeRSF=False,
):
    """
    Reformats NIST data copied to a text file given by 'NIST_file', and writes
    it to the molecule files in data_directory.

    writesigma writes electron-impact ionization cross sections (Ang^2),
    writespectra writes normalized QMS spectra, and writeRSF writes Hiden
    relative sensitivity factors from 'RSF_file'. mols can be 'all' or a
    collection of molecule names to restrict which molecules are updated;
    existing values are only replaced if overwrite=True.
    Returns the last parsed file structure (None if no flags were set).
    """
    structure = None  # FIX: was unbound (NameError at return) if no flags set
    if writesigma or writespectra:
        if NIST_file == "default":
            NIST_file = data_directory + os.sep + "NIST_spectra_data.txt"
        with open(NIST_file, "r") as f:
            lines = f.readlines()
        structure = lines_to_dictionary(lines)
        sigma_dict = structure[
            "sigma_100eV"
        ]  # dictionary with electron impact ionizations in Ang^2
        spectra_dict = structure[
            "Spectra"
        ]  # dictionary with QMS spectra as mass1,rel_val1 mass2,relval2
    if writesigma:
        for (mol, sigma) in sigma_dict.items():
            if not (mols == "all" or mol in mols):
                continue
            try:
                m = Molecule(mol)
            except FileNotFoundError:
                # no data file for this molecule yet: create a minimal stub...
                with open(molecule_directory + os.sep + mol + ".txt", "w") as f:
                    f.write("name: " + mol)
                # ...and load it. FIX: the original left m unbound here, so the
                # hasattr() check below raised NameError for a new molecule.
                m = Molecule(mol)
            if hasattr(m, "sigma") and m.sigma is not None:
                if not overwrite:
                    continue
            l = ("sigma_100eV", sigma)
            m.write(l)
    if writespectra:
        for (mol, specline) in spectra_dict.items():
            if not (mols == "all" or mol in mols):
                continue
            m = Molecule(mol)
            if (
                hasattr(m, "spectrum")
                and m.spectrum is not None
                and len(m.spectrum) > 0
            ):
                if not overwrite:
                    continue
            masses = []
            rel_vals = []
            date = date_scott()
            spectrum = {"Title": "NIST_" + date}
            for spec in specline.split(" "):
                (mz, v) = spec.split(",")
                masses += ["M" + mz]
                # FIX: was eval(v) — float() parses the same numeric strings
                # without evaluating arbitrary expressions from the data file
                rel_vals += [float(v)]
            rel_vals = np.array(rel_vals)
            rel_vals = (
                100 * rel_vals / np.max(rel_vals)
            )  # normalize spectrum to % of max peak
            for (mass, rel_val) in zip(masses, rel_vals):
                spectrum[mass] = rel_val
            l = ("Spectrum", spectrum)
            m.write(l)
    if writeRSF:
        if RSF_file == "default":
            RSF_file = data_directory + os.sep + "Relative_Sensativity_Factors.txt"
        with open(RSF_file) as f:
            lines = f.readlines()
        structure = lines_to_dictionary(lines)
        for (mol, value) in structure.items():
            if not (mols == "all" or mol in mols):
                continue
            m = Molecule(mol)
            mass = "M" + str(value[0])
            RSF = value[1]
            l = ("Hiden", (mass, RSF))
            m.write(l)
    return structure
def calibration_compare(calmol=["H2", "O2", "CO2", "Cl2"]):
    """Print experimental vs predicted calibration factors for each molecule.

    For every molecule name in calmol, loads the Molecule, fits its internal
    calibration, and prints the experimental F_cal, the NIST-derived factor,
    the Hiden relative sensitivity factor when available, and their ratios.
    (Historically: no correlation was found; the setup is very H2-sensitive.)
    """
    for mol in calmol:
        molecule = Molecule(mol, verbose=False)
        molecule.get_RSF()
        molecule.calibration_fit(verbose=False, ax=None)
        F_experimental = molecule.F_cal
        F_nist = molecule.ifcs
        has_rsf = "rsf" in dir(molecule)
        print(
            f"{mol} at {molecule.primary}, calibration factors:\n"
            f"\tc1 = {F_experimental} C/mol (experimental from Pt) \n"
            f"\tc2 = {F_nist} Ang^2 (calculated from NIST) \n"
            f"r12 = {F_experimental / F_nist} (ratio) "
        )
        if has_rsf:
            F_hiden = molecule.rsf
            print(
                f"\tc3 = {F_hiden} (relative sensitivity factor from Hiden) \n"
                f"r13 = {F_experimental / F_hiden} (ratio)"
            )
def RSF_to_F_cal(*args, **kwargs):
    """Deprecated alias: forwards all arguments to recalibrate()."""
    print("'RSF_to_F_cal' has been renamed 'recalibrate'. Remember that next time!")
    return recalibrate(*args, **kwargs)
def recalibrate(
    quantify={},  # molecules we want to calc F_cal (at the given mass) for by extrapolation
    trust=None,  # says what to trust, e.g., 'internal', 'external', or 'all'
    trusted=[],  # list of Molecule objects with trusted F_cal
    internal=[],  # list of Molecule objects with F_cal from internal calibration
    external=[],  # list of Molecule objects with F_cal from internal calibration
    RSF_source="NIST",  #'NIST' uses partial ionization cross section
    # used in Trimarco2018 (Sniffer 2.0)
    #'Hiden' is RSF's from Hiden Analytical
    transmission_function="default",  # 'default' is T(M) = M^(-1/2)
    trendline=True,
    ax="new",
    labels=False,
    writeit=False,
    writeprimary=False,
    rewriteit=False,
):
    """
    ---- You need -----
    Calibration factor for a given set of molecules at a given set
    of masses.
    This function returns mdict, which is a dictionary of the form
    {name:molecule, ...} where molecules are objects of the class EC_MS.Molecule,
    molecule.primary is the mass at which it is calibrated, and molecule.F_cal
    is its calibration factor at that mass in C/mol
    ------- You have -------
    - The names of the molecules you need and the masses you would like them
    Input these as a dictionary:
        quantify = {molecule1:mass1, molecule2:mass2, ...}, e.g.:
        quantify = {'CH4':'M15', 'C2H4':'M26'}
    - Molecules for which you have a trusted calibration. Input these
    as a list of objects of the class EC_MS.Molecule. This list can be input
    as "trusted", "internal" (for internal calibrations), or "external". If
    both "internal" and "external" are given, the function by default only trusts
    the internal calibrations when calculating calibration factors for molecules
    in quantify, but co-plots both of them. e.g.:
        internal = {H2, CO2},
        external = {He, CO}
    where, for example, H2.primary = 'M2', H2.F_cal = 1.72, ...
    ---- additional options ----
    RSF_source: by default, uses ionization cross section and spectra from NIST,
    and a transmission function
    transmission_function: proportional to probability an ion will make it
    through quadrupole. By default 1/sqrt(M)
    ax: by default, makes a new axis to plot calibration factors vs relative
    sensitivity factors
    trendline: whether to draw a trendline on the plot
    writeit: whether to save the calibration factors to the molecules' data files
    """
    print("\n\nfunction 'recalibrate' at your service!\n")
    # ----------- parse inputs, prepare output -------- #
    # prepare mdict, put trusted stuff in it
    mdict = {}  # to be returned by the function
    if quantify in ["all", "standard"]:
        quantify = [
            ("H2", "M2"),
            ("He", "M4"),
            ("CH4", "M15"),
            ("H2O", "M18"),
            ("N2", "M28"),
            ("CO", "M28"),
            ("C2H4", "M26"),
            ("C2H6", "M30"),
            ("O2", "M32"),
            ("Ar", "M40"),
            ("CO2", "M44"),
            ("Cl2", "M70"),
        ]
    if type(quantify) is list:
        quantify = dict(quantify)
    quantmols = list(quantify.keys())
    for name, mass in quantify.items():
        mdict[name] = Molecule(name)
        mdict[name].primary = mass
    # accept dicts as well as lists for the three calibration inputs
    if type(internal) is dict:
        print("converting internal from dict to list")
        internal = list(internal.values())
    if type(external) is dict:
        print("converting external from dict to list")
        external = list(external.values())
    if type(trusted) is dict:
        print("converting external from dict to list")
        trusted = list(trusted.values())
    if len(trusted) == 0 and trust is None:
        trusted = internal
    # NOTE(review): when trusted defaults to internal above, each internal
    # molecule appears twice in this concatenation, so the "multiple input
    # molecules" warning below can fire spuriously — confirm intent.
    for m in external + internal + trusted:
        try:
            name = m.name
        except AttributeError:
            print("WARNING! " + str(m) + " has no name")
            pass
        else:
            if name in mdict:
                print(
                    "WARNING! recalibrate recieved multiple input molecules named "
                    + name
                )
            mdict[m.name] = m
    # store the mass and calibration factor of the trusted molecule in calmol and F_cals, respectively
    # (this could perhaps be done smarter)
    if trust == "all" or trust == "internal":
        trusted += internal
    if trust == "all" or trust == "external":
        trusted += external
    if trust == "files":  # trusts F_cal saved in molecule files. never used.
        trusted = [Molecule(name) for name in trusted]
    if len(internal) == 0 and len(external) == 0 and len(trusted) > 0:
        internal = trusted  # so that trusted points show up as squares
    calmol = {}
    F_cals = {}
    for m in trusted:
        try:
            calmol[m.name] = m.primary
            F_cals[m.name] = m.F_cal
        except AttributeError:
            print(
                "Cannot use "
                + str(m)
                + " to calibrate. Calibration must"
                + " be based on molecule objects with attributes 'primary' and 'F_cal'."
            )
    print("trusted calibrations at: " + str(calmol))
    print("F_cals = " + str(F_cals))
    if len(internal) == 0 and len(external) == 0:
        internal = calmol  # so that they plot as squares
    # --------- get the F_cal vs RSF relationship for the trusted molecules ------- #
    RSF_vec = []
    F_cal_vec = []
    if transmission_function == "default":
        # default quadrupole transmission: T(M) = 1/sqrt(M)
        def T(M):
            return M ** (-1 / 2)
    elif transmission_function == 1:
        def T(M):
            return 1
    else:
        T = transmission_function
    for m in trusted:
        name, mass, F_cal = m.name, m.primary, m.F_cal
        rsf = m.get_RSF(RSF_source=RSF_source, transmission_function=T, mass=mass)
        print(
            "F_"
            + name
            + "_"
            + mass
            + " = "
            + str(F_cal)
            + ", "
            + "rsf_"
            + name
            + "_"
            + mass
            + " = "
            + str(rsf)
        )
        F_cal_vec += [F_cal]
        RSF_vec += [rsf]
        if writeit:
            m.write(
                "#the following F_cal value is for "
                + mass
                + ", trusted on "
                + date_scott()
            )
            l = ("F_cal", F_cal)
            m.write(l)
    # fit F_cal = r * rsf through the trusted points
    def fit_fun(x, a):
        return a * x
    if len(trusted) <= 1:
        r = F_cal_vec[0] / RSF_vec[0]
    else:
        r, pcov = curve_fit(fit_fun, RSF_vec, F_cal_vec, p0=1)
        try:
            r = r[0]  # I think it comes back as an array, but I just want a number
        except TypeError:
            pass
    RSF_unit = "a.u."  # {'Hiden':'a.u.', 'NIST':'a.u.'}[RSF_source]
    print(
        "\n--- Calibration Factor / rsf = " + str(r) + " (C/mol)/" + RSF_unit + "---\n"
    )
    # ----------- prepare the figure, plot the given F_cals
    if ax == "new":
        fig, ax = plt.subplots()
    if ax is not None:
        ax.set_xlabel("Relative Sensitivity Factor / [" + RSF_unit + "]")
        ax.set_ylabel("F_cal / [C/mol]")
        # internal calibrations plot as squares
        for m in internal:
            name, mass, F_cal = m.name, m.primary, m.F_cal
            rsf = m.get_RSF(
                RSF_source=RSF_source, transmission_function=T, mass=mass, verbose=False
            )
            try:
                color = m.get_color()
            except AttributeError:
                color = standard_colors[mass]
            print("plotting " + name + " as a color=" + color + " square.\n")
            ax.plot(rsf, F_cal, "s", color=color, markersize=10)
            if labels:
                ax.annotate(
                    name + " at m/z=" + mass[1:], xy=[rsf + 0.05, F_cal], color=color
                )
        # external calibrations plot as triangles
        for m in external:
            name, mass, F_cal = m.name, m.primary, m.F_cal
            rsf = m.get_RSF(
                RSF_source=RSF_source, transmission_function=T, mass=mass, verbose=False
            )
            try:
                color = m.get_color()
            except AttributeError:
                color = standard_colors[mass]
            print("plotting " + name + " as a color=" + color + " triangle.\n")
            ax.plot(rsf, F_cal, "^", color=color, markersize=10)
            if labels:
                ax.annotate(
                    name + " at m/z=" + mass[1:], xy=[rsf + 0.05, F_cal], color=color
                )
    # -------- use rsf to predict F_cal for all the other molecules
    rsf_max = 0
    for (name, m) in mdict.items():
        print("\n\n --- working on " + name + " ----")
        if name in quantify:
            mass = quantify[name]
        else:
            mass = m.primary
        color = m.get_color()
        # calculate RSF
        rsf = m.get_RSF(RSF_source=RSF_source, transmission_function=T, mass=mass)
        rsf_max = max(rsf_max, rsf)
        if writeit:
            m.write(
                "#the folowing rsf is calculated for " + mass + " on " + date_scott()
            )
            m.write(("rsf", rsf))
        if rsf is None:
            print("missing rsf for " + name + " at " + mass)
            continue  # then nothing to do
        # get (already plotted) or calculate (and plot) F_cal
        if name in F_cals:
            F_cal = F_cals[name]
        elif name in quantmols:
            F_cal = r * rsf  # the extrapolation!
        if name in quantmols and ax is not None:
            print("plotting " + name + " as a color=" + color + " dot.")
            ax.plot(rsf, r * rsf, ".", color=color, markersize=10)
            if labels:
                ax.annotate(
                    name + " at m/z=" + mass[1:], xy=[rsf + 0.05, F_cal], color=color
                )
        print(name + ": F_cal = " + str(F_cal))
        # write it to the Molecule object
        if name in quantmols:  # only do it for the molecules that were asked for
            if "cal" in m.__dict__:
                m.cal[mass] = F_cal
            if "primary" not in m.__dict__:
                m.primary = mass
            if m.primary == mass:
                m.F_cal = F_cal
        # write it to the Molecule's data file
        if writeit:
            m.write(
                "#the following F_cal value is for "
                + mass
                + ", extrapolated "
                + "from trusted values based on RSF from "
                + RSF_source
                + " on "
                + date_scott()
            )
            l = ("F_cal", F_cal)
            m.write(l)
        if writeprimary:
            l = ("primary", mass)
            m.write(l)
        if rewriteit:
            m.rewrite()
    # make a trendline (needs to come here for use of rsf_max)
    if ax is not None and trendline:
        ax.plot([0, rsf_max], [0, r * rsf_max], "k--")
    # ---- done!
    print("\nfunction 'recalibrate' finished!\n\n")
    if ax is not None:
        return mdict, ax
    return mdict
def line_through_zero(x, y):
    """Return the slope a that minimizes the squared error of y = a * x."""

    def _through_origin(t, slope):
        return slope * t

    # seed the fit with the mean pointwise ratio
    initial_guess = [np.mean(y / x)]
    fitted, _covariance = curve_fit(_through_origin, x, y, p0=initial_guess)
    return fitted[0]
def get_signal(
    MS_data,
    mass,
    tspan=None,
    removebackground=None,
    background=None,
    t_bg=None,
    endpoints=5,
    fillcolor=None,
    unit="A",
    verbose=False,
    override=False,
    plotit=False,
    ax="new",
    return_bg=False,
):
    """
    Returns [x, y] where x is the time and y is the QMS signal for the given
    mass (e.g. 'M44'), optionally cut to tspan, scaled to the requested
    current unit, and background-subtracted.

    background may be 'constant', 'start', 'linear', a number, or None;
    t_bg gives a timespan (or time point) at which to evaluate a constant
    background. If return_bg is True, the background is returned as a third
    element. A bit trivial, but I like having this function to work in
    parallel to get_flux.
    """
    if verbose:
        print("getting signal for " + mass)
    xcol, ycol = get_cols_for_mass(mass, MS_data)
    x = MS_data[xcol]
    y = MS_data[ycol]
    if len(x) == 0:
        print("WARNING: no data for " + mass)  # FIX: was misspelled "WARNIGN"
        return x, y
    # scale raw Amperes to the requested prefix
    if unit[-1] == "A":
        if unit[:-1] == "n" or unit[:-1] == "nano":
            y = y * 1e9
        elif unit[:-1] == "u" or unit[:-1] == "micro":
            y = y * 1e6
        elif unit[:-1] == "p" or unit[:-1] == "pico":
            y = y * 1e12
    if tspan is None:
        tspan = "tspan"
    if type(tspan) is str and not tspan == "all":
        try:
            tspan = MS_data[tspan]
        except KeyError:
            print("WARNING: no tspan available to get_signal()! using tspan='all'")
            tspan = "all"
    if not (isinstance(tspan, str) and tspan == "all"):
        try:
            x, y = cut(x, y, tspan, override=override)
        except TypeError:
            # tspan is a set of explicit time points rather than an interval:
            # interpolate the signal onto those points.
            # FIX: the original assigned x = tspan before interpolating, so it
            # interpolated against tspan instead of the measured time vector.
            try:
                y = np.interp(tspan, x, y)
                x = tspan
            except ValueError:
                print("WARNING: couldn't cut according to tspan=" + str(tspan))
    if len(x) == 0:
        print("WARNING: no signal in the requested tspan for " + mass)
        return [x, y]
    if background is None and t_bg is not None:
        background = "constant"
    if removebackground is None:
        removebackground = not (background is None)
    if removebackground:
        if background is None:
            background = "constant"
        if background == "start":
            background = np.average(y[:endpoints])
        elif background == "constant":
            if type(removebackground) is float:
                # e.g. removebackground=0.9 subtracts 90% of the minimum
                background = removebackground * min(y)
            elif t_bg is not None:
                try:
                    if verbose:
                        print("Averaging background at t_bg = " + str(t_bg))
                    x_bg, y_bg = get_signal(
                        MS_data,
                        mass=mass,
                        tspan=t_bg,
                        removebackground=False,
                        unit=unit,
                    )
                    background = np.mean(y_bg)
                except TypeError:
                    # t_bg is a single time point: interpolate instead
                    if verbose:
                        print("Interpolating background at t_bg = " + str(t_bg))
                    background = np.interp(t_bg, x, y)
            else:
                background = min(y)
        elif type(background) is float:
            pass  # the given number is used directly
        elif background == "linear":
            # straight line through the averaged endpoints of the signal
            x_end = [np.average(x[:endpoints]), np.average(x[-endpoints:])]
            y_end = [np.average(y[:endpoints]), np.average(y[-endpoints:])]
            background = np.interp(x, x_end, y_end)
    if plotit:
        if ax == "new":
            fig, ax = plt.subplots()
        ax.plot(x, y, "k")
        if removebackground:
            ax.plot(x, background * np.ones(x.shape), "r--")
            if fillcolor:
                ax.fill_between(x, background, y, where=y > background, color=fillcolor)
        # ax.set_title(mass)
    if removebackground:
        y = y - background
    if return_bg:
        return [x, y, background]
    else:
        return [x, y]
def get_flux(MS_data, mol, **kwargs):
    """Return [x, y]: time and molecular flux in nmol/s for mol's primary mass.

    Thin convenience wrapper: accepts either a molecule name (str) or a
    Molecule-like object and delegates to its get_flux() method, where the
    real implementation lives (see Molecules.Molecule).
    """
    molecule = Molecule(mol, verbose=False) if isinstance(mol, str) else mol
    return molecule.get_flux(MS_data, **kwargs)
def get_current(EC_data, tspan="tspan", unit="A", verbose=False):
    """Return [t, j]: time and current from EC_data in the requested unit.

    A normalized unit (anything containing '/') reads the synced J column;
    otherwise the raw current column is used, converted from mA as needed.
    tspan may be 'all', a key into EC_data, or an explicit interval.
    I'm not happy with this function — the handling of mass-, area-, and
    otherwise normalized currents and units needs a proper upgrade, but it
    works for now.
    """
    if verbose:
        print("getting current in " + unit)
    time_key = "time/s"
    V_str, J_str = sync_metadata(EC_data, verbose=False)
    if "/" in unit:
        # normalized current (e.g. mA/cm^2): use the synced column directly
        t_vec, j_vec = EC_data[time_key], EC_data[J_str]
    else:
        # raw current; column name may be stored under 'I_str'
        try:
            current_key = EC_data["I_str"]
        except KeyError:
            current_key = "I/mA"
        t_vec, j_vec = EC_data[time_key], EC_data[current_key]
        if unit == "A":
            j_vec = j_vec * 1e-3  # mA --> A
    if unit == "A/m^2":  # SI units: mA/cm^2 --> A/m^2
        j_vec = j_vec * 10
        print(
            "Scott has not yet done something smart for current units. If you try"
            + " anything unusual it will likely mess up!"
        )
    if type(tspan) is str and not tspan == "all":
        tspan = EC_data[tspan]
    if not tspan == "all":
        t_vec, j_vec = cut(t_vec, j_vec, tspan)
    return [t_vec, j_vec]
def get_potential(EC_data, tspan="tspan", scale="RHE", verbose=False):
    """Return [t, V]: time and electrode potential on the requested scale.

    scale='RHE' reads the synced (RHE-referenced) potential column;
    scale='RE' reads the raw column named by EC_data['E_str'].
    tspan may be 'all', a key into EC_data, or an explicit interval.
    Like get_current, this awaits a proper upgrade of unit handling.
    """
    if verbose:
        print("getting potential on " + scale + " scale if possible")
    time_key = "time/s"
    V_str, J_str = sync_metadata(EC_data, verbose=False)
    if scale == "RHE":
        t_vec, v_vec = EC_data[time_key], EC_data[V_str]
    elif scale == "RE":
        t_vec, v_vec = EC_data[time_key], EC_data[EC_data["E_str"]]
        print(
            "Scott has not yet done something smart for current units. If you try"
            + " anything unusual it will likely mess up!"
        )
    if type(tspan) is str and not tspan == "all":
        tspan = EC_data[tspan]
    if not tspan == "all":
        t_vec, v_vec = cut(t_vec, v_vec, tspan)
    return [t_vec, v_vec]
def predict_current(
    EC_and_MS,
    mols,
    tspan=None,
    RE_vs_RHE=None,
    A_el=None,
    ax="new",
    colors=None,
    verbose=1,
):
    """
    Calculates a predicted electrical current based on MS_data and molecular
    data loaded from the files named in the list 'mols', assuming 100%
    faradaic efficiency at each molecule's primary mass (as of 16K28).

    tspan='x1' uses the time axis of the first molecule's flux; otherwise
    the EC time axis is used, optionally restricted to tspan (or the
    dataset's 'tspan_2'). If ax is not None, the partial currents (when
    colors is given) and the total are plotted.
    """
    V_str, J_str = sync_metadata(EC_and_MS, RE_vs_RHE, A_el)
    A_el = EC_and_MS["A_el"]
    if A_el is None:
        A_el = 1
    partials = []
    for mol in mols:
        molecule = Molecule(mol)
        # molecular flux in mol/s at the primary mass, via the calibration
        [x, i_mol] = get_flux(EC_and_MS, molecule)
        j_mol = i_mol / A_el  # molecular flux density in mol/cm^2/s
        j = j_mol * molecule.n_el * Chem.Far * 1e3  # current density in mA/cm^2
        partials += [[x, j]]
    # this handling of tspan might be excessive for now...
    # it's intricate because I might need to interpolate MS data to EC time
    if tspan is None and "tspan_2" not in EC_and_MS.keys():
        # FIX: original read `tspan == "x1:"` — a no-op comparison (and a stray
        # colon) where an assignment matching the check below was intended.
        tspan = "x1"
    if tspan == "x1":
        t = partials[0][0]
    elif "time/s" in EC_and_MS.keys():
        t = EC_and_MS["time/s"]
    if tspan is None or tspan == "tspan_2":
        tspan = EC_and_MS["tspan_2"]
    if len(tspan) == 2 and type(tspan) is not str:
        t = [t_i for t_i in t if tspan[0] < t_i and t_i < tspan[1]]
    else:
        tspan = [t[0], t[-1]]
    if ax == "new":
        fig = plt.figure()
        ax = fig.add_subplot(111)
    js = []
    j_total = np.zeros(np.shape(t))
    for ([x, j], mol) in zip(partials, mols):
        # restrict each partial to the common time window, then interpolate
        I_keep = [I for (I, x_I) in enumerate(x) if t[0] < x_I and x_I < t[-1]]
        x = x[I_keep]
        j = j[I_keep]
        j_int = np.interp(t, x, j)
        j_total += j_int
        js += [j_int]
        if ax is not None and colors is not None:
            ax.plot(t, j_int, colors[mol], label=mol)
    if ax is not None:
        ax.plot(t, j_total, "k:", label="total")
if __name__ == "__main__":
    # Ad-hoc demo of RSF-based extrapolation of calibration factors.
    plt.close("all")
    # calibration_compare()
    from EC_MS import set_figparams
    set_figparams(figwidth=8)
    # trusted experimental calibration factors in C/mol
    F_cals = {
        "CO2": 23.371483271036237,
        "H2": 29.899494846007542,
        "O2": 14.756572997297784,
    }
    # NOTE(review): RSF_to_F_cal forwards to recalibrate(), whose signature
    # above has no 'calmol' or 'F_cals' keywords — this demo call looks stale
    # and would raise TypeError; confirm before running.
    mdict, ax = RSF_to_F_cal(
        calmol={"CO2": "M44"},
        RSF_source="Hiden",
        trust=["H2", "O2", "CO2"],
        F_cals=F_cals,
    )
    # calmol is the molecule(s) who's calibration is extrapolated to the others
    # RSF_source can be 'NIST' or 'Hiden'.
    # trust gives the molecules that become the red dots.
    # F_cals overrides the stored calibration factors for given molecules.
    # mdict containes the Molecule objects with the extrapolated F_cal
    # ax is the axis it's plotted on.
    for mol, m in mdict.items():
        rsf = m.get_RSF(RSF_source="Hiden", verbose=False)
        print(mol + "\tF_cal = " + str(m.F_cal) + "\tRSF = " + str(rsf))
    ax.set_xlabel("Relative sensitivity factor / [a.u.]")
    ax.set_ylabel("F$_{\mathrm{cal}}$ / [C mol$^{-1}$]")
    plt.savefig("Fcal_vs_RSF.png")
|
ScottSoren/EC_MS | src/EC_MS/Data_Importing.py | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 10 03:17:41 2017
@author: scott
"""
"""
This module is thought of as a set of high-level functions that can do
switching between file types and combining of multiple files.
TODO:
Low-level functions (many of them having to do with timestamp parsing) should
be placed in parsing_tools, so that they can be imported by any EC_MS module
Right now, the text_to_data function does a ton of stuff. Hopefully this will
not continue to be the case.
TODO: The goal is that file-type-specific things all get moved to modules named
for that specific file type or data producing equipment.
"""
import os
from pathlib import Path
import re
import codecs
import numpy as np
from .parsing_tools import (
numerize,
timestamp_match,
get_creation_time,
parse_date,
timestring_to_epoch_time,
epoch_time_to_timestamp,
)
def import_EC_data(
    full_path_name,
    title="get_from_file",
    data_type="EC",
    N_blank=10,  # kept for backward compatibility; text_to_data does not use it
    verbose=True,
    header_string=None,
    timestamp=None,
):
    """
    Import an EC data file and return it as a numerized dataset dictionary.

    full_path_name -- path to the file to import
    title -- dataset title; 'get_from_file' lets text_to_data read it
    data_type -- type of data, by default 'EC' (Biologic EC-Lab)
    verbose -- print progress information if True
    header_string, timestamp -- passed through to text_to_data

    Bug fix: the caller's arguments were previously ignored (hard-coded in
    the inner call), and N_blank=10 was passed to text_to_data, which has no
    such parameter, raising a TypeError on every call.
    """
    file_lines = import_text(full_path_name, verbose=verbose)
    dataset = text_to_data(
        file_lines,
        title=title,
        data_type=data_type,
        verbose=verbose,
        header_string=header_string,
        timestamp=timestamp,
    )
    numerize(dataset)
    return dataset
"""
The following couple functions are adapted from EC_MS on 17L09
as last commited to EC_MS with code c1c6efa
They might benefit from a full rewrite, but not now.
"""
def import_text(full_path_name="current", verbose=True):
    """
    Import the full text of a file as a list of lines (from readlines()).

    full_path_name -- path to the file. Special values:
        'input' -- prompt the user for the path interactively
        'current' -- use the current working directory (which is a directory,
            so the user is then prompted for the file name)
    verbose -- print progress information if True

    When I first wrote it for EC_MS, way back in the day, I made it so you can
    call it without any arguments, and then input. probably unecessary.
    """
    if verbose:
        print("\n\nfunction 'import_text' at your service!\n")
    if full_path_name == "input":
        full_path_name = input(
            "Enter full path for the file name as 'directory"
            + os.sep
            + "file.extension'"
        )
    if full_path_name == "current":
        full_path_name = os.getcwd()
    [directory_name, file_name] = os.path.split(full_path_name)
    if directory_name == "":
        directory_name = "."
    # chdir into the file's folder; restored before returning
    original_directory = os.getcwd()
    os.chdir(directory_name)
    if os.path.isdir(full_path_name) and not os.path.isfile(file_name):
        # a directory was given: list its contents and ask for the file name
        directory_name = full_path_name
        os.chdir(directory_name)
        ls_string = str(os.listdir())
        print("\n" + full_path_name + "\n ls: \n" + ls_string + "\n")
        file_name = input(
            "Directory given. Enter the full name of the file to import\n"
        )
    if verbose:
        print("directory: " + directory_name)
        print("importing data from " + file_name)
    possible_encodings = ["utf8", "iso8859_15"]
    # mpt files seem to be the latter encoding, even though they refer to themselves as ascii
    for encoding_type in possible_encodings:
        try:
            with codecs.open(file_name, "r", encoding=encoding_type) as file_object:
                file_lines = file_object.readlines()
            if verbose:
                print("Was able to readlines() with encoding " + encoding_type)
            break
        except UnicodeDecodeError:
            # wrong encoding: fall through and try the next one
            if verbose:
                print("Shit, some encoding problem in readlines() for " + encoding_type)
        except FileNotFoundError:
            print(
                "File not Found! file_name = "
                + file_name
                + "\nDirectory = "
                + os.getcwd()
                + "\nfiles = "
                + str(os.listdir(os.getcwd()))
            )
            raise
    else:
        # NOTE(review): if every encoding fails, file_lines is never bound and
        # the return below raises UnboundLocalError -- confirm intended.
        print("couldn't read " + file_name + "\n ... may by due to an encoding issue")
    os.chdir(original_directory)
    if verbose:
        print("\nfunction 'import_text' finished!\n\n")
    return file_lines
def text_to_data(
    file_lines,
    title=None,
    timestamp=None,
    date="today",
    tstamp=None,
    tz=None,
    data_type="EC",
    sep=None,
    header_string=None,
    verbose=True,
):
    """
    This method will organize data in the lines of text from a file useful for
    electropy into a dictionary as follows (plus a few more keys)
    {'title':title, 'header':header, 'timestamp':timestamp,
    'data_cols':{colheader1, colheader2, ...},
    colheader1:data1, colheader2:data2, ...}

    file_lines -- list of lines as returned by import_text()
    title -- dataset title; if None, it is read from the header when possible
    timestamp, date, tstamp, tz -- timing metadata; parsed from the header
        when not given explicitly
    data_type -- one of the supported types below; selects the parsing rules
    sep -- column separator; chosen per data_type when None
    header_string -- unused; the header is rebuilt while parsing
    verbose -- print progress information if True

    Supported data types:
    'EC': text output (.mpt) from Biologic for any voltammetric technique
    'MS': text output from cinfdata for mass_time_scan, run by PyExpLabSys
    'SPEC': .csv file saved by SPEC diffraction program at SSRL BL2.1
    'XAS': text saved by XAS program at SSRL BL11.2
    'SI': text output (.csv) from Zilien, Kenneth's software for Spectro Inlets
    'RGA': text output from Residual Gas Analysis program for mass spec
    """
    if verbose:
        print("\n\nfunction 'text_to_data' at your service!\n")
    # dissect header
    N_lines = len(file_lines)  # number of header lines
    N_head = N_lines  # this will change when I find the line that tells me how long the header is
    header_string = ""
    data = {}
    data["data_type"] = data_type
    data["timecols"] = {}
    commacols = []  # will catch if data is recorded with commas as decimals.
    loop = False
    # per-data_type defaults for separator, header length and blank-line count
    if data_type == "SPEC" or data_type == "ULM":
        N_head = 1  # the column headers are the first line
        if sep is None:
            sep = ","
    elif data_type == "SI" or data_type == "MKS":  # Spectro Inlets data
        sep = "\t"  # despite it being a .csv, the separator is a tab.
    elif data_type == "RGA":
        sep = ","
        N_blank = 2
        data["channels"] = {}
    elif data_type == "CHI":  # CH Instruments potentiostat
        N_blank = 2
        sep = ","
    elif data_type == "MS":
        N_blank = 10
    if sep is None:  # EC and MS data all work with '\t'
        sep = "\t"
    # print("N_head = " + str(N_head)) # debugging
    n_blank = 0
    got_col_headers = False
    for nl, line in enumerate(file_lines):
        l = line.strip()
        if nl < N_head - 1:  # we're in the header
            if data_type == "EC":
                if title is None:
                    if re.search("File :", line):
                        title_object = re.search(r"[\S]*\Z", line.strip())
                        title = title_object.group()
                        if verbose:
                            print("name '" + title + "' found in line " + str(nl))
                if re.search(r"[Number ]*header lines", line):
                    N_head_object = re.search(r"[0-9][0-9]*", line)
                    N_head = int(N_head_object.group())
                    if verbose:
                        print("N_head '" + str(N_head) + "' found in line " + str(nl))
                elif timestamp is None and re.search("Acquisition started", line):
                    timestamp_object = re.search(timestamp_match, l)
                    timestamp = timestamp_object.group()
                    date = parse_date(l)
                    if verbose:
                        print("timestamp '" + timestamp + "' found in line " + str(nl))
                elif re.search("Number of loops", line):
                    # Then I want to add a loop number variable to data_cols
                    loop = True
                    data["loop number"] = []
                elif re.search("Loop", line):
                    # each 'Loop n going from point a to b' line extends the
                    # per-point loop-number column
                    n = int(re.search(r"^Loop \d+", line).group()[5:])
                    start = int(re.search(r"number \d+", line).group()[7:])
                    finish = int(re.search(r"to \d+", line).group()[3:])
                    N = finish - start + 1
                    data["loop number"] += N * [n]
            elif data_type == "MS":
                # header ends after N_blank consecutive blank lines
                if len(line.strip()) == 0:
                    n_blank += 1
                    if n_blank >= N_blank and len(file_lines[nl + 1].strip()) > 0:
                        N_head = nl + 2
                    continue
                else:
                    n_blank = 0
                if title is None:
                    object1 = re.search(r'"Comment"[\s]*"[^"]*', line)
                    if object1:
                        string1 = object1.group()
                        title_object = re.search(r"[\S]*\Z", string1.strip())
                        title = title_object.group()[1:]
                        if verbose:
                            print("name '" + title + "' found in line " + str(nl))
                object2 = re.search(r'"Recorded at"[\s]*"[^"]*', line)
                if object2:
                    string2 = object2.group()
                    timestamp_object = re.search(timestamp_match, string2.strip())
                    timestamp = timestamp_object.group()
                    date = parse_date(l)
                    # ^convert yyyy-mm-dd to dd-mm-yyyy
                    if verbose:
                        print("timestamp '" + timestamp + "' found in line " + str(nl))
            elif data_type == "SI":  # Spectro Inlets data format
                items = [
                    item.strip() for item in line.split(sep) if len(item.strip()) > 0
                ]
                if nl < 10:
                    # print(items) # debugging
                    pass
                if len(items) == 0:
                    continue
                if title is None and items[0] == "name":
                    title = items[-1]
                    if verbose:
                        print("title '" + str(title) + "' found in line " + str(nl))
                if items[0] == "offset":
                    offset = float(items[-1])
                    data["SI offset"] = offset
                    if verbose:
                        print(
                            "SI offset '" + str(offset) + "' found in line " + str(nl)
                        )
                if items[0] == "data_start":
                    N_head = int(items[-1])
                    if verbose:
                        print("N_head '" + str(N_head) + "' found in line " + str(nl))
                if nl == N_head - 2:
                    # pre-header line: empty cells inherit the name to the left
                    col_preheaders = [item.strip() for item in line.split(sep)]
                    for i, preheader in enumerate(col_preheaders):
                        if len(preheader) == 0:
                            col_preheaders[i] = col_preheaders[i - 1]
            elif data_type == "MKS":  # old Spectro Inlets MS data format
                # this actually records absolute time (in a fucked up format),
                # so no need to read the timestring.
                if "[Scan Data" in line:  # then the data is coming.
                    N_head = nl + 2
            elif data_type == "RGA":
                if len(line.strip()) == 0:
                    n_blank += 1
                    if n_blank >= N_blank and len(file_lines[nl + 1].strip()) > 0:
                        N_head = nl + 2
                    continue
                else:
                    n_blank = 0
                if re.search("Start time", line):
                    tstamp, date, timestamp = timestring_to_epoch_time(
                        l, tz=tz, out="all", verbose=verbose
                    )
                if re.search(r"\A[0-9]+\s", l):  # lines starting with a number
                    # channel table: maps 'Channel#<n>' to its mass 'M<m>'
                    items = [
                        item.strip()
                        for item in line.split(" ")
                        if len(item.strip()) > 0
                    ]
                    channel = "Channel#" + items[0]
                    mass = "M" + items[1].split(".")[0]
                    data["channels"][channel] = mass
                if "Analog Scan" in l:
                    col_headers = ["m/z", "signal/A"]
                    got_col_headers = True
                    print("got column headers! on line " + str(nl))
            elif data_type == "CHI":
                if len(line.strip()) == 0:
                    n_blank += 1
                    if n_blank >= N_blank and len(file_lines[nl + 1].strip()) > 0:
                        N_head = nl + 2
                    continue
                else:
                    n_blank = 0
                if nl == 0:  # record time (measurement finish time) on top line
                    if verbose:
                        print("finding tstamp from line: " + l)
                    tstamp, date, timestamp = timestring_to_epoch_time(
                        l, tz=tz, out="all", verbose=verbose
                    )
                if "Segment = " in line:
                    data["segments"] = line.split(" = ")[-1].strip()
                    last_segment_line = "Segment " + data["segments"] + ":"
                if "segments" in data and last_segment_line in line:
                    N_blank = 1
                if "Scan Rate (V/s)" in line:
                    data["scan rate"] = (
                        eval(line.split(" = ")[-1].strip()) * 1e3
                    )  # in mV/s
                if "Time/s" in line:  # then it's actually the column header line.
                    N_head = (
                        nl + 2
                    )  # the next line is a blank line, during which we handle the column headers
                    col_headers = l.split(sep)
                    got_col_headers = True  # to be used on next line (nl=N_head-1)
            header_string = header_string + line
        elif nl == N_head - 1:  # then it is the column-header line
            # (EC-lab includes the column-header line in header lines)
            # col_header_line = line
            if data_type == "RGA":  # there's no damn commas on the column header lines!
                if not got_col_headers:
                    col_headers = [
                        col.strip() for col in l.split(" ") if len(col.strip()) > 0
                    ]
            elif data_type == "CHI" and got_col_headers:
                pass
            else:
                col_headers = [col.strip() for col in l.split(sep=sep)]
            if data_type == "MKS":
                col_headers = [col.strip('"') for col in col_headers]
            if data_type == "SI":
                # prefix each column name with its pre-header group name
                for i, col in enumerate(col_headers):
                    try:
                        col_headers[i] = col_preheaders[i] + " - " + col
                    except IndexError:
                        print("WARNING!!! Spectro Inlets pre-column header too short")
                        break
            print(f"col_headers = {col_headers}")  # debugging
            data["N_col"] = len(col_headers)
            data["data_cols"] = set(
                col_headers.copy()
            )  # will store names of columns containing data
            if not len(col_headers) == len(data["data_cols"]):
                print("WARNING: repeated column headers!!!")
                print("col_headers = " + str(col_headers))
            data["col_types"] = dict([(col, data_type) for col in col_headers])
            for col in col_headers:
                data[col] = []  # data will go here
            header_string = header_string + line  # include this line in the header
            if verbose:
                print("Data starting on line " + str(N_head) + "\n")
        elif len(l) == 0:
            # rga and chi text files skip a line after the column header
            continue
        else:  # data, baby!
            line_data = [dat.strip() for dat in l.split(sep=sep)]
            if data_type == "MKS":
                # first cell is an absolute timestring; convert to epoch time
                timestring = line_data[0].replace(".", ":")[1:-1]
                if "Annotations" in timestring:  # last line actually doesn't have data
                    continue
                yyyy, dd, mm = timestring[6:10], timestring[0:2], timestring[3:5]
                timestring = yyyy + "/" + mm + "/" + dd + timestring[-9:]
                t = timestring_to_epoch_time(timestring, verbose=False)
                line_data[0] = str(t)
            if not len(line_data) == len(col_headers):
                # print('Mismatch between col_headers and data on line ' + str(nl) + ' of ' + title) #debugging
                pass
            for col, x in zip(col_headers, line_data):
                if not col in data["data_cols"]:
                    # don't try adding data to a column that has already been determined not to have data!
                    continue
                try:
                    x = float(x)
                except ValueError:
                    if x == "":
                        continue  # added 17C22 to deal with data acquisition crashes.
                    # maybe the file uses commas as decimal separators
                    try:
                        if verbose and not col in commacols:
                            print(
                                f"ValueError on value {x} in column {col} line {nl}\n"
                                + "Checking if yo're using commas as decimals in that column..."
                            )
                        x = x.replace(".", "")
                        # ^ in case there's also '.' as thousands separator, just get rid of it.
                        x = x.replace(",", ".")  # put '.' as decimals
                        x = float(x)
                    except ValueError:
                        # not numeric at all: drop the column from data_cols
                        if verbose:
                            print(list(zip(col_headers, line_data)))
                            print(
                                f"{title} in text_to_data: \nRemoved {col} from data columns"
                                + f" because ofvalue '{x}' at line {nl}\n"
                            )
                        data["data_cols"].remove(col)
                    else:
                        if not col in commacols:
                            if verbose:
                                print("... and you were, dumbass. I" "ll fix it.")
                            commacols += [col]
                data[col].append(x)
    if loop:
        data["data_cols"].add("loop number")
    data["title"] = title
    data["header"] = header_string
    data["timestamp"] = timestamp
    data["date"] = date
    if tstamp is None:
        tstamp = timestring_to_epoch_time(
            timestamp, date, tz=tz, verbose=verbose, out="tstamp"
        )
    if data_type == "EC":
        # rename potential and current variables,
        # so that synchronize can combine current data from different EC-lab techniques:
        if "<I>/mA" in data["data_cols"] and "I/mA" not in data["data_cols"]:
            # so that synchronize can combine current data from different EC-lab techniques
            data["data_cols"].add("I/mA")
            data["I/mA"] = data["<I>/mA"].copy()
        if "<Ewe>/V" in data["data_cols"] and "Ewe/V" not in data["data_cols"]:
            data["data_cols"].add("Ewe/V")
            data["Ewe/V"] = data["<Ewe>/V"].copy()
        # and populate timecols
        for col in data["data_cols"]:
            data["timecols"][col] = "time/s"
        data["t_str"] = "time/s"
    elif data_type == "MS":
        data["t_str"] = "<mass>-x"
    elif data_type == "RGA":
        for col in data["data_cols"]:
            data["timecols"][col] = "Time (s)"
        data["t_str"] = "Time (s)"
    elif data_type == "MKS":
        print(data.keys())  # debugging
        tstamp = data["Time"][0]
        data["Time"] = np.array(data["Time"]) - tstamp
        timestamp = epoch_time_to_timestamp(tstamp)
        date = None
        for col in data["data_cols"]:
            data["timecols"][col] = "Time"
        data["t_str"] = "Time"
    # I kind of think timecols should be defined for everything here, but it might not be
    data["timezone"] = tz
    data["tstamp"] = tstamp  # UNIX epoch time, for proper synchronization! :D
    if verbose:
        print("\nfunction 'text_to_data' finished!\n\n")
    return data
def import_data(*args, **kwargs):
    """Deprecated alias for load_from_file(); nags, then delegates."""
    deprecation_notice = (
        "'import_data' is now called 'load_from_file'!\n"
        + "Remember that next time, goof."
    )
    print(deprecation_notice)
    return load_from_file(*args, **kwargs)
def load_from_file(
    full_path_name="current",
    title="file",
    tstamp=None,
    timestamp=None,
    data_type="EC",
    tz=None,
    name=None,
    verbose=True,
):
    """
    This method will organize the data in a file useful for
    electropy into a dictionary as follows (plus a few more keys)
    {'title':title, 'header':header, 'timestamp':timestamp,
    'data_cols':[colheader1, colheader2, ...],
    colheader1:[data1], colheader2:[data2]...}

    full_path_name -- path to the file (see import_text for special values)
    title -- dataset title; 'file' uses the file's own name
    tstamp -- epoch time; if given, overrides whatever is parsed from the file
    timestamp, tz -- timing metadata, passed to text_to_data
    data_type -- file format; selects parsing and column renaming
    name -- dataset name; defaults to the parsed title
    verbose -- print progress information if True
    """
    if verbose:
        print("\n\nfunction 'load_from_file' at your service!\n")
    if title == "file":
        folder, title = os.path.split(full_path_name)
        if folder == "":
            folder = "."
    if data_type == "PVMS":  # I want eventually to have one of these for everything
        from PVMassSpec import read_PVMS
        data = read_PVMS(full_path_name)
    else:
        file_lines = import_text(full_path_name, verbose)
        try:
            data = text_to_data(  # I want to split up the text_to_data function
                file_lines=file_lines,
                title=title,
                data_type=data_type,
                timestamp=timestamp,
                tz=tz,
                tstamp=tstamp,
                verbose=verbose,
            )
        except Exception as e:
            # parsing failed: return a stub dataset rather than crashing
            print(
                f"COULD NOT PARSE {full_path_name}!!! Got error = {e}. "
                "Returning an empty dictionary."
            )
            data = {"data_type": data_type, "title": "empty"}
            return data
    if tstamp is not None:  # then it overrides whatever text_to_data came up with.
        data["tstamp"] = tstamp
    elif data["tstamp"] is None:
        # fall back on the file name, then on the file's creation time
        print(f"WARNING: no tstamp found in {full_path_name}. Looking in file name.")
        tstamp = timestring_to_epoch_time(full_path_name)
        if tstamp is None:
            print(
                "WARNING: no tstamp found in "
                + full_path_name
                + " file name either. Using file creation time."
            )
            tstamp = get_creation_time(full_path_name, verbose=verbose)
        data["tstamp"] = tstamp
    if "data_cols" not in data or len(data["data_cols"]) == 0:
        print("WARNING! empty dataset")
        data["empty"] = True
    else:
        numerize(data)
        data["empty"] = False
    if name is None:
        name = data["title"]
    data["name"] = name
    # per-format post-processing: rename columns to the EC_MS conventions
    if data["empty"]:
        print("WARNING! load_from_file is returning an empty dataset")
    elif data_type == "SI":
        from .Combining import rename_SI_cols
        print("RENAMING SI COLS!")  # debugging
        rename_SI_cols(data)
    elif data_type == "RGA":
        from .Combining import rename_RGA_cols
        rename_RGA_cols(data)
    elif data_type == "CHI":
        from .Combining import parse_CHI_header, rename_CHI_cols, timeshift
        parse_CHI_header(data)
        rename_CHI_cols(data)
        # CHI stamps the measurement *finish* time, so shift back by duration
        dt = data["time/s"][-1] - data["time/s"][0]
        timeshift(data, dt)
    elif data_type == "ULM":
        from .Combining import rename_ULM_cols
        rename_ULM_cols(data)
    elif data_type == "PVMS":
        from .PVMassSpec import rename_PVMS_cols
        rename_PVMS_cols(data)
    elif data_type == "MKS":
        from .Combining import rename_MKS_cols
        rename_MKS_cols(data)
    if verbose:
        print("\nfunction 'load_from_file' finished!\n\n")
    return data
def load_EC_set(
    directory,
    EC_files=None,
    tag="01",
    suffix=None,
    data_type="EC",
    verbose=True,
    tz=None,
    exclude=None,
    fix_CP=False,
):
    """
    Load and combine a set of EC files from a directory.

    inputs:
        directory - path to folder containing your data, string
        EC_files - list of EC file names (or a single name), list or str
           OR
        tag - shared start of EC files you want to load and combine, str AND
        suffix - ending of files, by default .mpt ('.txt' for CHI data)
        data_type - type of EC data. By default 'EC', meaning Biologic EC-Lab files
        tz - timezone, usually not needed
        verbose - makes the function talk to you.
        exclude - substring or list of substrings; tag-matched files
                  containing any of them are skipped
        fix_CP - if True, rebuild Ewe/V from Ewe-Ece/V + <Ece>/V for CP files
    output
        EC_data - a dataset with the data from all specified EC files combined
                  and sorted based on time. Additional columns loop_number and
                  file_number are added to the dataset if relevant.
    """
    if verbose:
        print("\n\nfunction 'load_EC_set' at your service!\n")
    from .Combining import synchronize, sort_time
    if exclude is None:
        # None sentinel instead of a mutable default argument
        exclude = []
    if suffix is None:
        if data_type == "EC":
            suffix = ".mpt"
        elif data_type == "CHI":
            suffix = ".txt"
    lslist = os.listdir(directory)
    if EC_files is None:
        EC_files = [f for f in lslist if f.startswith(tag) and f.endswith(suffix)]
        if isinstance(exclude, str):
            exclude = [exclude]
        for excl in exclude:
            EC_files = [f for f in EC_files if excl not in f]
    elif isinstance(EC_files, str):
        EC_files = [EC_files]
    print(f"lslist = {lslist}\nEC_files = {EC_files}")  # debugging
    EC_datas = []
    for f in EC_files:
        try:
            data = load_from_file(
                Path(directory) / f, data_type=data_type, tz=tz, verbose=verbose
            )
        except OSError:
            print("WARNING: problem importing " + f + ". Continuing.")
            continue
        if fix_CP and "CP" in f:
            # chronopotentiometry files record Ewe-Ece rather than Ewe directly
            try:
                data["Ewe/V"] = data["Ewe-Ece/V"] + data["<Ece>/V"]
            except KeyError:
                print(
                    "WARNING! Could not fix CP for "
                    + f
                    + " because missing "
                    + " either Ece/V or Ewe-Ece/V"
                )
        EC_datas += [data]
    EC_data = synchronize(EC_datas, verbose=verbose, append=True, t_zero="first", tz=tz)
    if "loop number" in EC_data["data_cols"]:
        sort_time(EC_data, verbose=verbose)  # note, sort_time no longer returns!
    if verbose:
        print("\nfunction 'load_EC_set' finished!\n\n")
    return EC_data
def import_EC_set(*args, **kwargs):
    """Deprecated alias kept for backwards compatibility; use load_EC_set."""
    print("import_EC_set has been renamed load_EC_set")
    return load_EC_set(*args, **kwargs)
def download_cinfdata_set(
    setup="sniffer", group_id=None, grouping_column=None, **kwargs
):
    """
    Download a group of MS datasets from the CINF database and combine the
    time scans into one synchronized dataset.

    setup -- name of the measurement setup in the database
    group_id -- value of the grouping column identifying the group
    grouping_column -- column to group by; if None, the last keyword argument
        is popped and used as the (grouping_column, group_id) pair
    Returns the output of synchronize() over all downloaded time scans.
    Requires the cinfdata module and database access at runtime.
    """
    if grouping_column is None:
        grouping_column, group_id = kwargs.popitem()
    from .Combining import synchronize
    try:
        from cinfdata import Cinfdata
    except ImportError:
        print(
            "the cinfdata module must be on your python path. It's here: \n"
            + "https://github.com/CINF/cinf_database/blob/master/cinfdata.py"
        )
    try:
        cinfd = Cinfdata(
            setup,
            grouping_column=grouping_column,
            allow_wildcards=True,
            label_column="mass_label",
        )
    except:
        raise  # untill I know exactly which error I'm trying to catch.
        print("couldn't connect. You should run gstm")
        # os.system('gstm')
        raise RuntimeError("Couldn't connect to cinfdata!")
    # obj = cinfd.get_metadata_group('2018-03-30 14:13:17')
    # all_datasets = cinfd.get_metadata_group('%')
    # the_list = [(ID, d['time'], d['comment']) for ID, d in all_datasets.items()]
    # print(the_list)
    obj = cinfd.get_metadata_group(group_id)
    # print(str(obj)) #
    idlists = {}  # keys will be time as string. values will be corresponding id's
    for key, value in obj.items():
        # label = value['mass_label']
        # print(label)
        timestamp = str(value["time"])
        if timestamp not in idlists:
            idlists[timestamp] = []
        idlists[timestamp] += [value["id"]]
    datasets = {}
    for timestamp, idlist in idlists.items():
        if len(idlist) == 0:
            print("No data associated with timestamp '" + timestamp + "'.")
            continue
        dataset = {"title": timestamp, "data_type": "MS"}
        metadatas = dict([(i, cinfd.get_metadata(i)) for i in idlist])
        unixtimes = [metadatas[i]["unixtime"] for i in idlist]
        # everything grouped under one timestamp must share a start time
        if len(set(unixtimes)) > 1:
            msg = "unix times don't match for timestamp '" + timestamp + "'!"
            raise ValueError(msg)
        dataset["tstamp"] = unixtimes[0]
        dataset["timestamp"] = metadatas[idlist[0]]["time"].strftime("%H:%M:%S")
        labels = [metadatas[i]["mass_label"] for i in idlist]
        if "Mass Scan" in labels:
            dataset["scan_type"] = "mass"
        else:
            dataset["scan_type"] = "time"
        dataset["data_cols"] = set()
        dataset["timecols"] = {}
        for i in idlist:  # avoiding id since it's got a builtin meaning
            data = cinfd.get_data(i)
            label = metadatas[i]["mass_label"]
            if len(data.shape) == 1:
                # 1-D data: store directly under its label
                dataset[label] = data
                dataset["data_cols"].add(label)
            elif data.shape[1] == 2:
                # 2-D data: split into '<label>-x' (time) and '<label>-y'
                x = data[:, 0]
                y = data[:, 1]
                x_label = label + "-x"
                y_label = label + "-y"
                dataset["timecols"][y_label] = x_label  # Fixed 20B26!!!
                dataset[x_label] = x * 1e-3  # cinfdata saves time in ms!!!
                dataset[y_label] = y
                dataset["data_cols"].add(x_label)
                dataset["data_cols"].add(y_label)
        datasets[timestamp] = dataset
    timescans = [
        dataset for dataset in datasets.values() if dataset["scan_type"] == "time"
    ]
    combined = synchronize(timescans, t_zero="first")
    return combined
def get_xy(
    data, xcol=None, ycol=None, label=None,
):
    """Return the (x, y) column pair for *label* (or explicit column names).

    When xcol/ycol are not given, they default to label + '-x'/'-y'.
    """
    x_key = xcol if xcol is not None else label + "-x"
    y_key = ycol if ycol is not None else label + "-y"
    return data[x_key], data[y_key]
def set_xy(
    data, x, y, xcol=None, ycol=None, label=None,
):
    """Store x and y in *data* under *label*'s (or explicit) column names.

    When xcol/ycol are not given, they default to label + '-x'/'-y'.
    """
    x_key = xcol if xcol is not None else label + "-x"
    y_key = ycol if ycol is not None else label + "-y"
    data[x_key] = x
    data[y_key] = y
def remove_repeats(data, xcol=None, ycol=None, label=None):
    """Drop the non-increasing (repeated) stretch of a column pair in place.

    Reads the (x, y) pair, keeps only the monotonically increasing pass via
    only_while_increasing(), and writes the cleaned pair back into *data*.
    """
    x_raw, y_raw = get_xy(data, xcol, ycol, label)
    x_clean, y_clean = only_while_increasing(x_raw, y_raw)
    set_xy(data, x=x_clean, y=y_clean, xcol=xcol, ycol=ycol, label=label)
def only_while_increasing(x=None, y=None):
    """
    Remove the repeats if a dataset goes back and repeats, as happens in
    the Analog In anomaly first observed 18D05.
    Does so in a vectorized way (only loops over "cliff points" where x falls).
    x is monotonically increasing in the returned data.

    x, y -- arrays (or sequences) of equal length
    Returns the (x, y) pair with everything after each fall in x masked out
    until x exceeds the value it fell from.
    """
    x = np.asarray(x)  # also accepts plain sequences now
    y = np.asarray(y)
    if x.size == 0:
        # nothing to filter; also avoids IndexError on x[-1] below
        return x, y
    x_up = np.append(x[1:], x[-1] + 1)  # x shifted up one, so that x_up[i] = x[i+1]
    cliff_points = np.where(x_up < x)[0]  # points right before a drop in x
    mask = np.tile(True, np.size(x))
    for point in cliff_points:
        x_cliff = x[point]
        # drop everything after the cliff until x exceeds the pre-fall value
        mask[point:] = np.logical_and(mask[point:], x[point:] > x_cliff)
    return x[mask], y[mask]
def import_set(
    directory,
    MS_file="QMS.txt",
    MS_data=None,
    t_zero="start",
    EC_file=None,
    tag="01",
    cutit=False,
    cut_buffer=60,
    verbose=True,
    override=False,
):
    """
    Import and synchronize the MS and EC data in a directory into one dataset.

    directory -- folder containing the data files
    MS_file -- name (or list of names) of MS file(s); ignored if MS_data given
    MS_data -- an already-imported MS dataset, to avoid re-importing
    t_zero -- where to put time zero when synchronizing the combined set
    EC_file -- name (or list of names) of EC-lab .mpt file(s); if None, all
        .mpt files whose names start with *tag* are used
    cutit, cut_buffer, override -- passed through to synchronize()
    verbose -- print progress information if True
    Returns the combined, synchronized dataset.
    """
    from .Combining import synchronize, sort_time
    if verbose:
        print("\n\nfunction import_set at your service!\n")
    lslist = os.listdir(directory)
    if MS_data is None:
        if type(MS_file) is str:
            MS_file = [MS_file]
        MS_datas = [
            load_from_file(directory + os.sep + f, data_type="MS", verbose=verbose)
            for f in MS_file
        ]
        MS_data = synchronize(MS_datas, verbose=verbose)
        if len(MS_datas) > 1:
            sort_time(MS_data)
    if EC_file is None:
        EC_file = [f for f in lslist if f[:2] == tag and f[-4:] == ".mpt"]
    elif type(EC_file) is str:
        EC_file = [EC_file]
    EC_datas = [
        load_from_file(directory + os.sep + f, verbose=verbose, data_type="EC")
        for f in EC_file
    ]
    EC_data = synchronize(EC_datas, verbose=verbose)
    if "loop number" in EC_data["data_cols"]:
        sort_time(EC_data, verbose=verbose)  # note, sort_time no longer returns!
    # finally, merge the MS and EC datasets onto a common time axis
    data = synchronize(
        [MS_data, EC_data],
        t_zero=t_zero,
        verbose=verbose,
        override=override,
        cutit=cutit,
        cut_buffer=cut_buffer,
    )
    if verbose:
        print("\nfunction import_set finished!\n\n")
    return data
def save_as_text(
    filename,
    dataset,
    cols="all",
    mols=None,
    tspan="all",
    header=None,
    N_chars=None,
    timecols=None,
    **kwargs,
):
    """
    Save chosen data columns of a dataset as aligned, comma-separated text.

    filename -- path of the text file to write
    dataset -- dataset dictionary to pull columns from
    cols -- list of column names to save, or 'all' for dataset['data_cols']
    mols -- list of Molecule objects; each one's calibrated flux is appended
    tspan -- time interval to cut each (time, value) pair to; 'all' keeps all
    header -- string or list of strings written at the top of the file
    N_chars -- field width; defaults to the longest column name
    timecols -- optional mapping of column name -> its time column name
    kwargs is fed directly to Molecule.get_flux()

    Fix: mols and timecols previously used mutable default arguments
    ([] and {}); they now default to None and are initialized per call.
    """
    from .Combining import get_timecol, cut
    if mols is None:
        mols = []
    if timecols is None:
        timecols = {}
    lines = []
    if type(header) is list:
        lines += header
    elif type(header) is str:
        lines += [header]
    if cols == "all":
        cols = list(dataset["data_cols"])
    if N_chars is None:
        N_chars = max([len(col) for col in cols])
    col_header = ""
    i_col = 0
    columns = []
    datas = {}
    for col in cols:
        # find the time column that belongs with this data column
        if col in timecols:
            tcol = timecols[col]
        else:
            tcol = get_timecol(col)
        if tcol in dataset and tcol not in columns:  # don't want same tcol twice
            col_header += ("{0:>" + str(N_chars) + "s},\t").format(tcol)
            columns += [tcol]
            i_col += 1
        if col in dataset and col not in columns:  # don't want same tcol twice
            col_header += ("{0:>" + str(N_chars) + "s},\t").format(col)
            columns += [col]
            i_col += 1
        else:
            # NOTE(review): this else also fires for a *duplicate* col already
            # in columns, in which case the message is misleading -- confirm.
            print(col + " not in dataset. ignoring it.")
            continue
        if tcol in columns:
            x, y = dataset[tcol].copy(), dataset[col].copy()
            if tspan is not False and not tspan == "all":
                x, y = cut(x, y, tspan=tspan)
            datas[tcol], datas[col] = x, y
        else:
            print(
                "timecol '"
                + tcol
                + "' for col '"
                + col
                + "' is not in dataset, so can't cut it."
            )
            datas[col] = dataset[col].copy()
    for mol in mols:
        # append each molecule's calibrated flux as a '<name>_<mass>-x/-y' pair
        tcol = mol.name + "_" + mol.primary + "-x"
        col = mol.name + "_" + mol.primary + "-y"
        x, y = mol.get_flux(dataset, tspan=tspan, **kwargs)
        datas[tcol] = x
        datas[col] = y
        col_header += ("{0:>" + str(N_chars) + "s},\t").format(tcol)
        columns += [tcol]
        col_header += ("{0:>" + str(N_chars) + "s},\t").format(col)
        columns += [col]
    lines += [col_header + "\n"]
    # write row by row, padding columns that have run out of data
    i_data = 0
    finished = False
    while not finished:
        N_unfinished = 0
        line = ""
        for col in columns:
            try:
                d = datas[col][i_data]
                line += ("{0:>" + str(N_chars) + ".6g},\t").format(d)
                N_unfinished += 1
            except IndexError:
                line += " " * N_chars + ",\t"
        if N_unfinished == 0:
            finished = True
        else:
            lines += [line + "\n"]
            i_data += 1
    with open(filename, "w") as f:
        f.writelines(lines)
def save_results_as_text(name, cols="all", **kwargs):
    """Write the result vectors passed as keyword arguments to a text file.

    name -- path of the file to write
    cols -- which keyword arguments to save, or 'all' for every one
    Each column value is formatted '{:6.4g}'; columns shorter than the first
    one are padded with empty cells. Cells are separated by ', \\t'.
    """
    if cols == "all":
        cols = list(kwargs.keys())
    separator = ", \t"
    out_lines = [separator.join(cols) + "\n"]
    n_rows = len(kwargs[cols[0]])  # the first column sets the row count
    for row in range(n_rows):
        cells = []
        for col in cols:
            try:
                value = kwargs[col][row]
            except IndexError:
                cells.append("")  # this column ran out of data
            else:
                cells.append("{:6.4g}".format(value))
        out_lines.append(separator.join(cells) + "\n")
    with open(name, "w") as f:
        f.writelines(out_lines)
def import_folder(directory, tags=None, MS_file=None, verbose=True):
    """
    Import everything you need from a directory at once.
    Copied 19G24 from commit Submitting Trimarco2018 e12b8e9 on Dec 19, 2017

    tags = None imports as one dataset
    tags = 'all' separates by EC file tag, imports everything
    tags = ['01','02'] imports two datasets, one for each tag
    MS_file -- MS file name(s); if None, every file containing 'QMS' is used

    Fixes: the progress prints said 'imoprt_folder'; MS files are now loaded
    via load_from_file directly instead of the deprecated import_data alias
    (which printed a deprecation nag on every file).
    """
    from .Combining import synchronize
    if verbose:
        print("\n\nfunction 'import_folder' at your service!\n")
        print("Importing from '" + directory + "'")
    if directory[-1] == os.sep:
        directory = directory[:-1]  # import_set adds the os.sep
    lslist = os.listdir(directory)
    if MS_file is None:
        MS_file = [f for f in lslist if "QMS" in f]
    # importing MS data here rather than in import_set to avoid redundant importing
    MS_datas = [
        load_from_file(directory + os.sep + f, data_type="MS", verbose=verbose)
        for f in MS_file
    ]
    MS_data = synchronize(MS_datas, verbose=verbose)
    # if len(MS_datas) > 1: #not necessary, as synchronize sorts by recstart
    #    sort_time(MS_data)
    if tags is None:
        EC_file = [f for f in lslist if ".mpt" in f]
        Datasets = import_set(
            directory, MS_data=MS_data, EC_file=EC_file, verbose=verbose
        )
        # sort_time(Datasets) #probably not necessary, as synchronize sorts by recstart
    else:
        if tags == "all":
            # one dataset per distinct two-character EC file prefix
            taglist = {f[0:2] for f in lslist if f[-4:] == ".mpt"}
        else:
            taglist = tags  # Renamed so I keep info on what tags originally was
        Datasets = dict(
            [
                (t, import_set(directory, MS_data=MS_data, tag=t, verbose=verbose))
                for t in taglist
            ]
        )
    if verbose:
        print("\n\nfunction 'import_folder' finished!\n")
    return Datasets
def download_data(
    IDs="today",
    timestamps=None,
    data_type="fullscan",
    timestamp_interval=None,
    comment=None,
    connect=None,
    verbose=True,
):
    """
    Copied 19G25 from commit 17G28 better combining and plotting b70b43b on Jul 28, 2017
    ... but it seems to be broken :( Not wasting time on this now.
    Returns data columns matching a certain set of ID's.

    IDs -- a measurement id or list of ids to download
    connect -- dict of overrides for the MySQL connection parameters
    Only data_type='fullscan' is implemented.
    NOTE(review): timestamps, timestamp_interval and comment are accepted but
    never used here -- presumably placeholders; confirm before relying on them.

    Fix: connect previously defaulted to a mutable {} that the body mutates
    (filling in defaults), so defaults accumulated on the shared dict across
    calls; it now defaults to None and a fresh dict is made per call.
    """
    import sys
    if connect is None:
        connect = {}
    try:
        import MySQLdb  # known to pip as mysqlclient
        # CONNECT_EXCEPTION = MySQLdb.OperationalError
        # LOG.info('Using MySQLdb as the database module')
        print("imported MySQLdb no problem!")
    except ImportError:
        try:
            # .. if that fails, try with pymysql
            import pymysql as MySQLdb
            MySQLdb.install_as_MySQLdb()
            # CONNECT_EXCEPTION = MySQLdb.err.OperationalError
            # LOG.info('Using pymysql as the database module')
            if sys.version_info.major > 2:
                # LOG.info('pymysql is known to be broken with Python 3. Consider '
                # 'installing mysqlclient!')
                pass
        except:
            print("Error, can't connect to database!")
    connect_0 = dict(
        host="servcinf-sql",  # your host, usually localhost, servcinf would also work, but is slower (IPv6)
        # port=9995, # your forwording port
        user="cinf_reader",  # your username
        passwd="<PASSWORD>",  # your password
        db="cinfdata",
    )  # name of the data base
    for key, val in connect_0.items():
        if key not in connect:
            connect[key] = val
    if data_type == "fullscan":
        data_string_template = (
            "SELECT x,y FROM xy_values_sniffer where measurement = {0} order by id"
        )
    # try:
    print("Connecting to CINF database...")
    cnxn = MySQLdb.connect(**connect)
    cursor = cnxn.cursor()
    print("Connection successful!")
    # except:
    #    print('Connection failed!')
    if type(IDs) is int:
        IDs = [IDs]
    datasets = {}
    for ID in IDs:
        data_string = data_string_template.format(str(ID))
        cursor.execute(data_string)
        raw_data = cursor.fetchall()
        list_data = np.array(raw_data)
        xy_data = np.swapaxes(list_data, 0, 1)  # -> row 0 is x, row 1 is y
        datasets[ID] = xy_data
    return datasets
|
ScottSoren/EC_MS | src/EC_MS/MS.py | <reponame>ScottSoren/EC_MS
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 10:28:25 2020
Peak class copied from EC_Xray commit 13b3dbf
@author: scott
"""
import os, re
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
data_directory = os.path.dirname(os.path.realpath(__file__)) + os.sep + "data"
def gauss(x, center, sigma, height):
    """Evaluate a Gaussian of the given center, width and peak height at x."""
    z = (x - center) / sigma
    return height * np.exp(-0.5 * z ** 2)
class Peak:
    """A single mass-spec peak: x/y data with background handling and
    gaussian fitting.

    Copied from https://github.com/ScottSoren/EC_Xray/blob/master/XRD.py
    on April 1, 2020.

    Attributes:
        x, y (np.ndarray): the (optionally xspan-restricted) data
        y_bg (np.ndarray): the current background (zeros until one is set)
        bg (bool): True when y_bg has been subtracted from y in place
        center, sigma, height, fit, integral_f: set by fit_gauss
        integral: set by get_integral
    """

    def __init__(self, x, y, xspan=None, name=None, color="k"):
        """Store the data, restricted to the open interval xspan if given.

        Args:
            x (np.ndarray): m/z (or scan) values
            y (np.ndarray): corresponding signal values
            xspan: sequence whose first and last entries bound the window;
                only points with xspan[0] < x < xspan[-1] are kept
            name (str): optional label for the peak
            color (str): plotting color
        """
        self.name = name
        self.xspan = xspan
        self.color = color
        if xspan is not None:
            mask = np.logical_and(xspan[0] < x, x < xspan[-1])
            x, y = x[mask], y[mask]
        self.x = x
        self.y = y
        self.y_bg = np.zeros(x.shape)  # no background until one is set
        self.bg = False  # whether y_bg has been subtracted from y in place

    def get_background(self, y_bg=None, bg_mode="linear", endpoints=2):
        """Return (without storing) a background array for the peak.

        This can be much simpler than the ridiculous background subtraction
        stuff I had going in EC_Xray because mass spec peaks always come
        at integer m/z and so it's easy to surround the peak.

        Args:
            y_bg (np.ndarray): an explicit background, returned as-is.
                (fix: previously this was silently overwritten whenever
                bg_mode was 'linear')
            bg_mode (str): 'linear' draws a line through the endpoint means
            endpoints (int): number of points averaged at each end
        """
        if y_bg is not None:
            return y_bg
        x, y = self.x, self.y
        if bg_mode == "linear":
            # draw a background line through the endpoints
            x_start, x_finish = np.mean(x[:endpoints]), np.mean(x[-endpoints:])
            y_start, y_finish = np.mean(y[:endpoints]), np.mean(y[-endpoints:])
            y_bg = y_start + (x - x_start) / (x_finish - x_start) * (y_finish - y_start)
        return y_bg

    def set_background(self, bg=True, y_bg=None, bg_mode="linear", endpoints=2):
        """Store a background in self.y_bg without subtracting it from y.

        bg=False clears the stored background to zeros.
        """
        self.reset()
        if bg:
            y_bg = self.get_background(y_bg=y_bg, bg_mode=bg_mode, endpoints=endpoints)
            self.y_bg = y_bg
        else:
            self.y_bg = np.zeros(self.x.shape)

    def subtract_background(self, y_bg=None, bg_mode="linear", endpoints=2):
        """Subtract a background from y in place; undo with reset()."""
        self.reset()  # to avoid losing the ability to restore the original
        # by subtracting a new background from background-subtracted data
        # fix: this called self.get_bacground (typo), raising AttributeError
        y_bg = self.get_background(y_bg=y_bg, bg_mode=bg_mode, endpoints=endpoints)
        self.y_bg = y_bg
        self.bg = True
        self.y = self.y - y_bg

    def reset(self):
        """Restore y to its pre-subtraction values.

        so far only implemented for masses.
        """
        if self.bg:
            y_bg = self.y_bg
            self.y = self.y + y_bg
            self.bg = False

    def get_integral(self, *args, **kwargs):
        """Return the background-subtracted integral of the peak.

        With mode='gauss' or mode='fit' in kwargs, a gaussian is fitted and
        its analytical integral is returned instead of the trapezoidal one.
        kwargs may also contain ax (a matplotlib axis or 'new') to plot.
        """
        if "mode" in kwargs and kwargs["mode"] in ["gauss", "fit"]:
            if "ax" in kwargs:
                self.fit_gauss(ax=kwargs["ax"])
            return self.integral_f
        x, y = self.x, self.y
        # fix: this read self.background, an attribute that is never set
        # anywhere in the class. Also, if the background has already been
        # subtracted from y, it must not be subtracted a second time here.
        background = np.zeros(x.shape) if self.bg else self.y_bg
        # np.trapz was removed in numpy 2.0 in favor of np.trapezoid
        trapz = getattr(np, "trapezoid", None) or np.trapz
        integral = trapz(y - background, x)
        self.integral = integral
        if "ax" in kwargs:
            ax = kwargs["ax"]
            if ax == "new":
                fig, ax = plt.subplots()
            if ax is not None:
                ax.plot(x, y, "k.")
                ax.plot(x, background, "b--")
                ax.fill_between(x, background, y, where=y > background, color="g")
        return integral

    def fit_gauss(
        self, center=None, sigma=None, ax=None, y_bg=None, bg_mode=None, endpoints=2
    ):
        """Fit a gaussian to the background-subtracted peak.

        center and/or sigma can be fixed by passing them; whatever is left
        free is fitted. Stores center, sigma, height, fit, and integral_f
        on self, and returns (center, sigma, height).

        Args:
            center (float): fix the gaussian center at this value
            sigma (float): fix the gaussian width at this value
            ax: matplotlib axis (or 'new') to plot data, background and fit
            y_bg (np.ndarray): explicit background, passed to set_background
            bg_mode (str): background mode, passed to set_background
            endpoints (int): endpoints for the linear background
        """
        # fix: was `if y_bg or bg_mode:`, which raises ValueError when
        # y_bg is a numpy array (ambiguous truth value)
        if y_bg is not None or bg_mode is not None:
            self.set_background(y_bg=y_bg, bg_mode=bg_mode, endpoints=endpoints)
        x, y_bg = self.x, self.y_bg
        # fix: if the background was already subtracted from y (self.bg),
        # it must not be subtracted again here
        y = self.y if self.bg else self.y - y_bg
        guess_c = (x[-1] + x[0]) / 2
        guess_s = (x[-1] - x[0]) / 2
        guess_h = max(y)
        if center is not None and sigma is not None:
            # only the height is free

            def gauss_i(x, height):
                return gauss(x, center=center, sigma=sigma, height=height)

            guess = guess_h
            popt, pcov = curve_fit(gauss_i, x, y, p0=guess)
            height = popt[0]
        elif center is not None:

            def gauss_i(x, sigma, height):
                return gauss(x, center=center, sigma=sigma, height=height)

            guess = [guess_s, guess_h]
            popt, pcov = curve_fit(gauss_i, x, y, p0=guess)
            sigma, height = popt[0], popt[1]
        elif sigma is not None:

            def gauss_i(x, center, height):
                return gauss(x, center=center, sigma=sigma, height=height)

            guess = [guess_c, guess_h]
            popt, pcov = curve_fit(gauss_i, x, y, p0=guess)
            center, height = popt[0], popt[1]
        else:

            def gauss_i(x, center, sigma, height):
                return gauss(x, center=center, sigma=sigma, height=height)

            guess = [guess_c, guess_s, guess_h]
            try:
                popt, pcov = curve_fit(gauss_i, x, y, p0=guess)
                center, sigma, height = popt[0], popt[1], popt[2]
            except RuntimeError:
                # fall back on the initial guess if the fit fails
                center, sigma, height = guess
        sigma = abs(sigma)  # a gaussian is symmetric in the sign of sigma
        # print(f'center={center}, sigma={sigma}, height={height}') # debugging
        fit = gauss(x, center, sigma, height)
        integral_f = np.sqrt(2 * np.pi) * height * sigma
        self.center, self.sigma, self.height = center, sigma, height
        self.fit, self.integral_f = fit, integral_f
        if ax is not None:
            if ax == "new":
                fig, ax = plt.subplots()
            ax.plot(x, y_bg, "b--")
            ax.plot(x, y + y_bg, "k.")
            ax.plot(x, fit + y_bg, "r--")
        return center, sigma, height

    def get_fwhm(self):
        """Return the full width at half max of the last gaussian fit."""
        fwhm = 2 * np.sqrt(2 * np.log(2)) * self.sigma
        return fwhm
|
ScottSoren/EC_MS | src/EC_MS/SPEC.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 15:58:57 2020
@author: scott
This module contains stuff that was copied into Data_Importing from EC_Xray.
It should probably just be removed from EC_MS altogether.
"""
import re
from .parsing_tools import (
get_creation_timestamp,
timetag_to_timestamp,
get_empty_set,
numerize,
remove_comments,
float_match,
)
def load_from_csv(filepath, multiset=False, timestamp=None, verbose=True):
    """Load a SPEC csv file into an EC_MS-style data dictionary.

    This function is made a bit more complicated by the fact that some csvs
    seem to have multiple datasets appended, with a new col_header line as
    the only indication. If multiset=True, this will separate them and
    return them as a list.

    if timestamp = None, the timestamp will be the date created

    I hate that SPEC doesn't save absolute time in a useful way.

    Args:
        filepath (str): path to the csv file. If timestamp is None and the
            file name contains a time tag like '13h42', the tag is used as
            the timestamp; otherwise the file creation time is used.
        multiset (bool): whether to split appended datasets apart and
            return them all as a list.
        timestamp: timestamp stored in the dataset(s); derived from the
            file if None.
        verbose (bool): whether to print call progress to stdout (the
            per-line diagnostics print regardless).

    Returns:
        dict (or list of dict if multiset=True): dataset(s) with one list
        (numerized to arrays by `numerize`) per column, plus the metadata
        keys supplied by `get_empty_set`.
    """
    if verbose:
        print("function 'load_from_csv' at your service!")

    if timestamp is None:
        # look for an 'HHhMM' time tag in the file name
        a = re.search("[0-9]{2}h[0-9]{2}", filepath)
        if a is None:
            print("trying to read creation time")
            timestamp = get_creation_timestamp(filepath)
        else:
            print("getting timestamp from filename " + filepath)
            timestamp = timetag_to_timestamp(filepath)

    with open(filepath, "r") as f:  # read the file!
        lines = f.readlines()
    # first line is the column headers
    colheaders = [col.strip() for col in lines[0].split(",")]
    data = get_empty_set(
        set(colheaders), title=filepath, timestamp=timestamp, data_type="SPEC"
    )
    datasets = []

    for line in lines[1:]:  # put data in lists!
        vals = [val.strip() for val in line.split(",")]
        not_data = []  # columns on this line that fail float conversion
        newline = {}
        for col, val in zip(colheaders, vals):
            if col in data["data_cols"]:
                try:
                    val = float(val)
                except ValueError:
                    print("value " + val + " of col " + col + " is not data.")
                    not_data += [col]
            newline[col] = val
        if len(not_data) == len(data["data_cols"]):
            # nothing on this line parsed as a number: it must be a new
            # col_header line, i.e. the start of an appended dataset
            print("it looks like there is another data set appended!")
            if multiset:
                print("continuing to next set.")
                numerize(data)
                datasets += [data.copy()]
                # this line's values are the next set's column headers
                colheaders = [val.strip() for val in vals]
                data = get_empty_set(
                    set(colheaders), timestamp=timestamp, data_type="SPEC"
                )
                continue
            else:
                print("returning first set.")
                numerize(data)
                return data
        else:
            # only some columns were non-numeric: treat them as text
            # columns from here on
            for col in not_data:
                data["data_cols"].remove(col)
                print("column " + col + " removed from 'data_cols '.")

        for col, val in zip(colheaders, vals):
            data[col] += [newline[col]]

    numerize(data)
    datasets += [data]
    if verbose:
        print("function 'load_from_csv' finished!")
    if multiset:
        return datasets
    return data
def read_macro(file):
    """Parse a SPEC macro file into a dict of settings.

    Recognizes 'umv tth'/'umv th' motor moves, 'pd savepath' and
    'newfile' commands, and timescan/ascan/pdascan measurement lines.
    Comment lines are stripped first via remove_comments.

    Returns a dict with keys 'tth', 'alpha', 'savepath', 'newfile' and
    'measurements', each a list in file order.
    """
    with open(file) as macro_file:
        raw_lines = macro_file.readlines()
    macro_lines = remove_comments(raw_lines)

    settings = dict(tth=[], alpha=[], savepath=[], newfile=[], measurements=[])

    for macro_line in macro_lines:
        tth_hit = re.search("umv tth " + float_match, macro_line)
        if tth_hit:
            # strip the 'umv tth' command text and parse the number
            settings["tth"].append(float(tth_hit.group()[7:]))
            continue
        alpha_hit = re.search("umv th " + float_match, macro_line)
        if alpha_hit:
            settings["alpha"].append(float(alpha_hit.group()[6:]))
            continue
        if "pd savepath" in macro_line:
            # fixed-offset slice: assumes the line starts 'pd savepath '
            settings["savepath"].append(macro_line[12:])
        elif "newfile " in macro_line:
            settings["newfile"].append(macro_line[8:])
        elif (
            "_timescan " in macro_line
            or "ascan " in macro_line
            or "pdascan " in macro_line
        ):
            settings["measurements"].append(macro_line)
    return settings
|
ScottSoren/EC_MS | src/EC_MS/Potentiostat.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 14:55:48 2017
@author: murems
"""
import os, codecs, io
import numpy as np
# Settings-file row titles whose values are potentials, and so need to be
# shifted when the reference-electrode calibration changes:
U_lines = ["Ei (V)", "E1 (V)", "E2 (V)", "Ef (V)", "EL (V)"]
# Row titles whose values say what a potential is measured against
# (e.g. 'Ref'):
check_lines = ["vs."]
class SettingsFile:
    """An EC-Lab settings (.mps) file, handled as raw text lines.

    Supports shifting every potential that is specified vs. the reference
    electrode (rows whose titles are listed in the module-level U_lines),
    e.g. to account for a new RE-vs-RHE calibration, and writing the
    result back out in EC-Lab's expected encoding.
    """

    def __init__(
        self, name, file_name=None, RE_vs_RHE=None, settings=None, verbose=True, **kwargs
    ):
        """Read a settings file into self.lines.

        Args:
            name (str): name for this object; also the default output
                file-name stem in write()
            file_name (str): path of the .mps file to read. Defaults to
                `name` when no settings are given.
            RE_vs_RHE (float): reference electrode potential vs RHE in [V]
            settings (dict): extra settings, merged with **kwargs.
                (fix: was a mutable default argument that got mutated by
                the kwargs update and thus shared between instances; the
                caller's dict is now copied rather than mutated)
            verbose (bool): default verbosity for the methods
        """
        settings = dict(settings) if settings else {}
        settings.update(kwargs)
        self.settings = settings
        self.RE_vs_RHE = RE_vs_RHE
        if file_name is None and len(settings) == 0:
            file_name = name
        self.name = name
        self.verbose = verbose
        try:
            with open(file_name) as f:
                self.lines = f.readlines()
        except UnicodeDecodeError:
            # EC-Lab files are not utf-8; fall back on latin-9
            print(
                "why on earth don't you idiots just use utf-8 for everything? Trying iso8859_15."
            )
            with codecs.open(file_name, encoding="iso8859_15") as f:
                self.lines = f.readlines()

    def write(self, file_name=None, encoding="iso8859_15"):
        """Write self.lines to file_name (default self.name + '.mps'),
        in EC-Lab's iso8859_15 encoding by default."""
        if file_name is None:
            file_name = self.name + ".mps"
        with io.open(file_name, "w", encoding=encoding) as f:
            f.writelines(self.lines)

    def change_potentials(self, RE_vs_RHE=None, diff=None, verbose=None):
        """Shift all potentials specified vs. Ref by a constant offset.

        Args:
            RE_vs_RHE (float): new reference calibration in [V]; if given,
                diff is calculated as -(RE_vs_RHE - self.RE_vs_RHE) and
                self.RE_vs_RHE is updated
            diff (float): potential shift in [V], used directly when
                RE_vs_RHE is not given
            verbose (bool): overrides self.verbose if given
        """
        if verbose is None:
            verbose = self.verbose
        if verbose:
            print("\n\nfunction 'change_potentials' at your service!\n")
        if RE_vs_RHE is not None:
            diff = -(RE_vs_RHE - self.RE_vs_RHE)
            self.RE_vs_RHE = RE_vs_RHE
        newlines = []
        N_tech = 0
        newtech = False
        N_lines = len(self.lines)
        for n in range(N_lines):
            line = self.lines[n]
            if line[0:9] == "Technique":
                N_tech += 1
                if verbose:
                    print("Technique " + str(N_tech))
                newtech = True
                newline = line
            elif newtech:  # newtech stays true all through techniques like OCV
                if verbose:
                    print(line)
                newline = line
                if len(line) > 2 and line[0:2] == "Ns":
                    # multi-step technique: the Ns row width defines the
                    # number of 20-character-wide step columns
                    N_step = (len(line)) / 20 - 1
                    # Hmm, the number of characters per line is different based on OS.
                    # Should be len(line)-2 in ubuntu. But I'll just round down later anyway.
                    if verbose:
                        print(str(N_step) + " steps.")
                    newtech = False  # now we're into the numerical settings!
                # but there is no Ns if the technique only has one step!
                elif (
                    # fix: was `n < N_lines`, which is always true inside
                    # this loop and allowed an IndexError on the lookahead
                    n < N_lines - 1
                    and len(self.lines[n + 1]) > 5
                    and self.lines[n + 1][0:6] in U_lines
                ):
                    N_step = 1
                    if verbose:
                        print("1 step")
                    newtech = False
            elif N_tech == 0:  # Header ends up here.
                newline = line
            else:
                parts = [
                    line[20 * i : 20 * (i + 1)] for i in range(int(N_step + 1))
                ]  # +1 for line title
                # fix: was `n == N_lines`, which is never true; the check
                # is meant to protect the self.lines[n + 1] lookahead below
                if len(parts) == 0 or n == N_lines - 1:
                    if verbose:
                        print("skipping line " + str(n) + " out of " + str(N_lines))
                    newline = line
                else:
                    if verbose:
                        print("row: " + parts[0])
                    if parts[0][0:6] in U_lines:
                        # the next row says what each value is given vs.
                        check_line = self.lines[n + 1]
                        check_parts = [
                            check_line[20 * i : 20 * (i + 1)]
                            for i in range(int(N_step + 1))
                        ]  # +1 for line title
                        newline = ""
                        for part, check_part in zip(parts, check_parts):
                            # fix: this debug print was not gated on verbose
                            if verbose:
                                print(part + " " + check_part)
                            if check_part[0:3] == "Ref":
                                # fix: was eval(part); float() parses the
                                # same numeric fields without executing
                                # file content
                                U = float(part)
                                U_new = np.round(U + diff, 3)
                                if verbose:
                                    print(
                                        "replacing "
                                        + str(U)
                                        + " V vs Ref with "
                                        + str(U_new)
                                        + " V vs Ref."
                                    )
                                newpart = str(U_new).ljust(20)
                                newline += newpart
                            else:  # line title also ends up here.
                                newline += part
                        newline += "\n"
                    else:
                        newline = line
            newlines += [newline]
        self.lines = newlines
if __name__ == "__main__":
    # Example usage: shift all potentials in a settings file from a
    # calibration of RE_vs_RHE = 1.298 V to 0.590 V and save the result.
    # NOTE(review): hard-coded personal Dropbox path and file names; this
    # only runs on the author's machine.
    data_dir = os.path.expanduser(
        "~/Dropbox/Sniffer_Experiments/06_HER/Data/17G10_Nik3/"
    )
    os.chdir(data_dir)
    test = SettingsFile(
        name="test", file_name="closing_secondary.mps", RE_vs_RHE=1.298, verbose=True
    )
    test.change_potentials(RE_vs_RHE=0.5900)
    test.write(file_name="RE_vs_RHE_is_0p59V.mps")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.