import os
import platform
import re
import sys
import tempfile
import shutil
from copy import copy
from conans import tools
from cpt.packager import ConanMultiPackager, load_cf_class
def loadScheme_(name):
CONANOS_SCHEME_REPO = os.environ.get('CONANOS_SCHEME_REPO')
if not CONANOS_SCHEME_REPO:
CONANOS_SCHEME_REPO = 'https://raw.githubusercontent.com/conanos/schemes/master'
tools.out.info('Conan build for scheme : %s'%name)
tools.out.info('scheme repository : %s'%CONANOS_SCHEME_REPO)
url = '%s/%s/scheme.py'%(CONANOS_SCHEME_REPO ,name)
filename = url
    tmpd = None
    if url.find(':') > 1:
        tmpd = tempfile.mkdtemp()
        filename = os.path.join(tmpd, 'conanos_%s_scheme.py' % name)
        tools.download(url, filename, overwrite=True)
try:
module_dir = os.path.dirname(filename)
module_name, ext = os.path.splitext(os.path.basename(filename))
sys.path.insert(1, module_dir)
module = __import__(module_name)
assert(module.library_types)
assert(module.options)
assert(module.dependencies)
    except ImportError:
        tools.out.error('failed to import %s' % url)
        raise ImportError('cannot import scheme file: %s' % url)
    finally:
        if tmpd:
            shutil.rmtree(tmpd)
return module
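# The asserts above define the scheme module contract. A hypothetical minimal
# scheme.py might look like this (illustrative sketch only, names taken from
# the asserts above):
#
#   def library_types(pkg_name, settings):
#       return ['shared']                # or 'static', or both
#
#   def options(pkg_name, settings, shared):
#       return {}                        # option name -> value overrides
#
#   def dependencies(pkg_name, settings):
#       return {}                        # dependency name -> shared flag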
def _filter(pkg_name , builder):
CONANOS_SCHEME = os.environ.get('CONANOS_SCHEME')
if not CONANOS_SCHEME:
        tools.out.warn('conanos build without scheme. '
                       'To build for a scheme, for example webstreamer: '
                       'windows> set CONANOS_SCHEME=webstreamer '
                       'linux> export CONANOS_SCHEME=webstreamer')
return builder
items = []
scheme = loadScheme_(CONANOS_SCHEME)
for settings, options, env_vars, build_requires, reference in builder.items:
compiler = settings['compiler']
if compiler == 'Visual Studio':
compiler = 'msvc'
ltype = scheme.library_types(pkg_name,settings)
if ltype:
conanfile = load_cf_class("./conanfile.py", builder.conan_api)
if hasattr(conanfile, "options") and "shared" in conanfile.options:
shared = options['%s:shared'%pkg_name]
l = 'shared' if shared else 'static'
if l == ltype or l in ltype:
items.append([settings, options, env_vars, build_requires])
else:
items.append([settings, options, env_vars, build_requires])
builder.items = items
return builder
def Main(name,pure_c=True):
sch = os.environ.get("CONANOS_SCHEME")
if not sch:
        tools.out.error('''Warning !!!!!!!!!!
        Building with conanos, but no scheme name is set.
        Please set CONANOS_SCHEME to the scheme you want to build the package for.
        !!!!!!!!!!
        ''')
else:
tools.out.info('''
======================================
package : %s
scheme : %s
======================================
'''%(name,sch))
scheme = loadScheme_(sch)
if hasattr(scheme,'pre_build'):
scheme.pre_build()
if platform.system() == 'Windows':
os.environ['CONAN_VISUAL_VERSIONS'] = os.environ.get('CONAN_VISUAL_VERSIONS','15')
os.environ['CONAN_USERNAME'] = os.environ.get('CONAN_USERNAME','conanos')
PATTERN = re.compile(r'conan(io|os)/(?P<compiler>gcc|clang|emcc)(?P<version>\d+)(-(?P<arch>\w+(-\w+)*))?')
m = PATTERN.match(os.environ.get('CONAN_DOCKER_IMAGE',''))
docker_entry_script = ''
if m:
compiler = m.group('compiler')
version = m.group('version')
arch = 'x86_64' if not m.group('arch') else m.group('arch')
CONANOS_SCHEME = os.environ.get("CONANOS_SCHEME")
CONANOS_SCHEME_REPO = os.environ.get("CONANOS_SCHEME_REPO")
docker_entry_script += "pip install conan --upgrade"
docker_entry_script += " && pip install conan_package_tools"
docker_entry_script += " && pip install conanos --upgrade"
if CONANOS_SCHEME:
docker_entry_script += " && export CONANOS_SCHEME=%s"%CONANOS_SCHEME
if CONANOS_SCHEME_REPO:
docker_entry_script += " && export CONANOS_SCHEME_REPO=%s"%CONANOS_SCHEME_REPO
if os.path.exists('docker_entry_script.sh'):
docker_entry_script +=' && /bin/bash docker_entry_script.sh %s %s %s'%(compiler,version,arch)
builder = ConanMultiPackager(docker_entry_script=docker_entry_script)
builder.add_common_builds(pure_c=pure_c)
_filter(name,builder)
builder.run()
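# Hedged usage sketch: a package's build.py (hypothetical) would call Main
# with its package name, selecting the scheme via the environment:
#
#   # export CONANOS_SCHEME=webstreamer
#   if __name__ == '__main__':
#       Main('glib', pure_c=True)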
def pkgconfig_adaption(conanfile, pkgconfig_folder='~pkgconfig'):
    pkgconfigd = os.path.abspath(pkgconfig_folder)
    requires = conanfile.requires
    for (name, reference) in requires.items():
        rootd = conanfile.deps_cpp_info[name].rootpath
        pcd = os.path.join(rootd, 'lib/pkgconfig')
        for fname in os.listdir(pcd):
            pcfile = os.path.join(pcd, fname)
            if fname.endswith('.pc') and os.path.isfile(pcfile):
                filename = os.path.join(pkgconfigd, fname)
                if not os.path.isdir(pkgconfigd):
                    os.makedirs(pkgconfigd)
                shutil.copy(pcfile, filename)
                tools.replace_prefix_in_pc_file(filename, rootd)
                tools.out.info('%s -> %s' % (fname, filename))
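# Hedged usage sketch: calling pkgconfig_adaption from a conanfile's build()
# gathers each dependency's .pc files into one folder (folder name assumed):
#
#   def build(self):
#       pkgconfig_adaption(self, pkgconfig_folder='~pkgconfig')
#       # point PKG_CONFIG_PATH at that folder before running configure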
def config_scheme(conanfile):
CONANOS_SCHEME = os.environ.get('CONANOS_SCHEME')
if not CONANOS_SCHEME:
        tools.out.warn('conanos build without scheme. '
                       'To build for a scheme, for example webstreamer: '
                       'windows> set CONANOS_SCHEME=webstreamer '
                       'linux> export CONANOS_SCHEME=webstreamer')
return
scheme = loadScheme_(CONANOS_SCHEME)
name = conanfile.name
options = conanfile.options
settings = conanfile.settings
shared = conanfile.options.shared
# overwrite with scheme's options
s_options = scheme.options(name,settings,shared)
    for key, val in s_options.items():
        if key in options:
            setattr(options, key, val)
# dependencies
deps = scheme.dependencies(name,settings)
requires = conanfile.requires
for (r_name,reference) in requires.items():
if r_name in deps:
            conanfile.options[r_name].shared = deps[r_name]
else:
ltype = scheme.library_types(r_name,settings)
            ltype = ltype if isinstance(ltype, str) else ltype[0]
            assert ltype in ['static', 'shared']
            conanfile.options[r_name].shared = (ltype == 'shared')
rs_options = scheme.options(r_name,settings,shared)
for key , val in rs_options.items():
setattr(conanfile.options[r_name],key,val)
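# Hedged usage sketch: config_scheme is meant to run inside a conanfile so
# scheme-defined options and dependency shared flags apply before building:
#
#   def configure(self):
#       config_scheme(self)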
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides the base class for implementing video object tracking inference."""
from typing import List
from typing import Union
import numpy as np
from automl_video_ondevice.shot_classification.config import ShotClassificationConfig
from automl_video_ondevice.types import ShotClassificationAnnotation
from automl_video_ondevice.types import Size
class BaseShotClassificationInference:
"""Interface that must be implemented for support of different model types."""
def __init__(self, frozen_graph_path: str, label_map_path: str,
config: ShotClassificationConfig):
"""Constructor for BaseShotClassificationInference.
Args:
frozen_graph_path: String value for the file path of frozen graph.
label_map_path: String value for the file path of the label map.
config: ShotClassificationConfig object with shot classification configs.
"""
raise NotImplementedError()
def input_size(self) -> Size:
"""Calculate / grab optimal input size.
The user is expected to ensure the size of their input image is correct.
This is in case the user wants to do any acceleration of image resizing
themselves.
    Some inference engines, such as the TFLite models, require a specific
    input image size, while some TensorFlow models accept a dynamic input.
    For a dynamic input, the returned size will have the dimensions -1, -1.
Returns:
The expected input size, of the type Size.
"""
return Size(256, 256)
def run(self, timestamp: Union[int, float], frame: np.ndarray,
annotations: List[ShotClassificationAnnotation]) -> bool:
"""Run inferencing for a single frame, to calculate annotations.
Args:
      timestamp: Generally an integer representing the microsecond of the
        frame, though any unique number is also accepted.
      frame: A numpy array of the shape (h, w, 3), representing an RGB image.
        Each color channel should be a number in [0, 256).
annotations: A list to append the output annotations to. For normal use-
case, this should be an empty list. The output annotations will be of
type ShotClassificationAnnotation.
Returns:
A boolean, True if successful and False if unsuccessful.
"""
raise NotImplementedError('Shot classification has not been implemented.')
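# Minimal sketch of a concrete subclass (hypothetical TFLite backend; the
# class name and internals are assumptions, not part of this library):
#
#   class TfLiteShotClassification(BaseShotClassificationInference):
#
#     def __init__(self, frozen_graph_path, label_map_path, config):
#       self._config = config  # load interpreter and label map here
#
#     def run(self, timestamp, frame, annotations):
#       # Run inference on `frame` and append ShotClassificationAnnotation
#       # results to `annotations`; return True on success.
#       return True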
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import pickle
import os
from copy import deepcopy
import numpy as np
from math import cos, radians
from numpy.lib.recfunctions import append_fields
from collections import OrderedDict
import matplotlib as mpl
from astropy import time, coordinates, units
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from rfpipe import version, fileLock
from bokeh.plotting import ColumnDataSource, Figure, save, output_file
from scipy.stats import mstats
from scipy import signal
from scipy.special import erfinv
from bokeh.models import HoverTool
from bokeh.models import Row
from sklearn import cluster
import logging
logger = logging.getLogger(__name__)
class CandData(object):
""" Object that bundles data from search stage to candidate visualization.
Provides some properties for the state of the phased data and candidate.
"""
def __init__(self, state, loc, image, data, **kwargs):
""" Instantiate with pipeline state, candidate location tuple,
image, and resampled data phased to candidate.
TODO: Need to use search_dimensions to infer candloc meaning
"""
self.state = state
self.loc = tuple(loc)
self.image = image
self.data = np.ma.masked_equal(data, 0j)
        # optional extras; snrk/snrarms allow detection-level calculation
        self.snrk = kwargs.get('snrk')
        self.snrarms = kwargs.get('snrarms')
        self.cluster = kwargs.get('cluster')
        self.clustersize = kwargs.get('clustersize')
assert len(loc) == len(self.state.search_dimensions), ("candidate location "
"should set each of "
"the st.search_dimensions")
def __repr__(self):
return 'CandData for scanId {0} at loc {1}'.format(self.state.metadata.scanId, self.loc)
@property
def searchtype(self):
return self.state.prefs.searchtype
@property
def features(self):
return self.state.features
@property
def snrtot(self):
""" Optimal SNR given searchtype (e.g., snr1 with snrk, if snrk measured)
Note that snrk can be calclated after detection, so snrtot represents post detection
significance.
"""
if self.state.prefs.searchtype in ['image', 'imagek', 'armkimage']:
return (self.snrk**2 + self.snr1**2)**0.5
elif self.state.prefs.searchtype == 'armk':
return (self.snrk**2 + self.snrarms**2)**0.5
@property
def snr1(self):
# TODO: find good estimate that can be implemented in both CPU and GPU
# imstd = util.madtostd(image) # outlier resistant
return self.image.max()/self.image.std()
@property
def immax1(self):
return self.image.max()
@property
def l1(self):
return self.peak_lm[0]
@property
def m1(self):
return self.peak_lm[1]
@property
def spec(self):
return self.data.real.mean(axis=2)[self.integration_rel]
@property
def specstd(self):
return self.spec.std()
@property
def specskew(self):
return mstats.skew(self.spec)
@property
def speckur(self):
return mstats.kurtosis(self.spec)
@property
def imskew(self):
return mstats.skew(self.image.flatten())
@property
def imkur(self):
return mstats.kurtosis(self.image.flatten())
@property
def lc(self):
return self.data.real.mean(axis=2).mean(axis=1)
@property
def tskew(self):
return mstats.skew(self.lc)
@property
def tkur(self):
return mstats.kurtosis(self.lc)
@property
def integration_rel(self):
""" Candidate integration relative to data time window
"""
if self.loc[1] < self.state.prefs.timewindow//2:
return self.loc[1]
else:
return self.state.prefs.timewindow//2
@property
def peak_lm(self):
"""
"""
return self.state.pixtolm(self.peak_xy)
@property
def peak_xy(self):
""" Peak pixel in image
Only supports positive peaks for now.
"""
return np.where(self.image == self.image.max())
@property
def time_top(self):
""" Time in mjd where burst is at top of band
"""
return (self.state.segmenttimes[self.loc[0]][0] +
(self.loc[1]*self.state.inttime)/(24*3600))
@property
def candid(self):
scanId = self.state.metadata.scanId
segment, integration, dmind, dtind, beamnum = self.loc
return '{0}_seg{1}-i{2}-dm{3}-dt{4}'.format(scanId, segment, integration, dmind, dtind)
class CandCollection(object):
""" Wrap candidate array with metadata and
prefs to be attached and pickled.
"""
def __init__(self, array=np.array([]), prefs=None, metadata=None,
canddata=[]):
self.array = array
self.prefs = prefs
self.metadata = metadata
self.rfpipe_version = version.__version__
self._state = None
self.soltime = None
self.canddata = canddata
def __repr__(self):
if self.metadata is not None:
return ('CandCollection for {0}, scan {1}, segment {2} with {3} candidate{4}'
.format(self.metadata.datasetId, self.metadata.scan, self.segment,
len(self), 's'[not len(self)-1:]))
else:
return ('CandCollection with {0} rows'.format(len(self.array)))
def __len__(self):
""" Removes locs with integration < 0, which is a flag
"""
goodlocs = [loc for loc in self.locs if loc[1] >= 0]
return len(goodlocs)
def __add__(self, cc):
""" Allow candcollections to be added within a given scan.
(same dmarr, dtarr, segmenttimes)
Adding empty cc ok, too.
"""
# TODO: update to allow different simulated_transient fields that get added into single list
assert self.prefs.name == cc.prefs.name, "Cannot add collections with different preference name/hash"
assert self.state.dmarr == cc.state.dmarr, "Cannot add collections with different dmarr"
        assert self.state.dtarr == cc.state.dtarr, "Cannot add collections with different dtarr"
# shortcut calculations if one is empty
if len(self) and not len(cc):
return self
elif not len(self) and len(cc):
return cc
elif not len(self) and not len(cc):
return self
# standard case
# if self.state.nsegment == cc.state.nsegment:
# assert (self.state.segmenttimes == cc.state.segmenttimes).all(), "Cannot add collections with different segmenttimes"
# OTF case (one later than the other)
# else:
# if self.state.nsegment > cc.state.nsegment:
# assert self.metadata.starttime_mjd == cc.metadata.starttime_mjd, "OTF segments should have same start time"
# assert (self.state.segmenttimes[:cc.state.nsegment, 0] == cc.state.segmenttimes[:,0]).all(), "OTF segments should have shared segmenttimes"
if hasattr(self, 'canddata'):
canddata = self.canddata.copy()
else:
canddata = []
later = CandCollection(prefs=self.prefs, metadata=self.metadata,
array=self.array.copy())
if self.state.nsegment < cc.state.nsegment:
assert self.metadata.starttime_mjd == cc.metadata.starttime_mjd, "OTF segments should have same start time"
# assert (self.state.segmenttimes[:,0] == cc.state.segmenttimes[:self.state.nsegment,0]).all(), "OTF segments should have shared segmenttimes"
later = CandCollection(prefs=cc.prefs, metadata=cc.metadata,
array=cc.array.copy())
# combine candidate arrays
# if len(self) and len(cc):
later.array = np.concatenate((self.array.copy(), cc.array.copy()))
if hasattr(later, 'canddata'):
later.canddata = self.canddata + cc.canddata
# elif (not len(self)) and (len(cc) > 0):
# later.array = cc.array.copy()
# if hasattr(cc, 'canddata'):
# later.canddata = cc.canddata
# elif (not len(cc)) and (len(self) > 0):
# later.array = self.array.copy()
# if hasattr(self, 'canddata'):
# later.canddata = self.canddata
# combine prefs simulated_transient
later.prefs.simulated_transient = later.prefs.simulated_transient or cc.prefs.simulated_transient
return later
def __radd__(self, other):
""" Support recursive add so we can sum(ccs)
"""
if other == 0:
return self
else:
return self.__add__(other)
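    # Because __radd__ treats 0 as the identity, collections can be combined
    # with the builtin sum, e.g. (as done in makesummaryplot below):
    #
    #   ccs = list(iter_cands(candsfile))
    #   cc = sum(ccs)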
def __getitem__(self, key):
if len(self.canddata):
return CandCollection(array=self.array.take([key]), prefs=self.prefs,
metadata=self.metadata, canddata=[self.canddata[key]])
else:
return CandCollection(array=self.array.take([key]), prefs=self.prefs,
metadata=self.metadata)
@property
def scan(self):
if self.metadata is not None:
return self.metadata.scan
else:
return None
@property
def segment(self):
if len(self):
segments = np.unique(self.array['segment'])
if len(segments) == 1:
return int(segments[0])
elif len(segments) > 1:
logger.warning("Multiple segments in this collection")
return segments
else:
return None
@property
def locs(self):
if len(self.array):
return self.array[['segment', 'integration', 'dmind', 'dtind',
'beamnum']].tolist()
else:
return np.array([], dtype=int)
@property
def candmjd(self):
""" Candidate MJD at top of band
"""
# dt_inf = util.calc_delay2(1e5, self.state.freq.max(), self.canddm)
t_top = np.array(self.state.segmenttimes)[self.array['segment'], 0] + (self.array['integration']*self.canddt)/(24*3600)
return t_top
@property
def canddm(self):
""" Candidate DM in pc/cm3
"""
dmarr = np.array(self.state.dmarr)
return dmarr[self.array['dmind']]
@property
def canddt(self):
""" Candidate dt in seconds
"""
dtarr = np.array(self.state.dtarr)
return self.metadata.inttime*dtarr[self.array['dtind']]
@property
def candl(self):
""" Return l1 for candidate (offset from phase center in RA direction)
"""
# beamnum not yet supported
return self.array['l1']
@property
def candm(self):
""" Return m1 for candidate (offset from phase center in Dec direction)
"""
# beamnum not yet supported
return self.array['m1']
@property
def candids(self):
scanId = self.metadata.scanId
return ['{0}_seg{1}-i{2}-dm{3}-dt{4}'.format(scanId, segment, integration, dmind, dtind) for (segment, integration, dmind, dtind, beamnum) in self.locs]
@property
def cluster(self):
""" Return cluster label
"""
if self.prefs.clustercands:
return self.array['cluster']
else:
return None
@property
def clustersize(self):
""" Return size of cluster
"""
if self.prefs.clustercands:
return self.array['clustersize']
else:
return None
@property
def snrtot(self):
""" Optimal SNR, given fields in cc (quadrature sum)
"""
fields = self.array.dtype.fields
snr = 0.
if 'snr1' in fields:
snr += self.array['snr1']**2
if 'snrk' in fields:
snr += self.array['snrk']**2
if 'snrarms' in fields:
            snr += self.array['snrarms']**2
return snr**0.5
@property
def state(self):
""" Sets state by regenerating from the metadata and prefs.
"""
from rfpipe import state
if self._state is None:
self._state = state.State(inmeta=self.metadata, inprefs=self.prefs,
showsummary=False, validate=False)
return self._state
@property
def mock_map(self):
""" Look for mock in candcollection
TODO: return values that help user know mocks found and missed.
"""
if self.prefs.simulated_transient is not None:
clusters = self.array['cluster'].astype(int)
cl_rank, cl_count = calc_cluster_rank(self)
mock_labels = []
map_mocks = {}
for mock in self.prefs.simulated_transient:
(segment, integration, dm, dt, amp, l0, m0) = mock
                dmind0 = np.abs(np.array(self.state.dmarr) - dm).argmin()
                dtind0 = np.abs(np.array(self.state.dtarr)*self.state.inttime - dt).argmin()
                integration0 = integration//self.state.dtarr[dtind0]
mockloc = (segment, integration0, dmind0, dtind0, 0)
if mockloc in self.locs:
label = clusters[self.locs.index(mockloc)]
mock_labels.append(label)
clustersize = cl_count[self.locs.index(mockloc)]
map_mocks[mock] = np.array(self.locs)[clusters == label].tolist()
logger.info("Found mock ({0}, {1}, {2:.2f}, {3:.2f}, {4:.2f}, {5:.4f}, {6:.4f}) at loc {7} with label {8} of size {9}"\
.format(segment, integration, dm, dt, amp, l0,\
m0, mockloc, label, clustersize))
else:
map_mocks[mock] = []
mock_labels.append(-2)
logger.info("The mock ({0}, {1}, {2:.2f}, {3:.2f}, {4:.2f}, {5:.4f}, {6:.4f}) wasn't found at loc {7}"\
.format(segment, integration, dm, dt, amp, l0, m0 ,mockloc))
return map_mocks, mock_labels
else:
return None
def sdmname(self):
""" Get name of SDM created by realfast based on naming convention
"""
segment = self.segment
segmenttimes = self.state.segmenttimes
startTime = segmenttimes[segment][0]
bdftime = int(time.Time(startTime, format='mjd').unix*1e3)
return 'realfast_{0}_{1}'.format(self.state.metadata.datasetId, bdftime)
def cd_to_cc(canddata):
""" Converts canddata into plot and a candcollection
with added features from CandData instance.
Returns structured numpy array of candidate features labels defined in
st.search_dimensions.
Generates png plot for peak cands, if so defined in preferences.
"""
assert isinstance(canddata, CandData)
logger.info('Calculating features for candidate.')
st = canddata.state
featurelists = []
for feature in st.features:
featurelists.append([canddata_feature(canddata, feature)])
kwargs = dict(zip(st.features, featurelists))
candlocs = canddata_feature(canddata, 'candloc')
kwargs['candloc'] = [candlocs]
if canddata.cluster is not None:
clusters = canddata_feature(canddata, 'cluster')
kwargs['cluster'] = [clusters]
clustersizes = canddata_feature(canddata, 'clustersize')
kwargs['clustersize'] = [clustersizes]
candcollection = make_candcollection(st, **kwargs)
if st.prefs.returncanddata:
candcollection.canddata = [canddata]
return candcollection
def canddata_feature(canddata, feature):
""" Calculate a feature (or candloc) from a canddata instance.
feature must be name from st.features or 'candloc'.
"""
# TODO: update this to take feature as canddata property
if feature == 'candloc':
return canddata.loc
elif feature == 'snr1':
return canddata.snr1
elif feature == 'snrarms':
return canddata.snrarms
elif feature == 'snrk':
return canddata.snrk
elif feature == 'cluster':
return canddata.cluster
elif feature == 'clustersize':
return canddata.clustersize
elif feature == 'specstd':
return canddata.specstd
elif feature == 'specskew':
return canddata.specskew
elif feature == 'speckur':
return canddata.speckur
elif feature == 'immax1':
return canddata.immax1
elif feature == 'l1':
return canddata.l1
elif feature == 'm1':
return canddata.m1
elif feature == 'imskew':
return canddata.imskew
elif feature == 'imkur':
return canddata.imkur
elif feature == 'tskew':
return canddata.tskew
elif feature == 'tkur':
return canddata.tkur
else:
raise NotImplementedError("Feature {0} calculation not implemented"
.format(feature))
def make_candcollection(st, **kwargs):
""" Construct a candcollection with columns set by keywords.
Minimal cc has a candloc (segment, int, dmind, dtind, beamnum).
Can also provide features as keyword/value pairs.
keyword is the name of the column (e.g., "l1", "snr")
and the value is a list of values of equal length as candlocs.
"""
if len(kwargs):
remove = []
for k, v in iteritems(kwargs):
if (len(v) == 0) and (k != 'candloc'):
remove.append(k)
for k in remove:
_ = kwargs.pop(k)
# assert 1-to-1 mapping of input lists
assert 'candloc' in kwargs
assert isinstance(kwargs['candloc'], list)
for v in itervalues(kwargs):
assert len(v) == len(kwargs['candloc'])
candlocs = kwargs['candloc']
features = [kw for kw in list(kwargs.keys())]
features.remove('candloc')
fields = []
types = []
for ff in st.search_dimensions + tuple(features):
fields.append(str(ff))
if ff in st.search_dimensions + ('cluster', 'clustersize'):
tt = '<i4'
else:
tt = '<f4'
types.append(str(tt))
dtype = np.dtype({'names': fields, 'formats': types})
array = np.zeros(len(candlocs), dtype=dtype)
for i in range(len(candlocs)):
ff = list(candlocs[i])
for feature in features:
ff.append(kwargs[feature][i])
array[i] = tuple(ff)
candcollection = CandCollection(array=array, prefs=st.prefs,
metadata=st.metadata)
else:
candcollection = CandCollection(prefs=st.prefs,
metadata=st.metadata)
return candcollection
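# Hedged usage sketch: build a one-candidate collection with a single feature
# column (st is a pipeline state; candloc and snr1 values are illustrative):
#
#   cc = make_candcollection(st, candloc=[(0, 10, 1, 0, 0)], snr1=[8.2])
#   assert len(cc) == 1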
def cluster_candidates(cc, downsample=None, returnclusterer=False,
label_unclustered=True):
""" Perform density based clustering on candidates using dbscan
parameters used for clustering: dm, time, l,m.
downsample will group spatial axes prior to running clustering.
Taken from cc.prefs.cluster_downsampling by default.
label_unclustered adds new cluster label for each unclustered candidate.
Returns label for each row in candcollection.
"""
cc1 = deepcopy(cc)
if len(cc1) > 1:
if isinstance(cc1.prefs.clustercands, tuple):
min_cluster_size, min_samples = cc1.prefs.clustercands
else:
logger.info("Using default clustercands parameters")
min_cluster_size = 5
min_samples = 3
if downsample is None:
downsample = cc1.prefs.cluster_downsampling
candl = cc1.candl
candm = cc1.candm
npixx = cc1.state.npixx
npixy = cc1.state.npixy
uvres = cc1.state.uvres
dmind = cc1.array['dmind']
dtind = cc1.array['dtind']
dtarr = cc1.state.dtarr
timearr_ind = cc1.array['integration'] # time index of all the candidates
time_ind = np.multiply(timearr_ind, np.array(dtarr).take(dtind))
peakx_ind, peaky_ind = cc1.state.calcpix(candl, candm, npixx, npixy,
uvres)
# stacking indices and taking at most one per bin
data = np.transpose([peakx_ind//downsample,
peaky_ind//downsample,
dmind, time_ind])
logger.info("Clustering parameters set to ({0},{1}) and downsampling xy by {2}."
.format(min_cluster_size, min_samples, downsample))
if min_cluster_size > len(data):
logger.info("Setting min_cluster_size to number of unique cands {0}"
.format(len(data)))
min_cluster_size = len(data)
# clusterer = hdbscan.HDBSCAN(metric='euclidean',
# min_cluster_size=min_cluster_size,
# min_samples=min_samples,
# cluster_selection_method='eom',
# allow_single_cluster=True).fit(data)
clusterer = cluster.DBSCAN(metric='chebyshev', min_samples=min_samples,
eps=14, algorithm='auto', leaf_size=23).fit(data)
nclustered = np.max(clusterer.labels_ + 1)
nunclustered = len(np.where(clusterer.labels_ == -1)[0])
logger.info("Found {0} clusters and {1} unclustered candidates for "
"min cluster size {2}"
.format(nclustered, nunclustered, min_cluster_size))
labels = clusterer.labels_.astype(np.int32)
if len(np.where(clusterer.labels_ < -1)[0]):
logger.warning("Unexpected cluster labels: {0}"
.format(clusterer.labels_))
else:
clusterer = None
labels = -1*np.ones(len(cc1), dtype=np.int32)
if -1 in labels and label_unclustered:
unclustered = np.where(labels == -1)[0]
logger.info("Adding {0} unclustered candidates as individual clusters"
.format(len(unclustered)))
newind = max(labels)
for cli in unclustered:
newind += 1
labels[cli] = newind
# TODO: rebuild array with new col or accept broken python 2 or create cc with 'cluster' set to -1
if 'cluster' not in cc1.array.dtype.fields:
cc1.array = append_fields(cc1.array, 'cluster', labels, usemask=False)
else:
cc1.array['cluster'] = labels
if returnclusterer:
return cc1, clusterer
else:
return cc1
def calc_cluster_rank(cc):
""" Given cluster array of candcollection, calculate rank relative
to total count in each cluster.
Rank ranges from 1 (highest SNR in cluster) to total count in cluster.
"""
assert 'cluster' in cc.array.dtype.fields
# get count in cluster and snr rank of each in its cluster
clusters = cc.array['cluster'].astype(int)
cl_rank = np.zeros(len(clusters), dtype=int)
cl_count = np.zeros(len(clusters), dtype=int)
# TODO: check on best way to find max SNR with kalman, etc
for cluster in np.unique(clusters):
clusterinds = np.where(cluster == clusters)[0]
snrs = cc.array['snr1'][clusterinds]
cl_rank[clusterinds] = np.argsort(np.argsort(snrs)[::-1])+1
cl_count[clusterinds] = len(clusterinds)
return cl_rank, cl_count
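# Example of intended use (assumes cc has a 'cluster' column): keep only the
# highest-SNR candidate from each cluster.
#
#   cl_rank, cl_count = calc_cluster_rank(cc)
#   peakinds = np.where(cl_rank == 1)[0]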
def normprob(snrs, ntrials):
""" Uses SNR rank order to estimate significance based on normal probability distribution.
snrs can be single SNR or list or array.
ntrials should be an int of number of trials to generate snrs.
Returns expected snr (Z-score) for each input snr, given each input value's frequency of occurrence.
Plotting snr versus Z-score will show anomalies.
"""
    # define norm quantile functions
    Z = lambda quan: np.sqrt(2)*erfinv(2*quan - 1)
    quantile = lambda ntrials, i: (ntrials + 1/2. - i)/ntrials
    if isinstance(snrs, list):
        snrs = np.array(snrs)
    elif isinstance(snrs, (int, float)):
        snrs = np.array([snrs])
    logger.info('Calculating normal probability distribution for {0} events in {1} trials'.format(len(snrs), ntrials))
# calc normal quantile
# purely sort and numpy-based
sortinds = np.argsort(snrs)
lenpos = len(np.where(snrs >= 0)[0])
lenneg = len(np.where(snrs < 0)[0])
unsortinds = np.zeros(len(sortinds), dtype=int)
unsortinds[sortinds] = np.arange(len(sortinds))
rank = np.concatenate( (np.arange(1, lenneg+1), np.arange(1, lenpos+1)[::-1]) )
zscores = Z(quantile(ntrials, rank[unsortinds]))
return zscores
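# Illustrative check (values assumed): compare measured SNRs against the
# Gaussian expectation for the number of search trials.
#
#   snrs = np.array([6.5, 7.1, 8.3])
#   zscores = normprob(snrs, ntrials=int(1e6))
#   # plotting snrs against zscores highlights non-Gaussian outliers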
def save_cands(st, candcollection):
""" Save candidate collection to pickle file.
Collection saved as array with metadata and preferences attached.
Writes to location defined by state using a file lock to allow multiple
writers.
"""
if st.prefs.savecandcollection:
# if not saving canddata, copy cc and save version without canddata
if not st.prefs.savecanddata:
cc = CandCollection(prefs=candcollection.prefs,
metadata=candcollection.metadata,
array=candcollection.array.copy())
else:
cc = candcollection
wwo = 'with' if st.prefs.savecanddata else 'without'
logger.info('Saving {0} candidate{1} {2} canddata to {3}.'
.format(len(candcollection),
's'[not len(candcollection)-1:], wwo, st.candsfile))
try:
with fileLock.FileLock(st.candsfile+'.lock', timeout=60):
with open(st.candsfile, 'ab+') as pkl:
pickle.dump(cc, pkl)
except fileLock.FileLock.FileLockException:
segment = cc.segment
newcandsfile = ('{0}_seg{1}.pkl'
.format(st.candsfile.rstrip('.pkl'), segment))
logger.warning('Candidate file writing timeout. '
'Spilling to new file {0}.'.format(newcandsfile))
with open(newcandsfile, 'ab+') as pkl:
pickle.dump(cc, pkl)
else:
logger.info('Not saving candcollection.')
def pkl_to_h5(pklfile, save_png=True, outdir=None, show=False):
""" Read candidate pkl file and save h5 file
"""
cds = list(iter_cands(pklfile, select='canddata'))
cds_to_h5(cds, save_png=save_png, outdir=outdir, show=show)
def cds_to_h5(cds, save_png=True, outdir=None, show=False):
""" Convert list of canddata objects to h5 file
"""
for cd in cds:
logger.info('Processing candidate at candloc {0}'.format(cd.loc))
if cd.data.any():
cand = cd_to_fetch(cd, classify=False, save_h5=True,
save_png=save_png, outdir=outdir, show=show)
else:
logger.warning('Canddata is empty. Skipping Candidate')
# globals
fetchmodel = None
tfgraph = None
def cd_to_fetch(cd, classify=True, devicenum=None, save_h5=False,
save_png=False, outdir=None, show=False, f_size=256,
t_size=256, dm_size=256, mode='CPU'):
""" Read canddata object for classification in fetch.
Optionally save png or h5.
"""
import h5py
from rfpipe.search import make_dmt
from skimage.transform import resize
segment, candint, dmind, dtind, beamnum = cd.loc
st = cd.state
width_m = st.dtarr[dtind]
timewindow = st.prefs.timewindow
tsamp = st.inttime*width_m
dm = st.dmarr[dmind]
ft_dedisp = np.flip((cd.data.real.sum(axis=2).T), axis=0)
chan_freqs = np.flip(st.freq*1000, axis=0) # from high to low, MHz
nf, nt = np.shape(ft_dedisp)
logger.debug('Size of the FT array is ({0}, {1})'.format(nf, nt))
try:
assert nt > 0
except AssertionError as err:
logger.exception("Number of time bins is equal to 0")
raise err
try:
assert nf > 0
except AssertionError as err:
logger.exception("Number of frequency bins is equal to 0")
raise err
roll_to_center = nt//2 - cd.integration_rel
ft_dedisp = np.roll(ft_dedisp, shift=roll_to_center, axis=1)
# If timewindow is not set during search, set it equal to the number of time bins of candidate
if nt != timewindow:
logger.info('Setting timewindow equal to nt = {0}'.format(nt))
timewindow = nt
else:
logger.info('Timewindow length is {0}'.format(timewindow))
try:
assert nf == len(chan_freqs)
except AssertionError as err:
logger.exception("Number of frequency channel in data should match the frequency list")
raise err
    if dm != 0:
dm_start = 0
dm_end = 2*dm
else:
dm_start = -10
dm_end = 10
logger.info('Generating DM-time for candid {0} in DM range {1:.2f}--{2:.2f} pc/cm3'
.format(cd.candid, dm_start, dm_end))
if devicenum is None:
# assume first gpu, but try to infer from worker name
devicenum = '0'
try:
from distributed import get_worker
name = get_worker().name
devicenum = name.split('g')[1]
except IndexError:
logger.warning("Could not parse worker name {0}. Using default GPU devicenum {1}"
.format(name, devicenum))
except ValueError:
logger.warning("No worker found. Using default GPU devicenum {0}"
.format(devicenum))
except ImportError:
logger.warning("distributed not available. Using default GPU devicenum {0}"
.format(devicenum))
else:
devicenum = str(devicenum)
logger.info("Using gpu devicenum: {0}".format(devicenum))
os.environ['CUDA_VISIBLE_DEVICES'] = str(devicenum)
# note that dmt range assuming data already dispersed to dm
dmt = make_dmt(ft_dedisp, dm_start-dm, dm_end-dm, 256, chan_freqs/1000,
tsamp, mode=mode, devicenum=int(devicenum))
reshaped_ft = resize(ft_dedisp, (f_size, nt), anti_aliasing=True)
if nt == t_size:
reshaped_dmt = dmt
elif nt > t_size:
reshaped_dmt = resize(dmt, (dm_size, t_size), anti_aliasing=True)
reshaped_ft = resize(reshaped_ft, (f_size, t_size), anti_aliasing=True)
else:
reshaped_ft = pad_along_axis(reshaped_ft, target_length=t_size,
loc='both', axis=1, mode='median')
reshaped_dmt = pad_along_axis(dmt, target_length=t_size,
loc='both', axis=1, mode='median')
logger.info('FT reshaped from ({0}, {1}) to {2}'
.format(nf, nt, reshaped_ft.shape))
logger.info('DMT reshaped to {0}'.format(reshaped_dmt.shape))
if outdir is not None:
fnout = outdir+'{0}'.format(cd.candid)
else:
        fnout = '{0}'.format(cd.candid)
if save_h5:
with h5py.File(fnout+'_classify.h5', 'w') as f:
freq_time_dset = f.create_dataset('data_freq_time',
data=reshaped_ft)
freq_time_dset.dims[0].label = b"time"
freq_time_dset.dims[1].label = b"frequency"
dm_time_dset = f.create_dataset('data_dm_time',
data=reshaped_dmt)
dm_time_dset.dims[0].label = b"dm"
dm_time_dset.dims[1].label = b"time"
logger.info('Saved h5 as {0}_classify.h5'.format(fnout))
if save_png:
if nt > t_size:
ts = np.arange(timewindow)*tsamp
else:
ts = np.arange(t_size)*tsamp
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8, 10), sharex=True)
ax[0].imshow(reshaped_ft, aspect='auto',
extent=[ts[0], ts[-1], np.min(chan_freqs),
np.max(chan_freqs)])
ax[0].set_ylabel('Freq')
ax[0].title.set_text('Dedispersed FT')
ax[1].imshow(reshaped_dmt, aspect='auto', extent=[ts[0], ts[-1],
dm_end, dm_start])
ax[1].set_ylabel('DM')
ax[1].title.set_text('DM-Time')
ax[1].set_xlabel('Time (s)')
plt.tight_layout()
plt.savefig(fnout+'_classify.png')
logger.info('Saved png as {0}_classify.png'.format(fnout))
if show:
plt.show()
else:
plt.close()
cand = prepare_to_classify(reshaped_ft, reshaped_dmt)
if classify:
from fetch.utils import get_model
import tensorflow as tf
global fetchmodel
global tfgraph
if fetchmodel is None and tfgraph is None:
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = tf.Session(config=config)
set_session(sess) # set this TensorFlow session as the default session for Keras
fetchmodel = get_model('a')
tfgraph = tf.get_default_graph()
with tfgraph.as_default():
preds = fetchmodel.predict(cand).tolist()
logger.info("Fetch probabilities {0}".format(preds))
frbprob = preds[0][1]
return frbprob
else:
return cand
def pad_along_axis(array, target_length, loc='end', axis=0, **kwargs):
"""
:param array: Input array to pad
:param target_length: Required length of the axis
:param loc: Location to pad: start: pad in beginning, end: pad in end, else: pad equally on both sides
:param axis: Axis to pad along
:return:
"""
pad_size = target_length - array.shape[axis]
axis_nb = len(array.shape)
if pad_size < 0:
return array
npad = [(0, 0) for x in range(axis_nb)]
if loc == 'start':
npad[axis] = (pad_size, 0)
elif loc == 'end':
npad[axis] = (0, pad_size)
else:
if pad_size % 2 == 0:
npad[axis] = (pad_size // 2, pad_size // 2)
else:
npad[axis] = (pad_size // 2, pad_size // 2 + 1)
return np.pad(array, pad_width=npad, **kwargs)
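# Example (shapes assumed): pad a (256, 200) array to 256 time samples,
# splitting the padding across both ends with the median value.
#
#   arr = np.random.randn(256, 200)
#   padded = pad_along_axis(arr, target_length=256, loc='both', axis=1,
#                           mode='median')
#   # padded.shape -> (256, 256)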
def crop(data, start_sample, length, axis):
"""
:param data: Data array to crop
:param start_sample: Sample to start the output cropped array
:param length: Final Length along the axis of the output
:param axis: Axis to crop
:return:
"""
    if data.shape[axis] >= start_sample + length:
if axis:
return data[:, start_sample:start_sample + length]
else:
return data[start_sample:start_sample + length, :]
elif data.shape[axis] == length:
return data
else:
raise OverflowError('Specified length exceeds the size of data')
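# Example (shapes assumed): cut a 256-sample window starting at sample 100
# along the time axis (axis=1) of a frequency-time array.
#
#   windowed = crop(ft, start_sample=100, length=256, axis=1)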
def prepare_to_classify(ft, dmt):
""" Data prep and packaging for input to fetch.
"""
data_ft = signal.detrend(np.nan_to_num(ft))
data_ft /= np.std(data_ft)
data_ft -= np.median(data_ft)
data_dt = np.nan_to_num(dmt)
data_dt /= np.std(data_dt)
data_dt -= np.median(data_dt)
X = np.reshape(data_ft, (256, 256, 1))
Y = np.reshape(data_dt, (256, 256, 1))
X[X != X] = 0.0
Y[Y != Y] = 0.0
X = X.reshape(-1, 256, 256, 1)
Y = Y.reshape(-1, 256, 256, 1)
X = X.copy(order='C')
Y = Y.copy(order='C')
payload = {"data_freq_time": X, "data_dm_time": Y}
return payload
def iter_cands(candsfile, select='candcollection'):
""" Iterate through (new style) candsfile and return either
a candidatecollection or canddata.
select defines what kind of object to return:
- 'canddata' is heavier object with image and spectrum (used to make plots)
- 'candcollection' is lighter object with features.
"""
assert select.lower() in ['candcollection', 'canddata']
try:
with open(candsfile, 'rb') as pkl:
while True: # step through all possible segments
try:
candobj = pickle.load(pkl)
if select.lower() == 'candcollection':
yield candobj
elif select.lower() == 'canddata':
yield candobj.canddata
except EOFError:
logger.debug('Reached end of pickle.')
break
except UnicodeDecodeError:
with open(candsfile, 'rb') as pkl:
while True: # step through all possible segments
try:
candobj = pickle.load(pkl, encoding='latin-1')
if select.lower() == 'candcollection':
yield candobj
elif select.lower() == 'canddata':
yield candobj.canddata
except EOFError:
logger.debug('Reached end of pickle.')
break
def iter_noise(noisefile):
""" Iterate through (new style) noisefile and return a list of tuples
for each segment.
"""
try:
with open(noisefile, 'rb') as pkl:
while True: # step through all possible segments
try:
noises = pickle.load(pkl)
for noise in noises:
yield noise
except EOFError:
logger.debug('No more CandCollections.')
break
except UnicodeDecodeError:
with open(noisefile, 'rb') as pkl:
while True: # step through all possible segments
try:
noises = pickle.load(pkl, encoding='latin-1')
for noise in noises:
yield noise
except EOFError:
logger.debug('No more CandCollections.')
break
def visualize_clustering(cc, clusterer):
"""Generate Bokeh summary plot of all the candidates, color coded by
the clusters they are assigned.
"""
import seaborn as sns
from bokeh.plotting import figure, output_file, show, output_notebook
from bokeh.layouts import row,column
from bokeh.models import HoverTool
from bokeh.models.sources import ColumnDataSource
from matplotlib import colors
    # one color per cluster; unclustered (-1) points get grey
    color_palette = sns.color_palette('deep', np.max(cc.array['cluster']) + 1)
    cluster_colors = [color_palette[x] if x >= 0
                      else (0.5, 0.5, 0.5)
                      for x in cc.array['cluster']]
    cluster_member_colors = [sns.desaturate(x, p) for x, p in
                             zip(cluster_colors, clusterer.probabilities_)]
    # convert sns colors to hex for bokeh
    cluster_colors = list(map(colors.rgb2hex, cluster_member_colors))
width = 450
height = 350
alpha = 0.1
output_notebook()
TOOLS = 'crosshair, box_zoom, reset, box_select, tap, hover, wheel_zoom'
candl = cc.candl
candm = cc.candm
npixx = cc.state.npixx
npixy = cc.state.npixy
uvres = cc.state.uvres
dmind = cc.array['dmind']
dtind = cc.array['dtind']
dtarr = cc.state.dtarr
timearr_ind = cc.array['integration'] # time index of all the candidates
time_ind = np.multiply(timearr_ind, np.array(dtarr).take(dtind))
peakx_ind, peaky_ind = cc.state.calcpix(candl, candm, npixx, npixy, uvres)
snr = cc.snrtot
data = dict(l= peakx_ind, m= peaky_ind, dm= dmind, time= time_ind, snr= snr, colors = cluster_colors)
source=ColumnDataSource(data=data)
p = figure(title="m vs l", x_axis_label='l', y_axis_label='m',plot_width=width, plot_height=height, tools = TOOLS)
p.circle(x='l',y='m', size='snr', line_width = 1, color = 'colors', fill_alpha=alpha, source = source) # linewidth=0,
#p.circle(x=df.l,y=df.m, size=5, line_width = 1, color = cluster_colors, fill_alpha=0.5) # linewidth=0,
hover = p.select(dict(type=HoverTool))
hover.tooltips = [("m", "@m"), ("l", "@l"), ("time", "@time"), ("DM", "@dm"), ("SNR", "@snr")]
#p.circle(x,y, size=5, line_width = 1, color = colors)#, , fill_alpha=1) # linewidth=0,
#p.circle(x="x", y="y", source=source, size=7, color="color", line_color=None, fill_alpha="alpha")
p2 = figure(title="DM vs time", x_axis_label='time', y_axis_label='DM',plot_width=width, plot_height=height, tools = TOOLS)
p2.circle(x='time',y='dm', size='snr', line_width = 1, color = 'colors', fill_alpha=alpha, source=source) # linewidth=0,
hover = p2.select(dict(type=HoverTool))
hover.tooltips = [("m", "@m"), ("l", "@l"), ("time", "@time"), ("DM", "@dm"), ("SNR", "@snr")]
p3 = figure(title="DM vs l", x_axis_label='l', y_axis_label='DM',plot_width=width, plot_height=height, tools = TOOLS)
p3.circle(x='l',y='dm', size='snr', line_width = 1, color = 'colors', fill_alpha=alpha, source=source) # linewidth=0,
hover = p3.select(dict(type=HoverTool))
hover.tooltips = [("m", "@m"), ("l", "@l"), ("time", "@time"), ("DM", "@dm"), ("SNR", "@snr")]
p4 = figure(title="time vs l", x_axis_label='l', y_axis_label='time',plot_width=width, plot_height=height, tools = TOOLS)
p4.circle(x='l',y='time', size='snr', line_width = 1, color = 'colors', fill_alpha=alpha, source=source) # linewidth=0,
hover = p4.select(dict(type=HoverTool))
hover.tooltips = [("m", "@m"), ("l", "@l"), ("time", "@time"), ("DM", "@dm"), ("SNR", "@snr")]
# show the results
    show(column(row(p, p2), row(p3, p4)))
def makesummaryplot(cc=None, candsfile=None):
""" Given a candcollection of candsfile, create
bokeh summary plot
TODO: modify to take candcollection
"""
if cc is None and candsfile is not None:
ccs = list(iter_cands(candsfile))
cc = sum(ccs)
if cc is not None and candsfile is None:
candsfile = cc.state.candsfile
if not len(cc):
return 0
time = []
segment = []
integration = []
dmind = []
dtind = []
snr = []
dm = []
dt = []
l1 = []
m1 = []
time.append(cc.candmjd*(24*3600))
segment.append(cc.array['segment'])
integration.append(cc.array['integration'])
dmind.append(cc.array['dmind'])
dtind.append(cc.array['dtind'])
snr.append(cc.snrtot)
dm.append(cc.canddm)
dt.append(cc.canddt)
l1.append(cc.array['l1'])
m1.append(cc.array['m1'])
time = np.concatenate(time)
# time = time - time.min() # TODO: try this, or ensure nonzero time array
segment = np.concatenate(segment)
integration = np.concatenate(integration)
dmind = np.concatenate(dmind)
dtind = np.concatenate(dtind)
snr = np.concatenate(snr)
dm = np.concatenate(dm)
dt = np.concatenate(dt)
l1 = np.concatenate(l1)
m1 = np.concatenate(m1)
keys = ['seg{0}-i{1}-dm{2}-dt{3}'.format(segment[i], integration[i],
dmind[i], dtind[i])
for i in range(len(segment))]
sizes = calcsize(snr)
colors = colorsat(l1, m1)
data = dict(snrs=snr, dm=dm, l1=l1, m1=m1, time=time, sizes=sizes,
colors=colors, keys=keys)
dmt = plotdmt(data, yrange=(min(cc.state.dmarr), max(cc.state.dmarr)))
loc = plotloc(data, extent=radians(cc.state.fieldsize_deg))
combined = Row(dmt, loc, width=950)
htmlfile = candsfile.replace('.pkl', '.html')
output_file(htmlfile)
save(combined)
logger.info("Saved summary plot {0} with {1} candidate{2} in scan"
.format(htmlfile, len(segment), 's'[not len(segment)-1:]))
return len(cc)
def plotdmt(data, circleinds=[], crossinds=[], edgeinds=[],
tools="hover,pan,box_select,wheel_zoom,reset", plot_width=450,
plot_height=400, yrange=None):
""" Make a light-weight dm-time figure """
fields = ['dm', 'time', 'sizes', 'colors', 'snrs', 'keys']
if not len(circleinds):
circleinds = list(range(len(data['snrs'])))
# set ranges
inds = circleinds + crossinds + edgeinds
dm = [data['dm'][i] for i in inds]
if yrange is None:
dm_min = min(min(dm), max(dm)/1.05)
dm_max = max(max(dm), min(dm)*1.05)
else:
assert isinstance(yrange, tuple)
dm_min, dm_max = yrange
t0 = min(data['time'])
t1 = max(data['time'])
data['time'] = data['time'] - t0
time_range = t1-t0
time_min = -0.05*time_range
time_max = 1.05*time_range
source = ColumnDataSource(data=dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in list(data.items())
if key in fields}))
dmt = Figure(plot_width=plot_width, plot_height=plot_height,
toolbar_location="left", x_axis_label='Time (s; from {0})'.format(t0),
y_axis_label='DM (pc/cm3)', x_range=(time_min, time_max),
y_range=(dm_min, dm_max),
output_backend='webgl', tools=tools)
dmt.circle('time', 'dm', size='sizes', fill_color='colors',
line_color=None, fill_alpha=0.2, source=source)
# if crossinds:
# sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
# for (key, value) in list(data.items()) if key in fields}))
# dmt.cross('time', 'dm', size='sizes', fill_color='colors',
# line_alpha=0.3, source=sourceneg)
#
# if edgeinds:
# sourceedge = ColumnDataSource(data=dict({(key, tuple([value[i] for i in edgeinds]))
# for (key, value) in list(data.items()) if key in fields}))
# dmt.circle('time', 'dm', size='sizes', line_color='colors',
# fill_color='colors', line_alpha=0.5, fill_alpha=0.2,
# source=sourceedge)
hover = dmt.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('keys', '@keys')])
return dmt
def plotloc(data, circleinds=[], crossinds=[], edgeinds=[],
tools="hover,pan,box_select,wheel_zoom,reset", plot_width=450,
plot_height=400, extent=None):
"""
Make a light-weight loc figure
extent is half size of (square) lm plot.
"""
fields = ['l1', 'm1', 'sizes', 'colors', 'snrs', 'keys']
if not len(circleinds):
circleinds = list(range(len(data['snrs'])))
# set ranges
inds = circleinds + crossinds + edgeinds
l1 = [data['l1'][i] for i in inds]
m1 = [data['m1'][i] for i in inds]
if extent is None:
extent = max([max(m1), -min(m1), max(l1), -min(l1)])
source = ColumnDataSource(data=dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in list(data.items())
if key in fields}))
loc = Figure(plot_width=plot_width, plot_height=plot_height,
toolbar_location="left", x_axis_label='l1 (rad)',
y_axis_label='m1 (rad)', x_range=(-extent, extent),
y_range=(-extent, extent),
output_backend='webgl', tools=tools)
loc.circle('l1', 'm1', size='sizes', fill_color='colors',
line_color=None, fill_alpha=0.2, source=source)
hover = loc.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('keys', '@keys')])
return loc
def calcsize(values, sizerange=(4, 70), inds=None, plaw=2):
""" Use set of values to calculate symbol size.
values is a list of floats for candidate significance.
inds is an optional list of indexes to use to calculate symbol size.
Scaling of symbol size min max set by sizerange tuple (min, max).
plaw is powerlaw scaling of symbol size from values
"""
if inds:
smax = max([abs(values[i]) for i in inds])
smin = min([abs(values[i]) for i in inds])
else:
smax = max([abs(val) for val in values])
smin = min([abs(val) for val in values])
if smax == smin:
return [sizerange[1]]*len(values)
else:
return [sizerange[0] + sizerange[1] * ((abs(val) - smin)/(smax - smin))**plaw for val in values]
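# Worked example: with the default sizerange=(4, 70) and plaw=2, the largest
# |value| maps to 4 + 70*1**2 = 74 and the smallest to 4.
#
#   calcsize([5.0, 7.5, 10.0])
#   # -> [4.0, 21.5, 74.0]   since ((7.5-5)/(10-5))**2 = 0.25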
def colorsat(l, m):
""" Returns color for given l,m
Designed to look like a color wheel that is more saturated in middle.
"""
lm = np.zeros(len(l), dtype='complex')
lm.real = l
lm.imag = m
red = 0.5*(1+np.cos(np.angle(lm)))
green = 0.5*(1+np.cos(np.angle(lm) + 2*3.14/3))
blue = 0.5*(1+np.cos(np.angle(lm) - 2*3.14/3))
    # cap channels at 255 so each fits two hex digits
    amp = np.where(lm == 0, 255, 255*np.abs(lm)/np.abs(lm).max())
return ["#%02x%02x%02x" % (np.floor(amp[i]*red[i]).astype(int),
np.floor(amp[i]*green[i]).astype(int),
np.floor(amp[i]*blue[i]).astype(int))
for i in range(len(l))]
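# Example (values assumed): hue encodes the position angle of (l, m) and
# brightness encodes radius, so equal-radius points differ only in hue.
#
#   hexcolors = colorsat(np.array([0.0, 1e-3]), np.array([1e-3, 0.0]))
#   # -> two '#rrggbb' strings with different hues, equal brightness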
def calcinds(data, threshold, ignoret=None):
    """ Find indexes for data above (or below) given threshold. """
    inds = []
    for i in range(len(data['time'])):
        snr = data['snrs'][i]
        time = data['time'][i]
        if (threshold >= 0 and snr > threshold) or (threshold < 0 and snr < threshold):
            if ignoret:
                # exclude candidates whose (rounded) time falls in an ignore window
                incl = [t0 for (t0, t1) in ignoret
                        if np.round(time).astype(int) in range(t0, t1)]
                logger.debug('{0} {1}'.format(np.round(time).astype(int), incl))
                if not incl:
                    inds.append(i)
            else:
                inds.append(i)
    return inds
def candplot(canddatalist, snrs=None, outname=''):
""" Takes output of search_thresh (CandData objects) to make
candidate plots.
Expects pipeline state, candidate location, image, and
phased, dedispersed data (cut out in time, dual-pol).
snrs is array for an (optional) SNR histogram plot.
Written by Bridget Andersen and modified by Casey for rfpipe.
"""
if not isinstance(canddatalist, list):
logger.debug('Wrapping solo CandData object')
canddatalist = [canddatalist]
logger.info('Making {0} candidate plots.'.format(len(canddatalist)))
for i in range(len(canddatalist)):
canddata = canddatalist[i]
st = canddata.state
candloc = canddata.loc
im = canddata.image
data = canddata.data
scan = st.metadata.scan
segment, candint, dmind, dtind, beamnum = candloc
# calc source location
# imstd = util.madtostd(im) # outlier resistant
imstd = im.std() # consistent with rfgpu
snrim = im.max()/imstd
l1, m1 = st.pixtolm(np.where(im == im.max()))
logger.info('Plotting candloc {0} with SNR {1:.1f} and image/data shapes: {2}/{3}'
.format(str(candloc), snrim, str(im.shape), str(data.shape)))
# either standard radec or otf phasecenter radec
pc = st.get_pc(segment)
pt_ra, pt_dec = st.get_radec(pc=pc)
src_ra, src_dec = source_location(pt_ra, pt_dec, l1, m1, format='hourstr')
logger.info('Peak (RA, Dec): ({0}, {1})'.format(src_ra, src_dec))
# convert l1 and m1 from radians to arcminutes
l1arcm = np.degrees(l1)*60
m1arcm = np.degrees(m1)*60
# build overall plot
fig = plt.Figure(figsize=(12.75, 8))
# add metadata in subfigure
ax = fig.add_subplot(2, 3, 1, facecolor='white')
# calculate the overall dispersion delay: dd
f1 = st.metadata.freq_orig[0]
f2 = st.metadata.freq_orig[-1]
dd = 4.15*st.dmarr[dmind]*(f1**(-2)-f2**(-2))
# add annotating info
# set spacing and location of the annotating information
start = 1.1
space = 0.07
left = 0.0
ax.text(left, start, st.fileroot, fontname='sans-serif',
transform=ax.transAxes, fontsize='small')
ax.text(left, start-space, 'Peak (arcmin): ('
+ str(np.round(l1arcm, 3)) + ', '
+ str(np.round(m1arcm, 3)) + ')',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
# split the RA and Dec and display in a nice format
ax.text(left, start-2*space, 'Peak (RA, Dec): (' + src_ra + ', ' + src_dec + ')',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-3*space, 'Source: ' + str(st.metadata.source),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-4*space, 'scan: ' + str(scan),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-5*space, 'segment: ' + str(segment),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-6*space, 'integration: ' + str(candint),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-7*space, 'DM = ' + str(st.dmarr[dmind])
+ ' (index ' + str(dmind) + ')',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-8*space, 'dt = '
+ str(np.round(st.inttime*st.dtarr[dtind], 3)*1e3)
+ ' ms' + ' (index ' + str(dtind) + ')',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
ax.text(left, start-9*space, 'disp delay = ' + str(np.round(dd, 1))
+ ' ms',
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
defstr = 'SNR (im'
snrstr = str(np.round(snrim, 1))
if canddata.snrk is not None:
defstr += '/k): '
snrstr += '/' + str(np.round(canddata.snrk, 1))
else:
defstr += '): '
ax.text(left, start-10*space, defstr+snrstr,
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
if canddata.cluster is not None:
label, size = canddata.cluster, canddata.clustersize
ax.text(left, start-11*space, 'Cluster label: {0}'.format(str(label)),
fontname='sans-serif',
transform=ax.transAxes, fontsize='small')
ax.text(left, start-12*space, 'Cluster size: {0}'.format(size),
fontname='sans-serif', transform=ax.transAxes,
fontsize='small')
# set the plot invisible so that it doesn't interfere with annotations
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['right'].set_color('white')
ax.spines['left'].set_color('white')
# plot full dynamic spectra
left, width = 0.75, 0.2*2./3.
bottom, height = 0.2, 0.7
# three rectangles for each panel of the spectrum (RR, RR+LL, LL)
rect_dynsp1 = [left, bottom, width/3., height]
rect_dynsp2 = [left+width/3., bottom, width/3., height]
rect_dynsp3 = [left+2.*width/3., bottom, width/3., height]
rect_lc1 = [left, bottom-0.1, width/3., 0.1]
rect_lc2 = [left+width/3., bottom-0.1, width/3., 0.1]
rect_lc3 = [left+2.*width/3., bottom-0.1, width/3., 0.1]
rect_sp = [left+width, bottom, 0.1*2./3., height]
ax_dynsp1 = fig.add_axes(rect_dynsp1)
# sharey so that axes line up
ax_dynsp2 = fig.add_axes(rect_dynsp2, sharey=ax_dynsp1)
ax_dynsp3 = fig.add_axes(rect_dynsp3, sharey=ax_dynsp1)
# hide RR+LL and LL dynamic spectra y labels to avoid overlap
[label.set_visible(False) for label in ax_dynsp2.get_yticklabels()]
[label.set_visible(False) for label in ax_dynsp3.get_yticklabels()]
ax_sp = fig.add_axes(rect_sp, sharey=ax_dynsp3)
[label.set_visible(False) for label in ax_sp.get_yticklabels()]
ax_lc1 = fig.add_axes(rect_lc1)
ax_lc2 = fig.add_axes(rect_lc2, sharey=ax_lc1)
ax_lc3 = fig.add_axes(rect_lc3, sharey=ax_lc1)
[label.set_visible(False) for label in ax_lc2.get_yticklabels()]
[label.set_visible(False) for label in ax_lc3.get_yticklabels()]
# now actually plot the data
spectra = np.swapaxes(data.real, 0, 1)
dd1 = spectra[..., 0]
dd2 = spectra[..., 0] + spectra[..., 1]
dd3 = spectra[..., 1]
colormap = 'viridis'
logger.debug('{0}'.format(dd1.shape))
logger.debug('{0}'.format(dd2.shape))
logger.debug('{0}'.format(dd3.shape))
_ = ax_dynsp1.imshow(dd1, origin='lower', interpolation='nearest',
aspect='auto', cmap=plt.get_cmap(colormap))
_ = ax_dynsp2.imshow(dd2, origin='lower', interpolation='nearest',
aspect='auto', cmap=plt.get_cmap(colormap))
_ = ax_dynsp3.imshow(dd3, origin='lower', interpolation='nearest',
aspect='auto', cmap=plt.get_cmap(colormap))
ax_dynsp1.set_yticks(list(range(0, len(st.freq), 30)))
ax_dynsp1.set_yticklabels(st.freq[::30].round(3))
ax_dynsp1.set_ylabel('Freq (GHz)')
ax_dynsp1.set_xlabel('RR')
ax_dynsp1.xaxis.set_label_position('top')
ax_dynsp2.set_xlabel('RR+LL')
ax_dynsp2.xaxis.set_label_position('top')
ax_dynsp3.set_xlabel('LL')
ax_dynsp3.xaxis.set_label_position('top')
        # hide x labels so that they don't interfere with the lc plots
        [label.set_visible(False) for label in ax_dynsp1.get_xticklabels()]
        # this one y label was getting in the way
        ax_dynsp1.get_yticklabels()[0].set_visible(False)
# plot stokes I spectrum of the candidate pulse (assume middle bin)
# select stokes I middle bin
spectrum = spectra[:, canddata.integration_rel].mean(axis=1)
ax_sp.plot(spectrum, list(range(len(spectrum))), 'k.')
# plot 0 Jy dotted line
ax_sp.plot(np.zeros(len(spectrum)), list(range(len(spectrum))), 'r:')
xmin, xmax = ax_sp.get_xlim()
ax_sp.set_xticks(np.linspace(xmin, xmax, 3).round(2))
ax_sp.set_xlabel('Flux (Jy)')
# plot mean flux values for each time bin
lc1 = dd1.mean(axis=0)
lc2 = dd2.mean(axis=0)
lc3 = dd3.mean(axis=0)
lenlc = len(data)
ax_lc1.plot(list(range(0, lenlc)), list(lc1)[:lenlc], 'k.')
ax_lc2.plot(list(range(0, lenlc)), list(lc2)[:lenlc], 'k.')
ax_lc3.plot(list(range(0, lenlc)), list(lc3)[:lenlc], 'k.')
# plot 0 Jy dotted line for each plot
ax_lc1.plot(list(range(0, lenlc)), list(np.zeros(lenlc)), 'r:')
ax_lc2.plot(list(range(0, lenlc)), list(np.zeros(lenlc)), 'r:')
ax_lc3.plot(list(range(0, lenlc)), list(np.zeros(lenlc)), 'r:')
ax_lc2.set_xlabel('Integration (rel)')
ax_lc1.set_ylabel('Flux (Jy)')
ax_lc1.set_xticks([0, 0.5*lenlc, lenlc])
# only show the '0' label for one of the plots to avoid messy overlap
ax_lc1.set_xticklabels(['0', str(lenlc//2), str(lenlc)])
ax_lc2.set_xticks([0, 0.5*lenlc, lenlc])
ax_lc2.set_xticklabels(['', str(lenlc//2), str(lenlc)])
ax_lc3.set_xticks([0, 0.5*lenlc, lenlc])
ax_lc3.set_xticklabels(['', str(lenlc//2), str(lenlc)])
ymin, ymax = ax_lc1.get_ylim()
ax_lc1.set_yticks(np.linspace(ymin, ymax, 3).round(2))
# adjust the x tick marks to line up with the lc plots
ax_dynsp1.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp2.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp3.set_xticks([0, 0.5*lenlc, lenlc])
# plot second set of dynamic spectra
left, width = 0.45, 0.1333
bottom, height = 0.1, 0.4
rect_dynsp1 = [left, bottom, width/3., height]
rect_dynsp2 = [left+width/3., bottom, width/3., height]
rect_dynsp3 = [left+2.*width/3., bottom, width/3., height]
rect_sp = [left+width, bottom, 0.1*2./3., height]
ax_dynsp1 = fig.add_axes(rect_dynsp1)
ax_dynsp2 = fig.add_axes(rect_dynsp2, sharey=ax_dynsp1)
ax_dynsp3 = fig.add_axes(rect_dynsp3, sharey=ax_dynsp1)
# hide RR+LL and LL dynamic spectra y labels
[label.set_visible(False) for label in ax_dynsp2.get_yticklabels()]
[label.set_visible(False) for label in ax_dynsp3.get_yticklabels()]
ax_sp = fig.add_axes(rect_sp, sharey=ax_dynsp3)
[label.set_visible(False) for label in ax_sp.get_yticklabels()]
# calculate the channels to average together for SNR=2
n = int((2.*(len(spectra))**0.5/snrim)**2)
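    # illustrative check of the formula above (numbers assumed): with 256
    # channels and snrim = 10, n = int((2*sqrt(256)/10)**2) = 10, since
    # averaging n channels boosts the per-channel SNR (~snrim/sqrt(nchan))
    # by ~sqrt(n) toward the target per-bin SNR of 2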
if n == 0: # if n==0 then don't average
dd1avg = dd1
dd3avg = dd3
else:
        # otherwise, pad the data with zeros so that its length is cleanly
        # divisible by n (makes it easier to average over)
dd1zerotemp = np.concatenate((np.zeros((n-len(spectra) % n,
len(spectra[0])),
dtype=dd1.dtype), dd1), axis=0)
dd3zerotemp = np.concatenate((np.zeros((n-len(spectra) % n,
len(spectra[0])),
dtype=dd3.dtype), dd3), axis=0)
# make masked arrays so appended zeros do not affect average
zeros = np.zeros((len(dd1), len(dd1[0])))
ones = np.ones((n-len(spectra) % n, len(dd1[0])))
masktemp = np.concatenate((ones, zeros), axis=0)
dd1zero = np.ma.masked_array(dd1zerotemp, mask=masktemp)
dd3zero = np.ma.masked_array(dd3zerotemp, mask=masktemp)
# average together the data
dd1avg = np.array([], dtype=dd1.dtype)
for i in range(len(spectra[0])):
temp = dd1zero[:, i].reshape(-1, n)
tempavg = np.reshape(np.mean(temp, axis=1), (len(temp), 1))
# repeats the mean values to create more pixels
# (easier to properly crop when it is finally displayed)
temprep = np.repeat(tempavg, n, axis=0)
if i == 0:
dd1avg = temprep
else:
dd1avg = np.concatenate((dd1avg, temprep), axis=1)
dd3avg = np.array([], dtype=dd3.dtype)
for i in range(len(spectra[0])):
temp = dd3zero[:, i].reshape(-1, n)
tempavg = np.reshape(np.mean(temp, axis=1), (len(temp), 1))
temprep = np.repeat(tempavg, n, axis=0)
if i == 0:
dd3avg = temprep
else:
dd3avg = np.concatenate((dd3avg, temprep), axis=1)
dd2avg = dd1avg + dd3avg # add together to get averaged RR+LL spectrum
colormap = 'viridis'
# if n==0 then don't crop the spectra because no zeroes were appended
if n == 0:
dd1avgcrop = dd1avg
dd2avgcrop = dd2avg
dd3avgcrop = dd3avg
else: # otherwise, crop off the appended zeroes
dd1avgcrop = dd1avg[len(ones):len(dd1avg), :]
dd2avgcrop = dd2avg[len(ones):len(dd2avg), :]
dd3avgcrop = dd3avg[len(ones):len(dd3avg), :]
logger.debug('{0}'.format(dd1avgcrop.shape))
logger.debug('{0}'.format(dd2avgcrop.shape))
logger.debug('{0}'.format(dd3avgcrop.shape))
_ = ax_dynsp1.imshow(dd1avgcrop, origin='lower',
interpolation='nearest', aspect='auto',
cmap=plt.get_cmap(colormap))
_ = ax_dynsp2.imshow(dd2avgcrop, origin='lower',
interpolation='nearest', aspect='auto',
cmap=plt.get_cmap(colormap))
_ = ax_dynsp3.imshow(dd3avgcrop, origin='lower',
interpolation='nearest', aspect='auto',
cmap=plt.get_cmap(colormap))
spw_reffreq = np.sort(st.metadata.spw_reffreq)
# TODO: need to find best chan for label even for overlapping spw
spw_chans = [np.abs(reffreq/1e9-st.freq).argmin() for reffreq in spw_reffreq]
ax_dynsp1.set_yticks(spw_chans)
ax_dynsp1.set_yticklabels((spw_reffreq/1e9).round(3))
ax_dynsp1.set_ylabel('Freq of SPW (GHz)')
ax_dynsp1.set_xlabel('RR')
ax_dynsp1.xaxis.set_label_position('top')
ax_dynsp2.set_xlabel('Integration (rel)')
ax2 = ax_dynsp2.twiny()
ax2.set_xlabel('RR+LL')
[label.set_visible(False) for label in ax2.get_xticklabels()]
ax_dynsp3.set_xlabel('LL')
ax_dynsp3.xaxis.set_label_position('top')
# plot stokes I spectrum of the candidate pulse from middle integration
ax_sp.plot(dd2avgcrop[:, canddata.integration_rel]/2.,
list(range(len(dd2avgcrop))), 'k.')
ax_sp.plot(np.zeros(len(dd2avgcrop)), list(range(len(dd2avgcrop))),
'r:')
xmin, xmax = ax_sp.get_xlim()
ax_sp.set_xticks(np.linspace(xmin, xmax, 3).round(2))
ax_sp.get_xticklabels()[0].set_visible(False)
ax_sp.set_xlabel('Flux (Jy)')
# readjust the x tick marks on the dynamic spectra
ax_dynsp1.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp1.set_xticklabels(['0', str(lenlc//2), str(lenlc)])
ax_dynsp2.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp2.set_xticklabels(['', str(lenlc//2), str(lenlc)])
ax_dynsp3.set_xticks([0, 0.5*lenlc, lenlc])
ax_dynsp3.set_xticklabels(['', str(lenlc//2), str(lenlc)])
# plot the image and zoomed cutout
ax = fig.add_subplot(2, 3, 4)
fov = np.degrees(1./st.uvres)*60.
_ = ax.imshow(im.transpose(), aspect='equal', origin='upper',
interpolation='nearest',
extent=[fov/2, -fov/2, -fov/2, fov/2],
cmap=plt.get_cmap('viridis'), vmin=0,
vmax=0.5*im.max())
ax.set_xlabel('RA Offset (arcmin)')
ax.set_ylabel('Dec Offset (arcmin)')
    # freeze the axis scale before plotting the triangles that mark the location
ax.autoscale(False)
# add markers on the axes at measured position of the candidate
ax.scatter(x=[l1arcm], y=[-fov/2], c='#ffff00', s=60, marker='^',
clip_on=False)
ax.scatter(x=[fov/2], y=[m1arcm], c='#ffff00', s=60, marker='>',
clip_on=False)
# makes it so the axis does not intersect the location triangles
ax.set_frame_on(False)
# add a zoomed cutout image of the candidate (set width at 5*beam)
sbeam = np.mean(st.beamsize_deg)*60
# figure out the location to center the zoomed image on
xratio = len(im[0])/fov # pix/arcmin
yratio = len(im)/fov # pix/arcmin
    mult = 5  # zoomed FOV half-width, in multiples of the synthesized beam
xmin = max(0, int(len(im[0])//2-(m1arcm+sbeam*mult)*xratio))
xmax = int(len(im[0])//2-(m1arcm-sbeam*mult)*xratio)
ymin = max(0, int(len(im)//2-(l1arcm+sbeam*mult)*yratio))
ymax = int(len(im)//2-(l1arcm-sbeam*mult)*yratio)
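    # illustrative numbers (assumed, not from any dataset): a 512-pixel image
    # with fov = 34 arcmin gives xratio ~ 15 pix/arcmin, so the cutout spans
    # roughly +/- mult synthesized beams around the candidate position
    # (l1arcm, m1arcm)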
left, width = 0.231, 0.15
bottom, height = 0.465, 0.15
rect_imcrop = [left, bottom, width, height]
ax_imcrop = fig.add_axes(rect_imcrop)
logger.debug('{0}'.format(im.transpose()[xmin:xmax, ymin:ymax].shape))
logger.debug('{0} {1} {2} {3}'.format(xmin, xmax, ymin, ymax))
_ = ax_imcrop.imshow(im.transpose()[xmin:xmax,ymin:ymax], aspect=1,
origin='upper', interpolation='nearest',
extent=[-1, 1, -1, 1],
cmap=plt.get_cmap('viridis'), vmin=0,
vmax=0.5*im.max())
# setup the axes
ax_imcrop.set_ylabel('Dec (arcmin)')
ax_imcrop.set_xlabel('RA (arcmin)')
ax_imcrop.xaxis.set_label_position('top')
ax_imcrop.xaxis.tick_top()
xlabels = [str(np.round(l1arcm+sbeam*mult/2, 1)), '',
str(np.round(l1arcm, 1)), '',
str(np.round(l1arcm-sbeam*mult/2, 1))]
ylabels = [str(np.round(m1arcm-sbeam*mult/2, 1)), '',
str(np.round(m1arcm, 1)), '',
str(np.round(m1arcm+sbeam*mult/2, 1))]
ax_imcrop.set_xticklabels(xlabels)
ax_imcrop.set_yticklabels(ylabels)
    # align the inset's lowest y tick label so it avoids the full image
ax_imcrop.get_yticklabels()[0].set_verticalalignment('bottom')
# create SNR versus N histogram for the whole observation
# (properties for each candidate in the observation given by prop)
if snrs is not None:
left, width = 0.45, 0.2
bottom, height = 0.6, 0.3
rect_snr = [left, bottom, width, height]
ax_snr = fig.add_axes(rect_snr)
pos_snrs = snrs[snrs >= 0]
neg_snrs = snrs[snrs < 0]
if not len(neg_snrs): # if working with subset and only pos snrs
neg_snrs = pos_snrs
nonegs = True
else:
nonegs = False
        # determine the min and max values of the x axis
        minval = min(min(pos_snrs), min(np.abs(neg_snrs)))
        maxval = max(max(pos_snrs), max(np.abs(neg_snrs)))
# positive SNR bins are in blue
# absolute values of negative SNR bins are taken and plotted as
# red x's on top of positive blue bins for compactness
n, b, patches = ax_snr.hist(pos_snrs, 50, (minval, maxval),
facecolor='blue', zorder=1)
vals, bin_edges = np.histogram(np.abs(neg_snrs), 50,
(minval, maxval))
bins = np.array([(bin_edges[i]+bin_edges[i+1])/2.
for i in range(len(vals))])
vals = np.array(vals)
if not nonegs:
ax_snr.scatter(bins[vals > 0], vals[vals > 0], marker='x',
c='orangered', alpha=1.0, zorder=2)
ax_snr.set_xlabel('SNR')
ax_snr.set_xlim(left=minval-0.2)
ax_snr.set_xlim(right=maxval+0.2)
ax_snr.set_ylabel('N')
ax_snr.set_yscale('log')
# draw vertical line where the candidate SNR is
ax_snr.axvline(x=snrim, linewidth=1, color='y', alpha=0.7)
if not outname:
outname = os.path.join(st.prefs.workdir,
'cands_{0}.png'
.format(canddata.candid))
try:
canvas = FigureCanvasAgg(fig)
canvas.print_figure(outname)
logger.info('Wrote candidate plot to {0}'.format(outname))
except ValueError:
logger.warning('Could not write figure to {0}'.format(outname))
def source_location(pt_ra, pt_dec, l1, m1, format='hourstr'):
""" Takes phase center and src l,m in radians to get ra,dec of source.
Returns sexagesimal string as from astropy.coordinates by default (format='hourstr')
Option to set format='degfloat' for ra/dec in deg as floats.
"""
co0 = coordinates.SkyCoord(pt_ra, pt_dec, unit=units.rad)
co = coordinates.SkyCoord(l1, m1, unit=units.rad,
frame=co0.skyoffset_frame()).transform_to(coordinates.ICRS)
srcra = co.ra.value
srcdec = co.dec.value
# srcra = np.degrees(pt_ra + l1/cos(pt_dec))
# srcdec = np.degrees(pt_dec + m1)
# co = coordinates.SkyCoord(srcra, srcdec, unit=(units.deg, units.deg))
if format == 'hourstr':
ra = str(co.ra.to_string(units.hourangle))
dec = str(co.dec.to_string(units.deg))
elif format == 'degfloat':
ra = co.ra.to_value(units.deg)
dec = co.dec.to_value(units.deg)
    else:
        raise ValueError("format must be 'hourstr' or 'degfloat', "
                         "got {0}".format(format))
return (ra, dec)
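# Hedged usage sketch for source_location (all values illustrative):
#   pt_ra, pt_dec = np.radians(83.6), np.radians(22.0)  # phase center (rad)
#   ra, dec = source_location(pt_ra, pt_dec, l1=1e-4, m1=-2e-4)
# returns sexagesimal strings by default; pass format='degfloat' for floats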
def make_voevent(candcollection, role='test'):
""" Script to generate a VOEvent file from the CandCollection
Takes Candcollection info and writes a .xml file with relevant inforation
VOEvent format based on Petroff et al. 2017 VOEvent Standard for Fast Radio Busrts
See https://github.com/ebpetroff/FRB_VOEvent
written by Justin D. Linford with input from Casey Law, Sarah Burke-Spolaor
and Kshitij Aggarwal.
Returns name of xml file that was created.
"""
import random
import string
logger.info('Making voevents from candcollection with {0} candidates'.format(len(candcollection)))
assert role.lower() in ["test", "observation", "utility"]
    #get canddata separated into useful parts
    st = candcollection.state
    #loop to step through entries in the CandCollection
outnames = []
for n1 in range(len(candcollection.locs)):
candloc = candcollection.locs[n1]
        #get some useful info out of the candidate location
segment = candcollection.segment
candint = candloc[1]
dmind = candloc[2]
dtind = candloc[3]
beamnum = candloc[4]
candid = candcollection.candids[n1]
#Basic data easily accessible from CandCollection
FRB_DM = candcollection.canddm[n1]
        #FRB_DM_err = -999 #TODO: need to figure out how to get DM uncertainty
        #DM uncertainty: From Cordes & McLaughlin 2003, FWHM in S/N vs delDM distribution should be
        #delta-DM ~ 506 * pulse width [ms] * observing freq^3 [GHz] / bandwidth [MHz] (Eq. 14)
        #by definition, FWHM = 2*sqrt(2*ln2)*sigma for a Gaussian
        #DM_err ~ 506/(2*sqrt(2 ln2)) * Width(ms) * ObsFrequency(GHz)^3 / bandwidth(MHz)
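        #illustrative check of the relation above (numbers assumed, not from
        #this dataset): a 5 ms pulse at 1.4 GHz over 256 MHz of bandwidth
        #gives DM_err ~ 506/2.355 * 5 * 1.4**3 / 256 ~ 11.5 pc/cm^3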
FRB_obsmjd = candcollection.candmjd[n1]
FRB_width = candcollection.canddt[n1]*1.0e3 #approximate pulse width in ms
snr1 = candcollection.array['snr1'].flatten()
FRB_SNR = snr1[n1]
l1 = candcollection.candl[n1]
m1 = candcollection.candm[n1]
#get FRB RA & DEC location in degrees
pc = st.get_pc(segment)
pt_ra, pt_dec = st.get_radec(pc=pc)
srcra, srcdec = source_location(pt_ra, pt_dec, l1, m1, format='degfloat')
im_pix_scale = np.degrees((st.npixx*st.uvres)**-1.0) #degrees per pixel
srcloc_err = im_pix_scale #set source location uncertainty to the pixel scale, for now --> assumes source only fills a single pixel
#put location into SkyCoord
FRB_loc = coordinates.SkyCoord(srcra, srcdec, frame='icrs',unit='deg')
#FRB galactic coordinates
FRB_gl = FRB_loc.galactic.l.deg
FRB_gb = FRB_loc.galactic.b.deg
#WHAT fields
#observatory parameters
beam_size = st.beamsize_deg #estimate of beam size in degrees
        #TODO: is st.beamsize_deg an estimate of the primary beam or the restoring beam?
beam_semimaj = max(beam_size) * 3600.0 # TODO: figure out how to get this info
beam_semimin = min(beam_size) * 3600.0 # TODO: figure out how to get this info
beam_rot_ang = -999 # TODO: figure out how to get this info
samp_time = np.round(st.inttime, 3)*1e3 #sampling time in ms
band_width = np.round((st.freq.max() - st.freq.min())*1.0e3,4)#bandwidth in MHz
num_chan = np.sum(st.metadata.spw_nchan)
center_freq = st.metadata.spw_reffreq[int(len(st.metadata.spw_reffreq)/2.0)]/1.0e6 #should be center freq in MHz
num_pol = int(len(st.metadata.pols_orig))
bits_per_sample = 2 #TODO: check that this is always accurate
gain_KJy = -999 #TODO: figure out how to get this info
Tsys_K = -999 #TODO: figure out how to get this info
VLA_backend = 'WIDAR' #may need to find out how to get this info from the data
VLA_beam = beamnum #VLA only has a single beam
#should now have all the necessary numbers to calculate DM uncertainty
FRB_DM_err = (506.0/(2.0*np.sqrt(2.0*np.log(2.0)))) * FRB_width * (center_freq*1.0e-3)**3 / band_width
#now compare beam size to pixel scale
#if the 1/2 beam semi-minor axis is larger than the pixel scale, set the location uncertainty to 1/2 the semi-minor axis
if 0.5*min(beam_size)>im_pix_scale: srcloc_err = 0.5*min(beam_size)
FRB_obstime = time.Time(FRB_obsmjd, format='mjd',scale='utc')
#print(FRB_obstime)
FRB_ISOT = FRB_obstime.isot #convert time to ISOT
#print(FRB_ISOT)
#get the hour of the observation for FRB name
t_pos = FRB_ISOT.find('T')
FRB_ISOT_UTHH = 'UT'+FRB_ISOT[t_pos+1:t_pos+3]
#Importance parameter
FRB_importance = 0.5 #default importance
if candcollection.clustersize is not None:
if candcollection.clustersize[n1]>5. and FRB_SNR>10.:
FRB_importance=1.0
elif candcollection.clustersize[n1]>1. and FRB_SNR>10.:
FRB_importance=0.9
elif candcollection.clustersize[n1]<5.0 and FRB_SNR<10.0:
FRB_importance=0.6
else:
if FRB_SNR>20.:
FRB_importance=0.95
elif FRB_SNR>10.:
FRB_importance=0.55
#build FRB name
FRB_YY = FRB_ISOT[2:4] #last 2 digits of year
FRB_MM = FRB_ISOT[5:7] #2-digit month
FRB_DD = FRB_ISOT[8:10] #2-digit day
FRB_RADEC_str = FRB_loc.to_string('hmsdms') #convert FRB coordinates to HH:MM:SS.SSSS (+/-)DD:MM:SS.SSSS
suffix = ''.join([random.choice(string.ascii_letters) for _ in range(3)])
#FRB_NAME = 'FRB'+FRB_YY+FRB_MM+FRB_DD + '.J' + FRB_RAhh+FRB_RAmm+FRB_RAss + FRB_DECdd+FRB_DECmm+FRB_DECss
FRB_NAME = 'rfcand'+FRB_YY+FRB_MM+FRB_DD + FRB_ISOT_UTHH + suffix
#set filename to FRB_NAME + '_detection.xml'
outname = os.path.join(st.prefs.workdir, FRB_NAME+'.xml')
try:
            #write VOEvent file
            VOEvent_of = open(outname, 'w')
#header
VOEvent_of.write("<?xml version='1.0' encoding='UTF-8'?>"+'\n')
VOEvent_of.write('<voe:VOEvent xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:voe="http://www.ivoa.net/xml/VOEvent/v2.0" xsi:schemaLocation="http://www.ivoa.net/xml/VOEvent/v2.0 http://www.ivoa.net/xml/VOEvent/VOEvent-v2.0.xsd" version="2.0" role="'+role.lower()+'" ivorn="ivo://realfast.io/realfast#'+FRB_NAME+'/'+str(FRB_obsmjd)+'">'+'\n')
#WHO
VOEvent_of.write('\t'+'<Who>'+'\n')
VOEvent_of.write('\t\t'+'<AuthorIVORN>ivo://realfast.io/contact</AuthorIVORN>'+'\n')
VOEvent_of.write('\t\t'+'<Date>'+FRB_ISOT+'</Date>\n')
VOEvent_of.write('\t\t'+'<Author><contactEmail>claw@astro.caltech.edu</contactEmail><contactName>Casey Law</contactName></Author>\n')
VOEvent_of.write('\t</Who>\n')
#What
VOEvent_of.write('\t<What>\n')
VOEvent_of.write('\t\t<Param name="AlertType" dataType="string" value="Preliminary">\n')
VOEvent_of.write('\t\t</Param>\n')
VOEvent_of.write('\t\t<Group name="observatory parameters">\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="beam_semi-major_axis" ucd="instr.beam;pos.errorEllipse;phys.angSize.smajAxis" unit="SS" value="'+str(beam_semimaj)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="beam_semi-minor_axis" ucd="instr.beam;pos.errorEllipse;phys.angSize.sminAxis" unit="SS" value="'+str(beam_semimin)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="beam_rotation_angle" ucd="instr.beam;pos.errorEllipse;instr.offset" unit="Degrees" value="'+str(beam_rot_ang)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="sampling_time" ucd="time.resolution" unit="ms" value="'+str(samp_time)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="bandwidth" ucd="instr.bandwidth" unit="MHz" value="'+str(band_width)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="int" name="nchan" ucd="meta.number;em.freq;em.bin" unit="None" value="'+str(num_chan)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="centre_frequency" ucd="em.freq;instr" unit="MHz" value="'+str(center_freq)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="int" name="npol" unit="None" value="'+str(num_pol)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="int" name="bits_per_sample" unit="None" value="'+str(bits_per_sample)+'"/>\n')
#VOEvent_of.write('\t\t\t<Param dataType="float" name="gain" unit="K/Jy" value="'+str(gain_KJy)+'"/>\n') #FOR NOW: do not report gain
#VOEvent_of.write('\t\t\t<Param dataType="float" name="tsys" ucd="phot.antennaTemp" unit="K" value="'+str(Tsys_K)+'"/>\n') #FOR NOW: do not report Tsys
VOEvent_of.write('\t\t\t<Param name="backend" value="'+VLA_backend+'"/>\n')
#VOEvent_of.write('\t\t\t<Param name="beam" value="'+str(VLA_beam)+'"/><Description>Detection beam number if backend is a multi beam receiver</Description>\n')
VOEvent_of.write('\t\t</Group>\n')
VOEvent_of.write('\t\t<Group name="event parameters">\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="dm" ucd="phys.dispMeasure;em.radio.'+str(int(np.floor(st.freq.min())))+'000-'+str(int(np.ceil(st.freq.max())))+'000MHz" unit="pc/cm^3" value="'+str(FRB_DM)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="dm_error" ucd="stat.error;phys.dispMeasure" unit="pc/cm^3" value="'+str(int(np.ceil(FRB_DM_err)))+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="width" ucd="time.duration;src.var.pulse" unit="ms" value="'+str(FRB_width)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="snr" ucd="stat.snr" unit="None" value="'+str(FRB_SNR)+'"/>\n')
#VOEvent_of.write('\t\t\t<Param dataType="float" name="flux" ucd="phot.flux" unit="Jy" value="'+str(FRB_flux)+'"/>\n') #FOR NOW: do not report flux density. We do not have good enough absolute flux density calibration
VOEvent_of.write('\t\t\t<Param dataType="float" name="gl" ucd="pos.galactic.lon" unit="Degrees" value="'+str(FRB_gl)+'"/>\n')
VOEvent_of.write('\t\t\t<Param dataType="float" name="gb" ucd="pos.galactic.lat" unit="Degrees" value="'+str(FRB_gb)+'"/>\n')
VOEvent_of.write('\t\t</Group>\n')
VOEvent_of.write('\t\t<Group name="advanced parameters">\n')
#VOEvent_of.write('\t\t\t<Param dataType="float" name="MW_dm_limit" unit="pc/cm^3" value="34.9"/>\n')
#VOEvent_of.write('\t\t\t\t</Param>\n')
VOEvent_of.write('\t\t</Group>\n')
VOEvent_of.write('\t</What>\n')
#WhereWhen
VOEvent_of.write('\t<WhereWhen>\n')
VOEvent_of.write('\t\t<ObsDataLocation>\n')
VOEvent_of.write('\t\t\t<ObservatoryLocation id="VLA">\n')
VOEvent_of.write('\t\t\t<AstroCoordSystem id="UTC-GEOD-TOPO"/>\n')
VOEvent_of.write('\t\t\t<AstroCoords coord_system_id="UTC-GEOD-TOPO">\n')
VOEvent_of.write('\t\t\t<Position3D unit="deg-deg-m">\n')
VOEvent_of.write('\t\t\t <Value3>\n')
VOEvent_of.write('\t\t\t <C1>107.6184</C1>\n')
VOEvent_of.write('\t\t\t <C2>34.0784</C2>\n')
VOEvent_of.write('\t\t\t <C3>2124.456</C3>\n')
VOEvent_of.write('\t\t\t </Value3>\n')
VOEvent_of.write('\t\t\t</Position3D>\n')
VOEvent_of.write('\t\t\t</AstroCoords>\n')
VOEvent_of.write('\t\t\t</ObservatoryLocation>\n')
VOEvent_of.write('\t\t\t<ObservationLocation>\n')
VOEvent_of.write('\t\t\t\t<AstroCoordSystem id="UTC-FK5-GEO"/><AstroCoords coord_system_id="UTC-FK5-GEO">\n')
VOEvent_of.write('\t\t\t\t<Time unit="s"><TimeInstant><ISOTime>'+FRB_ISOT+'</ISOTime></TimeInstant></Time>\n')
VOEvent_of.write('\t\t\t\t<Position2D unit="deg"><Name1>RA</Name1><Name2>Dec</Name2><Value2><C1>'+str(srcra)+'</C1><C2>'+str(srcdec)+'</C2></Value2><Error2Radius>'+str(srcloc_err)+'</Error2Radius></Position2D>\n')
VOEvent_of.write('\t\t\t\t</AstroCoords>\n')
VOEvent_of.write('\t\t\t</ObservationLocation>\n')
VOEvent_of.write('\t\t</ObsDataLocation>\n')
VOEvent_of.write('\t</WhereWhen>\n')
#How
VOEvent_of.write('\t<How>\n')
            VOEvent_of.write('\t\t<Description>Discovered by realfast as candId {0}</Description>\n'.format(candid))
            VOEvent_of.write('\t\t<Reference uri="http://realfast.io"/>\n')
            VOEvent_of.write('\t</How>\n')
#Why
VOEvent_of.write('\t<Why importance="'+str(FRB_importance)+'">\n')
VOEvent_of.write('\t\t\t<Concept></Concept><Description>Detection of a new FRB by RealFast</Description>\n')
VOEvent_of.write('\t\t<Name>'+FRB_NAME+'</Name>\n')
VOEvent_of.write('\t</Why>\n')
VOEvent_of.write('</voe:VOEvent>\n')
#close file
VOEvent_of.close()
logger.info('Wrote VOEvent file to {0}'.format(outname))
outnames.append(outname)
except ValueError:
            logger.warning('Could not write VOEvent file {0}'.format(outname))
return outnames
def atel_plot(cd, ticksize=15, labelsize=15, show=False, save=True):
"""Generates a 3 panel plot of the candidate with time-series,
spectogram and the image. Can be used for ATels.
"""
from matplotlib import gridspec
segment, candint, dmind, dtind, beamnum = cd.loc
st = cd.state
scanid = cd.state.metadata.scanId
timewindow = cd.data.shape[0] #st.prefs.timewindow
width_m = st.dtarr[dtind]
tsamp = st.inttime*width_m
dm = st.dmarr[dmind]
ft_dedisp = np.flip((cd.data.real.sum(axis=2).T), axis=0)
nf, nt = np.shape(ft_dedisp)
chan_freqs = np.flip(st.freq*1000, axis=0) # from high to low, MHz
roll_to_center = nt//2 - cd.integration_rel
ft_dedisp = np.roll(ft_dedisp, shift=roll_to_center, axis=1)
im = cd.image
l1, m1 = st.pixtolm(np.where(im == im.max()))
ts = np.arange(timewindow)*tsamp
fov = np.degrees(1./st.uvres)*60.
l1arcm = np.degrees(l1)*60
m1arcm = np.degrees(m1)*60
# either standard radec or otf phasecenter radec
if st.otfcorrections is not None:
pc = st.get_pc(segment)
pt_ra_deg, pt_dec_deg = st.get_radec(pc=pc)
pt_ra = np.radians(pt_ra_deg)
pt_dec = np.radians(pt_dec_deg)
else:
pt_ra, pt_dec = st.metadata.radec
plt.figure(figsize=(14,9))
gs = gridspec.GridSpec(2, 2, width_ratios=[1, 1], height_ratios=[1, 1], wspace=0.2, hspace=0.12)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[:, 1])
ax1.plot(ts, ft_dedisp.sum(0), c='k')
ax1.set_ylabel('Flux (Arb. units)', fontsize=labelsize)
    ax1.tick_params(axis='both', labelsize=ticksize)
ax2.imshow(ft_dedisp, aspect='auto', extent=[ts[0], ts[-1], np.min(chan_freqs), np.max(chan_freqs)])
ax2.set_ylabel('Freq (MHz)', fontsize=labelsize)
ax2.set_xlabel('Time (s)', fontsize=labelsize)
    ax2.tick_params(axis='both', labelsize=ticksize)
sbeam = np.mean(st.beamsize_deg)*60
# figure out the location to center the zoomed image on
xratio = len(im[0])/fov # pix/arcmin
yratio = len(im)/fov # pix/arcmin
    mult = 40  # zoomed FOV half-width, in multiples of the synthesized beam
xmin = max(0, int(len(im[0])//2-(m1arcm+sbeam*mult)*xratio))
xmax = int(len(im[0])//2-(m1arcm-sbeam*mult)*xratio)
ymin = max(0, int(len(im)//2-(l1arcm+sbeam*mult)*yratio))
ymax = int(len(im)//2-(l1arcm-sbeam*mult)*yratio)
left, width = 0.231, 0.15
bottom, height = 0.465, 0.15
_ = ax3.imshow(im.transpose()[xmin:xmax,ymin:ymax], aspect=1,
origin='upper', interpolation='nearest',
extent=[-1, 1, -1, 1],
cmap=plt.get_cmap('viridis'), vmin=0,
vmax=0.5*im.max())
# setup the axes
ax3.set_ylabel('Dec', fontsize=labelsize)
ax3.set_xlabel('RA', fontsize=labelsize)
ax3.xaxis.set_label_position('top')
ax3.yaxis.set_label_position("left")
ax3.yaxis.tick_right()
src_ra, src_dec = source_location(pt_ra, pt_dec, l1, m1)
src_ra_high, _ = source_location(pt_ra, pt_dec, (l1arcm+sbeam*mult/2)/(57.3*60), m1)
src_ra_low, _ = source_location(pt_ra, pt_dec, (l1arcm-sbeam*mult/2)/(57.3*60), m1)
_, src_dec_high = source_location(pt_ra, pt_dec, l1, (m1arcm+sbeam*mult/2)/(57.3*60))
_, src_dec_low = source_location(pt_ra, pt_dec, l1, (m1arcm-sbeam*mult/2)/(57.3*60))
    xlabels = [src_ra_high, '', '', '', src_ra, '', '', '', src_ra_low]
    ylabels = [src_dec_low, '', '', '', src_dec, '', '', '', src_dec_high]
    ax3.set_xticklabels(xlabels, fontsize=ticksize)
    ax3.set_yticklabels(ylabels, fontsize=ticksize)
ax3.get_yticklabels()[0].set_verticalalignment('bottom')
if save:
plt.savefig(f'{cd.candid}_atel.pdf', bbox_inches='tight')
plt.savefig(f'{cd.candid}_atel.png', bbox_inches='tight')
if show:
plt.show()
|
"""
ASGI config for the embark project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
# DJANGO_SETTINGS_MODULE must be set before get_asgi_application() is called,
# otherwise Django raises ImproperlyConfigured.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'embark.settings')
from django.core.asgi import get_asgi_application
asgi_application = get_asgi_application()
# pylint: disable=wrong-import-position
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter
from channels.routing import URLRouter
from .routing import ws_urlpatterns
# pylint: enable=wrong-import-position
application = ProtocolTypeRouter({
'http': asgi_application,
'websocket': AuthMiddlewareStack(URLRouter(ws_urlpatterns))
})
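# For reference, ws_urlpatterns imported from .routing is expected to be a
# list of websocket URL routes mapped to ASGI consumers, roughly of the form
# (path and consumer names here are illustrative, not from this project):
#   ws_urlpatterns = [path('ws/progress/', ProgressConsumer.as_asgi())]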
|
"""The Intent integration."""
import voluptuous as vol
from homeassistant.components import http
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.core import DOMAIN as HA_DOMAIN, HomeAssistant
from homeassistant.helpers import config_validation as cv, integration_platform, intent
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Intent component."""
hass.http.register_view(IntentHandleView())
await integration_platform.async_process_integration_platforms(
hass, DOMAIN, _async_process_intent
)
intent.async_register(
hass,
intent.ServiceIntentHandler(
intent.INTENT_TURN_ON, HA_DOMAIN, SERVICE_TURN_ON, "Turned {} on"
),
)
intent.async_register(
hass,
intent.ServiceIntentHandler(
intent.INTENT_TURN_OFF, HA_DOMAIN, SERVICE_TURN_OFF, "Turned {} off"
),
)
intent.async_register(
hass,
intent.ServiceIntentHandler(
intent.INTENT_TOGGLE, HA_DOMAIN, SERVICE_TOGGLE, "Toggled {}"
),
)
return True
async def _async_process_intent(hass: HomeAssistant, domain: str, platform):
"""Process the intents of an integration."""
await platform.async_setup_intents(hass)
class IntentHandleView(http.HomeAssistantView):
"""View to handle intents from JSON."""
url = "/api/intent/handle"
name = "api:intent:handle"
@RequestDataValidator(
vol.Schema(
{
vol.Required("name"): cv.string,
vol.Optional("data"): vol.Schema({cv.string: object}),
}
)
)
async def post(self, request, data):
"""Handle intent with name/data."""
hass = request.app["hass"]
try:
intent_name = data["name"]
slots = {
key: {"value": value} for key, value in data.get("data", {}).items()
}
intent_result = await intent.async_handle(
hass, DOMAIN, intent_name, slots, "", self.context(request)
)
except intent.IntentHandleError as err:
intent_result = intent.IntentResponse()
intent_result.async_set_speech(str(err))
if intent_result is None:
intent_result = intent.IntentResponse()
intent_result.async_set_speech("Sorry, I couldn't handle that")
return self.json(intent_result)
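# Example request body this view accepts (values illustrative; intent names
# come from homeassistant.helpers.intent, e.g. INTENT_TURN_ON == "HassTurnOn"):
#   POST /api/intent/handle
#   {"name": "HassTurnOn", "data": {"name": "kitchen light"}}
# The flat "data" dict is re-shaped into {"name": {"value": ...}} slots before
# being handed to intent.async_handle.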
|
class Groups:
    def __init__(self, id=0, name="", members=None):
        # instance attributes instead of shared mutable class attributes
        self.id = id
        self.name = name
        self.members = members if members is not None else []
    def serialize(self):
        # note: the '$' markers look like leftover JS template-literal
        # syntax; kept as-is in case existing consumers expect them
        return f'[{self.id},${self.name},${self.members}]'
    def serialize_normal(self):
        return f'[{self.id},{self.name},{self.members}]'
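# Hedged usage sketch:
#   g = Groups(1, 'admins', ['alice', 'bob'])
#   g.serialize_normal()  # -> "[1,admins,['alice', 'bob']]"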
|
#!/usr/bin/env python3
from test_definition_base import CommonTestTraits
from test_definition_base import TestParamsProviderBase
import utils
class TestTraits(CommonTestTraits):
@property
def operator_ir_type_string(self):
return 'ConvolutionBackpropData'
@property
def test_params_provider_class(self):
return TestParamsProvider
@property
def cpp_test_filename(self):
return 'convolution_backprop_data.cpp'
@property
def template_filename(self):
return 'convolution_backprop_data.cpp.jinja2'
@property
def default_cpp_test_class_name(self):
return None
class TestParamsProvider(TestParamsProviderBase):
def __init__(self, list_of_equal_operators, test_traits):
super().__init__(list_of_equal_operators, test_traits)
@property
def cpp_test_class_name(self):
if self.has_outputshape_input:
return 'ConvolutionBackpropDataExtendedLayerTest'
else:
return 'ConvolutionBackpropDataLayerTest'
@property
def cpp_list_input_shape(self):
return utils.cpp_list_from_tuple_of_ints(self.op.input_ports[0].shape)
@property
def has_outputshape_input(self):
return len(self.op.input_ports) == 3
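    # Per the operator spec, ConvolutionBackpropData may take an optional
    # third input holding the explicit spatial output shape; its presence is
    # what selects the "Extended" C++ test class in cpp_test_class_name above.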
@property
def cpp_list_output_shape(self):
for op in self.equal_operators:
try:
shape_dims_list = op.input_ports[2].data_from_connected_const_operator()
assert len(shape_dims_list) == self.op.input_ports[2].shape[0]
return utils.cpp_list_from_tuple_of_ints(tuple(shape_dims_list))
except (TypeError, FileNotFoundError):
pass
raise ValueError("Test for {}:\n"
"\tFailed to determine the content of input tensor at index 2: "
"connected operator is not a constant, "
"IR model '*.bin' file reading error, "
"etc.".format(str(self.equal_operator_ids)))
@property
def cpp_list_kernel(self):
return utils.cpp_list_from_tuple_of_ints(self.op.input_ports[1].shape[2:])
@property
def cpp_list_strides(self):
return utils.cpp_list_from_tuple_of_ints(self.op.data['strides'].as_tuple_of_int())
@property
def cpp_list_pads_begin(self):
return utils.cpp_list_from_tuple_of_ints(self.op.data['pads_begin'].as_tuple_of_int())
@property
def cpp_list_pads_end(self):
return utils.cpp_list_from_tuple_of_ints(self.op.data['pads_end'].as_tuple_of_int())
@property
def cpp_list_dilations(self):
return utils.cpp_list_from_tuple_of_ints(self.op.data['dilations'].as_tuple_of_int())
@property
def cpp_num_output_channels(self):
return str(self.op.output_ports[0].shape[1])
@property
def cpp_auto_pad(self):
return utils.cpp_ngraph_autopad(self.op.data.get('auto_pad').as_str())
|
# -*- coding: utf-8 -*-
"""Supports SuperMAG ground magnetometer measurements and SML/SMU indices.
Downloading is supported; please follow their rules of the road:
http://supermag.jhuapl.edu/info/?page=rulesoftheroad
Parameters
----------
platform : string
'supermag'
name : string
'magnetometer'
tag : string
Select {'indices', '', 'all', 'stations'}
Note
----
Files must be downloaded from the website, and are freely available after
registration.
This material is based upon work supported by the
National Science Foundation under Grant Number 1259508.
Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views
of the National Science Foundation.
Warnings
--------
- Currently no cleaning routine, though the SuperMAG description indicates that
these products are expected to be good. More information about the processing
is available at the SuperMAG website.
- Module not written by the SuperMAG team.
Custom Functions
-----------------
"""
from __future__ import print_function, absolute_import
import pandas as pds
import numpy as np
from os import path
import pysat
platform = 'supermag'
name = 'magnetometer'
tags = {'indices':'SMU and SML indices',
'':'magnetometer measurements',
'all':'magnetometer measurements and indices',
'stations':'magnetometer stations'}
sat_ids = {'':tags.keys()}
test_dates = {'':{kk:pysat.datetime(2009,1,1) for kk in tags.keys()}}
def init(self):
print("When using this data please acknowledge the SuperMAG collaboration "
+ "according to the request outlined in the metadata attribute "
+ "'acknowledgements'")
return
def list_files(tag='', sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen SuperMAG data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements). (default='')
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
if format_str is None and data_path is not None:
file_base = 'supermag_magnetometer'
if tag == "indices" or tag == "all":
file_base += '_all' # Can't just download indices
if tag == "indices":
psplit = path.split(data_path[:-1])
data_path = path.join(psplit[0], "all", "")
if tag == "stations":
min_fmt = '_'.join([file_base, '{year:4d}.???'])
doff = pds.DateOffset(years=1)
else:
min_fmt = '_'.join([file_base, '{year:4d}{month:02d}{day:02d}.???'])
doff = pds.DateOffset(days=1)
files = pysat.Files.from_os(data_path=data_path, format_str=min_fmt)
    # station files are once per year but we need to
    # create the illusion there is a file per day
if not files.empty:
files = files.sort_index()
if tag == "stations":
orig_files = files.copy()
new_files = []
# Assigns the validity of each station file to be 1 year
                for orig in orig_files.items():
                    files.loc[orig[0] + doff - pds.DateOffset(days=1)] = orig[1]
                    files = files.sort_index()
                    new_files.append(files.loc[orig[0]: orig[0] + doff -
                                               pds.DateOffset(days=1)].asfreq('D', method='pad'))
files = pds.concat(new_files)
files = files.dropna()
files = files.sort_index()
# add the date to the filename
files = files + '_' + files.index.strftime('%Y-%m-%d')
return files
elif format_str is None:
estr = 'A directory must be passed to the loading routine for SuperMAG'
        raise ValueError(estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str)
def load(fnames, tag='', sat_id=None):
""" Load the SuperMAG files
Parameters
-----------
fnames : (list)
List of filenames
tag : (str or NoneType)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements). (default='')
sat_id : (str or NoneType)
Satellite ID for constellations, not used. (default=None)
Returns
--------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
"""
# Ensure that there are files to load
    if len(fnames) <= 0:
return pysat.DataFrame(None), pysat.Meta(None)
# Ensure that the files are in a list
if isinstance(fnames, str):
fnames = [fnames]
# Initialise the output data
data = pds.DataFrame()
baseline = list()
# Cycle through the files
for fname in fnames:
fname = fname[:-11] # Remove date index from end of filename
file_type = path.splitext(fname)[1].lower()
# Open and load the files for each file type
if file_type == ".csv":
if tag != "indices":
temp = load_csv_data(fname, tag)
else:
temp, bline = load_ascii_data(fname, tag)
if bline is not None:
baseline.append(bline)
# Save the loaded data in the output data structure
if len(temp.columns) > 0:
data = pds.concat([data, temp], axis=0)
del temp
# If data was loaded, update the meta data
if len(data.columns) > 0:
meta = pysat.Meta()
for cc in data.columns:
meta[cc] = update_smag_metadata(cc)
meta.info = {'baseline':format_baseline_list(baseline)}
else:
meta = pysat.Meta(None)
return data, meta
def load_csv_data(fname, tag):
"""Load data from a comma separated SuperMAG file
Parameters
------------
fname : (str)
CSV SuperMAG file name
tag : (str)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements).
Returns
--------
data : (pandas.DataFrame)
Pandas DataFrame
"""
import re
if tag == "stations":
# Because there may be multiple operators, the default pandas reader
# cannot be used.
ddict = dict()
dkeys = list()
date_list = list()
# Open and read the file
with open(fname, "r") as fopen:
dtime = pds.datetime.strptime(fname.split("_")[-1].split(".")[0],
"%Y")
for fline in fopen.readlines():
sline = [ll for ll in re.split(r'[,\n]+', fline) if len(ll) > 0]
if len(ddict.items()) == 0:
for kk in sline:
kk = re.sub("-", "_", kk)
ddict[kk] = list()
dkeys.append(kk)
else:
date_list.append(dtime)
for i,ll in enumerate(sline):
if i >= 1 and i <= 4:
ddict[dkeys[i]].append(float(ll))
elif i == 6:
ddict[dkeys[i]].append(int(ll))
elif i < len(dkeys):
ddict[dkeys[i]].append(ll)
else:
ddict[dkeys[-1]][-1] += " {:s}".format(ll)
# Create a data frame for this file
data = pds.DataFrame(ddict, index=date_list, columns=ddict.keys())
else:
# Define the date parser
def parse_smag_date(dd):
return pysat.datetime.strptime(dd, "%Y-%m-%d %H:%M:%S")
# Load the file into a data frame
data = pds.read_csv(fname, parse_dates={'datetime':[0]},
date_parser=parse_smag_date, index_col='datetime')
return data
def load_ascii_data(fname, tag):
"""Load data from a self-documenting ASCII SuperMAG file
Parameters
------------
fname : (str)
ASCII SuperMAG filename
tag : (str)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements).
Returns
--------
data : (pandas.DataFrame)
Pandas DataFrame
baseline : (list)
List of strings denoting the presence of a standard and file-specific
        baselines for each file. None if not present or not applicable.
"""
import re
ndata = {"indices":2, "":4, "all":4, "stations":8}
dkeys = {'stations':list(), '':['IAGA', 'N', 'E', 'Z']}
data = pds.DataFrame(None)
baseline = None
# Ensure that the tag indicates a type of data we know how to load
    if tag not in ndata.keys():
return data, baseline
# Read in the text data, processing the header, indices, and
# magnetometer data (as desired)
with open(fname, "r") as fopen:
# Set the processing flags
hflag = True # header lines
pflag = False # parameter line
dflag = False if tag == "stations" else True # date line
snum = 0 # number of magnetometer stations
ddict = dict()
date_list = list()
if tag == "stations":
dtime = pds.datetime.strptime(fname.split("_")[-1].split(".")[0],
"%Y")
for fline in fopen.readlines():
# Cycle past the header
line_len = len(fline)
if hflag:
if pflag:
pflag = False # Unset the flag
if fline.find("-mlt") > 0:
ndata[''] += 2
dkeys[''].extend(['MLT', 'MLAT'])
if fline.find("-sza") > 0:
ndata[''] += 1
dkeys[''].append('SZA')
if fline.find("-decl") > 0:
ndata[''] += 1
dkeys[''].append('IGRF_DECL')
if tag == "indices" and fline.find("-envelope") < 0:
# Indices not included in this file
break
# Save the baseline information
lsplit = fline.split()
idelta = lsplit.index('-delta') + 1
ibase = lsplit.index('-baseline') + 1
isd = lsplit.index('-sd') + 1
ist = lsplit.index('-st') + 1
iex = lsplit.index('-ex') + 1
baseline = " ".join([lsplit[ibase], lsplit[idelta],
lsplit[isd], lsplit[ist], lsplit[iex]])
if fline.find("Selected parameters:") >= 0:
pflag = True
if fline.count("=") == line_len - 1 and line_len > 2:
hflag = False
else:
# Load the desired data
lsplit = [ll for ll in re.split(r'[\t\n]+', fline)
if len(ll) > 0]
if dflag:
dflag = False # Unset the date flag
dstring = " ".join(lsplit[:6])
dtime = pysat.datetime.strptime(dstring,
"%Y %m %d %H %M %S")
snum = int(lsplit[6]) # Set the number of stations
# Load the times
if tag == "indices":
date_list.append(dtime)
else:
date_list.extend([dtime for i in range(snum)])
elif len(lsplit) == ndata['indices']:
                if tag != '':
if lsplit[0] not in ddict.keys():
ddict[lsplit[0]] = list()
if tag == "indices":
ddict[lsplit[0]].append(int(lsplit[1]))
else:
# This works because indices occur before
# magnetometer measurements
ddict[lsplit[0]].extend([int(lsplit[1])
for i in range(snum)])
else:
if tag == "stations" and len(lsplit) >= ndata[tag]:
if len(dkeys[tag]) == 0:
# Station files include column names and data files
# do not. Read in the column names here
for ll in lsplit:
ll = re.sub("-", "_", ll)
dkeys[tag].append(ll)
ddict[ll] = list()
else:
# Because stations can have multiple operators,
# ndata supplies the minimum number of columns
date_list.append(dtime)
for i,ll in enumerate(lsplit):
if i >= 1 and i <= 4:
ddict[dkeys[tag][i]].append(float(ll))
elif i == 6:
ddict[dkeys[tag][i]].append(int(ll))
elif i < len(dkeys[tag]):
ddict[dkeys[tag][i]].append(ll)
else:
ddict[dkeys[tag][-1]][-1] += \
" {:s}".format(ll)
elif len(lsplit) == ndata['']:
snum -= 1 # Mark the ingestion of a station
if tag != "indices":
if len(ddict.keys()) < ndata['']:
for kk in dkeys['']:
ddict[kk] = list()
for i,kk in enumerate(dkeys['']):
if i == 0:
ddict[kk].append(lsplit[i])
else:
ddict[kk].append(float(lsplit[i]))
if tag != "stations" and snum == 0 and len(ddict.items()) >= 2:
# The previous value was the last value, prepare for
# next block
dflag = True
# Create a data frame for this file
    data = pds.DataFrame(ddict, index=date_list, columns=ddict.keys())
return data, baseline
def update_smag_metadata(col_name):
"""Update SuperMAG metadata
Parameters
-----------
col_name : (str)
Data column name
Returns
--------
col_dict : (dict)
Dictionary of strings detailing the units and long-form name of the data
"""
smag_units = {'IAGA':'none', 'N':'nT', 'E':'nT', 'Z':'nT', 'MLT':'hours',
'MLAT':'degrees', 'SZA':'degrees', 'IGRF_DECL':'degrees',
'SMU':'none', 'SML':'none', 'datetime':'YYYY-MM-DD HH:MM:SS',
'GEOLON':'degrees', 'GEOLAT':'degrees', 'AACGMLON':'degrees',
'AACGMLAT':'degrees', 'STATION_NAME':'none',
'OPERATOR_NUM':'none', 'OPERATORS':'none'}
smag_name = {'IAGA':'Station Code', 'N':'B along local magnetic North',
'E':'B along local magnetic East', 'Z':'B vertically downward',
'MLT':'Magnetic Local Time', 'MLAT':'Magnetic Latitude',
'SZA':'Solar Zenith Angle',
'IGRF_DECL':'IGRF magnetic declination',
'SMU': 'Maximum eastward auroral electrojets strength.\n'
'Upper envelope of N-component for stations between 40 and '
'80 degrees magnetic north.',
'SML':'Maximum westward auroral electrojets strength.\n'
'Lower envelope of N-component for stations between 40 and 80'
' degrees magnetic north.', 'datetime':'UT date and time',
'GEOLON':'geographic longitude',
'GEOLAT':'geographic latitude',
'AACGMLON':'Altitude-Adjusted Corrected Geomagnetic longitude',
'AACGMLAT':'Altitude-Adjusted Corrected Geomagnetic latitude',
'STATION_NAME':'Long form station name',
'OPERATOR_NUM':'Number of station operators',
'OPERATORS':'Station operator name(s)',}
ackn = "When using this data please include the following reference:\n"
ackn += "Gjerloev, J. W., The SuperMAG data processing technique, "
ackn += "Geophys. Res., 117, A09213, doi:10.1029/2012JA017683, 2012\n\n"
ackn += "For publications and presentations, please include the following"
ackn += "acknowledgement:\nFor the ground magnetometer data we gratefully "
ackn += "acknowledge: Intermagnet; USGS, Jeffrey J. Love; CARISMA, PI Ian "
ackn += "Mann; CANMOS; The S-RAMP Database, PI K. Yumoto and Dr. K. "
ackn += "Shiokawa; The SPIDR database; AARI, PI Oleg Troshichev; The "
ackn += "MACCS program, PI M. Engebretson, Geomagnetism Unit of the "
ackn += "Geological Survey of Canada; GIMA; MEASURE, UCLA IGPP and Florida"
ackn += " Institute of Technology; SAMBA, PI Eftyhia Zesta; 210 Chain, PI "
ackn += "K. Yumoto; SAMNET, PI Farideh Honary; The institutes who maintain"
ackn += " the IMAGE magnetometer array, PI Eija Tanskanen; PENGUIN; AUTUMN,"
ackn += " PI Martin Connors; DTU Space, PI Dr. Rico Behlke; South Pole and "
ackn += " McMurdo Magnetometer, PI's Louis J. Lanzarotti and Alan T. "
ackn += "Weatherwax; ICESTAR; RAPIDMAG; PENGUIn; British Artarctic Survey; "
ackn += "McMac, PI Dr. Peter Chi; BGS, PI Dr. Susan Macmillan; Pushkov "
ackn += "Institute of Terrestrial Magnetism, Ionosphere and Radio Wave "
ackn += "Propagation (IZMIRAN); GFZ, PI Dr. Juergen Matzka; MFGI, PI B. "
ackn += "Heilig; IGFPAS, PI J. Reda; University of L’Aquila, PI M. "
ackn += "Vellante; BCMT, V. Lesur and A. Chambodut; Data obtained in "
ackn += "cooperation with Geoscience Australia, PI Marina Costelloe; "
ackn += "SuperMAG, PI Jesper W. Gjerloev."
col_dict = {'units':smag_units[col_name], 'long_name':smag_name[col_name],
'acknowledgements':ackn}
return col_dict
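# Example, taken from the tables above: update_smag_metadata('MLT') returns
# {'units': 'hours', 'long_name': 'Magnetic Local Time',
#  'acknowledgements': <the SuperMAG acknowledgement text>}.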
def format_baseline_list(baseline_list):
"""Format the list of baseline information from the loaded files into a
cohesive, informative string
Parameters
------------
baseline_list : (list)
List of strings specifying the baseline information for each
SuperMAG file
Returns
---------
base_string : (str)
        Single string containing the relevant data
"""
uniq_base = dict()
uniq_delta = dict()
for bline in baseline_list:
bsplit = bline.split()
bdate = " ".join(bsplit[2:])
if bsplit[0] not in uniq_base.keys():
uniq_base[bsplit[0]] = ""
if bsplit[1] not in uniq_delta.keys():
uniq_delta[bsplit[1]] = ""
uniq_base[bsplit[0]] += "{:s}, ".format(bdate)
uniq_delta[bsplit[1]] += "{:s}, ".format(bdate)
    if len(uniq_base.items()) == 1:
        base_string = "Baseline {:s}".format(list(uniq_base.keys())[0])
    elif len(uniq_base.items()) > 1:
        base_string = "Baseline "
        for i,kk in enumerate(uniq_base.keys()):
            if i == 1:
                base_string += "{:s}: {:s}".format(kk, uniq_base[kk][:-2])
            else:
                base_string += " {:s}: {:s}".format(kk,
                                                    uniq_base[kk][:-2])
    else:
        base_string = "Baseline unknown"
    if len(uniq_delta.items()) == 1:
        base_string += "\nDelta {:s}".format(list(uniq_delta.keys())[0])
    elif len(uniq_delta.items()) > 1:
        base_string += "\nDelta "
        for i,kk in enumerate(uniq_delta.keys()):
            if i == 1:
                base_string += "{:s}: {:s}".format(kk, uniq_delta[kk][:-2])
            else:
                base_string += " {:s}: {:s}".format(kk,
                                                    uniq_delta[kk][:-2])
    else:
        base_string += "\nDelta unknown"
return base_string
def clean(supermag):
""" Data has been cleaned, but should be examined before use
"""
return
def download(date_array, tag, sat_id='', data_path=None, user=None,
password=None, baseline='all', delta='none', options='all',
file_fmt='ascii'):
"""Routine to download SuperMAG data
Parameters
-----------
date_array : np.array
Array of datetime objects
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and '' (for only magnetometer data)
sat_id : string
Not used (default='')
data_path : string or NoneType
Data path to save downloaded files to (default=None)
user : string or NoneType
SuperMAG requires user registration (default=None)
password : string or NoneType
Not used; SuperMAG does not require a password (default=None)
file_fmt : string
File format options: 'ascii' and 'csv'. (default='ascii')
baseline : string
Baseline to remove from magnetometer data. Options are 'all', 'yearly',
and 'none'. (default='all')
delta : string
Remove a value from the magnetometer data. Options are 'none', 'start',
and 'median'. (default='none')
options : string or NoneType
Additional parameter options for magnetometer data. Includes 'mlt'
(MLat and MLT), 'decl' (IGRF declination), 'sza' (Solar Zenith Angle),
'all', and None. (default='all')
Returns
-------
"""
import sys
import requests
global platform, name
max_stations = 470
if user is None:
raise ValueError('SuperMAG requires user registration')
remoteaccess = {'method':'http', 'host':'supermag.jhuapl.edu',
'path':'mag/lib/services', 'user':'user={:s}'.format(user),
'service':'service=', 'options':'options='}
remotefmt = "{method}://{host}/{path}/??{user}&{service}&{filefmt}&{start}"
# Set the tag information
if tag == "indices":
tag = "all"
if tag != "stations":
remotefmt += "&{interval}&{stations}&{delta}&{baseline}&{options}"
# Determine whether station or magnetometer data is requested
remoteaccess['service'] += tag if tag == "stations" else "mag"
# Add request for file type
file_fmt = file_fmt.lower()
    if file_fmt not in ['ascii', 'csv']:
estr = "unknown file format [{:s}], using 'ascii'".format(file_fmt)
print("WARNING: {:s}".format(estr))
file_fmt = 'ascii'
remoteaccess['filefmt'] = 'fmt={:s}'.format(file_fmt)
# If indices are requested, add them now.
    if tag not in [None, 'stations']:
remoteaccess['options'] += "+envelope"
# Add other download options (for non-station files)
if tag != "stations":
if options is not None:
options = options.lower()
            if options == 'all':
remoteaccess['options'] += "+mlt+sza+decl"
else:
remoteaccess['options'] += "+{:s}".format(options)
        # Add requests for baseline subtraction
        baseline = baseline.lower()
        if baseline not in ['all', 'yearly', 'none']:
estr = "unknown baseline [{:s}], using 'all'".format(baseline)
print("WARNING: {:s}".format(estr))
baseline = 'all'
remoteaccess['baseline'] = "baseline={:s}".format(baseline)
delta = delta.lower()
        if delta not in ['none', 'median', 'start']:
estr = "unknown delta [{:s}], using 'none'".format(delta)
print("WARNING: {:s}".format(estr))
delta = 'none'
remoteaccess['delta'] = 'delta={:s}'.format(delta)
# Set the time information and format
remoteaccess['interval'] = "interval=23:59"
sfmt = "%Y-%m-%dT00:00:00.000"
tag_str = "_" if tag is None else "_all_"
ffmt = "{:s}_{:s}{:s}%Y%m%d.{:s}".format(platform, name, tag_str,
"txt" if file_fmt == "ascii"
else file_fmt)
start_str = "start="
else:
# Set the time format
sfmt = "%Y"
ffmt = "{:s}_{:s}_{:s}_%Y.{:s}".format(platform, name, tag,
"txt" if file_fmt == "ascii"
else file_fmt)
start_str = "year="
# Cycle through all of the dates, formatting them to achieve a unique set
# of times to download data
date_fmts = list(set([dd.strftime(sfmt) for dd in date_array]))
# Now that the unique dates are known, construct the file names
name_fmts = [None for dd in date_fmts]
for dd in date_array:
i = date_fmts.index(dd.strftime(sfmt))
name_fmts[i] = dd.strftime(ffmt)
if None in name_fmts:
raise ValueError("unable to construct all unique file names")
# Cycle through all of the unique dates. Stations lists are yearly and
# magnetometer data is daily
station_year = None
istr = 'SuperMAG {:s}'.format(tag if tag == "stations" else "data")
for i,date in enumerate(date_fmts):
print("Downloading {:s} for {:s}".format(istr, date.split("T")[0]))
sys.stdout.flush()
nreq = 1
# Add the start time and download period to query
remoteaccess['start'] = "{:s}{:s}".format(start_str, date)
if tag != "stations":
# Station lists are for each year, see if this year is loaded
current_date = pds.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.000")
if current_date.year != station_year:
# Get all of the stations for this time
smag_stat = pysat.Instrument(platform=platform, name=name,
tag='stations')
# try to load data
smag_stat.load(date=current_date)
if smag_stat.empty:
# no data
etime = current_date + pds.DateOffset(days=1)
smag_stat.download(start=current_date, stop=etime,
user=user, password=password,
file_fmt=file_fmt)
smag_stat.load(date=current_date)
if smag_stat.empty:
# no data
estr = "unable to format station query for "
estr += "[{:d}]".format(current_date.year)
raise ValueError(estr)
                # record the loaded year; split the station list into
                # multiple requests if it exceeds the per-query maximum
                station_year = current_date.year
                if smag_stat.data.IAGA.shape[0] > max_stations:
                    nreq = int(np.ceil(smag_stat.data.IAGA.shape[0] /
                                       float(max_stations)))
out = list()
for ireq in range(nreq):
if tag != "stations":
if station_year is None:
raise RuntimeError("unable to load station data")
stat_str = ",".join(smag_stat.data.IAGA[ireq*max_stations:
(ireq+1)*max_stations])
remoteaccess['stations'] = "stations={:s}".format(stat_str)
# Format the query
url = remotefmt.format(**remoteaccess)
# Set up a request
try:
# print (url)
result = requests.post(url)
result.encoding = 'ISO-8859-1'
# handle strings differently for python 2/3
if sys.version_info.major == 2:
out.append(str(result.text.encode('ascii', 'replace')))
else:
out.append(result.text)
            except requests.exceptions.RequestException:
                raise RuntimeError("unable to connect to [{:s}]".format(url))
# Test the result
if "requested URL was rejected" in out[-1]:
estr = "Requested url was rejected:\n{:s}".format(url)
raise RuntimeError(estr)
# Build the output file name
        fname = path.join(data_path, name_fmts[i])
# If more than one data pass was needed, append the files
if len(out) > 1:
out_data = append_data(out, file_fmt, tag)
else:
out_data = out[0]
# Save the file data
with open(fname, "w") as local_file:
local_file.write(out_data)
local_file.close()
del out_data
return
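# Hedged usage sketch (requires SuperMAG registration; dates and the user
# address are illustrative, and the exact pysat download call may vary by
# pysat version):
#   import pysat
#   mag = pysat.Instrument(platform='supermag', name='magnetometer', tag='')
#   mag.download(start=pysat.datetime(2009, 1, 1),
#                stop=pysat.datetime(2009, 1, 2), user='me@example.com')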
def append_data(file_strings, file_fmt, tag):
""" Load the SuperMAG files
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
file_fmt : str
String denoting file type (ascii or csv)
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and '' (for only magnetometer data)
Returns
-------
out_string : string
String with all data, ready for output to a file
"""
# Determine the right appending routine for the file type
if file_fmt.lower() == "csv":
return append_csv_data(file_strings)
else:
return append_ascii_data(file_strings, tag)
def append_ascii_data(file_strings, tag):
""" Append data from multiple files for the same time period
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and None (for only magnetometer data)
Returns
-------
out_string : string
String with all data, ready for output to a file
"""
import re
# Start with data from the first list element
out_lines = file_strings[0].split('\n')
iparam = -1 # Index for the parameter line
ihead = -1 # Index for the last header line
idates = list() # Indices for the date lines
date_list = list() # List of dates
num_stations = list() # Number of stations for each date line
ind_num = 2 if tag in ['all', 'indices', ''] else 0
# ind_num = 2 if tag == '' else ind_num
# Find the index information for the data
for i,line in enumerate(out_lines):
if line == "Selected parameters:":
iparam = i + 1
elif line.count("=") == len(line) and len(line) > 2:
ihead = i
break
# Find the time indices and number of stations for each date line
i = ihead + 1
while i < len(out_lines) - 1:
idates.append(i)
lsplit = re.split('\t+', out_lines[i])
dtime = pds.datetime.strptime(" ".join(lsplit[0:-1]),
"%Y %m %d %H %M %S")
date_list.append(dtime)
num_stations.append(int(lsplit[-1]))
i += num_stations[-1] + 1 + ind_num
idates = np.array(idates)
# Initialize a list of station names
station_names = list()
# Cycle through each additional set of file strings
for ff in range(len(file_strings)-1):
file_lines = file_strings[ff+1].split('\n')
# Find the index information for the data
head = True
snum = 0
for i,line in enumerate(file_lines):
if head:
if line.count("=") == len(line) and len(line) > 2:
head = False
elif len(line) > 0:
lsplit = re.split('\t+', line)
if snum == 0:
dtime = pds.datetime.strptime(" ".join(lsplit[0:-1]),
"%Y %m %d %H %M %S")
                    try:
                        idate = date_list.index(dtime)
                    except ValueError:
# SuperMAG outputs date lines regardless of the
# number of stations. These files shouldn't be
# appended together.
raise ValueError("Unexpected date ", dtime)
snum = int(lsplit[-1])
onum = num_stations[idate]
inum = ind_num
# Adjust reference data for new number of station lines
idates[idate+1:] += snum
num_stations[idate] += snum
# Adjust date line for new number of station lines
oline = "{:s}\t{:d}".format( \
dtime.strftime("%Y\t%m\t%d\t%H\t%M\t%S"),
num_stations[idate])
out_lines[idates[idate]] = oline
else:
if inum > 0:
inum -= 1
else:
# Insert the station line to the end of the date section
onum += 1
snum -= 1
out_lines.insert(idates[idate]+onum, line)
# Save the station name to update the parameter line
if not lsplit[0] in station_names:
station_names.append(lsplit[0])
# Update the parameter line
out_lines[iparam] += "," + ",".join(station_names)
# Join the output lines into a single string
out_string = "\n".join(out_lines)
return out_string
def append_csv_data(file_strings):
""" Append data from multiple csv files for the same time period
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
Returns
-------
out_string : string
String with all data, ready for output to a file
"""
# Start with data from the first list element
out_lines = list()
head_line = None
# Cycle through the lists of file strings, creating a list of line strings
for fstrings in file_strings:
file_lines = fstrings.split('\n')
# Remove and save the header line
head_line = file_lines.pop(0)
# Save the data lines
out_lines.extend(file_lines)
# Sort the output lines by date and station (first two columns) in place
out_lines.sort()
# Remove all zero-length lines from front, add one to back, and add header
i = 0
while len(out_lines[i]) == 0:
out_lines.pop(i)
out_lines.insert(0, head_line)
out_lines.append('')
# Join the output lines into a single string
out_string = "\n".join(out_lines)
return out_string
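# A minimal usage sketch (hypothetical two-line SuperMAG-style CSV fragments):
# both inputs share one header; the merged string keeps a single header line,
# sorts the data lines by date and station, and ends with a newline.
if __name__ == '__main__':
    example_a = "Date_UTC,IAGA,N\n2000-01-01,ABC,1.0\n"
    example_b = "Date_UTC,IAGA,N\n2000-01-01,XYZ,2.0\n"
    print(append_csv_data([example_a, example_b]))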
|
import unittest
from pprint import pprint
from rdflib import Graph, Namespace
from pyshex import ShExEvaluator
rdf = """
@prefix : <http://example.org/model/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<http://example.org/context/42> a :Person ;
foaf:age 43 ;
foaf:firstName "Bob",
"Joe" ;
foaf:lastName "smith" .
"""
shex = """
<http://example.org/sample/example1/String> <http://www.w3.org/2001/XMLSchema#string>
<http://example.org/sample/example1/Int> <http://www.w3.org/2001/XMLSchema#integer>
<http://example.org/sample/example1/Boolean> <http://www.w3.org/2001/XMLSchema#boolean>
<http://example.org/sample/example1/Person> CLOSED {
( <http://xmlns.com/foaf/0.1/firstName> @<http://example.org/sample/example1/String> * ;
<http://xmlns.com/foaf/0.1/lastName> @<http://example.org/sample/example1/String> ;
<http://xmlns.com/foaf/0.1/age> @<http://example.org/sample/example1/Int> ? ;
<http://example.org/model/living> @<http://example.org/sample/example1/Boolean> ? ;
<http://xmlns.com/foaf/0.1/knows> @<http://example.org/sample/example1/Person> *
)
}
"""
EXC = Namespace("http://example.org/context/")
EXE = Namespace("http://example.org/sample/example1/")
class Issue41TestCase(unittest.TestCase):
def test_closed(self):
""" Test closed definition """
e = ShExEvaluator(rdf=rdf, schema=shex, focus=EXC['42'], start=EXE.Person)
# This causes issue 42
# pprint(e.evaluate())
self.assertFalse(e.evaluate()[0].result)
from pyshex.evaluate import evaluate
g = Graph()
g.parse(data=rdf, format="turtle")
pprint(evaluate(g, shex, focus=EXC['42'], start=EXE.Person))
if __name__ == '__main__':
unittest.main()
|
from typing import Optional
from fastapi import FastAPI
from sqlmodel import (
SQLModel,
Field,
create_engine,
select,
Session
)
# Create the database engine
engine = create_engine('sqlite:///database.db')
class Pessoa(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
nome: str
idade: int
# Create the database tables
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.get('/')
def home():
return {'message': 'Deu bom'}
@app.get('/pessoa')
def get_pessoa():
query = select(Pessoa)
with Session(engine) as session:
result = session.execute(query).scalars().all()
return result
@app.get('/pessoa-nome')
def get_pessoa_nome():
query = select(Pessoa.nome)
with Session(engine) as session:
result = session.execute(query).scalars().all()
return result
@app.get('/pessoa-nome-idade')
def get_pessoa_nome_idade():
query = select(Pessoa.nome, Pessoa.idade)
    with Session(engine) as session:
        # .scalars() would keep only the first selected column (nome);
        # fetch full rows and build JSON-serializable dicts instead
        result = [{'nome': nome, 'idade': idade}
                  for nome, idade in session.execute(query).all()]
    return result
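# A minimal sketch (not part of the original API) of how rows could be added
# with the same engine/session pattern; the POST endpoint is hypothetical.
@app.post('/pessoa')
def create_pessoa(pessoa: Pessoa):
    with Session(engine) as session:
        session.add(pessoa)
        session.commit()
        session.refresh(pessoa)
    return pessoa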
|
import proxy
import json
import re
class Web1C(proxy.HttpSniffer):
folder = '/data'
def on_post_request_buh3_ru_RU_e1cib_logForm(self, _request, request):
data = self.escape_res(_request['data'].decode())
        data = json.loads(data)  # the encoding kwarg was removed from json.loads in Python 3.9
try:
if data['root']['key'] == "Справочник.Контрагенты.ФормаСписка":
self.save_data_to_file(
'ЗапросКонтрагентФормаСписка_{0}'.format(_request['headers'].get('pragma', 0)),
data
)
except KeyError:
pass
def on_post_request_buh3_ru_RU_e1cib_dlist(self, _request, request):
data = self.escape_res(_request['data'].decode())
        data = json.loads(data)  # the encoding kwarg was removed from json.loads in Python 3.9
try:
# if data['root']['key'] == "Справочник.Контрагенты.ФормаСписка":
self.save_data_to_file(
'ЗапросDlistКонтрагенты_{0}'.format(_request['headers'].get('pragma', 0)),
data
)
except KeyError:
pass
    @staticmethod
    def escape_res(text):
        # Escape raw control characters so the payload parses as JSON,
        # strip the UTF-8 BOM, and quote bare `undefined` tokens
        text = text.replace('\t', '\\t').replace('\r', '\\r')
        text = text.replace('\n', '\\n').replace('\ufeff', '')
        text = re.sub(r"([^\"])undefined", r'\1"undefined"', text)
        return text
def save_data_to_file(self, name, data):
with open('{0}/{1}.json'.format(self.folder, name), "w", encoding='utf-8') as file:
json.dump(data, file, indent=4, ensure_ascii=False)
if __name__ == '__main__':
Web1C.run_proxy('http://10.76.172.92') # , proxy="http://127.0.0.1:8888") # Fiddler
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import redis
from utils.get_configure import get_conf
_logger = logging.getLogger(__name__)
def get_redis_conf():
namespace = "redis"
REDIS_DB = get_conf("REDIS_DB", namespace=namespace)
REDIS_HOST = get_conf("REDIS_HOST", namespace=namespace)
REDIS_PORT = get_conf("REDIS_PORT", namespace=namespace, conf_type="int")
REDIS_PASS = get_conf("REDIS_PASS", namespace=namespace)
return REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_PASS
def redis_handle():
host, port, db, password = get_redis_conf()
r = redis.Redis(host=host, port=port, db=db, password=password)
return r
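# Usage sketch (assumes the configured Redis instance is reachable):
#     handle = redis_handle()
#     handle.set('healthcheck', 'ok')
#     handle.get('healthcheck')  # returns b'ok' -- redis-py returns bytes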
|
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the reproman package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import datetime
import logging
import os
from fabric import Connection
from importlib import import_module
import pytest
import tempfile
import uuid
from ..session import get_updated_env, Session
from ...support.exceptions import CommandError
from ...utils import chpwd, swallow_logs
from ...tests.utils import create_tree
from ...tests.skip import mark
from ...tests.fixtures import get_docker_fixture
from ...tests.fixtures import get_singularity_fixture
from ...consts import TEST_SSH_DOCKER_DIGEST
testing_container = get_docker_fixture(
TEST_SSH_DOCKER_DIGEST,
name='testing-container',
portmaps={
49000: 22
},
custom_params={
'host': 'localhost',
'user': 'root',
'port': 49000,
},
scope='module'
)
singularity_resource = get_singularity_fixture()
@pytest.fixture(scope="module")
def ssh_connection(testing_container, request):
# Initialize SSH connection to testing Docker container.
connection = Connection(
'localhost',
user='root',
port=49000,
connect_kwargs={
'password': 'root'
}
)
connection.open()
return connection
@pytest.mark.skip(reason="TODO")
def test_check_envvars_handling():
# TODO: test that all the handling of variables works with set_envvar
# get_envvar etc
pass
# TODO: make it into a fixture I guess if needed, or just import and call within
# specific backend tests
def check_session_passing_envvars(session):
# TODO: do not set/pass any env variables, test that PATH is set within remote
default_envvars = session.query_envvars()
assert default_envvars['PATH']
assert 'EXPORTED' not in session.query_envvars()
session.set_envvar('EXPORTED_NOT_PERMANENT', 'VALUE')
assert session.query_envvars()['EXPORTED_NOT_PERMANENT'] == 'VALUE'
session.set_envvar('EXPORTED_PERMANENT', 'VALUE2')
assert session.query_envvars()['EXPORTED_NOT_PERMANENT'] == 'VALUE'
assert session.query_envvars()['EXPORTED_PERMANENT'] == 'VALUE2'
# TODO: we should add functionality to record the state of the env
# upon finishing create (or install? login?) and here could test
# smth like
# session = session.restart()
# envvars = assert session.query_envvars()
# assert 'EXPORTED_NOT_PERMANENT' not in envvars
# assert envvars['EXPORTED_NOT_PERMANENT'] == 'VALUE2'
def test_get_updated_env():
assert get_updated_env({'a': 1}, {'a': 2}) == {'a': 2}
assert get_updated_env({'a': None}, {'a': 2}) == {'a': 2}
assert get_updated_env({'a': 1}, {'a': None}) == {}
assert get_updated_env({'a': 1, 'b': 2}, {'a': None}) == {'b': 2}
assert get_updated_env({'a': 1, 'b': 2}, {'a': None, 'b': 3}) == {'b': 3}
assert get_updated_env({'a': '/foo', 'b': 2}, {'a': '/bar:$a', 'b': 3}) \
== {'a': '/bar:/foo', 'b': 3}
assert get_updated_env({'a': '/foo', 'b': 2}, {'a': '/bar:$ab', 'b': 3}) \
== {'a': '/bar:$ab', 'b': 3}
assert get_updated_env({'a': '/foo', 'b': 2}, {'a': '/bar:${a}:/blee', 'b': 3}) \
== {'a': '/bar:/foo:/blee', 'b': 3}
def test_get_local_session():
# get_local_session(env={'LC_ALL': 'C'}, pty=False, shared=None)
return
def test_session_class():
with Session() as session:
# Check class constructor
assert type(session) == Session
# Check __call__ is passing through to _execute_command()
with pytest.raises(NotImplementedError):
stdout, stderr = session(['ls'], env={'ENV_VAR': 'ENV_VALUE'})
session._env = { 'VAR': 'VAR_VALUE' }
session._env_permanent = { 'PERM_VAR': 'PERM_VAR_VALUE' }
# Check we can read _envs properly
envvars = session.get_envvars()
assert envvars['VAR'] == 'VAR_VALUE'
assert 'PERM_VAR' not in envvars
# Check we can read permanent envs
envvars = session.get_envvars(permanent=True)
assert envvars['PERM_VAR'] == 'PERM_VAR_VALUE'
assert 'VAR' not in envvars
# Check we can add an env through the setter
session.set_envvar('NEW_VAR', value='NEW_VAR_VALUE')
envvars = session.get_envvars()
assert envvars['NEW_VAR'] == 'NEW_VAR_VALUE'
assert envvars['VAR'] == 'VAR_VALUE'
# Check we can set an env var by passing a dict
session.set_envvar({'DICT_VAR': 'DICT_VAR_VALUE'})
envvars = session.get_envvars()
assert envvars['DICT_VAR'] == 'DICT_VAR_VALUE'
assert envvars['VAR'] == 'VAR_VALUE'
# Check we can delete an existing env var
session.set_envvar('DICT_VAR', None)
envvars = session.get_envvars()
assert 'DICT_VAR' not in envvars
assert envvars['VAR'] == 'VAR_VALUE'
# Check formatting of env values
session.set_envvar('VAR', 'FORMATTED {}', format=True)
envvars = session.get_envvars()
assert envvars['VAR'] == 'FORMATTED VAR_VALUE'
assert envvars['NEW_VAR'] == 'NEW_VAR_VALUE'
# At this time, setting permanent env vars is not supported
with pytest.raises(NotImplementedError):
session.set_envvar('NEW_VAR', value='NEW_VAR_VALUE',
permanent=True)
# Check we raise an exception if user tries to set an env value while
# passing a dict
with pytest.raises(AssertionError):
session.set_envvar({'WILL': 'FAIL'}, value='!')
# Check query_envvars() method not implemented
with pytest.raises(NotImplementedError):
session.query_envvars()
# Check source_script() method not implemented
with pytest.raises(NotImplementedError):
session.source_script(['ls'])
# Check unauthorized commands raise CommandError exception
with pytest.raises(CommandError):
session.reproman_exec('sudo', ['rm', '-r', '/'])
# Check mangled arg raises exception
with pytest.raises(CommandError):
session.reproman_exec('mkdir', ['bad=passed=argument'])
# Check exec command to valid method passes through to not implemented
# exception
with pytest.raises(NotImplementedError):
session.reproman_exec('mkdir', ['/my/new/dir', 'parents=True'])
# Check abstract methods raise NotImplementedError
with pytest.raises(NotImplementedError):
session.exists('/path')
with pytest.raises(NotImplementedError):
session.put('src_path', 'dest_path')
with pytest.raises(NotImplementedError):
session.get('src_path', 'dest_path')
with pytest.raises(NotImplementedError):
session.get_mtime('path')
with pytest.raises(NotImplementedError):
session.read('path')
with pytest.raises(NotImplementedError):
session.mkdir('path')
with pytest.raises(NotImplementedError):
session.mktmpdir()
with pytest.raises(NotImplementedError):
session.isdir('path')
with pytest.raises(NotImplementedError):
session.chmod('path', 'mode')
with pytest.raises(NotImplementedError):
session.chown('path', 100)
@pytest.fixture
def check_methods(resource_test_dir):
def fn(cls_name, session):
# Check the validity of the env vars
envs = session.query_envvars()
assert 'HOME' in envs
assert 'PATH' in envs
# Check sourcing new env variables
# new_envs = session.source_script(['export', 'SCRIPT_VAR=SCRIPT_VALUE'])
# assert 'SCRIPT_VAR' in new_envs
# Check _execute_command by checking file system
out, err = session._execute_command(['cat', '/etc/hosts'])
assert '127.0.0.1' in out
assert 'localhost' in out
assert err == ''
# Check _execute_command failure
with pytest.raises(CommandError):
session._execute_command(['cat', '/no/such/file'])
# Check _execute_command with env set
out, err = session._execute_command(['env'],
env={'NEW_VAR': 'NEW_VAR_VALUE'})
assert 'NEW_VAR=NEW_VAR_VALUE' in out
# Check _execute_command with cwd set
out, err = session._execute_command(['pwd'],
cwd='/var')
assert '/var' == out.rstrip("\n")
# Check exists() method
result = session.exists('/etc')
assert result
result = session.exists('/etc/hosts')
assert result
result = session.exists('/no/such/file')
assert not result
# exists() doesn't get confused by an empty string.
assert not session.exists('')
# Check isdir() method
result = session.isdir('/etc')
assert result
result = session.isdir('/etc/hosts') # A file, not a dir
assert not result
result = session.isdir('/no/such/dir')
assert not result
# Create a temporary test file
with tempfile.TemporaryDirectory(dir=resource_test_dir) as tdir:
create_tree(tdir,
{'f0': 'ReproMan test content\nline 2\nline 3',
'f1': 'f1',
'd0': {'f2': 'f2',
'd2': {'f3': 'f3'}}})
local_path = os.path.join(tdir, 'f0')
remote_path = '{}/reproman upload/{}'.format(resource_test_dir,
uuid.uuid4().hex)
# Check put() method
# session.put(local_path, remote_path, uid=3, gid=3) # UID for sys, GID for sys
# TODO: Sort out permissions issues with chown for SSH when no sudo
session.put(local_path, remote_path)
result = session.exists(remote_path)
assert result
# TODO: Check uid and gid of remote file
# Check recursive put().
remote_path_rec = '{}/recursive-put/{}'.format(
resource_test_dir, uuid.uuid4().hex)
session.put(tdir, remote_path_rec)
assert session.exists(remote_path_rec + "/d0/f2")
assert session.exists(remote_path_rec + "/d0/d2/f3")
# We can use a relative name for the target
basename_put_dir = os.path.join(resource_test_dir, "basename-put")
if not os.path.exists(basename_put_dir):
os.mkdir(basename_put_dir)
# Change directory to avoid polluting test directory for local shell.
with chpwd(basename_put_dir):
try:
session.put(local_path, os.path.basename(remote_path))
except ValueError:
# Docker and Singularity don't accept non-absolute paths.
assert "Docker" in cls_name or "Singularity" in cls_name
# Check get_mtime() method by checking new file has today's date
result = int(session.get_mtime(remote_path).split('.')[0])
assert datetime.datetime.fromtimestamp(result).month == \
datetime.date.today().month
assert datetime.datetime.fromtimestamp(result).day == \
datetime.date.today().day
# Check read() method
output = session.read(remote_path).split('\n')
assert output[0] == 'ReproMan test content'
assert output[1] == 'line 2'
# Check get() method
local_path = '{}/download/{}'.format(resource_test_dir,
uuid.uuid4().hex)
session.get(remote_path, local_path)
# TODO: In some cases, updating uid and gid does not work if not root
assert os.path.isfile(local_path)
with open(local_path, 'r') as f:
content = f.read().split('\n')
assert content[0] == 'ReproMan test content'
os.remove(local_path)
os.rmdir(os.path.dirname(local_path))
with chpwd(resource_test_dir):
# We can get() without a leading directory.
session.get(remote_path, "just base")
assert os.path.exists("just base")
remote_basename = os.path.basename(remote_path)
# We can get() without specifying a target.
session.get(remote_path)
assert os.path.exists(remote_basename)
# Or by specifying just the directory.
session.get(remote_path, "subdir" + os.path.sep)
assert os.path.exists(os.path.join("subdir", remote_basename))
# Check mkdir() method
test_dir = '{}/{}'.format(resource_test_dir, uuid.uuid4().hex)
session.mkdir(test_dir)
result = session.isdir(test_dir)
assert result
# Check listdir() method
if hasattr(session, 'listdir'):
subdir = uuid.uuid4().hex
subfile = uuid.uuid4().hex
session.mkdir(os.path.join(test_dir, subdir))
session.put('/etc/hosts', os.path.join(test_dir, subfile))
assert set(session.listdir(test_dir)) == set((subdir, subfile))
# Check making parent dirs without setting flag
test_dir = '{}/tmp/i fail/{}'.format(resource_test_dir, uuid.uuid4().hex)
with pytest.raises(CommandError):
session.mkdir(test_dir, parents=False)
result = session.isdir(test_dir)
assert not result
# Check making parent dirs when parents flag set
test_dir = '{}/i succeed/{}'.format(resource_test_dir, uuid.uuid4().hex)
session.mkdir(test_dir, parents=True)
result = session.isdir(test_dir)
assert result
# Check mktmpdir() method
test_dir = session.mktmpdir()
result = session.isdir(test_dir)
assert result, "The path %s is not a directory" % test_dir
# All sessions will take the command in string form...
output_string = "{}/stringtest {}".format(
resource_test_dir, session.__class__.__name__)
assert not session.exists(output_string)
session.execute_command("touch '{}'".format(output_string))
assert session.exists(output_string)
# and the list form.
output_list = "{}/listtest {}".format(
resource_test_dir, session.__class__.__name__)
assert not session.exists(output_list)
session.execute_command(["touch", output_list])
assert session.exists(output_list)
# TODO: How to test chmod and chown? Need to be able to read remote file attributes
# session.chmod(self, path, mode, recursive=False):
# session.chown(self, path, uid=-1, gid=-1, recursive=False, remote=True):
return fn
def test_session_shell(check_methods):
from reproman.resource.shell import ShellSession
check_methods("ShellSession", ShellSession())
def import_resource(mod, cls):
return getattr(import_module("reproman.resource." + mod),
cls)
@pytest.mark.parametrize(
"location",
[ # module, class
("singularity", "SingularitySession"),
("singularity", "PTYSingularitySession")
],
ids=lambda x: x[1])
def test_session_singularity(location, singularity_resource, check_methods):
"""Test sessions that depend on `singularity_resource` fixture.
"""
cls = import_resource(*location)
session = cls(singularity_resource.name)
check_methods(location[1], session)
@mark.skipif_no_ssh
@pytest.mark.parametrize(
"location",
[ # module, class
("ssh", "SSHSession"),
("ssh", "PTYSSHSession"),
],
ids=lambda x: x[1])
def test_session_ssh(location, ssh_connection, check_methods):
"""Test sessions that depend on `ssh_connection` fixture.
"""
cls = import_resource(*location)
check_methods(location[1], cls(ssh_connection))
@pytest.mark.parametrize(
"location",
[ # module, class
("docker_container", "DockerSession"),
("docker_container", "PTYDockerSession"),
],
ids=lambda x: x[1])
def test_session_container(location, testing_container, check_methods):
"""Test sessions that depend on `testing_container` fixture.
"""
cls = import_resource(*location)
import docker
client = docker.APIClient()
container = next(c for c in client.containers()
if '/testing-container' in c['Names'])
assert container
check_methods(location[1], cls(client, container))
|
"""
Defines the exceptions and operator helpers used for basic mathematical operations over unit-containing members (Constant, Parameter, Variables)
"""
class ExposedVariableError(Exception):
"""
    Error raised when non-exposed variables are used to connect two Model objects.
"""
def __init__(self, model_1_exposed_vars, model_2_exposed_vars, output_var, input_var):
self.m1_exposed_names = [var_i.name for var_i in model_1_exposed_vars]
self.m2_exposed_names = [var_i.name for var_i in model_2_exposed_vars]
self.input_var_name = input_var.name
self.output_var_name = output_var.name
def __str__(self):
msg = "Non-exposed variable declaration in the output model(1) \n %s \n and/or input model(2) \n %s. \n The declared output variable name is %s, and the input variable name is %s." % (self.m1_exposed_names, self.m2_exposed_names, self.output_var_name, self.input_var_name)
return(msg)
class UnexpectedObjectDeclarationError(Exception):
"""
    Error raised when a non-registered Variable, Parameter or Constant is used in the current Model.
"""
def __init__(self, objects, declared_objects):
self.objects = objects
self.declared_objects = declared_objects
def __str__(self):
msg = "Unexpected object declaration error. \n The following objects were used: %s \n But the following objects were declared for the current model. \n %s" % (self.objects, self.declared_objects)
return(msg)
class AbsentRequiredObjectError(Exception):
"""
    Error raised by the absence of a required object.
"""
def __init__(self, expected_type, supplied_object=""):
self.expected_type = expected_type
self.supplied_object = supplied_object
def __str__(self):
msg = "Absent required object error. A %s was expected, but no one was supplied." % (self.expected_type)
if self.supplied_object is not "":
msg += " Supplied object was: %s" % (self.supplied_object)
return(msg)
class UnexpectedValueError(Exception):
"""
Error raised by input of an unexpected value.
"""
def __init__(self, expected_type):
self.expected_type = expected_type
def __str__(self):
msg = "Unexpected value error. A %s was expected, but one divergent type was supplied." % (self.expected_type)
return msg
class UnresolvedPanicError(Exception):
"""
    Error raised by unresolved problems. Ideally this exception should never arise; it is included only for debugging purposes.
"""
def __init__(self, msg=''):
self.msg=msg
def __str__(self):
err_msg = "Unresolved Panic Error.%s\n This should not have ocurrred. \n Perhaps you should debug your code."%self.msg
return(err_msg)
class NumericalError(Exception):
"""
    Error raised by unsolvable numeric problems. Ideally this exception should never arise.
"""
def __init__(self, msg=''):
self.msg=msg
def __str__(self):
        if self.msg == '':
            err_msg = "NumericalError.\n This should not have occurred.\n"
else:
err_msg = self.msg
return(err_msg)
class NonDimensionalArgumentError(Exception):
"""
Error raised when a non-dimensional argument was expected but a dimensional one was provided.
Typically occurs in transcendental functions (log, log10, sin, cos, etc...)
"""
def __init__(self, unit):
self.unit = unit
def __str__(self):
msg = "A dimensionless argument was expected \n" + \
str(self.unit.dimension)
return(msg)
class DimensionalCoherenceError(Exception):
"""
Error raised when two non-coherent dimensions are summed or subtracted
"""
    def __init__(self, unit_1, unit_2):
        self.unit_1 = unit_1
        self.unit_2 = unit_2
    def __str__(self):
        # A unit may be absent (None); fall back to a null dimension for display
        null_dimension = {'m': 0.0, 'kg': 0.0, 's': 0.0, 'A': 0.0, 'K': 0.0, 'mol': 0.0, 'cd': 0.0}
        dimension_1 = self.unit_1.dimension if self.unit_1 is not None else null_dimension
        dimension_2 = self.unit_2.dimension if self.unit_2 is not None else null_dimension
        msg = "Dimensions are incoherent \n(" + \
              str(dimension_1) + \
              "\n != \n" + \
              str(dimension_2) + \
              ")."
        return(msg)
class UnitOperationError(Exception):
"""
    Error raised by errors in the definition of UnitOp objects.
"""
def __init__(self, port, n, elem):
self.n = n
self.port = port
self.elem = elem
def __str__(self):
msg = "UnitOp object was defined for port {} with lenght {}, but a element of size {} was supplied.".format(self.port, self.n, len(self.elem))
def _addUnitContainingOperations(a,b):
return(a._checkDimensionalCoherence(b))
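# A hedged usage sketch: client code might surface DimensionalCoherenceError
# when combining quantities; `velocity` and `temperature` are hypothetical
# unit-containing members with incompatible dimensions.
#     try:
#         total = velocity + temperature
#     except DimensionalCoherenceError as err:
#         print(err)  # "Dimensions are incoherent ..."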
|
import io
from zipfile import ZipFile
import pandas as pd
import pytest
from sqlalchemy import delete
from athenian.api.models.state.models import UserAccount
from athenian.api.models.web import ContributorIdentity, MatchedIdentity, PullRequestMetricID
from athenian.api.serialization import FriendlyJson
async def test_match_identities_smoke(client, headers):
body = {
"account": 1,
"identities": [
{
"names": ["Vadim", "Markovtsv"],
},
{
"emails": ["eiso@athenian.co", "contact@eisokant.com"],
},
{
"names": ["Denys Smyrnov"],
"emails": ["denys@sourced.tech"],
},
],
}
response = await client.request(
method="POST", path="/v1/match/identities", headers=headers, json=body,
)
rbody = (await response.read()).decode("utf-8")
assert response.status == 200, "Response body is : " + rbody
model = [MatchedIdentity.from_dict(i) for i in FriendlyJson.loads(rbody)]
for i in range(len(body["identities"])):
assert model[i].from_ == ContributorIdentity.from_dict(body["identities"][i])
assert model[0].to == "github.com/vmarkovtsev"
assert model[0].confidence < 1
assert model[1].to == "github.com/eiso"
assert model[1].confidence == 1
assert model[2].to == "github.com/dennwc"
assert model[2].confidence < 1
@pytest.mark.parametrize("body, code", [
({"account": 1, "identities": [{}]}, 400),
({"account": 1, "identities": [{"emails": []}]}, 400),
({"account": 1, "identities": [{"names": []}]}, 400),
({"account": 1, "identities": [{"names": [], "emails": []}]}, 400),
({"account": 1, "identities": [{"emails": ["x@y.co"]}, {"emails": ["x@y.co"]}]}, 400),
({"account": 2, "identities": [{"emails": ["x@y.co"]}]}, 422),
({"account": 4, "identities": [{"emails": ["x@y.co"]}]}, 404),
])
async def test_match_identities_nasty_input(client, headers, body, code):
response = await client.request(
method="POST", path="/v1/match/identities", headers=headers, json=body,
)
rbody = (await response.read()).decode("utf-8")
assert response.status == code, "Response body is : " + rbody
async def test_get_everything_smoke(client, headers, dummy_deployment_label):
# preheat
body = {
"for": [
{
"with": {},
"repositories": [
"github.com/src-d/go-git",
],
},
],
"metrics": [PullRequestMetricID.PR_LEAD_TIME],
"date_from": "2015-10-13",
"date_to": "2020-01-23",
"granularities": ["all"],
"exclude_inactive": False,
"account": 1,
}
response = await client.request(
method="POST", path="/v1/metrics/pull_requests", headers=headers, json=body,
)
body = (await response.read()).decode("utf-8")
assert response.status == 200, "Response body is : " + body
response = await client.request(
method="GET", path="/v1/get/export?account=1", headers=headers,
)
assert response.status == 200
body = await response.read()
developer_dfs = {
"jira_mapping": 206,
"active_active0_commits_pushed_lines_changed": 3135,
"prs_created": 681,
"prs_merged": 554,
"releases": 53,
"prs_reviewed_review_approvals_review_neutrals_review_rejections_reviews": 1352,
"regular_pr_comments": 1035,
"review_pr_comments": 1604,
"pr_comments": 2639,
}
with ZipFile(io.BytesIO(body)) as zipf:
with zipf.open("prs.parquet") as prsf:
prs_df = pd.read_parquet(prsf)
with zipf.open("releases.parquet") as releasesf:
releases_df = pd.read_parquet(releasesf)
for key, size in developer_dfs.items():
with zipf.open(f"developers_{key}.parquet") as devf:
df = pd.read_parquet(devf)
assert len(df) == size
with zipf.open("check_runs.parquet") as checkf:
check_runs_df = pd.read_parquet(checkf)
with zipf.open("jira_issues.parquet") as jiraf:
jira_issues_df = pd.read_parquet(jiraf)
with zipf.open("deployments.parquet") as depsf:
deps_df = pd.read_parquet(depsf)
with zipf.open("deployments_components.parquet") as depscompsf:
depscomps_df = pd.read_parquet(depscompsf)
with zipf.open("deployments_releases.parquet") as depsrelsf:
depsrels_df = pd.read_parquet(depsrelsf)
with zipf.open("deployments_labels.parquet") as depslblsf:
depslbls_df = pd.read_parquet(depslblsf)
assert len(prs_df) == 679
assert set(prs_df) == {
"first_comment_on_first_review", "merged_by_login", "first_commit", "stage_time_review",
"title", "updated_at", "acc_id", "base_ref", "last_commit", "stage_time_wip", "deletions",
"repository_node_id", "author", "hidden", "merged", "merged_at", "number", "created",
"merge_commit_sha", "first_review_request_exact", "stage_time_release", "released",
"stage_time_merge", "created_at", "user_login", "htmlurl", "approved", "closed_at",
"changed_files", "last_commit_before_first_review", "force_push_dropped", "additions",
"work_began", "releaser", "merged_by_id", "user_node_id", "repository_full_name",
"first_review_request", "last_review", "activity_days", "closed", "merge_commit_id",
"head_ref", "jira_ids", "merger", "done", "size", "reviews", "release_url",
"release_node_id", "review_comments", "participants", "deployments", "deployed",
"environments", "deployment_conclusions",
}
assert len(releases_df) == 53
assert set(releases_df) == {
"additions", "age", "commit_authors", "commits_count", "deletions", "matched_by", "name",
"prs_additions", "prs_deletions", "prs_node_id", "prs_number", "prs_title", "prs_jira",
"prs_user_node_id", "published", "publisher", "repository_full_name", "sha", "url",
"deployments",
}
assert len(check_runs_df) == 4614
assert set(check_runs_df) == {
"acc_id", "additions", "author_login", "author_user_id", "authored_date", "changed_files",
"check_run_node_id", "check_suite_conclusion", "check_suite_node_id", "check_suite_status",
"commit_node_id", "committed_date", "completed_at", "conclusion", "deletions", "name",
"pull_request_node_id", "repository_full_name", "repository_node_id", "sha", "started_at",
"status", "url", "check_suite_started", "check_suite_completed", "pull_request_created_at",
"pull_request_closed_at", "pull_request_merged",
}
assert len(jira_issues_df) == 1797
assert set(jira_issues_df) == {
"assignee", "category_name", "commenters", "created", "epic_id", "labels", "pr_ids",
"priority_name", "prs_began", "prs_count", "prs_released", "reporter", "resolved",
"status", "type", "updated", "work_began",
}
assert len(deps_df) == 1
assert set(deps_df.columns) == {
"commit_authors", "commits_overall", "commits_prs", "conclusion", "environment",
"finished_at", "lines_overall", "lines_prs", "pr_authors", "prs", "prs_offsets",
"release_authors", "repositories", "started_at", "url",
}
assert len(depscomps_df) == 1
assert set(depscomps_df.columns) == {
"repository_node_id", "reference", "resolved_commit_node_id", "deployment_name",
}
assert len(depsrels_df) == 51
assert set(depsrels_df.columns) == {
"commit_authors", "prs_node_id", "prs_number", "prs_additions",
"prs_deletions", "prs_user_node_id", "prs_title", "prs_jira",
"deployments", "age", "additions", "deletions", "commits_count",
"repository_node_id", "author_node_id", "name",
"published_at", "tag", "url", "sha", "commit_id", "matched_by",
"author", "deployment_name",
}
assert len(depslbls_df) == 1
assert set(depslbls_df.columns) == {"deployment_name", "key", "value"}
@pytest.mark.parametrize("query, code", [
("?account=2", 422),
("?account=3", 404),
("?account=1&format=other", 400),
("?account=1&format=parquet", 200),
("", 400),
("<empty>", 200),
])
async def test_get_everything_nasty_input(client, headers, query, code, sdb):
if query == "<empty>":
query = ""
await sdb.execute(delete(UserAccount).where(UserAccount.account_id == 2))
response = await client.request(
method="GET", path=f"/v1/get/export{query}", headers=headers,
)
assert response.status == code
|
import time
import psycopg2
from neo4j import GraphDatabase
from redis import Redis
from timeout_decorator import timeout
from common.log import logger
from config import cfg
def wait_for_external_services(
wait_for_pg: bool = True,
wait_for_neo4j: bool = True,
wait_for_pubsub: bool = True,
wait_for_cache: bool = True,
interval: float = 5,
max_retries: int = 60) -> bool:
"""Wait for external services to be responsive."""
retry_count = 0
everything_is_up = False
while not everything_is_up:
try:
if wait_for_neo4j:
logger.info("Waiting for Neo4j...")
driver = GraphDatabase.driver(
cfg.neo4j_uri,
auth=(cfg.neo4j_user, cfg.neo4j_password))
@timeout(3)
def neo4j_healthcheck():
with driver.session() as session:
session.read_transaction(lambda tx: list(tx.run("""
MATCH (healthcheck:Profile {id: 'healthcheck'})
RETURN healthcheck""")))
neo4j_healthcheck()
logger.info("Neo4j is ready!")
if wait_for_pg:
logger.info("Waiting for PostgreSQL...")
psycopg2.connect(f"postgresql://{cfg.postgres_uri}")
logger.info("PostgreSQL is ready!")
if wait_for_pubsub:
logger.info("Waiting for PubSub...")
Redis(
host=cfg.pubsub_uri.split("//")[1].split(":")[0],
port=cfg.pubsub_uri.split("//")[1].split(":")[1]) \
.get("healthcheck")
logger.info("PubSub is ready!")
if wait_for_cache:
logger.info("Waiting for Cache...")
Redis(
host=cfg.cache_uri.split("//")[1].split(":")[0],
port=cfg.cache_uri.split("//")[1].split(":")[1]) \
.get("healthcheck")
logger.info("Cache is ready!")
logger.info("All external services are ready.")
everything_is_up = True
        except Exception as exc:
            retry_count += 1
            if retry_count > max_retries:
                raise TimeoutError(f"External services not ready after {max_retries} retries") from exc
logger.info(f"Retry n. {retry_count}...")
time.sleep(interval)
return True
if __name__ == "__main__":
wait_for_external_services(wait_for_cache=False)
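    # The flags and retry cadence can be tuned per environment, e.g. a
    # fast-failing CI run might use:
    # wait_for_external_services(wait_for_neo4j=False, interval=1, max_retries=10)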
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('section', '0004_aboutsection'),
]
operations = [
migrations.CreateModel(
name='Catalog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=80, blank=True)),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to=b'category_pic', verbose_name='Image', blank=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, blank=True)),
('slug', models.SlugField(unique=True)),
('image', models.ImageField(upload_to=b'product_pic', verbose_name='Image', blank=True)),
('description', models.TextField(help_text=b'Describe product', blank=True)),
('price', models.DecimalField(max_digits=15, decimal_places=2)),
('sale_price', models.DecimalField(null=True, max_digits=15, decimal_places=2)),
('is_active', models.BooleanField(default=True)),
('is_featured', models.BooleanField(default=False)),
('quantity', models.IntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('catalog', models.ForeignKey(to='section.Catalog')),
],
),
]
|
import os
import numpy as np
import processor
import paddlehub as hub
import paddle
import paddle.fluid as fluid
from mobilenet_ssd import mobile_net
def build_program():
image_shape = [3, 300, 300]
class_num = 21
image = fluid.layers.data(dtype="float32", shape=image_shape, name="image")
gt_box = fluid.layers.data(
dtype="float32", shape=[4], name="gtbox", lod_level=1)
gt_label = fluid.layers.data(
dtype="int32", shape=[1], name="label", lod_level=1)
difficult = fluid.layers.data(
dtype="int32", shape=[1], name="difficult", lod_level=1)
with fluid.unique_name.guard():
locs, confs, box, box_var = mobile_net(class_num, image, image_shape)
nmsed_out = fluid.layers.detection_output(
locs, confs, box, box_var, nms_threshold=0.45)
return image, nmsed_out
def create_module():
image, nmsed_out = build_program()
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
pretrained_model = "resources/ssd_mobilenet_v1_pascalvoc"
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
assets = ["resources/label_list.txt"]
sign = hub.create_signature(
"object_detection",
inputs=[image],
outputs=[nmsed_out],
for_predict=True)
hub.create_module(
sign_arr=[sign],
module_dir="hub_module_ssd",
module_info="resources/module_info.yml",
exe=exe,
processor=processor.Processor,
assets=assets)
if __name__ == '__main__':
create_module()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Lint as: python3
"""Moses detokenizer.
Moses detokenizer
"""
from __future__ import print_function
import re
import unicodedata
import six
OUTPUT = ('third_party/tensorflow_models/mlperf/models'
'/rough/nmt/testdata/deen_output')
def is_currency(token):
for c in token:
if unicodedata.category(c) != 'Sc':
return False
return True
# operates on unicode string and returns unicode string in Python 2
# operates on string and returns string in Python 3
def detokenize_sentence(token_list):
"""Detokenize a single sentence."""
right_shift_punctuations = re.compile(r'^[\(\[\{]+$')
left_shift_punctuations = re.compile(r'^[,\.\?!:;\\\%\}\]\)]+$')
contraction = re.compile(u'^[\'|’][a-zA-Z]')
pre_contraction = re.compile('[a-zA-Z0-9]$')
quotes = re.compile(u'^[\'\"„“”]$')
detok_str = ''
prepend_space = ' '
quote_count = {'\'': 0, '\"': 0}
for i in range(len(token_list)):
if is_currency(token_list[i]) or \
right_shift_punctuations.match(token_list[i]):
detok_str += prepend_space + token_list[i]
prepend_space = ''
elif left_shift_punctuations.match(token_list[i]):
detok_str += token_list[i]
prepend_space = ' '
elif i > 0 and contraction.match(token_list[i]) and \
pre_contraction.search(token_list[i-1]):
detok_str += token_list[i]
prepend_space = ' '
elif quotes.match(token_list[i]):
normalized_quo = token_list[i]
normalized_quo = '\"' if re.match(u'^[„“”]$', token_list[i]) \
else normalized_quo
assert normalized_quo in quote_count
if quote_count[normalized_quo] % 2 == 0:
if normalized_quo == '\'' and i > 0 and \
re.search('s$', token_list[i-1]):
detok_str += token_list[i]
prepend_space = ' '
else:
detok_str += prepend_space + token_list[i]
prepend_space = ''
quote_count[normalized_quo] += 1
else:
detok_str += token_list[i]
prepend_space = ' '
quote_count[normalized_quo] += 1
else:
detok_str += prepend_space + token_list[i]
prepend_space = ' '
detok_str = detok_str.strip()
if detok_str:
detok_str += '\n'
if six.PY2:
detok_str = detok_str.encode('utf-8')
return detok_str
def deescape(text):
"""De-escape text."""
text = re.sub('&bar;', '|', text)
text = re.sub('|', '|', text)
text = re.sub('<', '<', text)
text = re.sub('>', '>', text)
text = re.sub('&bra;', '[', text)
text = re.sub('&ket;', ']', text)
text = re.sub('"', '\"', text)
text = re.sub(''', '\'', text)
text = re.sub('[', '[', text)
text = re.sub(']', ']', text)
text = re.sub('&', '&', text)
return text
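# For example, deescape('a &lt; b &amp; c') returns 'a < b & c'; '&amp;' is
# substituted last so that freshly produced '&' characters stay literal.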
def detokenize(text):
detok_list = []
for line in text:
        if line == '\n':
            # Preserve blank lines as-is; detokenize_sentence returns '' for them
            detok_list.append(line)
detok = detokenize_sentence(deescape(line.strip()).split())
if detok:
detok_list.append(detok)
return detok_list
def main():
if six.PY3:
with open(OUTPUT, 'r', encoding='utf-8') as fobj:
detok_list = detokenize(fobj.readlines())
for line in detok_list:
print(line, end='')
else:
with open(OUTPUT, 'r') as fobj:
detok_list = detokenize([x.decode('utf-8') for x in fobj.readlines()])
for line in detok_list:
print(line, end='')
if __name__ == '__main__':
main()
|
import apache_beam as beam
import logging
from joblib import load
import numpy as np
import pandas as pd
from google.cloud import storage
from apache_beam.options.pipeline_options import StandardOptions, GoogleCloudOptions, SetupOptions, PipelineOptions
from sklearn.ensemble import RandomForestClassifier
# Global lookup tables shared by the DoFns below
dummy_dict = {'Yes': 1, 'No': 0}
internet_dict = {'No': 0, 'No internet service': 1, 'Yes': 2}
yesno_cols = ['Partner', 'Dependents', 'PhoneService', 'PaperlessBilling']
internet_cols = ['OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies']
# =============================================================================
# Build and run the pipeline
# =============================================================================
def run(argv=None):
pipeline_options = PipelineOptions(flags=argv)
google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
google_cloud_options.project = 'your-project' #change this
google_cloud_options.job_name = 'telco-churn-prediction'
google_cloud_options.staging_location = 'gs://path/to/your/staging' #change this
google_cloud_options.temp_location = 'gs://path/to/your/temp' #change this
pipeline_options.view_as(StandardOptions).runner = 'DataflowRunner'
pipeline_options.view_as(SetupOptions).save_main_session = True
pipeline_options.view_as(SetupOptions).setup_file = "./setup.py"
logging.info("Pipeline arguments: {}".format(pipeline_options))
# table_schema = 'customerID: STRING, prediction: FLOAT'
query = ('select * from `your-project.Telco_Churn.input`')
bq_source = beam.io.BigQuerySource(query=query, use_standard_sql=True)
p = beam.Pipeline(options=pipeline_options)
(p
| "Read data from BQ" >> beam.io.Read(bq_source)
| "Preprocess data" >> beam.ParDo(FormatInput())
| "predicting" >> beam.ParDo(
PredictSklearn(project='your-project', bucket_name='your-bucket-name', model_path='/path/to/model_rf.joblib',
destination_name='model_rf.joblib'))
| "Write data to BQ" >> beam.io.WriteToBigQuery(table='prediction', dataset='Telco_Churn', project='your-project',
# schema=table_schema,
# create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
)
result = p.run()
result.wait_until_finish()
# =============================================================================
# Function to download model from bucket
# =============================================================================
def download_blob(bucket_name=None, source_blob_name=None, project=None, destination_file_name=None):
storage_client = storage.Client(project)
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
class FormatInput(beam.DoFn):
def process(self, element):
""" Format the input to the desired shape"""
df = pd.DataFrame([element], columns=element.keys())
df[yesno_cols] = df[yesno_cols].apply(lambda x: x.map(dummy_dict))
df[internet_cols] = df[internet_cols].apply(lambda x: x.map(internet_dict))
df['gender'] = df['gender'].map({'Female': 0, 'Male': 1})
df['MultipleLines'] = df['MultipleLines'].map({'No': 0, 'No phone service': 1, 'Yes': 2})
df['InternetService'] = df['InternetService'].map({'DSL': 0, 'Fiber optic': 1, 'No': 2})
df['Contract'] = df['Contract'].map({'Month-to-month': 0, 'One year': 1, 'Two year': 2})
df['PaymentMethod'] = df['PaymentMethod'].map(
{'Bank transfer (automatic)': 0, 'Credit card (automatic)': 1, 'Electronic check': 2, 'Mailed check': 3})
output = df.to_dict('records')
        return output  # list with one dict per input row (from df.to_dict('records'))
class PredictSklearn(beam.DoFn):
def __init__(self, project=None, bucket_name=None, model_path=None, destination_name=None):
self._model = None
self._project = project
self._bucket_name = bucket_name
self._model_path = model_path
self._destination_name = destination_name
def setup(self):
"""Download sklearn model from GCS"""
logging.info(
"Sklearn model initialisation {}".format(self._model_path))
download_blob(bucket_name=self._bucket_name, source_blob_name=self._model_path,
project=self._project, destination_file_name=self._destination_name)
# unpickle sklearn model
self._model = load(self._destination_name)
def process(self, element):
"""Predicting using developed model"""
input_dat = {k: element[k] for k in element.keys() if k not in ['customerID']}
        tmp = np.array(list(input_dat.values()))
tmp = tmp.reshape(1, -1)
element["prediction"] = self._model.predict_proba(tmp)[:,1].item()
output = {k: element[k] for k in element.keys() if k in ['customerID', 'prediction']}
output['customerID'] = str(output['customerID'])
return [output]
# Configure logging and launch the pipeline
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
|
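# Advent of Code 2019, day 1 (part 1): each module of mass m needs
# m // 3 - 2 fuel, e.g. a mass of 14 gives 14 // 3 - 2 = 2.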
with open("input1.txt","r") as f:
l=[(int(line)//3)-2 for line in f.readlines()]
print(sum(l))
|
## Nov 2018: Plotting map of MEaSUREs outlets over satellite imagery
## Apr 2019 edit: Visualise hindcast validation, e.g. total retreat and misfit with observation
## EHU
from mpl_toolkits.basemap import Basemap
import mpl_toolkits.basemap.pyproj as pyproj
import numpy as np
import matplotlib.pyplot as plt
import shapefile
## Special import for SERMeQ modules
import sys
sys.path.insert(0, 'Documents/GitHub/plastic-networks')
from SERMeQ.GL_model_tools import Greenland_map, flowline_latlon, read_termini
##--------------------------
## SET UP OUTLET MARKERS
##--------------------------
gl_termpos_fldr = 'Documents/GitHub/Data_unsynced/MEaSUREs-termini'
terminus_basefile = '/termini_1415_v01_2'
init_year = 2014
fn = gl_termpos_fldr + terminus_basefile #filename to read in for termini that will be traced
termini_init = read_termini(fn, init_year)
all_coords_latlon = {}
for gid in termini_init.keys():
latlon_coords = flowline_latlon(termini_init[gid])
all_coords_latlon[gid] = np.asarray(latlon_coords)
##--------------------------
## MAKE PLOT
##--------------------------
gld_backdrop = Basemap(projection='npstere', boundinglat=70, lon_0=315, epsg=3413, llcrnrlon=300, llcrnrlat=57, urcrnrlon=20, urcrnrlat=80, resolution='h')
plt.figure()
gld_backdrop.arcgisimage(service='ESRI_Imagery_World_2D', xpixels=5000)
for k in termini_init.keys():
pt = all_coords_latlon[k][0]
gld_backdrop.scatter(pt[0], pt[1], s=40, marker='o', color='Blue', edgecolors='DarkViolet', latlon=True)
#term_marker = gld_backdrop(pt[0], pt[1])
#offset = 100 * np.mod(k,2)
#plt.annotate(s=str(k), xy=term_marker, fontsize='small', fontweight='demi', color='MediumBlue')
# Now plot every tenth glacier on top with star markers
for k in np.arange(10, 201, 10):
pt = all_coords_latlon[k][0]
gld_backdrop.scatter(pt[0], pt[1], s=180, marker='*', color='Yellow', edgecolors='Gold', latlon=True)
plt.show()
|
# Copyright (c) 2008,2015,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `kinematics` module."""
import numpy as np
import pytest
from metpy.calc import (advection, convergence_vorticity, divergence,
frontogenesis, geostrophic_wind, get_wind_components, h_convergence,
lat_lon_grid_deltas, lat_lon_grid_spacing, montgomery_streamfunction,
shearing_deformation, shearing_stretching_deformation,
storm_relative_helicity, stretching_deformation, total_deformation,
v_vorticity, vorticity)
from metpy.constants import g, omega, Re
from metpy.deprecation import MetpyDeprecationWarning
from metpy.testing import assert_almost_equal, assert_array_equal
from metpy.units import concatenate, units
def test_default_order_warns():
"""Test that using the default array ordering issues a warning."""
u = np.ones((3, 3)) * units('m/s')
with pytest.warns(UserWarning):
vorticity(u, u, 1 * units.meter, 1 * units.meter)
def test_zero_gradient():
"""Test divergence_vorticity when there is no gradient in the field."""
u = np.ones((3, 3)) * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
c, v = convergence_vorticity(u, u, 1 * units.meter, 1 * units.meter, dim_order='xy')
truth = np.zeros_like(u) / units.sec
assert_array_equal(c, truth)
assert_array_equal(v, truth)
def test_cv_zero_vorticity():
"""Test divergence_vorticity when there is only divergence."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
c, v = convergence_vorticity(u, u.T, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_c = 2. * np.ones_like(u) / units.sec
true_v = np.zeros_like(u) / units.sec
assert_array_equal(c, true_c)
assert_array_equal(v, true_v)
def test_divergence_vorticity():
"""Test of vorticity and divergence calculation for basic case."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
c, v = convergence_vorticity(u, u, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_c = np.ones_like(u) / units.sec
true_v = np.ones_like(u) / units.sec
assert_array_equal(c, true_c)
assert_array_equal(v, true_v)
def test_vorticity_divergence_asym():
"""Test vorticity and divergence calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
c, vort = convergence_vorticity(u, v, 1 * units.meters, 2 * units.meters,
dim_order='yx')
true_c = np.array([[-2, 5.5, -2.5], [2., 0.5, -1.5], [3., -1.5, 8.5]]) / units.sec
true_vort = np.array([[-2.5, 3.5, 13.], [8.5, -1.5, -11.], [-5.5, -1.5, 0.]]) / units.sec
assert_array_equal(c, true_c)
assert_array_equal(vort, true_vort)
# Now try for xy ordered
with pytest.warns(MetpyDeprecationWarning):
c, vort = convergence_vorticity(u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_array_equal(c, true_c.T)
assert_array_equal(vort, true_vort.T)
def test_zero_vorticity():
"""Test vorticity calculation when zeros should be returned."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
v = vorticity(u, u.T, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_v = np.zeros_like(u) / units.sec
assert_array_equal(v, true_v)
def test_vorticity():
"""Test vorticity for simple case."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
v = vorticity(u, u, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_v = np.ones_like(u) / units.sec
assert_array_equal(v, true_v)
def test_vorticity_asym():
"""Test vorticity calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
vort = vorticity(u, v, 1 * units.meters, 2 * units.meters, dim_order='yx')
true_vort = np.array([[-2.5, 3.5, 13.], [8.5, -1.5, -11.], [-5.5, -1.5, 0.]]) / units.sec
assert_array_equal(vort, true_vort)
# Now try for xy ordered
vort = vorticity(u.T, v.T, 1 * units.meters, 2 * units.meters, dim_order='xy')
assert_array_equal(vort, true_vort.T)
def test_zero_divergence():
"""Test divergence calculation when zeros should be returned."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
c = divergence(u, u.T, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_c = 2. * np.ones_like(u) / units.sec
assert_array_equal(c, true_c)
def test_divergence():
"""Test divergence for simple case."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
c = divergence(u, u, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_c = np.ones_like(u) / units.sec
assert_array_equal(c, true_c)
def test_divergence_asym():
"""Test divergence calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
c = divergence(u, v, 1 * units.meters, 2 * units.meters, dim_order='yx')
true_c = np.array([[-2, 5.5, -2.5], [2., 0.5, -1.5], [3., -1.5, 8.5]]) / units.sec
assert_array_equal(c, true_c)
# Now try for xy ordered
c = divergence(u.T, v.T, 1 * units.meters, 2 * units.meters, dim_order='xy')
assert_array_equal(c, true_c.T)
def test_shst_zero_gradient():
"""Test shear_stretching_deformation when there is zero gradient."""
u = np.ones((3, 3)) * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
sh, st = shearing_stretching_deformation(u, u, 1 * units.meter, 1 * units.meter,
dim_order='xy')
truth = np.zeros_like(u) / units.sec
assert_array_equal(sh, truth)
assert_array_equal(st, truth)
def test_shst_zero_stretching():
"""Test shear_stretching_deformation when there is only shearing."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
sh, st = shearing_stretching_deformation(u, u.T, 1 * units.meter, 1 * units.meter,
dim_order='yx')
true_sh = 2. * np.ones_like(u) / units.sec
true_st = np.zeros_like(u) / units.sec
assert_array_equal(sh, true_sh)
assert_array_equal(st, true_st)
def test_shst_deformation():
"""Test of shearing and stretching deformation calculation for basic case."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
sh, st = shearing_stretching_deformation(u, u, 1 * units.meter, 1 * units.meter,
dim_order='xy')
true_sh = np.ones_like(u) / units.sec
true_st = np.ones_like(u) / units.sec
    assert_array_equal(sh, true_sh)
    assert_array_equal(st, true_st)
def test_shst_deformation_asym():
"""Test shearing and stretching deformation calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
sh, st = shearing_stretching_deformation(u, v, 1 * units.meters, 2 * units.meters,
dim_order='yx')
true_sh = np.array([[-7.5, -1.5, 1.], [9.5, -0.5, -11.], [1.5, 5.5, 12.]]) / units.sec
true_st = np.array([[4., 0.5, 12.5], [4., 1.5, -0.5], [1., 5.5, -4.5]]) / units.sec
assert_array_equal(sh, true_sh)
assert_array_equal(st, true_st)
    # Now try for xy ordered
with pytest.warns(MetpyDeprecationWarning):
sh, st = shearing_stretching_deformation(u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_array_equal(sh, true_sh.T)
assert_array_equal(st, true_st.T)
def test_shearing_deformation_asym():
"""Test shearing deformation calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
sh = shearing_deformation(u, v, 1 * units.meters, 2 * units.meters, dim_order='yx')
true_sh = np.array([[-7.5, -1.5, 1.], [9.5, -0.5, -11.], [1.5, 5.5, 12.]]) / units.sec
assert_array_equal(sh, true_sh)
    # Now try for xy ordered
sh = shearing_deformation(u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_array_equal(sh, true_sh.T)
def test_stretching_deformation_asym():
"""Test stretching deformation calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
st = stretching_deformation(u, v, 1 * units.meters, 2 * units.meters, dim_order='yx')
true_st = np.array([[4., 0.5, 12.5], [4., 1.5, -0.5], [1., 5.5, -4.5]]) / units.sec
assert_array_equal(st, true_st)
    # Now try for xy ordered
st = stretching_deformation(u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_array_equal(st, true_st.T)
def test_total_deformation_asym():
"""Test total deformation calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
tdef = total_deformation(u, v, 1 * units.meters, 2 * units.meters,
dim_order='yx')
true_tdef = np.array([[8.5, 1.58113883, 12.5399362], [10.30776406, 1.58113883, 11.0113578],
[1.80277562, 7.7781746, 12.8160056]]) / units.sec
assert_almost_equal(tdef, true_tdef)
# Now try for xy ordered
tdef = total_deformation(u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_almost_equal(tdef, true_tdef.T)
def test_frontogenesis_asym():
"""Test frontogensis calculation with a complicated field."""
u = np.array([[2, 4, 8], [0, 2, 2], [4, 6, 8]]) * units('m/s')
v = np.array([[6, 4, 8], [2, 6, 0], [2, 2, 6]]) * units('m/s')
theta = np.array([[303, 295, 305], [308, 310, 312], [299, 293, 289]]) * units('K')
fronto = frontogenesis(theta, u, v, 1 * units.meters, 2 * units.meters,
dim_order='yx')
true_fronto = np.array([[-52.4746386, -37.3658646, -50.3996939],
[3.5777088, -2.1221867, -16.9941166],
[-23.1417334, 26.0499143, -158.4839684]]
) * units.K / units.meter / units.sec
assert_almost_equal(fronto, true_fronto)
# Now try for xy ordered
fronto = frontogenesis(theta.T, u.T, v.T, 1 * units.meters, 2 * units.meters,
dim_order='xy')
assert_almost_equal(fronto, true_fronto.T)
def test_advection_uniform():
"""Test advection calculation for a uniform 1D field."""
u = np.ones((3,)) * units('m/s')
s = np.ones_like(u) * units.kelvin
a = advection(s, u, (1 * units.meter,), dim_order='xy')
truth = np.zeros_like(u) * units('K/sec')
assert_array_equal(a, truth)
def test_advection_1d_uniform_wind():
"""Test advection for simple 1D case with uniform wind."""
u = np.ones((3,)) * units('m/s')
s = np.array([1, 2, 3]) * units('kg')
a = advection(s, u, (1 * units.meter,), dim_order='xy')
truth = -np.ones_like(u) * units('kg/sec')
assert_array_equal(a, truth)
def test_advection_1d():
"""Test advection calculation with varying wind and field."""
u = np.array([1, 2, 3]) * units('m/s')
s = np.array([1, 2, 3]) * units('Pa')
a = advection(s, u, (1 * units.meter,), dim_order='xy')
truth = np.array([-1, -2, -3]) * units('Pa/sec')
assert_array_equal(a, truth)
def test_advection_2d_uniform():
"""Test advection for uniform 2D field."""
u = np.ones((3, 3)) * units('m/s')
s = np.ones_like(u) * units.kelvin
a = advection(s, [u, u], (1 * units.meter, 1 * units.meter), dim_order='xy')
truth = np.zeros_like(u) * units('K/sec')
assert_array_equal(a, truth)
def test_advection_2d():
"""Test advection in varying 2D field."""
u = np.ones((3, 3)) * units('m/s')
v = 2 * np.ones((3, 3)) * units('m/s')
s = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) * units.kelvin
a = advection(s, [u, v], (1 * units.meter, 1 * units.meter), dim_order='xy')
truth = np.array([[-6, -4, 2], [-8, 0, 8], [-2, 4, 6]]) * units('K/sec')
assert_array_equal(a, truth)
def test_advection_2d_asym():
"""Test advection in asymmetric varying 2D field."""
u = np.arange(9).reshape(3, 3) * units('m/s')
v = 2 * u
s = np.array([[1, 2, 4], [4, 8, 4], [8, 6, 4]]) * units.kelvin
a = advection(s, [u, v], (2 * units.meter, 1 * units.meter), dim_order='yx')
truth = np.array([[0, -20.75, -2.5], [-33., -16., 20.], [-48, 91., 8]]) * units('K/sec')
assert_array_equal(a, truth)
# Now try xy ordered
a = advection(s.T, [u.T, v.T], (2 * units.meter, 1 * units.meter), dim_order='xy')
assert_array_equal(a, truth.T)
def test_geostrophic_wind():
"""Test geostrophic wind calculation with basic conditions."""
z = np.array([[48, 49, 48], [49, 50, 49], [48, 49, 48]]) * 100. * units.meter
# Using g as the value for f allows it to cancel out
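    # u_g = -(g/f) * dZ/dy and v_g = (g/f) * dZ/dx, so with f set numerically
    # equal to g the prefactor reduces to one and the winds follow directly
    # from the height gradients.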
ug, vg = geostrophic_wind(z, g.magnitude / units.sec,
100. * units.meter, 100. * units.meter, dim_order='xy')
true_u = np.array([[-2, 0, 2]] * 3) * units('m/s')
true_v = -true_u.T
assert_array_equal(ug, true_u)
assert_array_equal(vg, true_v)
def test_geostrophic_wind_asym():
"""Test geostrophic wind calculation with a complicated field."""
z = np.array([[1, 2, 4], [4, 8, 4], [8, 6, 4]]) * 200. * units.meter
# Using g as the value for f allows it to cancel out
ug, vg = geostrophic_wind(z, g.magnitude / units.sec,
200. * units.meter, 100. * units.meter, dim_order='yx')
true_u = -np.array([[5, 20, 0], [7, 4, 0], [9, -12, 0]]) * units('m/s')
true_v = np.array([[0.5, 1.5, 2.5], [8, 0, -8], [-2, -2, -2]]) * units('m/s')
assert_array_equal(ug, true_u)
assert_array_equal(vg, true_v)
# Now try for xy ordered
ug, vg = geostrophic_wind(z.T, g.magnitude / units.sec,
200. * units.meter, 100. * units.meter, dim_order='xy')
assert_array_equal(ug, true_u.T)
assert_array_equal(vg, true_v.T)
def test_geostrophic_geopotential():
"""Test geostrophic wind calculation with geopotential."""
z = np.array([[48, 49, 48], [49, 50, 49], [48, 49, 48]]) * 100. * units('m^2/s^2')
ug, vg = geostrophic_wind(z, 1 / units.sec, 100. * units.meter, 100. * units.meter,
dim_order='xy')
true_u = np.array([[-2, 0, 2]] * 3) * units('m/s')
true_v = -true_u.T
assert_array_equal(ug, true_u)
assert_array_equal(vg, true_v)
def test_geostrophic_3d():
"""Test geostrophic wind calculation with 3D array."""
z = np.array([[48, 49, 48], [49, 50, 49], [48, 49, 48]]) * 100.
# Using g as the value for f allows it to cancel out
z3d = np.dstack((z, z)) * units.meter
ug, vg = geostrophic_wind(z3d, g.magnitude / units.sec,
100. * units.meter, 100. * units.meter, dim_order='xy')
true_u = np.array([[-2, 0, 2]] * 3) * units('m/s')
true_v = -true_u.T
true_u = concatenate((true_u[..., None], true_u[..., None]), axis=2)
true_v = concatenate((true_v[..., None], true_v[..., None]), axis=2)
assert_array_equal(ug, true_u)
assert_array_equal(vg, true_v)
def test_geostrophic_gempak():
"""Test of geostrophic wind calculation against gempak values."""
z = np.array([[5586387.00, 5584467.50, 5583147.50],
[5594407.00, 5592487.50, 5591307.50],
[5604707.50, 5603247.50, 5602527.50]]).T \
* (9.80616 * units('m/s^2')) * 1e-3
dx = np.deg2rad(0.25) * Re * np.cos(np.deg2rad(44))
# Inverting dy since latitudes in array increase as you go up
dy = -np.deg2rad(0.25) * Re
f = (2 * omega * np.sin(np.deg2rad(44))).to('1/s')
ug, vg = geostrophic_wind(z * units.m, f, dx, dy, dim_order='xy')
true_u = np.array([[21.97512, 21.97512, 22.08005],
[31.89402, 32.69477, 33.73863],
[38.43922, 40.18805, 42.14609]])
true_v = np.array([[-10.93621, -7.83859, -4.54839],
[-10.74533, -7.50152, -3.24262],
[-8.66612, -5.27816, -1.45282]])
assert_almost_equal(ug[1, 1], true_u[1, 1] * units('m/s'), 2)
assert_almost_equal(vg[1, 1], true_v[1, 1] * units('m/s'), 2)
def test_streamfunc():
"""Test of Montgomery Streamfunction calculation."""
t = 287. * units.kelvin
hgt = 5000. * units.meter
msf = montgomery_streamfunction(hgt, t)
assert_almost_equal(msf, 337468.2500 * units('m^2 s^-2'), 4)
def test_storm_relative_helicity_no_storm_motion():
"""Test storm relative helicity with no storm motion and differing input units."""
u = np.array([0, 20, 10, 0]) * units('m/s')
v = np.array([20, 0, 0, 10]) * units('m/s')
u = u.to('knots')
heights = np.array([0, 250, 500, 750]) * units.m
positive_srh, negative_srh, total_srh = storm_relative_helicity(u, v, heights,
depth=750 * units.meters)
assert_almost_equal(positive_srh, 400. * units('meter ** 2 / second ** 2 '), 6)
assert_almost_equal(negative_srh, -100. * units('meter ** 2 / second ** 2 '), 6)
assert_almost_equal(total_srh, 300. * units('meter ** 2 / second ** 2 '), 6)
def test_storm_relative_helicity_storm_motion():
"""Test storm relative helicity with storm motion and differing input units."""
u = np.array([5, 25, 15, 5]) * units('m/s')
v = np.array([30, 10, 10, 20]) * units('m/s')
u = u.to('knots')
heights = np.array([0, 250, 500, 750]) * units.m
pos_srh, neg_srh, total_srh = storm_relative_helicity(u, v, heights,
depth=750 * units.meters,
storm_u=5 * units('m/s'),
storm_v=10 * units('m/s'))
assert_almost_equal(pos_srh, 400. * units('meter ** 2 / second ** 2 '), 6)
assert_almost_equal(neg_srh, -100. * units('meter ** 2 / second ** 2 '), 6)
assert_almost_equal(total_srh, 300. * units('meter ** 2 / second ** 2 '), 6)
def test_storm_relative_helicity_with_interpolation():
"""Test storm relative helicity with interpolation."""
u = np.array([-5, 15, 25, 15, -5]) * units('m/s')
v = np.array([40, 20, 10, 10, 30]) * units('m/s')
u = u.to('knots')
heights = np.array([0, 100, 200, 300, 400]) * units.m
pos_srh, neg_srh, total_srh = storm_relative_helicity(u, v, heights,
bottom=50 * units.meters,
depth=300 * units.meters,
storm_u=5 * units('m/s'),
storm_v=10 * units('m/s'))
assert_almost_equal(pos_srh, 400. * units('meter ** 2 / second ** 2 '), 6)
assert_almost_equal(neg_srh, -100. * units('meter ** 2 / second ** 2 '), 6)
assert_almost_equal(total_srh, 300. * units('meter ** 2 / second ** 2 '), 6)
def test_storm_relative_helicity():
"""Test function for SRH calculations on an eigth-circle hodograph."""
# Create larger arrays for everything except pressure to make a smoother graph
hgt_int = np.arange(0, 2050, 50)
hgt_int = hgt_int * units('meter')
dir_int = np.arange(180, 272.25, 2.25)
spd_int = np.zeros((hgt_int.shape[0]))
spd_int[:] = 2.
u_int, v_int = get_wind_components(spd_int * units('m/s'), dir_int * units.degree)
    # Put in the correct value of SRH for an eighth-circle, 2 m/s hodograph
# (SRH = 2 * area under hodo, in this case...)
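    # area = pi * r**2 / 8 = pi / 2 for r = 2 m/s, so SRH = 2 * area = pi m^2/s^2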
srh_true_p = (.25 * np.pi * (2 ** 2)) * units('m^2/s^2')
# Since there's only positive SRH in this case, total SRH will be equal to positive SRH and
# negative SRH will be zero.
srh_true_t = srh_true_p
srh_true_n = 0 * units('m^2/s^2')
p_srh, n_srh, T_srh = storm_relative_helicity(u_int, v_int,
hgt_int, 1000 * units('meter'),
bottom=0 * units('meter'),
storm_u=0 * units.knot,
storm_v=0 * units.knot)
assert_almost_equal(p_srh, srh_true_p, 2)
assert_almost_equal(n_srh, srh_true_n, 2)
assert_almost_equal(T_srh, srh_true_t, 2)
def test_lat_lon_grid_spacing_1d():
"""Test for lat_lon_grid_spacing for variable grid."""
lat = np.arange(40, 50, 2.5)
lon = np.arange(-100, -90, 2.5)
dx, dy = lat_lon_grid_spacing(lon, lat)
dx_truth = np.array([[212943.5585, 212943.5585, 212943.5585],
[204946.2305, 204946.2305, 204946.2305],
[196558.8269, 196558.8269, 196558.8269],
[187797.3216, 187797.3216, 187797.3216]]) * units.meter
dy_truth = np.array([[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857]]) * units.meter
assert_almost_equal(dx, dx_truth, 4)
assert_almost_equal(dy, dy_truth, 4)
def test_lat_lon_grid_spacing_2d():
"""Test for lat_lon_grid_spacing for variable grid."""
lat = np.arange(40, 50, 2.5)
lon = np.arange(-100, -90, 2.5)
lon, lat = np.meshgrid(lon, lat)
dx, dy = lat_lon_grid_spacing(lon, lat)
dx_truth = np.array([[212943.5585, 212943.5585, 212943.5585],
[204946.2305, 204946.2305, 204946.2305],
[196558.8269, 196558.8269, 196558.8269],
[187797.3216, 187797.3216, 187797.3216]]) * units.meter
dy_truth = np.array([[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857]]) * units.meter
assert_almost_equal(dx, dx_truth, 4)
assert_almost_equal(dy, dy_truth, 4)
def test_lat_lon_grid_spacing_mismatched_shape():
"""Test for lat_lon_grid_spacing for variable grid."""
lat = np.arange(40, 50, 2.5)
lon = np.array([[-100., -97.5, -95., -92.5],
[-100., -97.5, -95., -92.5],
[-100., -97.5, -95., -92.5],
[-100., -97.5, -95., -92.5]])
with pytest.raises(ValueError):
dx, dy = lat_lon_grid_spacing(lon, lat)
def test_v_vorticity():
"""Test that v_vorticity wrapper works (deprecated in 0.7)."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
v = v_vorticity(u, u, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_v = np.ones_like(u) / units.sec
assert_array_equal(v, true_v)
def test_convergence():
"""Test that convergence wrapper works (deprecated in 0.7)."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
c = h_convergence(u, u, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_c = np.ones_like(u) / units.sec
assert_array_equal(c, true_c)
def test_convergence_vorticity():
"""Test that convergence_vorticity wrapper works (deprecated in 0.7)."""
a = np.arange(3)
u = np.c_[a, a, a] * units('m/s')
with pytest.warns(MetpyDeprecationWarning):
c, v = convergence_vorticity(u, u, 1 * units.meter, 1 * units.meter, dim_order='xy')
true_c = np.ones_like(u) / units.sec
true_v = np.ones_like(u) / units.sec
assert_array_equal(c, true_c)
assert_array_equal(v, true_v)
def test_lat_lon_grid_deltas_1d():
"""Test for lat_lon_grid_spacing for variable grid."""
lat = np.arange(40, 50, 2.5)
lon = np.arange(-100, -90, 2.5)
dx, dy = lat_lon_grid_deltas(lon, lat)
dx_truth = np.array([[212943.5585, 212943.5585, 212943.5585],
[204946.2305, 204946.2305, 204946.2305],
[196558.8269, 196558.8269, 196558.8269],
[187797.3216, 187797.3216, 187797.3216]]) * units.meter
dy_truth = np.array([[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857]]) * units.meter
assert_almost_equal(dx, dx_truth, 4)
assert_almost_equal(dy, dy_truth, 4)
@pytest.mark.parametrize('flip_order', [False, True])
def test_lat_lon_grid_deltas_2d(flip_order):
"""Test for lat_lon_grid_spacing for variable grid with negative delta distances."""
lat = np.arange(40, 50, 2.5)
lon = np.arange(-100, -90, 2.5)
dx_truth = np.array([[212943.5585, 212943.5585, 212943.5585],
[204946.2305, 204946.2305, 204946.2305],
[196558.8269, 196558.8269, 196558.8269],
[187797.3216, 187797.3216, 187797.3216]]) * units.meter
dy_truth = np.array([[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857]]) * units.meter
if flip_order:
lon = lon[::-1]
lat = lat[::-1]
dx_truth = -1 * dx_truth[::-1]
dy_truth = -1 * dy_truth[::-1]
lon, lat = np.meshgrid(lon, lat)
dx, dy = lat_lon_grid_deltas(lon, lat)
assert_almost_equal(dx, dx_truth, 4)
assert_almost_equal(dy, dy_truth, 4)
def test_lat_lon_grid_deltas_mismatched_shape():
"""Test for lat_lon_grid_spacing for variable grid."""
lat = np.arange(40, 50, 2.5)
lon = np.array([[-100., -97.5, -95., -92.5],
[-100., -97.5, -95., -92.5],
[-100., -97.5, -95., -92.5],
[-100., -97.5, -95., -92.5]])
with pytest.raises(ValueError):
lat_lon_grid_deltas(lon, lat)
|
import pybem2d.core.bases as pcb
import pybem2d.core.segments as pcs
import pybem2d.core.quadrules as pcq
import pybem2d.core.kernels as pck
import pybem2d.core.mesh as pcm
import pybem2d.core.assembly as pca
import pybem2d.core.evaluation as pce
import pybem2d.core.visualization as pcv
import numpy as np
k=20
nelems=50
dirs=1/np.sqrt(2)*np.array([1.0,1.0])
# Define the mesh
sq1=pcs.polygon([[0,0],[1,0],[1,1],[0,1]])
sq2=pcs.polygon([[1.5,0],[2.5,0],[2.5,1],[1.5,1]])
mesh=pcm.Mesh([sq1,sq2])
mesh.discretize(nelems)
quadrule=pcq.GaussQuadrature() # A standard Gauss Quadrature with default parameters
mToB=pcb.Legendre.legendreBasis(mesh,2) # A basis of Legendre polynomials of degree 2
kernel=pck.AcousticCombined(k,k) # The combined potential layer
singleLayer=pck.AcousticSingleLayer(k)
assembly=pca.Assembly(mToB,quadrule)
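# Right-hand side for an incident plane wave u_inc = exp(1j*k*d.x) in the
# combined-field formulation: 2*(du_inc/dn - 1j*k*u_inc), which expands to
# the expression below.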
rhsfun=lambda t,x,n: 2j*k*np.exp(1j*k*(dirs[0]*x[0]+dirs[1]*x[1]))*(dirs[0]*n[0]+dirs[1]*n[1]-1)
rhs=assembly.projFun([rhsfun])
mKernel=assembly.getKernel(kernel)
mIdentity=assembly.getIdentity()
op=mIdentity+2*mKernel
print(op.shape)
coeffs=np.linalg.solve(op,rhs)
ev=pce.Evaluator(mToB,singleLayer,quadrule)
v=pcv.Visualizer(ev,[-1,4,-1,3],200,200,incWave=lambda x: np.exp(1j*k*(x[0]*dirs[0]+x[1]*dirs[1])))
v.fullField(-coeffs[:,0])
|
import uuid
import sqlalchemy
from web import db
def new_uuid():
return str(uuid.uuid4()).lower()
def new_double_uuid():
return u"{}-{}".format(uuid.uuid4(), uuid.uuid4()).lower()
class RecipientTypes(object):
campaign = 0
character = 1
chat = 2
class MessageTypes(object):
action = 0
speech = 1
group_users = db.Table('group_user',
db.Column('user_id', db.String(36), db.ForeignKey("user.id"), nullable=False, primary_key=True),
db.Column('group_id', db.String(36), db.ForeignKey("group.id"), nullable=False, primary_key=True)
)
campaign_characters = db.Table('campaign_character',
db.Column('character_id', db.String(36), db.ForeignKey("character.id"), nullable=False, primary_key=True),
db.Column('campaign_id', db.String(36), db.ForeignKey("campaign.id"), nullable=False, primary_key=True)
)
chat_participants = db.Table('chat_participant',
db.Column('chat_id', db.String(36), db.ForeignKey("chat.id"), nullable=False, primary_key=True),
db.Column('character_id', db.String(36), db.ForeignKey("character.id"), nullable=False, primary_key=True)
)
class OAuthCredential(db.Model):
sub = db.Column(db.String(255), primary_key=True)
refresh_token = db.Column(db.String(255), nullable=False)
id_token_json = db.Column(db.Text, nullable=False)
credential_json = db.Column(db.Text, nullable=False)
user_id = db.Column(db.String(36), db.ForeignKey("user.id"), nullable=False)
user = db.relationship('User', backref=db.backref('oauth_credential', uselist=False), uselist=False)
class User(db.Model):
id = db.Column(db.String(36), primary_key=True, default=new_uuid)
name = db.Column(db.Text)
groups = db.relationship('Group', secondary=group_users, back_populates='users')
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
class Reservation(db.Model):
email = db.Column(db.String(320), primary_key=True)
user_id = db.Column(db.String(36), db.ForeignKey("user.id"), nullable=False)
class Invitation(db.Model):
id = db.Column(db.String(73), primary_key=True, default=new_double_uuid)
campaign_id = db.Column(db.String(36), db.ForeignKey("campaign.id"), nullable=False)
campaign = db.relationship('Campaign', backref='invitations', uselist=False)
class Group(db.Model):
id = db.Column(db.String(36), primary_key=True, default=new_uuid)
name = db.Column(db.Text)
users = db.relationship('User', secondary=group_users, back_populates='groups')
class Character(db.Model):
# a character can be in many campaigns
# a character is played by one user (at a time)
id = db.Column(db.String(36), primary_key=True, default=new_uuid)
name = db.Column(db.Text)
bio = db.Column(db.Text)
notes = db.Column(db.Text)
dm_notes = db.Column(db.Text)
dm_secret_notes = db.Column(db.Text)
can_talk = db.Column(db.Boolean, default=True)
user_id = db.Column(db.String(36), db.ForeignKey("user.id"), nullable=True)
campaigns = db.relationship('Campaign', secondary=campaign_characters, back_populates='characters')
chats = db.relationship('Chat', secondary=chat_participants, back_populates='participants')
played_by = db.relationship('User', backref='characters', uselist=False)
recipient_type_id = db.Column(db.Integer, db.ForeignKey("recipient_type.id"), nullable=False, default=RecipientTypes.character)
__table_args__ = (
db.ForeignKeyConstraint(['id', 'recipient_type_id'], ['recipient.id', 'recipient.recipient_type_id']),
)
class Campaign(db.Model):
id = db.Column(db.String(36), primary_key=True, default=new_uuid)
name = db.Column(db.Text)
description = db.Column(db.Text)
characters = db.relationship('Character', secondary=campaign_characters, back_populates='campaigns')
user_id = db.Column(db.String(36), db.ForeignKey("user.id"), nullable=True)
run_by = db.relationship('User', backref='campaigns', uselist=False)
recipient_type_id = db.Column(db.Integer, db.ForeignKey("recipient_type.id"), nullable=False, default=RecipientTypes.campaign)
__table_args__ = (
db.ForeignKeyConstraint(['id', 'recipient_type_id'], ['recipient.id', 'recipient.recipient_type_id']),
)
class Chat(db.Model):
id = db.Column(db.String(36), primary_key=True, default=new_uuid)
topic = db.Column(db.Text)
participants = db.relationship('Character', secondary=chat_participants, back_populates='chats')
recipient_type_id = db.Column(db.Integer, db.ForeignKey("recipient_type.id"), nullable=False, default=RecipientTypes.chat)
__table_args__ = (
db.ForeignKeyConstraint(['id', 'recipient_type_id'], ['recipient.id', 'recipient.recipient_type_id']),
)
class Message(db.Model):
id = db.Column(db.String(36), primary_key=True, default=new_uuid)
message_type_id = db.Column(db.Integer, db.ForeignKey("message_type.id"), nullable=False, default=MessageTypes.speech)
content = db.Column(db.Text)
sender_id = db.Column(db.String(36), db.ForeignKey("character.id"), nullable=True)
sender = db.relationship('Character', backref='messages', uselist=False)
recipient_id = db.Column(db.String(36), db.ForeignKey("recipient.id"), nullable=False)
timestamp = db.Column(db.DateTime)
__table_args__ = (
db.Index("messages_by_timestamp", "timestamp"),
)
class ChatReadUpTo(db.Model):
user_id = db.Column(db.String(36), db.ForeignKey("user.id"), primary_key=True, nullable=False)
recipient_id = db.Column(db.String(36), db.ForeignKey("recipient.id"), primary_key=True, nullable=False)
timestamp = db.Column(db.DateTime)
class MessageType(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.Text)
class RecipientType(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.Text)
class Recipient(db.Model):
id = db.Column(db.String(36), primary_key=True, default=new_uuid)
recipient_type_id = db.Column(db.Integer, db.ForeignKey("recipient_type.id"), nullable=False)
__table_args__ = (
db.UniqueConstraint('id', 'recipient_type_id', name='unique_recipients'),
)
db.create_all()
try:
db.session.add(RecipientType(id=RecipientTypes.campaign, name="campaign"))
db.session.commit()
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
try:
db.session.add(RecipientType(id=RecipientTypes.character, name="character"))
db.session.commit()
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
try:
db.session.add(RecipientType(id=RecipientTypes.chat, name="chat"))
db.session.commit()
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
try:
db.session.add(MessageType(id=MessageTypes.action, name="action"))
db.session.commit()
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
try:
db.session.add(MessageType(id=MessageTypes.speech, name="speech"))
db.session.commit()
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
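# The triggers below keep the polymorphic recipient table in sync: every
# character, campaign, and chat row gets a matching recipient row on insert
# and loses it again on delete, so Message.recipient_id can reference any of
# the three.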
db.session.execute("""
CREATE TRIGGER IF NOT EXISTS create_linked_character_recipient BEFORE INSERT ON character
BEGIN
INSERT INTO recipient VALUES(NEW.id, NEW.recipient_type_id);
END;
""")
db.session.execute("""
CREATE TRIGGER IF NOT EXISTS create_linked_campaign_recipient BEFORE INSERT ON campaign
BEGIN
INSERT INTO recipient VALUES(NEW.id, NEW.recipient_type_id);
END;
""")
db.session.execute("""
CREATE TRIGGER IF NOT EXISTS create_linked_chat_recipient BEFORE INSERT ON chat
BEGIN
INSERT INTO recipient VALUES(NEW.id, NEW.recipient_type_id);
END;
""")
db.session.execute("""
CREATE TRIGGER IF NOT EXISTS delete_linked_character_recipient AFTER DELETE ON character
BEGIN
DELETE FROM recipient WHERE recipient.id = OLD.id AND recipient.recipient_type_id = OLD.recipient_type_id;
END;
""")
db.session.execute("""
CREATE TRIGGER IF NOT EXISTS delete_linked_campaign_recipient AFTER DELETE ON campaign
BEGIN
DELETE FROM recipient WHERE recipient.id = OLD.id AND recipient.recipient_type_id = OLD.recipient_type_id;
END;
""")
db.session.execute("""
CREATE TRIGGER IF NOT EXISTS delete_linked_chat_recipient AFTER DELETE ON chat
BEGIN
DELETE FROM recipient WHERE recipient.id = OLD.id AND recipient.recipient_type_id = OLD.recipient_type_id;
END;
""")
|
# %%
import torch
import matplotlib.pyplot as plt
from qmc.mcmc import metropolis_symmetric, clip_mvnormal_proposal
from qmc.wavefunction import HarmonicTrialFunction
# %%
d=3
tf = HarmonicTrialFunction(torch.ones(1))
#
n_walkers=10
init_config = torch.rand(n_walkers,1)
results = metropolis_symmetric(tf, init_config, clip_mvnormal_proposal, num_walkers=n_walkers, num_steps=10000)
# %%
#results_numpy = results.view(-1,3).numpy()
# %%
#plt.scatter(results_numpy[:,0],results_numpy[:,1], s=1)
#plt.savefig("box.png")
print(torch.mean(tf.local_energy(results)))
|
"""
cylinder module.
cylinder follows the Radiance primitive structure:
mod cylinder name
0
0
7 x1 y1 z1 x2 y2 z2 r
"""
class Cylinder(object):
"""
Cylinder class.
"""
def __init__(self):
pass
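# A minimal sketch of how this class might be filled in, assuming the
# 7-argument Radiance cylinder primitive shown in the module docstring.
# The attribute and method names below are illustrative only and are not
# part of the original module.
class CylinderSketch(object):
    def __init__(self, modifier, name, p1, p2, radius):
        self.modifier = modifier  # e.g. a material name, or 'void'
        self.name = name
        self.p1 = p1              # (x1, y1, z1) end point
        self.p2 = p2              # (x2, y2, z2) end point
        self.radius = radius

    def to_radiance(self):
        """Render the primitive in Radiance scene-description syntax."""
        args = list(self.p1) + list(self.p2) + [self.radius]
        return '{0} cylinder {1}\n0\n0\n7 {2}'.format(
            self.modifier, self.name, ' '.join(str(a) for a in args))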
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: a binary search tree
@return: Root of a tree
"""
def increasingBST(self, root):
# Write your code here.
St = []
curr = root
        prev = None
        result = None  # stays None if the tree is empty
while curr or St:
if curr:
St.append(curr)
curr = curr.left
else:
curr = St.pop()
curr.left = None
if prev:
prev.right = curr
else:
result = curr
prev = curr
curr = curr.right
return result
|
from django.apps import AppConfig
class AjaxConnConfig(AppConfig):
name = 'ajax_conn'
|
import findCourses
import getTime
import sendMail
import isHomework
import os
def sendHomework(info, args):
    tmp = findCourses.findCourses()  # Gets the course names and links
names = tmp[0]
links = tmp[1]
argss = ""
for i in range(len(args)):
argss += args[i] + " "
if len(names) == 0 or len(links) == 0:
# print("Odevin yok!! Ya da bir hata var.")
print("Calismadi bastan aliyorum")
os.system("python3 " + argss)
exit()
homeworks = []
for i in range(len(names)):
        messages = isHomework.isHomework(links[i], names[i])  # Checks each course for homework
for j in range(len(messages)):
remaining_time = ""
if 'assign' in messages[j][2]:
remaining_time = getTime.remTime(messages[j][2], messages[j][1])
else:
remaining_time = "Sure yok"
mp = {"className": messages[j][0], "homeworkName": messages[j][1], "remTime": remaining_time, "url": messages[j][2]}
            # homeworks.append(messages[j][0] + " you have homework! Name: " + messages[j][1] + ". Time left: " + remaining_time + ". Link: " + messages[j][2] + ".")
homeworks.append(mp)
return homeworks
|
import torch
from torchvision import transforms
from PIL import Image
from .resnest_model import resnest50
model = resnest50(pretrained=True)
model.eval()
preproc_transform = transforms.Compose([
transforms.Resize((448,448)), # TODO is this really 448 or 224 as said in docs?
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def preproc_to_feats(image):
input_tensor = preproc_transform(image)
input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
device_model = model
if torch.cuda.is_available():
input_batch = input_batch.to('cuda')
device_model = model.to('cuda')
with torch.no_grad():
output = device_model(input_batch).detach().squeeze().cpu()
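        # squeeze() leaves a channels-first (2048, H, W) feature map; the next
        # line flattens it to (H*W, 2048), one feature vector per location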
output = output.numpy().reshape((2048, -1)).transpose()
return output
if __name__ == "__main__":
output = preproc_to_feats(Image.open('/Users/macbook/Downloads/vqa/val2014/COCO_val2014_000000000042.jpg'))
print ('output shape', output.shape)
|
# imports
from PIL import ImageFont, ImageDraw, Image
import cv2
import numpy as np
import imutils
# typically we'll import modularly
try:
from .annotation import Annotation
unit_test = False
# otherwise, we're running main test code at the bottom of this script
except ImportError:
import sys
import os
sys.path.append(os.path.abspath(os.pardir))
from annotation import Annotation
unit_test = True
class Score(Annotation):
def __init__(self):
super(Score, self).__init__()
# load Luckiest Guy font
if unit_test:
self.font = ImageFont.truetype(
"../../fonts/Luckiest_Guy/LuckiestGuy-Regular.ttf",
size=220)
else:
self.font = ImageFont.truetype(
"fonts/Luckiest_Guy/LuckiestGuy-Regular.ttf",
size=220)
def _annotate(self, frame, score=None, relFrameSize=0.20, *args, **kwargs):
# force score to 0-0 if no score is passed or if it is unknown
if score is None:
score = (0, 0)
# convert to string and fill with another digit
teamHomeScore = str(score[0]).zfill(2)
teamAwayScore = str(score[1]).zfill(2)
if unit_test:
scoreboard = Image.open(
"../graphics/scoreboard.png")
else:
scoreboard = Image.open("video_production/graphics/scoreboard.png")
# prepare the image for drawing
draw = ImageDraw.Draw(scoreboard)
# determine the placement of characters
x = scoreboard.size[0] / 16
y = scoreboard.size[1] / 4
# first digit
xMult = 0.32 if teamHomeScore[0] == "0" else 1
draw.text(xy=(x*xMult, y),
text=teamHomeScore[0],
align="center",
font=self.font)
# second digit
draw.text(xy=(x*4.2, y),
text=teamHomeScore[1],
align="center",
font=self.font)
# third digit
xMult = 8.25 if teamAwayScore[0] == "0" else 8.6
draw.text(xy=(x*xMult, y),
text=teamAwayScore[0],
align="center",
font=self.font)
# fourth digit
draw.text(xy=(x*12.3, y),
text=teamAwayScore[1],
align="center",
font=self.font)
# load the scoreboard into OpenCV format
scoreboard = cv2.cvtColor(np.array(scoreboard), cv2.COLOR_RGBA2BGR)
# resize the scoreboard relative to the frame
h, w = frame.shape[:2]
scoreboard = imutils.resize(scoreboard, width=int(w * relFrameSize))
sH, sW = scoreboard.shape[:2]
# slice the scoreboard into the frame
frame[20:20+sH, 20:20+sW] = scoreboard
# return the frame
return frame
# test code
if __name__ == "__main__":
homeScore = 10
awayScore = 10
score = (homeScore, awayScore)
frame = cv2.imread("../../views/ui/oddball.png")
s = Score()
s.activate()
scoreboard = s.annotate(frame, score)
cv2.imwrite("tmp_home-{}_away-{}.png".format(
str(homeScore).zfill(2),
str(awayScore).zfill(2)),
scoreboard)
|
#!/usr/bin/env python
#
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a JWT assertion for test purposes.
Requires package `python-jose`. This script was written/tested on Python 2.7.
Usage: Change any parameters in the contents dictionary below as necessary, then
simply run this script with no arguments to dump the JWT assertion string.
"""
from jose import jwk
from jose import jws
# Make sure the values in this section are identical to the values in
# service_account_credentials_test.cc.
############################################
# pylint: disable=line-too-long
# The private key JSON below was obtained by downloading a service account
# private key file from the Google Cloud Console, then deleting/invalidating
# that key (along with replacing the other identifiable attributes here).
CONTENTS_DICT = {
"type": "service_account",
"project_id": "foo-project",
"private_key_id": "a1a111aa1111a11a11a11aa111a111a1a1111111",
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCltiF2oP3KJJ+S\ntTc1McylY+TuAi3AdohX7mmqIjd8a3eBYDHs7FlnUrFC4CRijCr0rUqYfg2pmk4a\n6TaKbQRAhWDJ7XD931g7EBvCtd8+JQBNWVKnP9ByJUaO0hWVniM50KTsWtyX3up/\nfS0W2R8Cyx4yvasE8QHH8gnNGtr94iiORDC7De2BwHi/iU8FxMVJAIyDLNfyk0hN\neheYKfIDBgJV2v6VaCOGWaZyEuD0FJ6wFeLybFBwibrLIBE5Y/StCrZoVZ5LocFP\nT4o8kT7bU6yonudSCyNMedYmqHj/iF8B2UN1WrYx8zvoDqZk0nxIglmEYKn/6U7U\ngyETGcW9AgMBAAECggEAC231vmkpwA7JG9UYbviVmSW79UecsLzsOAZnbtbn1VLT\nPg7sup7tprD/LXHoyIxK7S/jqINvPU65iuUhgCg3Rhz8+UiBhd0pCH/arlIdiPuD\n2xHpX8RIxAq6pGCsoPJ0kwkHSw8UTnxPV8ZCPSRyHV71oQHQgSl/WjNhRi6PQroB\nSqc/pS1m09cTwyKQIopBBVayRzmI2BtBxyhQp9I8t5b7PYkEZDQlbdq0j5Xipoov\n9EW0+Zvkh1FGNig8IJ9Wp+SZi3rd7KLpkyKPY7BK/g0nXBkDxn019cET0SdJOHQG\nDiHiv4yTRsDCHZhtEbAMKZEpku4WxtQ+JjR31l8ueQKBgQDkO2oC8gi6vQDcx/CX\nZ23x2ZUyar6i0BQ8eJFAEN+IiUapEeCVazuxJSt4RjYfwSa/p117jdZGEWD0GxMC\n+iAXlc5LlrrWs4MWUc0AHTgXna28/vii3ltcsI0AjWMqaybhBTTNbMFa2/fV2OX2\nUimuFyBWbzVc3Zb9KAG4Y7OmJQKBgQC5324IjXPq5oH8UWZTdJPuO2cgRsvKmR/r\n9zl4loRjkS7FiOMfzAgUiXfH9XCnvwXMqJpuMw2PEUjUT+OyWjJONEK4qGFJkbN5\n3ykc7p5V7iPPc7Zxj4mFvJ1xjkcj+i5LY8Me+gL5mGIrJ2j8hbuv7f+PWIauyjnp\nNx/0GVFRuQKBgGNT4D1L7LSokPmFIpYh811wHliE0Fa3TDdNGZnSPhaD9/aYyy78\nLkxYKuT7WY7UVvLN+gdNoVV5NsLGDa4cAV+CWPfYr5PFKGXMT/Wewcy1WOmJ5des\nAgMC6zq0TdYmMBN6WpKUpEnQtbmh3eMnuvADLJWxbH3wCkg+4xDGg2bpAoGAYRNk\nMGtQQzqoYNNSkfus1xuHPMA8508Z8O9pwKU795R3zQs1NAInpjI1sOVrNPD7Ymwc\nW7mmNzZbxycCUL/yzg1VW4P1a6sBBYGbw1SMtWxun4ZbnuvMc2CTCh+43/1l+FHe\nMmt46kq/2rH2jwx5feTbOE6P6PINVNRJh/9BDWECgYEAsCWcH9D3cI/QDeLG1ao7\nrE2NcknP8N783edM07Z/zxWsIsXhBPY3gjHVz2LDl+QHgPWhGML62M0ja/6SsJW3\nYvLLIc82V7eqcVJTZtaFkuht68qu/Jn1ezbzJMJ4YXDYo1+KFi+2CAGR06QILb+I\nlUtj+/nH3HDQjM4ltYfTPUg=\n-----END PRIVATE KEY-----\n",
"client_email": "foo-email@foo-project.iam.gserviceaccount.com",
"client_id": "100000000000000000001",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/foo-email%40foo-project.iam.gserviceaccount.com",
}
# Timestamp used to represent the current time in the JWT assertion.
TIMESTAMP = 1530060324
# Scopes that the returned access token should be valid for use with. This is
# also used in constructing the JWT assertion.
SCOPE_STR = "https://www.googleapis.com/auth/cloud-platform"
ALT_SCOPE_STR = "https://www.googleapis.com/auth/devstorage.full_control"
# pylint: enable=line-too-long
############################################
def ordered_json_str(ordered_dict):
"""Dump a dict to a minified JSON string with ordered keys."""
kv_strs = []
# Keys must be iterated over in alphabetical order in order to have
# deterministic string dump functionality.
for key, val in sorted(ordered_dict.items()):
kv_strs.append(
'"{k}":{q}{v}{q}'.format(
k=key,
v=val,
# Numeric values don't have quotes around them. Note that this falls
# apart for JSON objects or arrays, but these keyfile attributes
# only map to strings and ints, so we can take this shortcut.
q=("" if isinstance(val, int) else '"'),
)
)
return "{" + ",".join(kv_strs) + "}"
def payload_str(scopes, subject=None):
payload_dict = {
"aud": CONTENTS_DICT["token_uri"],
"exp": TIMESTAMP + 3600,
"iat": TIMESTAMP,
"iss": CONTENTS_DICT["client_email"],
"scope": scopes,
}
if subject:
payload_dict["sub"] = subject
return jwk.base64url_encode(ordered_json_str(payload_dict))
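# The assertions printed by main() are JWS compact serializations:
# base64url(header).base64url(payload).base64url(signature), where the
# signature is computed over the first two parts with the RS256 key above.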
def main():
"""Print out the JWT assertion."""
signing_algorithm_str = "RS256" # RSA
headers_str = jwk.base64url_encode(
ordered_json_str(
{
"alg": signing_algorithm_str,
"kid": CONTENTS_DICT["private_key_id"],
"typ": "JWT",
}
)
)
payload_str_for_defaults = payload_str(SCOPE_STR)
print("Assertion for default scope and no subject:")
print(
jws._sign_header_and_claims( # pylint: disable=protected-access
headers_str,
payload_str_for_defaults,
signing_algorithm_str,
CONTENTS_DICT["private_key"],
)
)
print()
payload_str_for_nondefaults = payload_str(ALT_SCOPE_STR, "user@foo.bar")
print("Assertion for non-default scope and using a subject:")
print(
jws._sign_header_and_claims( # pylint: disable=protected-access
headers_str,
payload_str_for_nondefaults,
signing_algorithm_str,
CONTENTS_DICT["private_key"],
)
)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Extract a distribution for the selected feature of the population of morphologies among
the exponential, normal and uniform distribution, according to the minimum ks distance.
"""
from itertools import chain
import argparse
import json
import neurom as nm
from neurom import stats
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Morphology fit distribution extractor',
epilog='Note: Outputs json of the optimal distribution \
and corresponding parameters.')
parser.add_argument('datapath',
help='Path to morphology data directory')
parser.add_argument('feature',
help='Feature to be extracted with neurom.get')
return parser.parse_args()
def extract_data(data_path, feature):
"""Loads a list of morphologies, extracts feature
and transforms the fitted distribution in the correct format.
Returns the optimal distribution, corresponding parameters,
minimun and maximum values.
"""
population = nm.load_morphologies(data_path)
feature_data = [nm.get(feature, n) for n in population]
feature_data = list(chain(*feature_data))
return stats.optimal_distribution(feature_data)
if __name__ == '__main__':
args = parse_args()
d_path = args.datapath
feat = args.feature
_result = stats.fit_results_to_dict(extract_data(d_path, feat))
print(json.dumps(_result, indent=2, separators=(',', ': ')))
|
import os.path as path
import re
import os
import random
import pickle
import warnings
import torchtext
import torch
import spacy
import numpy as np
from ._choose_tokenizer import choose_tokenizer
from ._vocab_tokenizer import VocabTokenizer
from ._single_sequence_dataset import SingleSequenceDataset
class SSTTokenizer(VocabTokenizer):
def __init__(self):
super().__init__()
self._tokenizer = spacy.load('en_core_web_sm',
disable=['parser', 'tagger', 'ner', 'lemmatizer'])
def tokenize(self, sentence):
sentence = sentence.strip()
sentence = sentence.replace("-LRB-", '')
sentence = sentence.replace("-RRB-", ' ')
sentence = re.sub(r'\W+', ' ', sentence)
sentence = re.sub(r'\s+', ' ', sentence)
return [t.text.lower() for t in self._tokenizer(sentence)]
class SSTDataset(SingleSequenceDataset):
"""Loads the Stanford Sentiment Dataset
Uses the same tokenization procedure as in "Attention is not Explanation"
The paper's tokenizer can be found in:
https://github.com/successar/AttentionExplanation/blob/master/preprocess/SST/SST.ipynb
In general:
* Uses spacy tokenizer
* Lower case tokens
* Does not drop tokens
* Replaces \W = [^a-zA-Z0-9_] with <space>
* Removes "-LRB-"
* Replaces "-RRB-" with <space>
* Removes sentences shorter than 5 (https://github.com/successar/AttentionExplanation/blob/master/Trainers/DatasetBC.py#L103)
* Batch size of 32 (https://github.com/successar/AttentionExplanation/blob/master/configurations.py#L19)
The paper's embedding code is in:
https://github.com/successar/AttentionExplanation/blob/master/preprocess/vectorizer.py#L103
In general:
* use 'fasttext.simple.300d'
* set [PAD] embedding to zero
"""
def __init__(self, cachedir, model_type, seed=0, **kwargs):
"""Creates an SST dataset instance
Args:
cachedir (str): Directory to use for caching the compiled dataset.
seed (int): Seed used for shuffling the dataset.
batch_size (int, optional): The batch size used in the data loader. Defaults to 32.
num_workers (int, optional): The number of pytorch workers in the data loader. Defaults to 4.
"""
tokenizer = choose_tokenizer(cachedir, model_type, SSTTokenizer)
super().__init__(cachedir, 'sst', model_type, tokenizer, **kwargs)
self.label_names = ['negative', 'positive']
def embedding(self):
"""Creates word embedding matrix.
Returns:
np.array: shape = (vocabulary, 300)
"""
if self.model_type != 'rnn':
return None
lookup = torchtext.vocab.pretrained_aliases['fasttext.simple.300d'](cache=f'{self._cachedir}/embeddings')
embeddings = []
for word in self.tokenizer.ids_to_token:
if word in set(self.tokenizer.special_symbols) or word not in lookup.stoi:
embeddings.append(np.zeros(300))
else:
embeddings.append(lookup[word].numpy())
return np.vstack(embeddings)
def prepare_data(self):
"""Download, compiles, and cache the dataset.
"""
# Load embeddings
torchtext.vocab.pretrained_aliases['fasttext.simple.300d'](cache=f'{self._cachedir}/embeddings')
# Load dataset
if (not path.exists(f'{self._cachedir}/vocab/sst.vocab') or
not path.exists(f'{self._cachedir}/encoded/sst.pkl')):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
# SST has not been migrated to the new torchtext.datasets yet
train, val, test = torchtext.legacy.datasets.SST.splits(
torchtext.legacy.data.Field(), torchtext.legacy.data.Field(sequential=False),
filter_pred=lambda ex: len(ex.text) > 5 and ex.label != 'neutral',
root=f'{self._cachedir}/datasets')
# Create vocabulary from training data, if it hasn't already been done
if not path.exists(f'{self._cachedir}/vocab/sst.vocab'):
os.makedirs(f'{self._cachedir}/vocab', exist_ok=True)
self.tokenizer.from_iterable(' '.join(row.text) for row in train)
self.tokenizer.to_file(f'{self._cachedir}/vocab/sst.vocab')
else:
self.tokenizer.from_file(f'{self._cachedir}/vocab/sst.vocab')
# Encode data
if not path.exists(f'{self._cachedir}/encoded/sst_{self.model_type}.pkl'):
os.makedirs(f'{self._cachedir}/encoded', exist_ok=True)
rng = random.Random(self._seed)
data = {}
for name, dataset in [('train', train), ('val', val), ('test', test)]:
observations = []
for index, observation in enumerate(dataset):
observations.append({
'sentence': self.tokenizer.encode(' '.join(observation.text)),
'label': self.label_names.index(observation.label),
'index': index
})
data[name] = rng.sample(observations, len(observations))
with open(f'{self._cachedir}/encoded/sst_{self.model_type}.pkl', 'wb') as fp:
pickle.dump(data, fp)
|
#! /usr/bin/env python
"""
Various stat functions.
"""
from __future__ import division, print_function
__author__ = 'C. Gomez @ ULg'
__all__ = ['descriptive_stats']
import numpy as np
from matplotlib.pyplot import boxplot
def descriptive_stats(array, verbose=True, label='', mean=False, plot=False):
""" Simple statistics from vector.
"""
    # Compute the mean into a separate name so the boolean flag is not
    # overwritten (a mean of 0.0 would be falsy and break the checks below)
    ave = np.mean(array) if mean else None
median = np.median(array)
mini = np.min(array)
maxi = np.max(array)
first_qu = np.percentile(array, 25)
third_qu = np.percentile(array, 75)
if verbose:
msg = label
if mean:
msg += 'min={:.1f} / 1st QU={:.1f} / ave={:.1f} / med={:.1f} / 3rd QU={:.1f} / max={:.1f}'
            print(msg.format(mini, first_qu, ave, median, third_qu, maxi))
else:
msg += 'min={:.1f} / 1st QU={:.1f} / med={:.1f} / 3rd QU={:.1f} / max={:.1f}'
print(msg.format(mini, first_qu, median, third_qu, maxi))
if plot:
boxplot(array, vert=False, meanline=mean, showfliers=True, sym='.') #whis=range)
if mean:
        return mini, first_qu, ave, median, third_qu, maxi
else:
return mini, first_qu, median, third_qu, maxi
|
from __future__ import print_function
from lldbsuite.test import lldbtest
from lldbsuite.test import decorators
class NonExistentDecoratorTestCase(lldbtest.TestBase):
mydir = lldbtest.TestBase.compute_mydir(__file__)
@decorators.nonExistentDecorator(bugnumber="yt/1300")
def test(self):
"""Verify non-existent decorators are picked up by test runner."""
pass
|
""" Constants for qmotion module
"""
# Long default, qsync can be slow. Can be overridden during initialization
DEFAULT_TIMEOUT = 20
TCP_PORT = 9760
UDP_PORT = 9720
BROADCAST_ADDRESS = "255.255.255.255"
|
# -*- coding: utf-8 -*-
from pyxb.bundles.reqif.raw._xh11d import *
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Static file API endpoint.
Used for storing data such as img files.
Nothing in the static directory will be treated as a template,
just (potentially) cacheable data that needs to be served to clients.
"""
from flask import send_from_directory
from flask_restx import Namespace, Resource
from foremanlite.fsdata import DataFile
from foremanlite.logging import get as get_logger
from foremanlite.serve.context import get_context
from foremanlite.vars import STATIC_DIR
ns: Namespace = Namespace(
"static", description="Get static files as needed for misc. uses."
)
_logger = get_logger("static")
@ns.route("/<string:filename>", endpoint="staticfiles")
@ns.param("filename", "Filename of static file to retrieve")
class StaticFiles(Resource):
"""Resource representing static files."""
@staticmethod
def get(filename: str):
"""Get the requested static file."""
context = get_context()
static_dir_path = context.data_dir / STATIC_DIR
requested_path = static_dir_path / filename
_logger.info(f"Got request for static file: {str(requested_path)}")
data_file = DataFile(
requested_path,
cache=context.cache,
)
try:
data_file.validate()
except ValueError as err:
_logger.warning(
"Validation failed for requested static file %s: %s",
repr(str(requested_path)),
err,
)
return ("Requested file cannot be found", 404)
try:
return send_from_directory(
directory=data_file.path.parent,
path=data_file.path,
filename=data_file.path.name,
cache_timeout=0,
)
except ValueError as err:
_logger.warning(
"Error occurred while reading static file "
f"{str(requested_path)}: {err}"
)
raise err
|
import math

class Solution(object):
    def isPerfectSquare(self, num):
        # math.isqrt avoids the floating-point rounding error that
        # int(num ** 0.5) can hit for very large num (requires Python 3.8+)
        return math.isqrt(num) ** 2 == num
|
## script to generate the graphics in figure 3 of koven et al. paper
## written by c. koven
## dependent on several libraries: PyNgl, python-netCDF4, numpy, rpy2, and my personal plotting library, which is here: https://github.com/ckoven/ckplotlib
import numpy as np
import map_funcs
import netCDF4 as nc
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
stats = importr('stats')
base = importr('base')
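# Q10-style temperature sensitivity: k(T) = k_ref * Q10**((T - Tref) / 10),
# i.e. the decomposition rate changes by a factor of Q10 per 10 degrees C.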
def q10func(temp):
k_ref = 0.06
q10 = 1.5
tref = 15.
output = k_ref * q10 ** ((temp[:] - tref)/10.)
return output
def q10func_frozen(temp):
k_ref = 0.06
q10 = 1.5
tref = 15.
output = k_ref * q10 ** ((temp[:] - tref)/10.)
output[temp < 0.] = 0.
return output
def q10func_frozenq10(temp):
k_ref = 0.06
q10 = 1.5
q10_frozen=5.
tref = 15.
output = k_ref * q10 ** ((temp[:] - tref)/10.)
output[temp < 0.] = k_ref * (q10 ** ((0. - tref)/10.)) * q10_frozen ** ((temp[temp < 0.] - 0)/10.)
return output
def lloyd_taylor_eq11(temp):
k_ref = .03820684 ### at 10C
output = k_ref * np.exp(308.56 * (1./56.02 - 1/(temp + 273.15 - 227.13)))
return output
coloreddotfig_dotsize = 0.005
textfile = open('quadratic_parameters_strawmodels.txt', 'w')
# ### the quadratic fit to filtered observational data from other script
# quadratic_fit = np.array([ 7.70383464e-04, -4.00038392e-02, 1.57171229e+00])
# xd = np.linspace(-25, 29, 60)
# yd_quad2_kspace = 1./(10. ** (quadratic_fit[2] + quadratic_fit[1]*xd + quadratic_fit[0]*xd**2 ))
### read quadratic fit and prediction intervals from other script
x_confint = np.loadtxt('x_confint.txt')
y_confint = np.loadtxt('y_confint.txt')
y_confint_q10 = np.row_stack([y_confint,1./q10func(x_confint)])
y_confint_froz_q10 = np.ma.masked_array(np.row_stack([y_confint,1./q10func(x_confint)]))
y_confint_froz_q10[3,x_confint[:] < 0.] = np.ma.masked_all((x_confint[:] < 0.).sum())
y_confint_lt = np.row_stack([y_confint,1./lloyd_taylor_eq11(x_confint)])
f = nc.MFDataset("datasets/clm5_cdk_r162_2degGSWP3_1850spin_allN_modpsimin_v01.clm2.h1.0*.nc")
tsa = f.variables['TSA']
tsoi = f.variables['TSOI']
lat = f.variables['lat']
lon = f.variables['lon']
levgrnd = f.variables['levgrnd']
f2 = nc.Dataset("datasets/clm5_cdk_r162_2degGSWP3_1850spin_allN_modpsimin_v01__dzsoi.nc")
dzsoi = f2.variables['DZSOI'][:]
f2.close()
f3 = nc.Dataset("datasets/surfdata_1.9x2.5_16pftsmidarctic_simyr1850_c160112.nc")
icefrac = f3.variables["PCT_GLACIER"][:] / 100.
f3.close()
dz = dzsoi[:,0,0]
lev_edges = np.zeros(len(dz)+1)
lev_edges[1:] = dz.cumsum()
depth_1m = 1.
nlev_lt_1m = (lev_edges < depth_1m).sum()
dz_to1m = dz[0:nlev_lt_1m].data
dz_to1m[nlev_lt_1m-1] = dz_to1m[nlev_lt_1m-1] - (lev_edges[nlev_lt_1m] - depth_1m)
frozen = (tsoi[:,0:nlev_lt_1m, :,:] < 273.15).mean(axis=0)
frozen_1m = (np.rollaxis(np.rollaxis(frozen,1), 2, 1) * dz_to1m).sum(axis=2)
tsa_mean = tsa[:].mean(axis=0)
temprange = [-22,30]
index_10cm = 2
q10func_10cm = q10func(tsoi[:,index_10cm,:,:]-273.15).mean(axis=0)
froz_q10func_10cm = q10func_frozen(tsoi[:,index_10cm,:,:]-273.15).mean(axis=0)
froz_q10func_0to1mm = (np.rollaxis(np.rollaxis(q10func_frozen(tsoi[:,0:nlev_lt_1m,:,:]-273.15).mean(axis=0),1), 2, 1) * dz_to1m).sum(axis=2)
lloydtayl_10cm = lloyd_taylor_eq11(tsoi[:,index_10cm,:,:]-273.15).mean(axis=0)
### mask values below certain level
mintemp = 273.15 -30.
icefracmax = 0.2
glaciercoldmask = np.logical_and(tsa_mean > mintemp, icefrac < icefracmax)
lloydtayl_10cm = lloydtayl_10cm[glaciercoldmask]
q10func_10cm = q10func_10cm[glaciercoldmask]
froz_q10func_10cm = froz_q10func_10cm[glaciercoldmask]
froz_q10func_0to1mm = froz_q10func_0to1mm[glaciercoldmask]
tsa_mean = tsa_mean[glaciercoldmask]
yrange=[1e0, 5e3]
map_funcs.xyplot(tsa_mean-273.15, 1./q10func_10cm, ylog=True, yrange=yrange, xrange=temprange, dots=True, overlay_x = x_confint, overlay_y = y_confint_q10,overlay_color=['red','red','red','blue'],overlay_linethickness=[2.5,1.,1.,2.5], xtitle='Mean Air Temperature (~S~o~N~C)', ytitle='Turnover time of respiration function (yr)', inset_title='Q~B~10~N~=1.5 at 10cm', inset_title_x=27.5, inset_textjust="CenterRight", inset_title_y=2.9e3, file='diagnosed_straight_q10_10cm')
map_funcs.xyplot(tsa_mean-273.15, 1./np.maximum(froz_q10func_10cm, 1e-4), ylog=True, yrange=yrange, xrange=temprange, dots=True, overlay_x = x_confint, overlay_y = y_confint_froz_q10,overlay_color=['red','red','red','blue'],overlay_linethickness=[2.5,1.,1.,2.5], xtitle='Mean Air Temperature (~S~o~N~C)', ytitle='Turnover time of respiration function (yr)', inset_title='Thawed-only Q~B~10~N~=1.5 at 10cm', inset_title_x=27.5, inset_textjust="CenterRight", inset_title_y=2.9e3, file='diagnosed_frozen_q10_10cm')
map_funcs.xyplot(tsa_mean-273.15, 1./np.maximum(lloydtayl_10cm, 1e-4), ylog=True, yrange=yrange, xrange=temprange, dots=True, overlay_x = x_confint, overlay_y = y_confint_lt,overlay_color=['red','red','red','blue'],overlay_linethickness=[2.5,1.,1.,2.5], xtitle='Mean Air Temperature (~S~o~N~C)', ytitle='Turnover time of respiration function (yr)', inset_title='Lloyd-Taylor at 10cm', inset_title_x=27.5, inset_textjust="CenterRight", inset_title_y=2.9e3, file='diagnosed_lloyd-taylor_10cm')
map_funcs.xyplot(tsa_mean-273.15, 1./np.maximum(froz_q10func_0to1mm, 1e-4), ylog=True, yrange=yrange, xrange=temprange, dots=True, overlay_x = x_confint, overlay_y = y_confint_froz_q10,overlay_color=['red','red','red','blue'],overlay_linethickness=[2.5,1.,1.,2.5], xtitle='Mean Air Temperature (~S~o~N~C)', ytitle='Turnover time of respiration function (yr)', inset_title='Thawed-only Q~B~10~N~=1.5 over 0-1m interval', inset_title_x=27.5, inset_textjust="CenterRight", inset_title_y=2.9e3, file='diagnosed_frozen_q10_0to1m_integral')
### for each of these, calculate the quadratic regression as for the obs and ESMs
### first the simple q10 case
xdata_nomask = tsa_mean-273.15
ydata_nomask = np.log10(1./q10func_10cm)
xdata = xdata_nomask[np.logical_not(ydata_nomask.mask)]
ydata = ydata_nomask[np.logical_not(ydata_nomask.mask)]
xdata_rvect_simpleq10 = robjects.FloatVector(xdata)
ydata_rvect_simpleq10 = robjects.FloatVector(ydata)
#
robjects.globalenv["xdata_rvect_simpleq10"] = xdata_rvect_simpleq10
robjects.globalenv["ydata_rvect_simpleq10"] = ydata_rvect_simpleq10
#
quadreg_r_simpleq10 = stats.lm("ydata_rvect_simpleq10 ~ poly(xdata_rvect_simpleq10,2)")
robjects.globalenv["quadreg_r_simpleq10"] = quadreg_r_simpleq10
rconfint_simpleq10 = robjects.r['confint']
rpredict_simpleq10 = robjects.r['predict']
rsummary_simpleq10 = robjects.r['summary']
thepredictint_simpleq10 = rpredict_simpleq10(quadreg_r_simpleq10, interval='prediction', level=0.50)
print(rsummary_simpleq10(quadreg_r_simpleq10))
predictint_simpleq10 = np.array(thepredictint_simpleq10)
n_toshow_predictlines_simpleq10 = 50
n_toskip_predictlines_simpleq10 = xdata.shape[0] // n_toshow_predictlines_simpleq10  # integer division: the slice step below must be an int
indices_toshow_simpleq10 = xdata.argsort()[::n_toskip_predictlines_simpleq10]
x_confint_simpleq10 = xdata[indices_toshow_simpleq10]
y_confint_simpleq10 = 10.**(predictint_simpleq10[indices_toshow_simpleq10,:].transpose())
#
# do in numpy for coefficients
quadratic_fit_simpleq10 = np.polyfit(xdata,ydata,2)
textfile.write('quadratic_fit_simpleq10 '+str(quadratic_fit_simpleq10)+'\n')
#
map_funcs.xyplot(tsa_mean-273.15, 1./q10func_10cm, dots=True, ylog=True, yrange=[1., 5e3], xrange=temprange, file='simpleq10_MRT_soilc_temp_quadraticregression_r50pctpredint_', dotsize=coloreddotfig_dotsize, xtitle='Mean Air Temperature (~S~o~N~C)', ytitle='Inferred Turnover Time (yr)', overlay_x = x_confint_simpleq10, overlay_y = y_confint_simpleq10,overlay_color='red',overlay_linethickness=[2.5,1.,1.], inset_title='Q~B~10~N~=1.5 at 10cm', inset_title_x=27.5, inset_textjust="CenterRight", inset_title_y=2.9e3 )
### next the lloyd-taylor
xdata_nomask = tsa_mean-273.15
ydata_nomask = np.log10(1./lloydtayl_10cm)
xdata = xdata_nomask[np.logical_not(ydata_nomask.mask)]
ydata = ydata_nomask[np.logical_not(ydata_nomask.mask)]
xdata_rvect_lloydtaylor = robjects.FloatVector(xdata)
ydata_rvect_lloydtaylor = robjects.FloatVector(ydata)
#
robjects.globalenv["xdata_rvect_lloydtaylor"] = xdata_rvect_lloydtaylor
robjects.globalenv["ydata_rvect_lloydtaylor"] = ydata_rvect_lloydtaylor
#
quadreg_r_lloydtaylor = stats.lm("ydata_rvect_lloydtaylor ~ poly(xdata_rvect_lloydtaylor,2)")
robjects.globalenv["quadreg_r_lloydtaylor"] = quadreg_r_lloydtaylor
rconfint_lloydtaylor = robjects.r['confint']
rpredict_lloydtaylor = robjects.r['predict']
rsummary_lloydtaylor = robjects.r['summary']
thepredictint_lloydtaylor = rpredict_lloydtaylor(quadreg_r_lloydtaylor, interval='prediction', level=0.50)
print(rsummary_lloydtaylor(quadreg_r_lloydtaylor))
predictint_lloydtaylor = np.array(thepredictint_lloydtaylor)
n_toshow_predictlines_lloydtaylor = 50
n_toskip_predictlines_lloydtaylor = xdata.shape[0] // n_toshow_predictlines_lloydtaylor
indices_toshow_lloydtaylor = xdata.argsort()[::n_toskip_predictlines_lloydtaylor]
x_confint_lloydtaylor = xdata[indices_toshow_lloydtaylor]
y_confint_lloydtaylor = 10.**(predictint_lloydtaylor[indices_toshow_lloydtaylor,:].transpose())
#
# do in numpy for coefficients
quadratic_fit_lloydtaylor = np.polyfit(xdata,ydata,2)
textfile.write('quadratic_fit_lloydtaylor '+str(quadratic_fit_lloydtaylor)+'\n')
#
#
map_funcs.xyplot(tsa_mean-273.15, 1./lloydtayl_10cm, dots=True, ylog=True, yrange=[1., 5e3], xrange=temprange, file='lloydtaylor_MRT_soilc_temp_quadraticregression_r50pctpredint_', dotsize=coloreddotfig_dotsize, xtitle='Mean Air Temperature (~S~o~N~C)', ytitle='Inferred Turnover Time (yr)', overlay_x = x_confint_lloydtaylor, overlay_y = y_confint_lloydtaylor,overlay_color='red',overlay_linethickness=[2.5,1.,1.], inset_title='Lloyd-Taylor at 10cm', inset_title_x=27.5, inset_textjust="CenterRight", inset_title_y=2.9e3, )
### next frozen q10 case
xdata_nomask = tsa_mean-273.15
ydata_nomask = np.log10(1./froz_q10func_10cm)
xdata = xdata_nomask[np.logical_not(ydata_nomask.mask)]
ydata = ydata_nomask[np.logical_not(ydata_nomask.mask)]
xdata_rvect_froz_q10func_10cm = robjects.FloatVector(xdata)
ydata_rvect_froz_q10func_10cm = robjects.FloatVector(ydata)
#
robjects.globalenv["xdata_rvect_froz_q10func_10cm"] = xdata_rvect_froz_q10func_10cm
robjects.globalenv["ydata_rvect_froz_q10func_10cm"] = ydata_rvect_froz_q10func_10cm
#
quadreg_r_froz_q10func_10cm = stats.lm("ydata_rvect_froz_q10func_10cm ~ poly(xdata_rvect_froz_q10func_10cm,2)")
robjects.globalenv["quadreg_r_froz_q10func_10cm"] = quadreg_r_froz_q10func_10cm
rconfint_froz_q10func_10cm = robjects.r['confint']
rpredict_froz_q10func_10cm = robjects.r['predict']
rsummary_froz_q10func_10cm = robjects.r['summary']
thepredictint_froz_q10func_10cm = rpredict_froz_q10func_10cm(quadreg_r_froz_q10func_10cm, interval='prediction', level=0.50)
print(rsummary_froz_q10func_10cm(quadreg_r_froz_q10func_10cm))
predictint_froz_q10func_10cm = np.array(thepredictint_froz_q10func_10cm)
n_toshow_predictlines_froz_q10func_10cm = 50
n_toskip_predictlines_froz_q10func_10cm = xdata.shape[0] // n_toshow_predictlines_froz_q10func_10cm
indices_toshow_froz_q10func_10cm = xdata.argsort()[::n_toskip_predictlines_froz_q10func_10cm]
x_confint_froz_q10func_10cm = xdata[indices_toshow_froz_q10func_10cm]
y_confint_froz_q10func_10cm = 10.**(predictint_froz_q10func_10cm[indices_toshow_froz_q10func_10cm,:].transpose())
#
# do in numpy for coefficients
quadratic_fit_froz_q10func_10cm = np.polyfit(xdata,ydata,2)
textfile.write('quadratic_fit_froz_q10func_10cm '+str(quadratic_fit_froz_q10func_10cm)+'\n')
#
#
map_funcs.xyplot(tsa_mean-273.15, 1./froz_q10func_10cm, dots=True, ylog=True, yrange=[1., 5e3], xrange=temprange, file='froz_q10func_10cm_MRT_soilc_temp_quadraticregression_r50pctpredint_', dotsize=coloreddotfig_dotsize, xtitle='Mean Air Temperature (~S~o~N~C)', ytitle='Inferred Turnover Time (yr)', overlay_x = x_confint_froz_q10func_10cm, overlay_y = y_confint_froz_q10func_10cm,overlay_color='red',overlay_linethickness=[2.5,1.,1.], inset_title='Thawed-only Q~B~10~N~=1.5 at 10cm', inset_title_x=27.5, inset_textjust="CenterRight", inset_title_y=2.9e3 )
### and the depth-averaged frozen q10 case
xdata_nomask = tsa_mean-273.15
ydata_nomask = np.log10(1./froz_q10func_0to1mm)
xdata = xdata_nomask[np.logical_not(ydata_nomask.mask)]
ydata = ydata_nomask[np.logical_not(ydata_nomask.mask)]
xdata_rvect_froz_q10func_0to1mm = robjects.FloatVector(xdata)
ydata_rvect_froz_q10func_0to1mm = robjects.FloatVector(ydata)
#
robjects.globalenv["xdata_rvect_froz_q10func_0to1mm"] = xdata_rvect_froz_q10func_0to1mm
robjects.globalenv["ydata_rvect_froz_q10func_0to1mm"] = ydata_rvect_froz_q10func_0to1mm
#
quadreg_r_froz_q10func_0to1mm = stats.lm("ydata_rvect_froz_q10func_0to1mm ~ poly(xdata_rvect_froz_q10func_0to1mm,2)")
robjects.globalenv["quadreg_r_froz_q10func_0to1mm"] = quadreg_r_froz_q10func_0to1mm
rconfint_froz_q10func_0to1mm = robjects.r['confint']
rpredict_froz_q10func_0to1mm = robjects.r['predict']
rsummary_froz_q10func_0to1mm = robjects.r['summary']
thepredictint_froz_q10func_0to1mm = rpredict_froz_q10func_0to1mm(quadreg_r_froz_q10func_0to1mm, interval='prediction', level=0.50)
print(rsummary_froz_q10func_0to1mm(quadreg_r_froz_q10func_0to1mm))
predictint_froz_q10func_0to1mm = np.array(thepredictint_froz_q10func_0to1mm)
n_toshow_predictlines_froz_q10func_0to1mm = 50
n_toskip_predictlines_froz_q10func_0to1mm = xdata.shape[0] // n_toshow_predictlines_froz_q10func_0to1mm
indices_toshow_froz_q10func_0to1mm = xdata.argsort()[::n_toskip_predictlines_froz_q10func_0to1mm]
x_confint_froz_q10func_0to1mm = xdata[indices_toshow_froz_q10func_0to1mm]
y_confint_froz_q10func_0to1mm = 10.**(predictint_froz_q10func_0to1mm[indices_toshow_froz_q10func_0to1mm,:].transpose())
#
# do in numpy for coefficients
quadratic_fit_froz_q10func_0to1mm = np.polyfit(xdata,ydata,2)
textfile.write('quadratic_fit_froz_q10func_0to1mm ' +str(quadratic_fit_froz_q10func_0to1mm)+'\n')#
#
#
map_funcs.xyplot(tsa_mean-273.15, 1./froz_q10func_0to1mm, dots=True, ylog=True, yrange=[1., 5e3], xrange=temprange, file='froz_q10func_0to1mm_MRT_soilc_temp_quadraticregression_r50pctpredint_', dotsize=coloreddotfig_dotsize, xtitle='Mean Air Temperature (~S~o~N~C)', ytitle='Inferred Turnover Time (yr)', overlay_x = x_confint_froz_q10func_0to1mm, overlay_y = y_confint_froz_q10func_0to1mm,overlay_color='red',overlay_linethickness=[2.5,1.,1.], inset_title='Thawed-only Q~B~10~N~=1.5 over 0-1m interval', inset_title_x=27.5, inset_textjust="CenterRight", inset_title_y=2.9e3 )
temps = np.arange(-30,30,0.1)
map_funcs.xyplot(temps, 10**(1.54871640e+00 + temps * (-3.68422434e-02) + temps **2 * (5.79263319e-04) ), ylog=True, yrange=[1., 5e3], xrange=temprange, file='sanitycheck')
textfile.close()
|
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if len(matrix) == 0:
return False
row = 0
col = len(matrix[0]) - 1
        # Staircase search from the top-right corner: moving down increases
        # the value, moving left decreases it, so each comparison discards a
        # full row or column.
        while row < len(matrix) and col >= 0:
            val = matrix[row][col]
            if target == val:
                return True
            elif target > val:
                row += 1
            else:
                col -= 1
return False
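# Quick sanity check with hypothetical values: rows and columns are sorted
# ascending, which is what the staircase search above relies on.
if __name__ == '__main__':
    m = [[1, 4, 7, 11],
         [2, 5, 8, 12],
         [3, 6, 9, 16]]
    assert Solution().searchMatrix(m, 9)
    assert not Solution().searchMatrix(m, 10)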
|
import os, sys
def get_path():
    return os.path.dirname(os.path.abspath(__file__))
def start():
print "start()"
print "current path =", get_path()
|
# -*- coding: utf-8 -*-
from nlplib.base import language_langid
def test_language_langid():
assert(language_langid(None) is None)
assert(language_langid(u'Happy face') == 'en')
assert (language_langid(u'Elle est contente') == 'fr')
|
thislist = ["apple", "banana", "cherry"]
print(thislist[1])
# Author: Bryan G
|
#!/usr/bin/env python3
import json
import logging
import pprint
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning, SNIMissingWarning, InsecurePlatformWarning
from . import site
class UniFiException(Exception):
apimsg = None
def __init__(self, apimsg, s=None):
m = s
if m is None:
m = apimsg
super(UniFiException, self).__init__(m)
self.apimsg = apimsg
class UniFi(object):
def __init__(self, addr, username, password):
self.addr = addr
self.username = username
self.password = password
self.cookies = {}
self.session = requests.Session()
self.login()
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(SNIMissingWarning)
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
'''
Unifi API endpoints are described in wiki:
(https://ubntwiki.com/products/software/unifi-controller/api)
'''
def api_addr(self, endpoint):
if endpoint == "login":
return self.addr + '/api/auth/' + endpoint
if endpoint == "status":
return self.addr + '/proxy/network/' + endpoint
else:
#print(self.addr + '/proxy/network/api/' + endpoint)
return self.addr + '/proxy/network/api/' + endpoint
def clear_session(self):
self.session.cookies.clear()
def api_process_response(self, r):
# Will raise exceptions if failing
self.set_error(r)
# parse json output
data = r.json()
return data
def api_post(self, endpoint, payload):
logging.debug('API POST ' + endpoint)
try:
headers = {"Accept": "application/json", "Content-Type": "application/json"}
r = self.session.post(self.api_addr(endpoint), headers = headers, json = payload, verify=False, timeout = 1)
self.set_error(r)
return self.api_process_response(r)
except UniFiException as e:
if endpoint != 'login' and e.apimsg is not None and e.apimsg == 'api.err.LoginRequired':
self.login()
r = self.session.post(self.api_addr(endpoint), headers = headers, json = payload, verify = False, timeout = 1)
return self.api_process_response(r)
else:
raise e
def api_get(self, endpoint):
logging.debug('API GET ' + endpoint)
headers = {"Accept": "application/json", "Content-Type": "application/json"}
try:
r = self.session.get(self.api_addr(endpoint), headers = headers, verify = False, timeout = 1)
return self.api_process_response(r)
except UniFiException as e:
if e.apimsg is not None and e.apimsg == 'api.err.LoginRequired':
self.login()
r = self.session.get(self.api_addr(endpoint), headers = headers, verify = False, timeout = 1)
return self.api_process_response(r)
else:
raise e
def set_error(self, r):
if r.status_code != 200:
print("ERROR - Status Code: ", status_code)
return
data = r.json()
if 'meta' in data:
if data['meta']['rc'] == 'ok':
return
elif data['meta']['rc'] == 'error':
raise UniFiException(data['meta']['msg'])
else:
raise UniFiException(None, 'FAIL: \n' + pprint.pformat(data))
def login(self):
# https://hemma:8443/api/login
# > POST {"username":"ubnt","password":"ubnt","strict":true}:
# < Set-Cookie: unifises=k8U3umwhciVfp8e43evU95mwQI3eAxK3; Path=/; Secure; HttpOnly
# < Set-Cookie: csrf_token=k8U3umwhciVfp8e43evU95mwQI3eAxK3; Path=/; Secure
# { "data" : [ ] , "meta" : { "rc" : "ok"}}
logging.info('Login ' + self.addr)
payload = { 'username': self.username, 'password': self.password }
self.api_post('login', payload)
def sites(self):
# https://hemma:8443/api/self/sites
# { "data" : [ { "_id" : "56c87bc1b41038d25762ce86" , "attr_hidden_id" : "default" , "attr_no_delete" : true , "desc" : "Default" , "name" : "default" , "num_ap" : 2 , "num_sta" : 22 , "role" : "admin"}] , "meta" : { "rc" : "ok"}}
data = self.api_get('self/sites')
ret = []
for s in data.get('data'):
ret.append(site.Site(self, s))
return ret
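# Minimal usage sketch (hypothetical address and credentials); api_addr()
# above assumes a UniFi OS style controller that proxies the network API:
#
#   controller = UniFi('https://192.0.2.1', 'admin', 'secret')
#   for s in controller.sites():
#       print(s)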
|
from cmath import sqrt
def main():
    x = int(input("Enter x: "))
z1 = x ** 2 + 2 * x - 3 + (x + 1) * sqrt((x ** 2) - 9)
z2 = sqrt((x + 3) / (x - 3))
return f"Формула z1: {z1}\nФормула z2: {z2}"
if __name__ == '__main__':
print(main())
|
from ..s3 import GetFile, UploadFromContent
def CheckIn(body):
local_ip = body['local_ip']
public_ip = body['public_ip']
moniker = body['moniker']
node_type = body['type']
if node_type == 'sentry':
# TODO: Get file (if file doesn't exist, it's ok). Then json.loads it
# Payload should be:
'''
{
"sentrys": [
"10.0.0.0:93939",
"10.0.0.0:93939"
]
}
'''
# Write a nodes/sentrys/10-0-0-0.json
# write a new nodes/sentrys.latest
current_file = GetFile('nodes/sentrys.latest')
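        # A hedged sketch of the TODO above (assumptions: GetFile returns the
        # object body as a JSON string, or a falsy value when the key does not
        # exist, and UploadFromContent takes (key, content)). The port suffix
        # shown in the example payload is omitted because this handler does
        # not extract one.
        import json
        listing = {'sentrys': []}
        if current_file:
            listing = json.loads(current_file)
        if public_ip not in listing['sentrys']:
            listing['sentrys'].append(public_ip)
        UploadFromContent('nodes/sentrys.latest', json.dumps(listing))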
elif node_type == 'validator':
return
|
"""
Version 0.1
Soft Robot Controller UI
Author: YingGwan
Function: A tool to conveniently adjust airbag pressure
"""
import sys
from PyQt5 import QtCore, QtGui, QtWidgets,Qt
import qdarkstyle
from NEWUI import Ui_MainWindow
from thread1 import Thread1
class CtrlWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(CtrlWindow, self).__init__(parent)
        self.MyUI = Ui_MainWindow()
        self.MyUI.setupUi(self)
self.setWindowTitle('Soft Robot Controller')
# pg.setConfigOption('background', '#31363b')
# pg.setConfigOption('foreground', 'w')
#Connect to slot functions
self.MyUI.ConnectButton.clicked.connect(self.connect_buttonClicked)
self.MyUI.QuitButton.clicked.connect(self.quit_buttonClicked)
self.MyUI.ClearAllVolt.clicked.connect(self.resetvoltageClicked)
self.MyUI.ClearBridgeButton.clicked.connect(self.resethalfBridgeClicked)
self.MyUI.ChannelOpen.clicked.connect(self.SwitchOn)
self.MyUI.ChannelClose.clicked.connect(self.SwitchOff)
self.MyUI.Regulator1.clicked.connect(self.Regulator1_Slot)
self.MyUI.Regulator2.clicked.connect(self.Regulator2_Slot)
#Slot Functions Area
#Connecting
    def connect_buttonClicked(self):
        t1.link()
        t1.run()
    #Quit
    def quit_buttonClicked(self):
        print("Quit")
        QtWidgets.qApp.quit()
    #Reset all voltage to 0V.
    def resetvoltageClicked(self):
        t1.sendResetCmd()
    #Reset all bridge out to 0V.
    def resethalfBridgeClicked(self):
        t1.HalfBridgeLowCmd()
#Switches on.
    def SwitchOn(self):
        Channel = self.MyUI.lineEdit_2.text()
        # t1.SetHigh(4)
        ChannelNo = int(Channel)
        print(ChannelNo)
        t1.SetHigh(ChannelNo)
        # print(Channel + " ON")
    # Switches off.
    def SwitchOff(self):
        Channel = self.MyUI.lineEdit_2.text()
        # t1.SetHigh(4)
        ChannelNo = int(Channel)
        t1.SetLow(ChannelNo)
        print(Channel + " OFF")
    def Regulator1_Slot(self):
        VoltageStr = self.MyUI.lineEdit_3.text()
        Voltage = float(VoltageStr)
        print("Voltage 1>--->")
        print(Voltage)
        t1.SendVoltage(1, Voltage)
        self.MyUI.lcdNumber.display(VoltageStr)
    def Regulator2_Slot(self):
        VoltageStr = self.MyUI.lineEdit_4.text()
        Voltage = float(VoltageStr)
        print("Voltage 2>--->")
        print(Voltage)
        t1.SendVoltage(2, Voltage)
        self.MyUI.lcdNumber_2.display(VoltageStr)
t1 = None  # placeholder; a Thread1 instance is assigned in __main__
if __name__ == "__main__":
    t1 = Thread1()  # get thread 1
app = QtWidgets.QApplication(sys.argv)
#Set UI style
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
# app.setStyleSheet('QMainWindow{background-color: dark;border: 1px solid black;}')
myWin = CtrlWindow()
myWin.show()
sys.exit(app.exec_())
|
import numpy as np
import MDAnalysis as mda
# All these functions for virtual sites definitions are explained
# in the GROMACS manual part 5.5.7 (page 379 in manual version 2020)
# Check also the bonded potentials table best viewed here:
# http://manual.gromacs.org/documentation/2020/reference-manual/topologies/topology-file-formats.html#tab-topfile2
# TODO: test all these functions
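# A small pure-numpy illustration (hypothetical coordinates) of the simplest
# construction below, vs2 func 1: the virtual site is the weighted average of
# two reference positions, with weight ``a`` on the second bead.
#
#   pos_i, pos_j = np.array([0., 0., 0.]), np.array([1., 0., 0.])
#   a = 0.25
#   site = (1 - a) * pos_i + a * pos_j  # -> array([0.25, 0., 0.])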
# Functions for virtual_sites2
def vs2_func_1(ns, traj, vs_def_beads_ids, vs_params):
"""Function for virtual_sites2.
    vs_2 func 1 -> Linear combination using 2 reference points: weighted COG using a percentage
    a in [0, 1]; the weight ``a`` is applied to the second of the two bead IDs.
ns requires:
aa2cg_universe (edited inplace)
"""
i, j = vs_def_beads_ids
a = vs_params # weight
weights = np.array([1 - a, a])
for ts in ns.aa2cg_universe.trajectory:
traj[ts.frame] = ns.aa2cg_universe.atoms[[i, j]].center(weights)
def vs2_func_2(ns, traj, vs_def_beads_ids, vs_params):
"""Function for virtual_sites2.
vs_2 func 2 -> Linear combination using 2 reference points on the vector from i to j,
at given distance (nm).
NOTE: it seems this one exists only since GROMACS 2020.
ns requires:
aa2cg_universe (edited inplace)
"""
# TODO: check this one with a GMX 2020 installation
i, j = vs_def_beads_ids
a = vs_params # nm
    a = a * 10  # convert nm to angstrom for MDA
for ts in ns.aa2cg_universe.trajectory:
pos_i = ns.aa2cg_universe.atoms[i].position
pos_j = ns.aa2cg_universe.atoms[j].position
r_ij = pos_j - pos_i
traj[ts.frame] = pos_i + a * r_ij / mda.lib.mdamath.norm(r_ij)
# Functions for virtual_sites3
def vs3_func_1(ns, traj, vs_def_beads_ids, vs_params):
"""Function for virtual_sites3.
vs_3 func 1 -> Linear combination using 3 reference points in the plane, using sum of vectors
from i to j and from k to i.
ns requires:
aa2cg_universe (edited inplace)
"""
i, j, k = vs_def_beads_ids
a, b = vs_params # nm, nm
    a, b = a * 10, b * 10  # convert nm to angstrom for MDA
for ts in ns.aa2cg_universe.trajectory:
pos_i = ns.aa2cg_universe.atoms[i].position
pos_j = ns.aa2cg_universe.atoms[j].position
pos_k = ns.aa2cg_universe.atoms[k].position
r_ij = pos_j - pos_i
r_ik = pos_k - pos_i
traj[ts.frame] = pos_i + a * r_ij / mda.lib.mdamath.norm(r_ij) / 2 + b * r_ik / mda.lib.mdamath.norm(r_ik) / 2
    # This follows the formula from the GROMACS manual, so its form differs from the
    # one-line description above, but the two should be equivalent.
def vs3_func_2(ns, traj, vs_def_beads_ids, vs_params):
"""Function for virtual_sites3.
vs_3 func 2 -> Linear combination using 3 reference points in the plane, using WEIGHTS sum of
vectors from j to i and from k to i + fixed distance.
ns requires:
aa2cg_universe (edited inplace)
"""
i, j, k = vs_def_beads_ids
a, b = vs_params # weight, nm
    b = b * 10  # convert nm to angstrom for MDA
for ts in ns.aa2cg_universe.trajectory:
pos_i = ns.aa2cg_universe.atoms[i].position
pos_j = ns.aa2cg_universe.atoms[j].position
pos_k = ns.aa2cg_universe.atoms[k].position
r_ij = pos_j - pos_i
r_jk = pos_k - pos_j
comb_ijk = (1 - a) * r_ij + a * r_jk
traj[ts.frame] = pos_i + b * (comb_ijk / mda.lib.mdamath.norm(comb_ijk))
def vs3_func_3(ns, traj, vs_def_beads_ids, vs_params):
"""Function for virtual_sites3.
vs_3 func 3 -> Linear combination using 3 reference points angle in the plane defined,
at given distance of the 3rd point.
ns requires:
aa2cg_universe (edited inplace)
"""
i, j, k = vs_def_beads_ids
ang_deg, d = vs_params # degrees, nm
ang_rad = np.deg2rad(ang_deg) # retrieve radians
    d = d * 10  # convert nm to angstrom for MDA
for ts in ns.aa2cg_universe.trajectory:
pos_i = ns.aa2cg_universe.atoms[i].position
pos_j = ns.aa2cg_universe.atoms[j].position
pos_k = ns.aa2cg_universe.atoms[k].position
r_ij = pos_j - pos_i
r_jk = pos_k - pos_j
comb_ijk = r_jk - (np.dot(r_ij, r_jk) / np.dot(r_ij, r_ij)) * r_ij
traj[ts.frame] = pos_i + d * np.cos(ang_rad) * (r_ij / mda.lib.mdamath.norm(r_ij)) + d * np.sin(ang_rad) * (
comb_ijk / mda.lib.mdamath.norm(comb_ijk))
def vs3_func_4(ns, traj, vs_def_beads_ids, vs_params):
"""Function for virtual_sites3.
vs_3 func 4 -> Linear combination using 3 reference points out of plane.
ns requires:
aa2cg_universe (edited inplace)
"""
i, j, k = vs_def_beads_ids
a, b, c = vs_params # weight, weight, nm**(-1)
    c = c / 10  # convert nm**(-1) to angstrom**(-1) for MDA
for ts in ns.aa2cg_universe.trajectory:
pos_i = ns.aa2cg_universe.atoms[i].position
pos_j = ns.aa2cg_universe.atoms[j].position
pos_k = ns.aa2cg_universe.atoms[k].position
r_ij = pos_j - pos_i
r_ik = pos_k - pos_i
        # 3out construction: x = x_i + a*r_ij + b*r_ik + c*(r_ij x r_ik), per the GROMACS manual
        traj[ts.frame] = pos_i + a * r_ij + b * r_ik + c * np.cross(r_ij, r_ik)
# Functions for virtual_sites4
def vs4_func_2(ns, traj, vs_def_beads_ids, vs_params):
"""Function for virtual_sites4.
vs_4 func 2 -> Linear combination using 3 reference points.
NOTE: only function 2 is defined for vs_4 in GROMACS, because it replaces function 1
which still exists for retro compatibility but its usage must be avoided
ns requires:
aa2cg_universe (edited inplace)
"""
i, j, k, l = vs_def_beads_ids
a, b, c = vs_params # weight, weight, nm
    c = c * 10  # convert nm to angstrom for MDA
for ts in ns.aa2cg_universe.trajectory:
pos_i = ns.aa2cg_universe.atoms[i].position
pos_j = ns.aa2cg_universe.atoms[j].position
pos_k = ns.aa2cg_universe.atoms[k].position
pos_l = ns.aa2cg_universe.atoms[l].position
r_ij = pos_j - pos_i
r_ik = pos_k - pos_i
r_il = pos_l - pos_i
r_ja = a * r_ik - r_ij
r_jb = b * r_il - r_ij
r_m = np.cross(r_ja, r_jb)
traj[ts.frame] = pos_i - c * (r_m / mda.lib.mdamath.norm(r_m))
# Functions for virtual_sitesn
def vsn_func_1(ns, traj, vs_def_beads_ids):
"""Function for virtual_sitesn.
vs_n func 1 -> Center of Geometry
ns requires:
aa2cg_universe (edited inplace)
"""
for ts in ns.aa2cg_universe.trajectory:
traj[ts.frame] = ns.aa2cg_universe.atoms[vs_def_beads_ids].center_of_geometry(pbc=None)
def vsn_func_2(ns, traj, vs_def_beads_ids, bead_id):
"""Function for virtual_sitesn.
vs_n func 2 -> Center of Mass
ns requires:
aa2cg_universe (edited inplace)
"""
# inform user if this VS definition uses beads (or VS) with mass 0,
# because this is COM so 0 mass means a bead that was marked for defining the VS is in fact ignored
zero_mass_beads_ids = []
for bid in vs_def_beads_ids:
if bid in ns.cg_itp["virtual_sitesn"]:
if ns.cg_itp["virtual_sitesn"][bid]["mass"] == 0:
zero_mass_beads_ids.append(bid)
if len(zero_mass_beads_ids) > 0:
print(" WARNING: Virtual site ID {} uses function 2 for COM, but its definition contains IDs " + " ".join(
zero_mass_beads_ids) + "which have no mass".format(bead_id + 1))
for ts in ns.aa2cg_universe.trajectory:
traj[ts.frame] = ns.aa2cg_universe.atoms[vs_def_beads_ids].center_of_mass(pbc=None)
def vsn_func_3(ns, traj, vs_def_beads_ids, vs_params):
"""Function for virtual_sitesn.
vs_n func 3 -> Center of Weights (each atom has a given weight, pairwise formatting: id1 w1 id2 w2 ..)
ns requires:
aa2cg_universe (edited inplace)
"""
masses_and_weights = np.array(
[ns.aa2cg_universe.atoms[vs_def_beads_ids[i]].mass * vs_params[i] for i in range(len(vs_def_beads_ids))])
for ts in ns.aa2cg_universe.trajectory:
traj[ts.frame] = ns.aa2cg_universe.atoms[vs_def_beads_ids].center(masses_and_weights)
|
# Copyright (C) 2013 Matthew C. Zwier, Nick Rego, and Lillian T. Chong
#
# This file is part of WESTPA.
#
# WESTPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WESTPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WESTPA. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division; __metaclass__ = type
import sys
import logging
import math
from numpy import index_exp
from west.data_manager import seg_id_dtype, weight_dtype
from westpa.binning import index_dtype, assign_and_label, accumulate_labeled_populations
from westtools import (WESTParallelTool, WESTDataReader, WESTDSSynthesizer, BinMappingComponent,
ProgressIndicatorComponent)
import numpy
import westpa
from westpa import h5io
from westpa.h5io import WESTPAH5File
from westpa.extloader import get_object
log = logging.getLogger('westtools.w_assign')
def parse_pcoord_value(pc_str):
namespace = {'math': math,
'numpy': numpy,
'inf': float('inf')}
arr = numpy.array(eval(pc_str,namespace))
if arr.ndim == 0:
arr.shape = (1,1)
elif arr.ndim == 1:
arr.shape = (1,) + arr.shape
else:
raise ValueError('too many dimensions')
return arr
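# For instance (hypothetical values): parse_pcoord_value('1.0') yields an
# array of shape (1, 1), and parse_pcoord_value('[1.0, 2.0]') one of shape
# (1, 2), matching the [point][dimension] layout expected by the mappers.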
def _assign_label_pop(n_iter, lb, ub, mapper, nstates, state_map, last_labels, parent_id_dsspec, weight_dsspec, pcoord_dsspec):
nbins = len(state_map)-1
parent_ids = parent_id_dsspec.get_iter_data(n_iter,index_exp[lb:ub])
weights = weight_dsspec.get_iter_data(n_iter,index_exp[lb:ub])
pcoords = pcoord_dsspec.get_iter_data(n_iter,index_exp[lb:ub])
assignments, trajlabels, statelabels = assign_and_label(lb, ub, parent_ids,
mapper.assign, nstates, state_map, last_labels, pcoords)
pops = numpy.zeros((nstates+1,nbins+1), weight_dtype)
accumulate_labeled_populations(weights, assignments, trajlabels, pops)
return (assignments, trajlabels, pops, lb, ub, statelabels)
class WAssign(WESTParallelTool):
prog='w_assign'
description = '''\
Assign walkers to bins, producing a file (by default named "assign.h5")
which can be used in subsequent analysis.
For consistency in subsequent analysis operations, the entire dataset
must be assigned, even if only a subset of the data will be used. This
ensures that analyses that rely on tracing trajectories always know the
originating bin of each trajectory.
-----------------------------------------------------------------------------
Source data
-----------------------------------------------------------------------------
Source data is provided either by a user-specified function
(--construct-dataset) or a list of "data set specifications" (--dsspecs).
If neither is provided, the progress coordinate dataset ``pcoord`` is used.
To use a custom function to extract or calculate data whose probability
distribution will be calculated, specify the function in standard Python
MODULE.FUNCTION syntax as the argument to --construct-dataset. This function
will be called as function(n_iter,iter_group), where n_iter is the iteration
whose data are being considered and iter_group is the corresponding group
in the main WEST HDF5 file (west.h5). The function must return data which can
be indexed as [segment][timepoint][dimension].
To use a list of data set specifications, specify --dsspecs and then list the
desired datasets one-by-one (space-separated in most shells). These data set
specifications are formatted as NAME[,file=FILENAME,slice=SLICE], which will
use the dataset called NAME in the HDF5 file FILENAME (defaulting to the main
WEST HDF5 file west.h5), and slice it with the Python slice expression SLICE
(as in [0:2] to select the first two elements of the first axis of the
dataset). The ``slice`` option is most useful for selecting one column (or
more) from a multi-column dataset, such as arises when using a progress
coordinate of multiple dimensions.
-----------------------------------------------------------------------------
Specifying macrostates
-----------------------------------------------------------------------------
Optionally, kinetic macrostates may be defined in terms of sets of bins.
Each trajectory will be labeled with the kinetic macrostate it was most
recently in at each timepoint, for use in subsequent kinetic analysis.
This is required for all kinetics analysis (w_kintrace and w_kinmat).
There are three ways to specify macrostates:
1. States corresponding to single bins may be identified on the command
line using the --states option, which takes multiple arguments, one for
each state (separated by spaces in most shells). Each state is specified
as a coordinate tuple, with an optional label prepended, as in
``bound:1.0`` or ``unbound:(2.5,2.5)``. Unlabeled states are named
``stateN``, where N is the (zero-based) position in the list of states
supplied to --states.
2. States corresponding to multiple bins may use a YAML input file specified
with --states-from-file. This file defines a list of states, each with a
name and a list of coordinate tuples; bins containing these coordinates
will be mapped to the containing state. For instance, the following
file::
---
states:
- label: unbound
coords:
- [9.0, 1.0]
- [9.0, 2.0]
- label: bound
coords:
- [0.1, 0.0]
produces two macrostates: the first state is called "unbound" and
consists of bins containing the (2-dimensional) progress coordinate
values (9.0, 1.0) and (9.0, 2.0); the second state is called "bound"
and consists of the single bin containing the point (0.1, 0.0).
3. Arbitrary state definitions may be supplied by a user-defined function,
specified as --states-from-function=MODULE.FUNCTION. This function is
called with the bin mapper as an argument (``function(mapper)``) and must
return a list of dictionaries, one per state. Each dictionary must contain
a vector of coordinate tuples with key "coords"; the bins into which each
of these tuples falls define the state. An optional name for the state
(with key "label") may also be provided.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, by default "assign.h5") contains the following
attributes and datasets:
``nbins`` attribute
*(Integer)* Number of valid bins. Bin assignments range from 0 to
*nbins*-1, inclusive.
``nstates`` attribute
*(Integer)* Number of valid macrostates (may be zero if no such states are
specified). Trajectory ensemble assignments range from 0 to *nstates*-1,
inclusive, when states are defined.
``/assignments`` [iteration][segment][timepoint]
*(Integer)* Per-segment and -timepoint assignments (bin indices).
``/npts`` [iteration]
*(Integer)* Number of timepoints in each iteration.
``/nsegs`` [iteration]
*(Integer)* Number of segments in each iteration.
``/labeled_populations`` [iterations][state][bin]
*(Floating-point)* Per-iteration and -timepoint bin populations, labeled
by most recently visited macrostate. The last state entry (*nstates-1*)
corresponds to trajectories initiated outside of a defined macrostate.
``/bin_labels`` [bin]
*(String)* Text labels of bins.
When macrostate assignments are given, the following additional datasets are
present:
``/trajlabels`` [iteration][segment][timepoint]
*(Integer)* Per-segment and -timepoint trajectory labels, indicating the
macrostate which each trajectory last visited.
``/state_labels`` [state]
*(String)* Labels of states.
``/state_map`` [bin]
*(Integer)* Mapping of bin index to the macrostate containing that bin.
An entry will contain *nbins+1* if that bin does not fall into a
macrostate.
Datasets indexed by state and bin contain one more entry than the number of
valid states or bins. For *N* bins, axes indexed by bin are of size *N+1*, and
entry *N* (0-based indexing) corresponds to a walker outside of the defined bin
space (which will cause most mappers to raise an error). More importantly, for
*M* states (including the case *M=0* where no states are specified), axes
indexed by state are of size *M+1* and entry *M* refers to trajectories
initiated in a region not corresponding to a defined macrostate.
Thus, ``labeled_populations[:,:,:].sum(axis=1)[:,:-1]`` gives overall per-bin
populations, for all defined bins and
``labeled_populations[:,:,:].sum(axis=2)[:,:-1]`` gives overall
per-trajectory-ensemble populations for all defined states.
-----------------------------------------------------------------------------
Parallelization
-----------------------------------------------------------------------------
This tool supports parallelized binning, including reading/calculating input
data.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def __init__(self):
super(WAssign,self).__init__()
# Parallel processing by default (this is not actually necessary, but it is
# informative!)
self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager
self.data_reader = WESTDataReader()
self.dssynth = WESTDSSynthesizer(default_dsname='pcoord')
self.binning = BinMappingComponent()
self.progress = ProgressIndicatorComponent()
self.output_file = None
self.output_filename = None
self.states = []
def add_args(self, parser):
self.data_reader.add_args(parser)
self.binning.add_args(parser, suppress=['--bins-from-h5file'])
self.dssynth.add_args(parser)
sgroup = parser.add_argument_group('macrostate definitions').add_mutually_exclusive_group()
sgroup.add_argument('--states', nargs='+', metavar='STATEDEF',
help='''Single-bin kinetic macrostate, specified by a coordinate tuple (e.g. '1.0' or '[1.0,1.0]'),
optionally labeled (e.g. 'bound:[1.0,1.0]'). States corresponding to multiple bins
must be specified with --states-from-file.''')
sgroup.add_argument('--states-from-file', metavar='STATEFILE',
help='''Load kinetic macrostates from the YAML file STATEFILE. See description
above for the appropriate structure.''')
sgroup.add_argument('--states-from-function', metavar='STATEFUNC',
help='''Load kinetic macrostates from the function STATEFUNC, specified as
module_name.func_name. This function is called with the bin mapper as an argument,
and must return a list of dictionaries {'label': state_label, 'coords': 2d_array_like}
one for each macrostate; the 'coords' entry must contain enough rows to identify all bins
in the macrostate.''')
agroup = parser.add_argument_group('other options')
agroup.add_argument('-o', '--output', dest='output', default='assign.h5',
help='''Store results in OUTPUT (default: %(default)s).''')
def process_args(self, args):
self.progress.process_args(args)
self.data_reader.process_args(args)
with self.data_reader:
self.dssynth.h5filename = self.data_reader.we_h5filename
self.dssynth.process_args(args)
self.binning.process_args(args)
if args.states:
self.parse_cmdline_states(args.states)
elif args.states_from_file:
self.load_state_file(args.states_from_file)
elif args.states_from_function:
self.load_states_from_function(get_object(args.states_from_function,path=['.']))
if self.states and len(self.states) < 2:
raise ValueError('zero, two, or more macrostates are required')
#self.output_file = WESTPAH5File(args.output, 'w', creating_program=True)
self.output_filename = args.output
log.debug('state list: {!r}'.format(self.states))
def parse_cmdline_states(self, state_strings):
states = []
for istring, state_string in enumerate(state_strings):
try:
(label, coord_str) = state_string.split(':')
except ValueError:
label = 'state{}'.format(istring)
coord_str = state_string
coord = parse_pcoord_value(coord_str)
states.append({'label': label, 'coords': coord})
self.states = states
def load_state_file(self, state_filename):
import yaml
        ydict = yaml.safe_load(open(state_filename, 'rt'))
ystates = ydict['states']
states = []
for istate, ystate in enumerate(ystates):
state = {}
state['label'] = ystate.get('label', 'state{}'.format(istate))
# coords can be:
# - a scalar, in which case it is one bin, 1-D
# - a single list, which is rejected as ambiguous
# - a list of lists, which is a list of coordinate tuples
coords = numpy.array(ystate['coords'])
if coords.ndim == 0:
coords.shape = (1,1)
elif coords.ndim == 1:
raise ValueError('list {!r} is ambiguous (list of 1-d coordinates, or single multi-d coordinate?)'
.format(ystate['coords']))
elif coords.ndim > 2:
raise ValueError('coordinates must be 2-D')
state['coords'] = coords
states.append(state)
self.states = states
def load_states_from_function(self, statefunc):
states = statefunc(self.binning.mapper)
for istate, state in enumerate(states):
state.setdefault('label','state{}'.format(istate))
try:
state['coords'] = numpy.array(state['coords'])
except KeyError:
raise ValueError('state function {!r} returned a state {!r} without coordinates'.format(statefunc,state))
self.states = states
log.debug('loaded states: {!r}'.format(self.states))
def assign_iteration(self, n_iter, nstates, nbins, state_map, last_labels):
''' Method to encapsulate the segment slicing (into n_worker slices) and parallel job submission
Submits job(s), waits on completion, splices them back together
Returns: assignments, trajlabels, pops for this iteration'''
futures = []
iter_group = self.data_reader.get_iter_group(n_iter)
nsegs, npts = iter_group['pcoord'].shape[:2]
n_workers = self.work_manager.n_workers or 1
assignments = numpy.empty((nsegs, npts), dtype=index_dtype)
trajlabels = numpy.empty((nsegs, npts), dtype=index_dtype)
statelabels = numpy.empty((nsegs, npts), dtype=index_dtype)
pops = numpy.zeros((nstates+1,nbins+1), dtype=weight_dtype)
#Submit jobs to work manager
blocksize = nsegs // n_workers
if nsegs % n_workers > 0:
blocksize += 1
def task_gen():
if __debug__:
checkset = set()
for lb in xrange(0, nsegs, blocksize):
ub = min(nsegs, lb+blocksize)
if __debug__:
checkset.update(set(xrange(lb,ub)))
args = ()
kwargs = dict(n_iter=n_iter,
lb=lb, ub=ub, mapper=self.binning.mapper, nstates=nstates, state_map=state_map,
last_labels=last_labels,
parent_id_dsspec=self.data_reader.parent_id_dsspec,
weight_dsspec=self.data_reader.weight_dsspec,
pcoord_dsspec=self.dssynth.dsspec)
yield (_assign_label_pop, args, kwargs)
#futures.append(self.work_manager.submit(_assign_label_pop,
#kwargs=)
if __debug__:
assert checkset == set(xrange(nsegs)), 'segments missing: {}'.format(set(xrange(nsegs)) - checkset)
#for future in self.work_manager.as_completed(futures):
for future in self.work_manager.submit_as_completed(task_gen(), queue_size=self.max_queue_len):
assign_slice, traj_slice, slice_pops, lb, ub, state_slice = future.get_result(discard=True)
assignments[lb:ub, :] = assign_slice
trajlabels[lb:ub, :] = traj_slice
statelabels[lb:ub, :] = state_slice
pops += slice_pops
del assign_slice, traj_slice, slice_pops, state_slice
del futures
return (assignments, trajlabels, pops, statelabels)
def go(self):
assert self.data_reader.parent_id_dsspec._h5file is None
assert self.data_reader.weight_dsspec._h5file is None
if hasattr(self.dssynth.dsspec, '_h5file'):
assert self.dssynth.dsspec._h5file is None
pi = self.progress.indicator
pi.operation = 'Initializing'
with pi, self.data_reader, WESTPAH5File(self.output_filename, 'w', creating_program=True) as self.output_file:
assign = self.binning.mapper.assign
# We always assign the entire simulation, so that no trajectory appears to start
# in a transition region that doesn't get initialized in one.
iter_start = 1
iter_stop = self.data_reader.current_iteration
h5io.stamp_iter_range(self.output_file, iter_start, iter_stop)
nbins = self.binning.mapper.nbins
self.output_file.attrs['nbins'] = nbins
state_map = numpy.empty((self.binning.mapper.nbins+1,), index_dtype)
            state_map[:] = 0 # default fill; overwritten below when macrostates are defined
# Recursive mappers produce a generator rather than a list of labels
# so consume the entire generator into a list
labels = [label for label in self.binning.mapper.labels]
self.output_file.create_dataset('bin_labels', data=labels, compression=9)
if self.states:
nstates = len(self.states)
state_map[:] = nstates # state_id == nstates => unknown state
state_labels = [state['label'] for state in self.states]
for istate, sdict in enumerate(self.states):
assert state_labels[istate] == sdict['label'] #sanity check
state_assignments = assign(sdict['coords'])
for assignment in state_assignments:
state_map[assignment] = istate
self.output_file.create_dataset('state_map', data=state_map, compression=9, shuffle=True)
self.output_file['state_labels'] = state_labels #+ ['(unknown)']
else:
nstates = 0
self.output_file.attrs['nstates'] = nstates
iter_count = iter_stop - iter_start
nsegs = numpy.empty((iter_count,), seg_id_dtype)
npts = numpy.empty((iter_count,), seg_id_dtype)
# scan for largest number of segments and largest number of points
pi.new_operation ('Scanning for segment and point counts', iter_stop-iter_start)
for iiter, n_iter in enumerate(xrange(iter_start,iter_stop)):
iter_group = self.data_reader.get_iter_group(n_iter)
nsegs[iiter], npts[iiter] = iter_group['pcoord'].shape[0:2]
pi.progress += 1
del iter_group
pi.new_operation('Preparing output')
# create datasets
self.output_file.create_dataset('nsegs', data=nsegs, shuffle=True, compression=9)
self.output_file.create_dataset('npts', data=npts, shuffle=True, compression=9)
max_nsegs = nsegs.max()
max_npts = npts.max()
assignments_shape = (iter_count,max_nsegs,max_npts)
assignments_dtype = numpy.min_scalar_type(nbins)
assignments_ds = self.output_file.create_dataset('assignments', dtype=assignments_dtype, shape=assignments_shape,
compression=4, shuffle=True,
chunks=h5io.calc_chunksize(assignments_shape, assignments_dtype),
fillvalue=nbins)
if self.states:
trajlabel_dtype = numpy.min_scalar_type(nstates)
trajlabels_ds = self.output_file.create_dataset('trajlabels', dtype=trajlabel_dtype, shape=assignments_shape,
compression=4, shuffle=True,
chunks=h5io.calc_chunksize(assignments_shape, trajlabel_dtype),
fillvalue=nstates)
statelabels_ds = self.output_file.create_dataset('statelabels', dtype=trajlabel_dtype, shape=assignments_shape,
compression=4, shuffle=True,
chunks=h5io.calc_chunksize(assignments_shape, trajlabel_dtype),
fillvalue=nstates)
pops_shape = (iter_count,nstates+1,nbins+1)
pops_ds = self.output_file.create_dataset('labeled_populations', dtype=weight_dtype, shape=pops_shape,
compression=4, shuffle=True,
chunks=h5io.calc_chunksize(pops_shape, weight_dtype))
h5io.label_axes(pops_ds, ['iteration', 'state', 'bin'])
pi.new_operation('Assigning to bins', iter_stop-iter_start)
last_labels = None # mapping of seg_id to last macrostate inhabited
for iiter, n_iter in enumerate(xrange(iter_start,iter_stop)):
#get iteration info in this block
if iiter == 0:
last_labels = numpy.empty((nsegs[iiter],), index_dtype)
last_labels[:] = nstates #unknown state
#Slices this iteration into n_workers groups of segments, submits them to wm, splices results back together
assignments, trajlabels, pops, statelabels = self.assign_iteration(n_iter, nstates, nbins, state_map, last_labels)
##Do stuff with this iteration's results
last_labels = trajlabels[:,-1].copy()
assignments_ds[iiter, 0:nsegs[iiter], 0:npts[iiter]] = assignments
pops_ds[iiter] = pops
if self.states:
trajlabels_ds[iiter, 0:nsegs[iiter], 0:npts[iiter]] = trajlabels
statelabels_ds[iiter, 0:nsegs[iiter], 0:npts[iiter]] = statelabels
pi.progress += 1
del assignments, trajlabels, pops, statelabels
for dsname in 'assignments', 'npts', 'nsegs', 'labeled_populations', 'statelabels':
h5io.stamp_iter_range(self.output_file[dsname], iter_start, iter_stop)
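# A hypothetical illustration of a state function usable with
# --states-from-function (see "Specifying macrostates" in the description
# above); it is called with the bin mapper and returns one dict per state.
# It is not used by the tool itself and only sketches the expected structure.
def _example_state_func(mapper):
    return [{'label': 'unbound', 'coords': [[9.0, 1.0], [9.0, 2.0]]},
            {'label': 'bound', 'coords': [[0.1, 0.0]]}]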
if __name__ == '__main__':
WAssign().main()
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import fields
import cybox.bindings.unix_user_account_object as unix_user_account_binding
from cybox.common import String, NonNegativeInteger, UnsignedInteger
from cybox.objects.user_account_object import (Group, GroupList, Privilege,
PrivilegeList, UserAccount)
class UnixGroup(Group):
_binding = unix_user_account_binding
_binding_class = unix_user_account_binding.UnixGroupType
_namespace = 'http://cybox.mitre.org/objects#UnixUserAccountObject-2'
_XSI_TYPE = "UnixGroupType"
group_id = fields.TypedField("Group_ID", NonNegativeInteger)
class UnixGroupList(GroupList):
group = fields.TypedField("Group", UnixGroup, multiple=True)
class UnixPrivilege(Privilege):
_binding = unix_user_account_binding
_binding_class = unix_user_account_binding.UnixPrivilegeType
_namespace = 'http://cybox.mitre.org/objects#UnixUserAccountObject-2'
_XSI_TYPE = "UnixPrivilegeType"
permissions_mask = fields.TypedField("Permissions_Mask", String)
class UnixPrivilegeList(PrivilegeList):
privilege = fields.TypedField("Privilege", UnixPrivilege, multiple=True)
class UnixUserAccount(UserAccount):
_binding = unix_user_account_binding
_binding_class = unix_user_account_binding.UnixUserAccountObjectType
_namespace = 'http://cybox.mitre.org/objects#UnixUserAccountObject-2'
_XSI_NS = "UnixUserAccountObj"
_XSI_TYPE = "UnixUserAccountObjectType"
group_id = fields.TypedField("Group_ID", UnsignedInteger)
user_id = fields.TypedField("User_ID", UnsignedInteger)
login_shell = fields.TypedField("Login_Shell", String)
# Override abstract types
group_list = fields.TypedField('Group_List', UnixGroupList)
privilege_list = fields.TypedField('Privilege_List', UnixPrivilegeList)
|
from . import LinAlg as A
from. import Set as S
class Relation:
def __init__(self, r_dict):
self.left = set(r_dict.keys())
self.right = S.Set.union(*(r_dict[x] \
for x in r_dict.keys()))
self.table_map_left = dict(zip(self.left, \
range(0, len(self.left))))
self.table_map_right = dict(zip(self.right, \
range(0, len(self.right))))
self.r_dict = r_dict
self.table = A.Matrix(\
[[1 \
if a in r_dict \
and b in r_dict[a] \
else 0
for b in self.right]
for a in self.left]
)
def holds(self, a, b):
ix_a = self.table_map_left[a]
ix_b = self.table_map_right[b]
return 1 == self.table[ix_a, ix_b]
def not_holds(self, a, b):
ix_a = self.table_map_left[a]
ix_b = self.table_map_right[b]
return 0 == self.table[ix_a, ix_b]
def all_pairs(self):
return {(a,b) \
for a in self.left
for b in self.right
if self.holds(a,b)}
class EqRelation(Relation):
def __init__(self, sigma, parts):
self.parts = parts
if not S.Set.is_partition(sigma, parts):
msg = "'EqRelation' must generate from a "\
"disjoint partition."
raise ValueError(msg)
r_dict = {}
for p in parts:
for x in p:
if not x in r_dict:
r_dict[x] = p
Relation.__init__(self, r_dict)
|
from setuptools import setup
with open("README.rst", "r") as fh:
long_description = fh.read()
with open("pywavez/__version__.py", "r") as fh:
versiondict = {"__builtins__": {}}
exec(fh.read(), versiondict)
version = versiondict["version"]
setup(
name="pywavez",
version=version,
description="Native Python implementation of the ZWave protocol",
long_description=long_description,
long_description_content_type="text/x-rst",
url="https://github.com/spacedentist/pywavez",
download_url=(
f"https://github.com/spacedentist/pywavez/archive/{ version }.tar.gz"
),
author="Sven Over",
author_email="sp@cedenti.st",
license="MIT",
packages=["pywavez"],
install_requires=["asyncinit", "pyserial", "pyserial-asyncio"],
entry_points={
"console_scripts": [
"pywavez-remote-serial-server=pywavez.RemoteSerialDevice:main"
]
},
test_suite="pywavez.tests",
setup_requires=["tox"],
)
|
#!/usr/bin/env python3
import collections
import lxml.etree
import datetime
import slixmpp
import asyncio
import logging
import signal
import atexit
import time
import sys
import io
import os
from functools import partial
from slixmpp.xmlstream.matcher.base import MatcherBase
if not hasattr(asyncio, "ensure_future"):
asyncio.ensure_future = getattr(asyncio, "async")
class MatchAll(MatcherBase):
"""match everything"""
def match(self, xml):
return True
class StanzaError(Exception):
"""
Raised when a step fails.
"""
pass
class SkipStepError(Exception):
"""
    Raised by a step when it needs to be skipped, by running
the next available step immediately.
"""
pass
class XMPPComponent(slixmpp.BaseXMPP):
"""
XMPPComponent sending a “scenario” of stanzas, checking that the responses
match the expected results.
"""
def __init__(self, scenario, biboumi):
super().__init__(jid="biboumi.localhost", default_ns="jabber:component:accept")
self.is_component = True
self.auto_authorize = None # Do not accept or reject subscribe requests automatically
self.auto_subscribe = False
self.stream_header = '<stream:stream %s %s from="%s" id="%s">' % (
'xmlns="jabber:component:accept"',
'xmlns:stream="%s"' % self.stream_ns,
self.boundjid, self.get_id())
self.stream_footer = "</stream:stream>"
self.register_handler(slixmpp.Callback('Match All',
MatchAll(None),
self.handle_incoming_stanza))
self.add_event_handler("session_end", self.on_end_session)
asyncio.ensure_future(self.accept_routine())
self.scenario = scenario
self.biboumi = biboumi
# A callable, taking a stanza as argument and raising a StanzaError
# exception if the test should fail.
self.stanza_checker = None
self.failed = False
self.accepting_server = None
self.saved_values = {}
def error(self, message):
print("[31;1mFailure[0m: %s" % (message,))
self.scenario.steps = []
self.failed = True
def on_end_session(self, _):
self.loop.stop()
def handle_incoming_stanza(self, stanza):
if self.stanza_checker:
try:
self.stanza_checker(stanza)
except StanzaError as e:
self.error(e)
except SkipStepError:
# Run the next step and then re-handle this same stanza
self.run_scenario()
return self.handle_incoming_stanza(stanza)
self.stanza_checker = None
self.run_scenario()
def run_scenario(self):
if self.scenario.steps:
step = self.scenario.steps.pop(0)
try:
step(self, self.biboumi)
except Exception as e:
self.error(e)
self.run_scenario()
else:
if self.biboumi:
self.biboumi.stop()
@asyncio.coroutine
def accept_routine(self):
self.accepting_server = yield from self.loop.create_server(lambda: self,
"127.0.0.1", 8811, reuse_address=True)
def check_stanza_against_all_expected_xpaths(self):
pass
def match(stanza, xpath):
tree = lxml.etree.parse(io.StringIO(str(stanza)))
matched = tree.xpath(xpath, namespaces={'re': 'http://exslt.org/regular-expressions',
'muc_user': 'http://jabber.org/protocol/muc#user',
'muc_owner': 'http://jabber.org/protocol/muc#owner',
'muc': 'http://jabber.org/protocol/muc',
'disco_info': 'http://jabber.org/protocol/disco#info',
'muc_traffic': 'http://jabber.org/protocol/muc#traffic',
'disco_items': 'http://jabber.org/protocol/disco#items',
'commands': 'http://jabber.org/protocol/commands',
'dataform': 'jabber:x:data',
'version': 'jabber:iq:version',
'mam': 'urn:xmpp:mam:2',
'rms': 'http://jabber.org/protocol/rsm',
'delay': 'urn:xmpp:delay',
'forward': 'urn:xmpp:forward:0',
'client': 'jabber:client',
'rsm': 'http://jabber.org/protocol/rsm',
'carbon': 'urn:xmpp:carbons:2',
'hints': 'urn:xmpp:hints',
'stanza': 'urn:ietf:params:xml:ns:xmpp-stanzas',
'stable_id': 'urn:xmpp:sid:0'})
return matched
def check_xpath(xpaths, xmpp, after, stanza):
for xpath in xpaths:
expected = True
real_xpath = xpath
# We can check that a stanza DOESN’T match, by adding a ! before it.
if xpath.startswith('!'):
expected = False
xpath = xpath[1:]
matched = match(stanza, xpath)
if (expected and not matched) or (not expected and matched):
raise StanzaError("Received stanza\n%s\ndid not match expected xpath\n%s" % (stanza, real_xpath))
if after:
        if isinstance(after, collections.abc.Iterable):
for af in after:
af(stanza, xmpp)
else:
after(stanza, xmpp)
def all_xpaths_match(stanza, xpaths):
for xpath in xpaths:
matched = match(stanza, xpath)
if not matched:
return False
return True
def check_list_of_xpath(list_of_xpaths, xmpp, stanza):
found = None
for i, xpaths in enumerate(list_of_xpaths):
if all_xpaths_match(stanza, xpaths):
found = True
list_of_xpaths.pop(i)
break
if not found:
raise StanzaError("Received stanza “%s” did not match any of the expected xpaths:\n%s" % (stanza, list_of_xpaths))
if list_of_xpaths:
step = partial(expect_unordered_already_formatted, list_of_xpaths)
xmpp.scenario.steps.insert(0, step)
def check_xpath_optional(xpaths, xmpp, after, stanza):
try:
check_xpath(xpaths, xmpp, after, stanza)
except StanzaError:
raise SkipStepError()
class Scenario:
"""Defines a list of actions that are executed in sequence, until one of
them throws an exception, or until the end. An action can be something
like “send a stanza”, “receive the next stanza and check that it matches
the given XPath”, “send a signal”, “wait for the end of the process”,
etc
"""
def __init__(self, name, steps, conf="basic"):
"""
Steps is a list of 2-tuple:
[(action, answer), (action, answer)]
"""
self.name = name
self.steps = []
self.conf = conf
for elem in steps:
            if isinstance(elem, collections.abc.Iterable):
for step in elem:
self.steps.append(step)
else:
self.steps.append(elem)
class ProcessRunner:
def __init__(self):
self.process = None
self.signal_sent = False
self.create = None
@asyncio.coroutine
def start(self):
self.process = yield from self.create
@asyncio.coroutine
def wait(self):
code = yield from self.process.wait()
return code
def stop(self):
if not self.signal_sent:
self.signal_sent = True
if self.process:
self.process.send_signal(signal.SIGINT)
def __del__(self):
self.stop()
class BiboumiRunner(ProcessRunner):
def __init__(self, name):
super().__init__()
self.name = name
self.fd = open("biboumi_%s_output.txt" % (name,), "w")
with_valgrind = os.environ.get("E2E_WITH_VALGRIND") is not None
if with_valgrind:
self.create = asyncio.create_subprocess_exec("valgrind", "--suppressions=" + (os.environ.get("E2E_BIBOUMI_SUPP_DIR") or "") + "biboumi.supp", "--leak-check=full", "--show-leak-kinds=all",
"--errors-for-leak-kinds=all", "--error-exitcode=16",
"./biboumi", "test.conf", stdin=None, stdout=self.fd,
stderr=self.fd, loop=None, limit=None)
else:
self.create = asyncio.create_subprocess_exec("./biboumi", "test.conf", stdin=None, stdout=self.fd,
stderr=self.fd, loop=None, limit=None)
class IrcServerRunner(ProcessRunner):
def __init__(self):
super().__init__()
self.create = asyncio.create_subprocess_exec("charybdis", "-foreground", "-configfile", os.getcwd() + "/../tests/end_to_end/ircd.conf",
stderr=asyncio.subprocess.PIPE)
def send_stanza(stanza, xmpp, biboumi):
    replacements = dict(common_replacements)  # copy so saved values do not leak into the shared dict
    replacements.update(xmpp.saved_values)
xmpp.send_raw(stanza.format_map(replacements))
asyncio.get_event_loop().call_soon(xmpp.run_scenario)
def expect_stanza(xpaths, xmpp, biboumi, optional=False, after=None):
    replacements = dict(common_replacements)  # copy so saved values do not leak into the shared dict
    replacements.update(xmpp.saved_values)
check_func = check_xpath if not optional else check_xpath_optional
if isinstance(xpaths, str):
xmpp.stanza_checker = partial(check_func, [xpaths.format_map(replacements)], xmpp, after)
elif isinstance(xpaths, tuple):
xmpp.stanza_checker = partial(check_func, [xpath.format_map(replacements) for xpath in xpaths], xmpp, after)
else:
print("Warning, from argument type passed to expect_stanza: %s" % (type(xpaths)))
def save_current_timestamp_plus_delta(key, delta, message, xmpp):
now_plus_delta = datetime.datetime.utcnow() + delta
xmpp.saved_values[key] = now_plus_delta.strftime("%FT%T.967Z")
def sleep_for(duration, xmpp, biboumi):
time.sleep(duration)
asyncio.get_event_loop().call_soon(xmpp.run_scenario)
# list_of_xpaths: [(xpath, xpath), (xpath, xpath), (xpath)]
def expect_unordered(list_of_xpaths, xmpp, biboumi):
formatted_list_of_xpaths = []
for xpaths in list_of_xpaths:
formatted_xpaths = []
for xpath in xpaths:
formatted_xpath = xpath.format_map(common_replacements)
formatted_xpaths.append(formatted_xpath)
formatted_list_of_xpaths.append(tuple(formatted_xpaths))
expect_unordered_already_formatted(formatted_list_of_xpaths, xmpp, biboumi)
def expect_unordered_already_formatted(formatted_list_of_xpaths, xmpp, biboumi):
xmpp.stanza_checker = partial(check_list_of_xpath, formatted_list_of_xpaths, xmpp)
class BiboumiTest:
"""
Spawns a biboumi process and a fake XMPP Component that will run a
Scenario. It redirects the outputs of the subprocess into separated
files, and detects any failure in the running of the scenario.
"""
def __init__(self, scenario, expected_code=0):
self.scenario = scenario
self.expected_code = expected_code
def run(self):
with_valgrind = os.environ.get("E2E_WITH_VALGRIND") is not None
print("Running scenario: [33;1m%s[0m%s" % (self.scenario.name, " (with valgrind)" if with_valgrind else ''))
# Redirect the slixmpp logging into a specific file
output_filename = "slixmpp_%s_output.txt" % (self.scenario.name,)
with open(output_filename, "w"):
pass
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)-8s %(message)s',
filename=output_filename)
with open("test.conf", "w") as fd:
fd.write(confs[self.scenario.conf])
try:
os.remove("e2e_test.sqlite")
except FileNotFoundError:
pass
# Start the XMPP component and biboumi
biboumi = BiboumiRunner(self.scenario.name)
xmpp = XMPPComponent(self.scenario, biboumi)
asyncio.get_event_loop().run_until_complete(biboumi.start())
asyncio.get_event_loop().call_soon(xmpp.run_scenario)
xmpp.process()
code = asyncio.get_event_loop().run_until_complete(biboumi.wait())
xmpp.biboumi = None
self.scenario.steps.clear()
failed = False
if not xmpp.failed:
if code != self.expected_code:
xmpp.error("Wrong return code from biboumi's process: %d" % (code,))
failed = True
else:
print("[32;1mSuccess![0m")
else:
failed = True
xmpp.saved_values.clear()
        if xmpp.accepting_server:
xmpp.accepting_server.close()
return not failed
confs = {
'basic':
"""hostname=biboumi.localhost
password=coucou
db_name=e2e_test.sqlite
port=8811
admin=admin@example.com
identd_port=1113
outgoing_bind=127.0.0.1""",
'fixed_server':
"""hostname=biboumi.localhost
password=coucou
db_name=e2e_test.sqlite
port=8811
fixed_irc_server=irc.localhost
admin=admin@example.com
identd_port=1113
""",
'persistent_by_default':
"""hostname=biboumi.localhost
password=coucou
db_name=e2e_test.sqlite
port=8811
persistent_by_default=true
""",}
common_replacements = {
'irc_server_one': 'irc.localhost@biboumi.localhost',
'irc_server_two': 'localhost@biboumi.localhost',
'irc_host_one': 'irc.localhost',
'irc_host_two': 'localhost',
'biboumi_host': 'biboumi.localhost',
'resource_one': 'resource1',
'resource_two': 'resource2',
'nick_one': 'Nick',
'jid_one': 'first@example.com',
'jid_two': 'second@example.com',
'jid_admin': 'admin@example.com',
'nick_two': 'Bobby',
'nick_three': 'Bernard',
'lower_nick_one': 'nick',
'lower_nick_two': 'bobby',
}
def handshake_sequence():
return (partial(expect_stanza, "//handshake"),
partial(send_stanza, "<handshake xmlns='jabber:component:accept'/>"))
def connection_begin_sequence(irc_host, jid, expected_irc_presence=False, fixed_irc_server=False):
jid = jid.format_map(common_replacements)
if fixed_irc_server:
xpath = "/message[@to='" + jid + "'][@from='biboumi.localhost']/body[text()='%s']"
xpath_re = "/message[@to='" + jid + "'][@from='biboumi.localhost']/body[re:test(text(), '%s')]"
else:
xpath = "/message[@to='" + jid + "'][@from='" + irc_host + "@biboumi.localhost']/body[text()='%s']"
xpath_re = "/message[@to='" + jid + "'][@from='" + irc_host + "@biboumi.localhost']/body[re:test(text(), '%s')]"
result = (
partial(expect_stanza,
(xpath % ('Connecting to %s:6697 (encrypted)' % irc_host),
"/message/hints:no-copy",
"/message/carbon:private"
)
),
partial(expect_stanza,
xpath % 'Connection failed: Connection refused'),
partial(expect_stanza,
xpath % ('Connecting to %s:6670 (encrypted)' % irc_host)),
partial(expect_stanza,
xpath % 'Connection failed: Connection refused'),
partial(expect_stanza,
xpath % ('Connecting to %s:6667 (not encrypted)' % irc_host)),
partial(expect_stanza,
xpath % 'Connected to IRC server.'))
if expected_irc_presence:
result += (partial(expect_stanza, "/presence[@from='" + irc_host + "@biboumi.localhost']"),)
    # These five messages can be received in any order
result += (
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % 'irc.localhost')),
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % 'irc.localhost')),
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % 'irc.localhost')),
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % 'irc.localhost')),
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % 'irc.localhost')),
)
return result
def connection_tls_begin_sequence(irc_host, jid, fixed_irc_server):
jid = jid.format_map(common_replacements)
if fixed_irc_server:
xpath = "/message[@to='" + jid + "'][@from='biboumi.localhost']/body[text()='%s']"
xpath_re = "/message[@to='" + jid + "'][@from='biboumi.localhost']/body[re:test(text(), '%s')]"
else:
xpath = "/message[@to='" + jid + "'][@from='" + irc_host + "@biboumi.localhost']/body[text()='%s']"
xpath_re = "/message[@to='" + jid + "'][@from='" + irc_host + "@biboumi.localhost']/body[re:test(text(), '%s')]"
irc_host = 'irc.localhost'
return (
partial(expect_stanza,
(xpath % ('Connecting to %s:7778 (encrypted)' % irc_host),
"/message/hints:no-copy",
"/message/carbon:private",
)
),
partial(expect_stanza,
xpath % 'Connected to IRC server (encrypted).'),
        # These five messages can be received in any order
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: (\*\*\* Checking Ident|\*\*\* Looking up your hostname\.\.\.|\*\*\* Found your hostname: .*|ACK multi-prefix|\*\*\* Got Ident response)$' % irc_host)),
)
def connection_end_sequence(irc_host, jid, fixed_irc_server=False):
jid = jid.format_map(common_replacements)
if fixed_irc_server:
xpath = "/message[@to='" + jid + "'][@from='biboumi.localhost']/body[text()='%s']"
xpath_re = "/message[@to='" + jid + "'][@from='biboumi.localhost']/body[re:test(text(), '%s')]"
else:
xpath = "/message[@to='" + jid + "'][@from='" + irc_host + "@biboumi.localhost']/body[text()='%s']"
xpath_re = "/message[@to='" + jid + "'][@from='" + irc_host + "@biboumi.localhost']/body[re:test(text(), '%s')]"
irc_host = 'irc.localhost'
return (
partial(expect_stanza,
xpath_re % (r'^%s: Your host is .*$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: This server was created .*$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: There are \d+ users and \d+ invisible on \d+ servers$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: \d+ unknown connection\(s\)$' % irc_host), optional=True),
partial(expect_stanza,
xpath_re % (r'^%s: \d+ channels formed$' % irc_host), optional=True),
partial(expect_stanza,
xpath_re % (r'^%s: I have \d+ clients and \d+ servers$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: \d+ \d+ Current local users \d+, max \d+$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: \d+ \d+ Current global users \d+, max \d+$' % irc_host)),
partial(expect_stanza,
xpath_re % (r'^%s: Highest connection count: \d+ \(\d+ clients\) \(\d+ connections received\)$' % irc_host)),
partial(expect_stanza,
xpath % "- This is charybdis MOTD you might replace it, but if not your friends will\n- laugh at you.\n"),
partial(expect_stanza,
xpath_re % r'^User mode for \w+ is \[\+Z?i\]$'),
)
def connection_middle_sequence(irc_host, jid, fixed_irc_server=False):
if fixed_irc_server:
xpath_re = "/message[@to='" + jid + "'][@from='biboumi.localhost']/body[re:test(text(), '%s')]"
else:
xpath_re = "/message[@to='" + jid + "'][@from='" + irc_host + "@biboumi.localhost']/body[re:test(text(), '%s')]"
irc_host = 'irc.localhost'
return (
partial(expect_stanza, xpath_re % (r'^%s: \*\*\* You are exempt from flood limits$' % irc_host)),
)
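
# The full sequence of stanzas expected while biboumi connects to an IRC server.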
def connection_sequence(irc_host, jid, expected_irc_presence=False, fixed_irc_server=False):
return connection_begin_sequence(irc_host, jid, expected_irc_presence, fixed_irc_server=fixed_irc_server) +\
connection_middle_sequence(irc_host, jid, fixed_irc_server=fixed_irc_server) +\
connection_end_sequence(irc_host, jid, fixed_irc_server=fixed_irc_server)
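
# Same thing, for a connection made over TLS (hence the '(encrypted)' bodies).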
def connection_tls_sequence(irc_host, jid, fixed_irc_server=False):
return connection_tls_begin_sequence(irc_host, jid, fixed_irc_server) + \
connection_middle_sequence(irc_host, jid, fixed_irc_server) +\
connection_end_sequence(irc_host, jid, fixed_irc_server)
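
# Return the value of the 'name' attribute of the first node matched by xpath.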
def extract_attribute(xpath, name, stanza):
matched = match(stanza, xpath)
return matched[0].get(name)
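
# Return the channel name (without its leading '#') from a '#chan%server' JID.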
def chan_name_from_jid(jid):
return jid[1:jid.find('%')]
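
# Return the text content of the first node matched by xpath.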
def extract_text(xpath, stanza):
matched = match(stanza, xpath)
return matched[0].text
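
# Save a value extracted from a received stanza into xmpp.saved_values, so that
# later stanzas can reference it as a {name} placeholder. For instance, the
# ad-hoc command scenarios carry the session id from one step to the next with:
#   after=partial(save_value, "sessionid",
#                 partial(extract_attribute, "/iq/commands:command", "sessionid"))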
def save_value(name, func, stanza, xmpp):
xmpp.saved_values[name] = func(stanza)

if __name__ == '__main__':
atexit.register(asyncio.get_event_loop().close)
    # All the scenarios, run one by one by the test component (which is
    # started further below, accepting connections on the configured port).
scenarios = (
Scenario("basic_handshake_success",
[
handshake_sequence()
]),
Scenario("irc_server_connection",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
]),
Scenario("irc_server_connection_failure",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%doesnotexist@{biboumi_host}/{nick_one}' />"),
partial(expect_stanza,
"/message/body[text()='Connecting to doesnotexist:6697 (encrypted)']"),
partial(expect_stanza,
"/message/body[re:test(text(), 'Connection failed: (Domain name not found|Name or service not known)')]"),
partial(expect_stanza,
("/presence[@from='#foo%doesnotexist@{biboumi_host}/{nick_one}']/muc:x",
"/presence/error[@type='cancel']/stanza:item-not-found",
"/presence/error[@type='cancel']/stanza:text[re:test(text(), '(Domain name not found|Name or service not known)')]")),
]),
Scenario("simple_channel_join",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
]),
Scenario("quit",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Send a raw QUIT message
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='{irc_server_one}' type='chat'><body>QUIT bye bye</body></message>"),
                     partial(expect_stanza, "/presence[@from='#foo%{irc_server_one}/{nick_one}'][@type='unavailable']/muc_user:x/muc_user:status[@code='110']"),
]),
Scenario("multiple_channels_join",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#bar%{irc_server_one}/{nick_one}' />"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#baz%{irc_server_one}/{nick_one}'> <x xmlns='http://jabber.org/protocol/muc'><password>SECRET</password></x></presence>"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
partial(expect_stanza,
"/message/body[text()='Mode #bar [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#bar%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#bar%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
partial(expect_stanza,
"/message/body[text()='Mode #baz [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#baz%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#baz%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
]),
Scenario("not_connected_error",
[
handshake_sequence(),
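                     # Send an unavailable presence while we are not even connected to the
                     # IRC server: nothing should happen, and the join below must proceed.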
partial(send_stanza,
"<presence type='unavailable' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
]),
Scenario("channel_join_with_two_users",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@jid='~nick@localhost'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Second user joins
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_unordered, [
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@role='participant'][@jid='~bobby@localhost']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@jid='~bobby@localhost'][@role='participant']",
"/presence/muc_user:x/muc_user:status[@code='110']",),
("/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]",),
]),
]),
Scenario("channel_force_join",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}'><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@jid='~nick@localhost'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Second user joins
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}'><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_unordered, [
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@role='participant'][@jid='~bobby@localhost']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@jid='~bobby@localhost'][@role='participant']",
"/presence/muc_user:x/muc_user:status[@code='110']",),
("/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]",),
]),
# Here we simulate a desynchronization of a client: The client thinks it’s
# disconnected from the room, but biboumi still thinks it’s in the room. The
# client thus sends a join presence, and biboumi should send everything
# (user list, history, etc) in response.
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_three}'><x xmlns='http://jabber.org/protocol/muc'/></presence>"),
partial(expect_unordered, [
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@role='participant'][@jid='~bobby@localhost']",),
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']",),
("/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]",),
]),
                     # And also, that was not the same nickname: everyone is notified of
                     # the nick change
partial(expect_unordered, [
("/presence[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_two}/{resource_one}'][@type='unavailable']/muc_user:x/muc_user:item[@nick='Bernard']",
"/presence/muc_user:x/muc_user:status[@code='303']"),
("/presence[@from='#foo%{irc_server_one}/{nick_three}'][@to='{jid_two}/{resource_one}']",),
("/presence[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='unavailable']/muc_user:x/muc_user:item[@nick='Bernard']",
"/presence/muc_user:x/muc_user:status[@code='303']",
"/presence/muc_user:x/muc_user:status[@code='110']"),
("/presence[@from='#foo%{irc_server_one}/{nick_three}'][@to='{jid_one}/{resource_one}']",
"/presence/muc_user:x/muc_user:status[@code='110']"),
]),
]),
Scenario("channel_join_with_password",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@jid='~nick@localhost'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Set a password in the room, by using /mode +k
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>/mode +k SECRET</body></message>"),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='Mode #foo [+k SECRET] by {nick_one}']"),
# Second user tries to join, without a password
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}'/>"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_stanza, "/message/body[text()='{irc_host_one}: #foo: Cannot join channel (+k) - bad key']"),
partial(expect_stanza,
"/presence[@type='error'][@from='#foo%{irc_server_one}/{nick_two}']/error[@type='auth']/stanza:not-authorized",
),
# Second user joins, with a password
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}'> <x xmlns='http://jabber.org/protocol/muc'><password>SECRET</password></x></presence>"),
# connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_unordered, [
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@role='participant'][@jid='~bobby@localhost']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@jid='~bobby@localhost'][@role='participant']",
"/presence/muc_user:x/muc_user:status[@code='110']",),
("/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]",),
]),
]),
Scenario("channel_custom_topic",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@jid='~nick@localhost'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# First user sets the topic
partial(send_stanza,
"<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><subject>TOPIC TEST</subject></message>"),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat'][@to='{jid_one}/{resource_one}']/subject[text()='TOPIC TEST']"),
# Second user joins
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
# Our presence, sent to the other user
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@jid='~bobby@localhost'][@role='participant']",)),
                     # The other user's presence
partial(expect_stanza,
"/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']"),
# Our own presence
partial(expect_stanza,
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@jid='~bobby@localhost'][@role='participant']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/subject[text()='TOPIC TEST']"),
]),
Scenario("multiline_topic",
[
handshake_sequence(),
# User joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# User tries to set a multiline topic
partial(send_stanza,
"<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><subject>FIRST LINE\nSECOND LINE.</subject></message>"),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat'][@to='{jid_one}/{resource_one}']/subject[text()='FIRST LINE SECOND LINE.']"),
]),
Scenario("channel_basic_join_on_fixed_irc_server",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#zgeg@{biboumi_host}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}', fixed_irc_server=True),
partial(expect_stanza,
"/message/body[text()='Mode #zgeg [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#zgeg@{biboumi_host}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#zgeg@{biboumi_host}'][@type='groupchat']/subject[not(text())]"),
], conf='fixed_server'
),
Scenario("list_adhoc",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='idwhatever' from='{jid_one}/{resource_one}' to='{biboumi_host}'><query xmlns='http://jabber.org/protocol/disco#items' node='http://jabber.org/protocol/commands' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/disco_items:query[@node='http://jabber.org/protocol/commands']",
"/iq/disco_items:query/disco_items:item[3]")),
]),
Scenario("list_admin_adhoc",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='idwhatever' from='{jid_admin}/{resource_one}' to='{biboumi_host}'><query xmlns='http://jabber.org/protocol/disco#items' node='http://jabber.org/protocol/commands' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/disco_items:query[@node='http://jabber.org/protocol/commands']",
"/iq/disco_items:query/disco_items:item[5]")),
]),
Scenario("list_adhoc_fixed_server",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='idwhatever' from='{jid_one}/{resource_one}' to='{biboumi_host}'><query xmlns='http://jabber.org/protocol/disco#items' node='http://jabber.org/protocol/commands' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/disco_items:query[@node='http://jabber.org/protocol/commands']",
"/iq/disco_items:query/disco_items:item[5]")),
], conf='fixed_server'),
Scenario("list_admin_adhoc_fixed_server",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='idwhatever' from='{jid_admin}/{resource_one}' to='{biboumi_host}'><query xmlns='http://jabber.org/protocol/disco#items' node='http://jabber.org/protocol/commands' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/disco_items:query[@node='http://jabber.org/protocol/commands']",
"/iq/disco_items:query/disco_items:item[5]")),
], conf='fixed_server'),
Scenario("list_adhoc_irc",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='idwhatever' from='{jid_one}/{resource_one}' to='{irc_host_one}@{biboumi_host}'><query xmlns='http://jabber.org/protocol/disco#items' node='http://jabber.org/protocol/commands' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/disco_items:query[@node='http://jabber.org/protocol/commands']",
"/iq/disco_items:query/disco_items:item[2]")),
]),
Scenario("list_adhoc_irc_fixed_server",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='idwhatever' from='{jid_one}/{resource_one}' to='{biboumi_host}'><query xmlns='http://jabber.org/protocol/disco#items' node='http://jabber.org/protocol/commands' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/disco_items:query[@node='http://jabber.org/protocol/commands']",
"/iq/disco_items:query/disco_items:item[4]")),
], conf='fixed_server'),
Scenario("list_admin_adhoc_irc_fixed_server",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='idwhatever' from='{jid_admin}/{resource_one}' to='{biboumi_host}'><query xmlns='http://jabber.org/protocol/disco#items' node='http://jabber.org/protocol/commands' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/disco_items:query[@node='http://jabber.org/protocol/commands']",
"/iq/disco_items:query/disco_items:item[6]")),
], conf='fixed_server'),
Scenario("list_muc_user_adhoc",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='idwhatever' from='{jid_admin}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}'><query xmlns='http://jabber.org/protocol/disco#items' node='http://jabber.org/protocol/commands' /></iq>"),
partial(expect_stanza, "/iq[@type='error']/error[@type='cancel']/stanza:feature-not-implemented"),
]
),
Scenario("execute_hello_adhoc_command",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='hello-command1' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='hello' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='hello'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:title[text()='Configure your name.']",
"/iq/commands:command/dataform:x[@type='form']/dataform:instructions[text()='Please provide your name.']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single']/dataform:required",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='hello']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='hello-command2' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='hello' sessionid='{sessionid}' action='next'><x xmlns='jabber:x:data' type='submit'><field var='name'><value>COUCOU</value></field></x></command></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='hello'][@status='completed']/commands:note[@type='info'][text()='Hello COUCOU!']")
]),
Scenario("execute_incomplete_hello_adhoc_command",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='hello-command1' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='hello' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='hello'][@sessionid][@status='executing']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='hello']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='hello-command2' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='hello' sessionid='{sessionid}' action='next'><x xmlns='jabber:x:data' type='submit'></x></command></iq>"),
partial(expect_stanza, "/iq[@type='error']")
]),
Scenario("execute_ping_adhoc_command",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='ping-command1' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='ping' action='execute' /></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='ping'][@status='completed']/commands:note[@type='info'][text()='Pong']")
]),
Scenario("execute_reload_adhoc_command",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='ping-command1' from='{jid_admin}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='reload' action='execute' /></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='reload'][@status='completed']/commands:note[@type='info'][text()='Configuration reloaded.']")
]),
Scenario("execute_forbidden_adhoc_command",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='command1' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='disconnect-user' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='error'][@id='command1']/commands:command[@node='disconnect-user']",
"/iq/commands:command/commands:error[@type='cancel']/stanza:forbidden")),
]),
Scenario("execute_disconnect_user_adhoc_command",
[
handshake_sequence(),
partial(send_stanza, "<presence from='{jid_admin}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_admin}/{resource_one}'),
partial(expect_stanza, "/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<iq type='set' id='command1' from='{jid_admin}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='disconnect-user' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='disconnect-user'][@sessionid][@status='executing']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq/commands:command[@node='disconnect-user']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='command2' from='{jid_admin}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='disconnect-user' sessionid='{sessionid}' action='next'><x xmlns='jabber:x:data' type='submit'><field var='jids'><value>{jid_admin}</value></field><field var='quit-message'><value>Disconnected by e2e</value></field></x></command></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='disconnect-user'][@status='completed']/commands:note[@type='info'][text()='1 user has been disconnected.']"),
# Note, charybdis ignores our QUIT message, so we can't test it
partial(expect_stanza, "/presence[@type='unavailable'][@to='{jid_admin}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']"),
]),
Scenario("execute_admin_disconnect_from_server_adhoc_command",
[
handshake_sequence(),
# Admin connects to first server
partial(send_stanza, "<presence from='{jid_admin}/{resource_one}' to='#bar%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_admin}/{resource_one}'),
partial(expect_stanza, "/message/body[text()='Mode #bar [+nt] by {irc_host_one}']"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
# Non-Admin connects to first server
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
# Non-admin connects to second server
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#bon%{irc_server_two}/{nick_three}' />"),
connection_sequence("localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message/body[text()='Mode #bon [+nt] by {irc_host_one}']"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
# Execute as admin
partial(send_stanza, "<iq type='set' id='command1' from='{jid_admin}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='disconnect-from-irc-server' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='disconnect-from-irc-server'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='jid'][@type='list-single']/dataform:option[@label='{jid_one}']/dataform:value[text()='{jid_one}']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='jid'][@type='list-single']/dataform:option[@label='{jid_admin}']/dataform:value[text()='{jid_admin}']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq/commands:command[@node='disconnect-from-irc-server']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='command2' from='{jid_admin}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='disconnect-from-irc-server' sessionid='{sessionid}' action='next'><x xmlns='jabber:x:data' type='submit'><field var='jid'><value>{jid_one}</value></field><field var='quit-message'><value>e2e test one</value></field></x></command></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='disconnect-from-irc-server'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='quit-message'][@type='text-single']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='irc-servers'][@type='list-multi']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='irc-servers']/dataform:option[@label='localhost']/dataform:value[text()='localhost']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='irc-servers']/dataform:option[@label='irc.localhost']/dataform:value[text()='irc.localhost']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq/commands:command[@node='disconnect-from-irc-server']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='command2' from='{jid_admin}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='disconnect-from-irc-server' sessionid='{sessionid}' action='next'><x xmlns='jabber:x:data' type='submit'><field var='irc-servers'><value>localhost</value></field><field var='quit-message'><value>Disconnected by e2e</value></field></x></command></iq>"),
partial(expect_unordered, [("/presence[@type='unavailable'][@to='{jid_one}/{resource_one}'][@from='#bon%{irc_server_two}/{nick_three}']",),
("/iq[@type='result']/commands:command[@node='disconnect-from-irc-server'][@status='completed']/commands:note[@type='info'][text()='{jid_one} was disconnected from 1 IRC server.']",),
]),
# Execute as non-admin (this skips the first step)
partial(send_stanza, "<iq type='set' id='command1' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='disconnect-from-irc-server' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='disconnect-from-irc-server'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='quit-message'][@type='text-single']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='irc-servers'][@type='list-multi']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='irc-servers']/dataform:option[@label='irc.localhost']/dataform:value[text()='irc.localhost']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq/commands:command[@node='disconnect-from-irc-server']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='command2' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='disconnect-from-irc-server' sessionid='{sessionid}' action='next'><x xmlns='jabber:x:data' type='submit'><field var='irc-servers'><value>irc.localhost</value></field><field var='quit-message'><value>Disconnected by e2e</value></field></x></command></iq>"),
partial(expect_unordered, [("/presence[@type='unavailable'][@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']",),
("/iq[@type='result']/commands:command[@node='disconnect-from-irc-server'][@status='completed']/commands:note[@type='info'][text()='{jid_one}/{resource_one} was disconnected from 1 IRC server.']",),
]),
]),
Scenario("multisessionnick",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_one}']/subject[not(text())]"),
# The other resources joins the same room, with the same nick
partial(send_stanza,
"<presence from='{jid_one}/{resource_two}' to='#foo%{irc_server_one}/{nick_one}' />"),
# We receive our own join
partial(expect_unordered,
[("/presence[@to='{jid_one}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']"),
("/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_two}']/subject[not(text())]",)]
),
# A different user joins the same room
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_unordered, [
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']",),
("/presence[@to='{jid_one}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_two}']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']",
"/presence/muc_user:x/muc_user:status[@code='110']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']",),
]
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# That second user sends a private message to the first one
partial(send_stanza, "<message from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' type='chat'><body>RELLO</body></message>"),
# Message is received with a server-wide JID, by the two resources behind nick_one
partial(expect_stanza, ("/message[@from='{lower_nick_two}%{irc_server_one}'][@to='{jid_one}/{resource_one}'][@type='chat']/body[text()='RELLO']",
"/message/hints:no-copy",
"/message/carbon:private",
"!/message/muc_user:x")),
partial(expect_stanza, "/message[@from='{lower_nick_two}%{irc_server_one}'][@to='{jid_one}/{resource_two}'][@type='chat']/body[text()='RELLO']"),
                     # First occupant (with the two resources) tries to take a nick that
                     # is already in use
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
partial(expect_unordered, [
("/message[@to='{jid_one}/{resource_one}'][@type='chat']/body[text()='irc.localhost: Bobby: Nickname is already in use.']",),
("/message[@to='{jid_one}/{resource_two}'][@type='chat']/body[text()='irc.localhost: Bobby: Nickname is already in use.']",),
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}'][@type='error']",),
("/presence[@to='{jid_one}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_two}'][@type='error']",),
]),
                     # First occupant (with the two resources) changes to a free nick;
                     # this time it succeeds
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_three}' />"),
partial(expect_unordered, [
("/presence[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_two}/{resource_one}'][@type='unavailable']/muc_user:x/muc_user:item[@nick='Bernard']",
"/presence/muc_user:x/muc_user:status[@code='303']"),
("/presence[@from='#foo%{irc_server_one}/{nick_three}'][@to='{jid_two}/{resource_one}']",),
("/presence[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='unavailable']/muc_user:x/muc_user:item[@nick='Bernard']",
"/presence/muc_user:x/muc_user:status[@code='303']",
"/presence/muc_user:x/muc_user:status[@code='110']"),
("/presence[@from='#foo%{irc_server_one}/{nick_three}'][@to='{jid_one}/{resource_one}']",
"/presence/muc_user:x/muc_user:status[@code='110']"),
("/presence[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_two}'][@type='unavailable']/muc_user:x/muc_user:item[@nick='Bernard']",
"/presence/muc_user:x/muc_user:status[@code='303']",
"/presence/muc_user:x/muc_user:status[@code='110']"),
("/presence[@from='#foo%{irc_server_one}/{nick_three}'][@to='{jid_one}/{resource_two}']",
"/presence/muc_user:x/muc_user:status[@code='110']"),
]),
                     # One of the two resources leaves the room.
partial(send_stanza, "<presence type='unavailable' from='{jid_one}/{resource_two}' to='#foo%{irc_server_one}/{nick_one}' />"),
# The leave is forwarded only to us
partial(expect_stanza,
("/presence[@type='unavailable']/muc_user:x/muc_user:status[@code='110']",
"/presence/status[text()='Biboumi note: 1 resources are still in this channel.']",
)
),
# The second user sends two new private messages to the first user
partial(send_stanza, "<message from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_three}' type='chat'><body>first</body></message>"),
partial(send_stanza, "<message from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_three}' type='chat'><body>second</body></message>"),
# The first user receives the two messages, on the connected resource, once each
partial(expect_unordered, [
("/message[@from='{lower_nick_two}%{irc_server_one}'][@to='{jid_one}/{resource_one}'][@type='chat']/body[text()='first']",),
("/message[@from='{lower_nick_two}%{irc_server_one}'][@to='{jid_one}/{resource_one}'][@type='chat']/body[text()='second']",),
]),
]),
Scenario("persistent_channel",
[
# Join the channel with user 1
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_one}']/subject[not(text())]"),
# Make it persistent for user 1
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='conf1' to='#foo%{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/muc#owner'/></iq>"),
partial(expect_stanza, "/iq[@type='result']/muc_owner:query/dataform:x/dataform:field[@var='persistent'][@type='boolean']/dataform:value[text()='false']"),
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='conf2' to='#foo%{irc_server_one}' type='set'><query xmlns='http://jabber.org/protocol/muc#owner'><x type='submit' xmlns='jabber:x:data'><field var='persistent' xmlns='jabber:x:data'><value>true</value></field></x></query></iq>"),
partial(expect_stanza, "/iq[@type='result']"),
# Check that the value is now effectively true
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='conf1' to='#foo%{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/muc#owner'/></iq>"),
partial(expect_stanza, "/iq[@type='result']/muc_owner:query/dataform:x/dataform:field[@var='persistent'][@type='boolean']/dataform:value[text()='true']"),
# A second user joins the same channel
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_unordered, [
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']",
"/presence/muc_user:x/muc_user:status[@code='110']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']",),
]
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# First user leaves the room (but biboumi will stay in the channel)
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' type='unavailable' />"),
# Only user 1 receives the unavailable presence
partial(expect_stanza,
("/presence[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='unavailable']/muc_user:x/muc_user:status[@code='110']",
"/presence/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']")),
# Second user sends a channel message
partial(send_stanza, "<message type='groupchat' from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}'><body>coucou</body></message>"),
# Message should only be received by user 2, since user 1 has no resource in the room
partial(expect_stanza, "/message[@type='groupchat'][@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']"),
# Second user leaves the channel
partial(send_stanza, "<presence type='unavailable' from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
partial(expect_stanza, "/presence[@type='unavailable'][@from='#foo%{irc_server_one}/{nick_two}']"),
]),
Scenario("channel_join_with_different_nick",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_one}']/subject[not(text())]"),
# The same resource joins a different channel with a different nick
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#bar%{irc_server_one}/{nick_two}' />"),
                     # We must receive a join presence in response, without any nick change (nick_two must be ignored)
partial(expect_stanza,
"/message/body[text()='Mode #bar [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#bar%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#bar%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_one}']/subject[not(text())]"),
]),
Scenario("notices",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='{irc_server_one}' type='chat'><body>NOTICE {nick_one} :[#foo] Hello in a notice.</body></message>"),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='[notice] [#foo] Hello in a notice.']"),
]),
Scenario("multiline_message",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Send a multi-line channel message
partial(send_stanza, "<message id='the-message-id' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>un\ndeux\ntrois</body></message>"),
# Receive multiple messages, in order
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@id='the-message-id'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='un']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@id][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='deux']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@id][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='trois']"),
# Send a simple message, with no id
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>hello</body></message>"),
# Expect a non-empty id as a result (should be a uuid)
partial(expect_stanza,
"!/message[@id='']/body[text()='hello']"),
# Second user joins
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
# Our presence, sent to the other user
partial(expect_unordered, [
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@jid='~bobby@localhost'][@role='participant']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@jid='~bobby@localhost'][@role='participant']",
"/presence/muc_user:x/muc_user:status[@code='110']"),
("/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]",)
]),
# Send a multi-line channel message
partial(send_stanza, "<message id='the-message-id' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>un\ndeux\ntrois</body></message>"),
# Receive multiple messages, for each user
partial(expect_unordered, [
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@id='the-message-id'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='un']",),
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@id][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='deux']",),
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@id][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='trois']",),
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@id][@to='{jid_two}/{resource_one}'][@type='groupchat']/body[text()='un']",),
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@id][@to='{jid_two}/{resource_one}'][@type='groupchat']/body[text()='deux']",),
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@id][@to='{jid_two}/{resource_one}'][@type='groupchat']/body[text()='trois']",),
])
]),
Scenario("channel_messages",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Second user joins
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
# Our presence, sent to the other user
partial(expect_unordered, [
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@jid='~bobby@localhost'][@role='participant']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='none'][@jid='~bobby@localhost'][@role='participant']",
"/presence/muc_user:x/muc_user:status[@code='110']"),
("/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]",)
]),
# Send a channel message
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou</body></message>"),
# Receive the message, forwarded to the two users
partial(expect_unordered, [
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou']",
"/message/stable_id:stanza-id[@by='#foo%{irc_server_one}'][@id]"),
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_two}/{resource_one}'][@type='groupchat']/body[text()='coucou']",
"/message/stable_id:stanza-id[@by='#foo%{irc_server_one}'][@id]")
]),
                     # Send a private message, to an in-room JID
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' type='chat'><body>coucou in private</body></message>"),
# Message is received with a server-wide JID
partial(expect_stanza, "/message[@from='{lower_nick_one}%{irc_server_one}'][@to='{jid_two}/{resource_one}'][@type='chat']/body[text()='coucou in private']"),
# Respond to the message, to the server-wide JID
partial(send_stanza, "<message from='{jid_two}/{resource_one}' to='{lower_nick_one}%{irc_server_one}' type='chat'><body>yes</body></message>"),
# The response is received from the in-room JID
partial(expect_stanza, ("/message[@from='#foo%{irc_server_one}/{nick_two}'][@to='{jid_one}/{resource_one}'][@type='chat']/body[text()='yes']",
"/message/muc_user:x")),
## Do the exact same thing, from a different chan,
# to check if the response comes from the right JID
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#dummy%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza,
"/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza, "/message[@from='#dummy%{irc_server_one}'][@type='groupchat']/subject"),
                     # Send a private message, to an in-room JID
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#dummy%{irc_server_one}/{nick_two}' type='chat'><body>re in private</body></message>"),
# Message is received with a server-wide JID
partial(expect_stanza, "/message[@from='{lower_nick_one}%{irc_server_one}'][@to='{jid_two}/{resource_one}'][@type='chat']/body[text()='re in private']"),
# Respond to the message, to the server-wide JID
partial(send_stanza, "<message from='{jid_two}/{resource_one}' to='{lower_nick_one}%{irc_server_one}' type='chat'><body>re</body></message>"),
# The response is received from the in-room JID
partial(expect_stanza, "/message[@from='#dummy%{irc_server_one}/{nick_two}'][@to='{jid_one}/{resource_one}'][@type='chat']/body[text()='re']"),
# Now we leave the room, to check if the subsequent private messages are still received properly
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#dummy%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(expect_stanza,
"/presence[@type='unavailable']/muc_user:x/muc_user:status[@code='110']"),
# The private messages from this nick should now come (again) from the server-wide JID
partial(send_stanza, "<message from='{jid_two}/{resource_one}' to='{lower_nick_one}%{irc_server_one}' type='chat'><body>hihihoho</body></message>"),
partial(expect_stanza,
"/message[@from='{lower_nick_two}%{irc_server_one}'][@to='{jid_one}/{resource_one}']"),
]
),
Scenario("encoded_channel_join",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#biboumi\\40louiz.org\\3a80%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #biboumi@louiz.org:80 [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#biboumi\\40louiz.org\\3a80%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#biboumi\\40louiz.org\\3a80%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
]),
Scenario("self_ping_with_error",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Send a ping to ourself
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_one}' id='first_ping' to='#foo%{irc_server_one}/{nick_one}'><ping xmlns='urn:xmpp:ping' /></iq>"),
                     # We receive our own ping request back; note the id is the original
                     # one, reversed ('first_ping' -> 'gnip_tsrif')
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='get'][@to='{jid_one}/{resource_one}'][@id='gnip_tsrif']"),
# Respond to the request with an error
partial(send_stanza,
"<iq from='{jid_one}/{resource_one}' id='gnip_tsrif' to='{lower_nick_one}%{irc_server_one}' type='error'><error type='cancel'><feature-not-implemented xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/></error></iq>"),
partial(expect_stanza,
"/iq[@from='#foo%{irc_server_one}/{nick_one}'][@type='result'][@to='{jid_one}/{resource_one}'][@id='first_ping']"),
# Send a ping to ourself
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_one}' id='first_ping' to='#foo%{irc_server_one}/{nick_one}'><ping xmlns='urn:xmpp:ping' /></iq>"),
                     # We receive our own ping request back again
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='get'][@to='{jid_one}/{resource_one}'][@id='gnip_tsrif']"),
# Respond to the request with an error
partial(send_stanza,
"<iq from='{jid_one}/{resource_one}' id='gnip_tsrif' to='{lower_nick_one}%{irc_server_one}' type='error'><error type='cancel'><service-unavailable xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/></error></iq>"),
partial(expect_stanza,
"/iq[@from='#foo%{irc_server_one}/{nick_one}'][@type='result'][@to='{jid_one}/{resource_one}'][@id='first_ping']"),
]),
Scenario("self_ping_not_in_muc",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
                     # Send a ping to ourself, in a MUC we are not in
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_one}' id='first_ping' to='#nil%{irc_server_one}/{nick_one}'><ping xmlns='urn:xmpp:ping' /></iq>"),
# Immediately receive an error
partial(expect_stanza,
"/iq[@from='#nil%{irc_server_one}/{nick_one}'][@type='error'][@to='{jid_one}/{resource_one}'][@id='first_ping']/error/stanza:not-allowed"),
                     # Send a ping to ourself, in a MUC we are in, but from a resource that is not
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_two}' id='first_ping' to='#foo%{irc_server_one}/{nick_one}'><ping xmlns='urn:xmpp:ping' /></iq>"),
# Immediately receive an error
partial(expect_stanza,
"/iq[@from='#foo%{irc_server_one}/{nick_one}'][@type='error'][@to='{jid_one}/{resource_two}'][@id='first_ping']/error/stanza:not-allowed"),
]),
Scenario("self_ping_on_real_channel",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Send a ping to ourselves
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_one}' id='first_ping' to='#foo%{irc_server_one}/{nick_one}'><ping xmlns='urn:xmpp:ping' /></iq>"),
# We receive our own ping request,
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='get'][@to='{jid_one}/{resource_one}'][@id='gnip_tsrif']"),
# Respond to the request
partial(send_stanza,
"<iq type='result' to='{lower_nick_one}%{irc_server_one}' id='gnip_tsrif' from='{jid_one}/{resource_one}'/>"),
partial(expect_stanza,
"/iq[@from='#foo%{irc_server_one}/{nick_one}'][@type='result'][@to='{jid_one}/{resource_one}'][@id='first_ping']"),
# Now join the same room, from the same bare JID, behind the same nick
partial(send_stanza,
"<presence from='{jid_one}/{resource_two}' to='#foo%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_two}']/subject[not(text())]"),
# And re-send a self ping
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_one}' id='second_ping' to='#foo%{irc_server_one}/{nick_one}'><ping xmlns='urn:xmpp:ping' /></iq>"),
# We receive our own ping request. Note that we don't know the to value, it could be one of our two resources.
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='get'][@to][@id='gnip_dnoces']",
after = partial(save_value, "to", partial(extract_attribute, "/iq", "to"))),
# Respond to the request, using the extracted 'to' value as our 'from'
partial(send_stanza,
"<iq type='result' to='{lower_nick_one}%{irc_server_one}' id='gnip_dnoces' from='{to}'/>"),
partial(expect_stanza,
"/iq[@from='#foo%{irc_server_one}/{nick_one}'][@type='result'][@to='{jid_one}/{resource_one}'][@id='second_ping']"),
# And re-do exactly the same thing, just change the resource initiating the self ping
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_two}' id='third_ping' to='#foo%{irc_server_one}/{nick_one}'><ping xmlns='urn:xmpp:ping' /></iq>"),
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='get'][@to][@id='gnip_driht']",
after = partial(save_value, "to", partial(extract_attribute, "/iq", "to"))),
partial(send_stanza,
"<iq type='result' to='{lower_nick_one}%{irc_server_one}' id='gnip_driht' from='{to}'/>"),
partial(expect_stanza,
"/iq[@from='#foo%{irc_server_one}/{nick_one}'][@type='result'][@to='{jid_one}/{resource_two}'][@id='third_ping']"),
]),
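# Same self-ping round-trip, with the fixed_server configuration, where
# rooms are addressed as #chan@{biboumi_host}.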
Scenario("self_ping_fixed_server", [
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}', fixed_irc_server=True),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo@{biboumi_host}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo@{biboumi_host}'][@type='groupchat']/subject[not(text())]"),
# Send a ping to ourselves
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_one}' id='first_ping' to='#foo@{biboumi_host}/{nick_one}'><ping xmlns='urn:xmpp:ping' /></iq>"),
# We receive our own ping request,
partial(expect_stanza,
"/iq[@from='{lower_nick_one}@{biboumi_host}'][@type='get'][@to='{jid_one}/{resource_one}'][@id='gnip_tsrif']"),
# Respond to the request
partial(send_stanza,
"<iq type='result' to='{lower_nick_one}@{biboumi_host}' id='gnip_tsrif' from='{jid_one}/{resource_one}'/>"),
partial(expect_stanza,
"/iq[@from='#foo@{biboumi_host}/{nick_one}'][@type='result'][@to='{jid_one}/{resource_one}'][@id='first_ping']"),
], conf="fixed_server"),
Scenario("simple_kick",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza, "/message[@type='groupchat']/subject"),
# Second user joins
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_unordered, [
("/presence/muc_user:x/muc_user:item[@affiliation='none'][@role='participant']",),
("/presence/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence/muc_user:x/muc_user:status[@code='110']",),
("/message/subject",),
]),
# demonstrate bug https://lab.louiz.org/louiz/biboumi/issues/3291
# First user joins another channel
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#bar%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza, "/message[@type='groupchat']/subject"),
# Second user joins
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#bar%{irc_server_one}/{nick_two}' />"),
partial(expect_unordered, [
("/presence/muc_user:x/muc_user:item[@affiliation='none'][@role='participant']",),
("/presence/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence/muc_user:x/muc_user:status[@code='110']",),
("/message/subject",),
]),
# Moderator kicks participant
partial(send_stanza,
"<iq id='kick1' to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set'><query xmlns='http://jabber.org/protocol/muc#admin'><item nick='{nick_two}' role='none'><reason>reported</reason></item></query></iq>"),
partial(expect_unordered, [
("/presence[@type='unavailable'][@to='{jid_two}/{resource_one}']/muc_user:x/muc_user:item[@role='none']/muc_user:actor[@nick='{nick_one}']",
"/presence/muc_user:x/muc_user:item/muc_user:reason[text()='reported']",
"/presence/muc_user:x/muc_user:status[@code='307']",
"/presence/muc_user:x/muc_user:status[@code='110']"
),
("/presence[@type='unavailable'][@to='{jid_one}/{resource_one}']/muc_user:x/muc_user:item[@role='none']/muc_user:actor[@nick='{nick_one}']",
"/presence/muc_user:x/muc_user:item/muc_user:reason[text()='reported']",
"/presence/muc_user:x/muc_user:status[@code='307']",
),
("/iq[@id='kick1'][@type='result']",),
]),
# Bug 3291, continued. We must not receive any presence from #foo here
partial(send_stanza, "<message from='{jid_two}/{resource_one}' to='{irc_server_one}' type='chat'><body>QUIT bye bye</body></message>"),
partial(expect_unordered,
[("/presence[@from='#bar%{irc_server_one}/{nick_two}'][@to='{jid_one}/{resource_one}']",),
("/presence[@from='#bar%{irc_server_one}/{nick_two}'][@to='{jid_two}/{resource_one}']",),
("/message",),
("/message",)])
]),
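# Channel modes can be changed either with a '/mode' groupchat message or
# with a muc#admin iq; the affiliation/role pairs below map to the IRC
# modes +v (member/participant) and +o (admin/moderator).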
Scenario("mode_change",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza, "/message[@type='groupchat']/subject"),
# Second user joins
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_unordered, [
("/presence/muc_user:x/muc_user:item[@affiliation='none'][@role='participant']",),
("/presence/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence/muc_user:x/muc_user:status[@code='110']",),
("/message/subject",),
]),
# Change a user mode with a message starting with /mode
partial(send_stanza,
"<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>/mode +v {nick_two}</body></message>"),
partial(expect_unordered, [
("/message[@to='{jid_one}/{resource_one}']/body[text()='Mode #foo [+v {nick_two}] by {nick_one}']",),
("/message[@to='{jid_two}/{resource_one}']/body[text()='Mode #foo [+v {nick_two}] by {nick_one}']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='member'][@role='participant']",),
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='member'][@role='participant']",)
]),
# using an iq
partial(send_stanza,
"<iq from='{jid_one}/{resource_one}' id='id1' to='#foo%{irc_server_one}' type='set'><query xmlns='http://jabber.org/protocol/muc#admin'><item affiliation='admin' nick='{nick_two}'/></query></iq>"),
partial(expect_unordered, [
("/message[@to='{jid_one}/{resource_one}']/body[text()='Mode #foo [+o {nick_two}] by {nick_one}']",),
("/message[@to='{jid_two}/{resource_one}']/body[text()='Mode #foo [+o {nick_two}] by {nick_one}']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/iq[@id='id1'][@type='result'][@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}']",),
]),
# remove the mode
partial(send_stanza,
"<iq from='{jid_one}/{resource_one}' id='id1' to='#foo%{irc_server_one}' type='set'><query xmlns='http://jabber.org/protocol/muc#admin'><item affiliation='member' nick='{nick_two}' role='participant'/></query></iq>"),
partial(expect_unordered, [
("/message[@to='{jid_one}/{resource_one}']/body[text()='Mode #foo [+v-o {nick_two} {nick_two}] by {nick_one}']",),
("/message[@to='{jid_two}/{resource_one}']/body[text()='Mode #foo [+v-o {nick_two} {nick_two}] by {nick_one}']",),
("/presence[@to='{jid_two}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='member'][@role='participant']",),
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='member'][@role='participant']",),
("/iq[@id='id1'][@type='result'][@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}']",),
]),
# using an iq, and a non-existent nick
partial(send_stanza,
"<iq from='{jid_one}/{resource_one}' id='id1' to='#foo%{irc_server_one}' type='set'><query xmlns='http://jabber.org/protocol/muc#admin'><item affiliation='admin' nick='blectre'/></query></iq>"),
partial(expect_stanza, "/iq[@type='error']"),
# using an iq, without the rights to do it
partial(send_stanza,
"<iq from='{jid_two}/{resource_one}' id='id1' to='#foo%{irc_server_one}' type='set'><query xmlns='http://jabber.org/protocol/muc#admin'><item affiliation='admin' nick='{nick_one}'/></query></iq>"),
partial(expect_unordered, [
("/iq[@type='error']",),
("/message[@type='chat'][@to='{jid_two}/{resource_one}']",),
]),
# using an iq, with an unknown mode
partial(send_stanza,
"<iq from='{jid_two}/{resource_one}' id='id1' to='#foo%{irc_server_one}' type='set'><query xmlns='http://jabber.org/protocol/muc#admin'><item affiliation='owner' nick='{nick_one}'/></query></iq>"),
partial(expect_unordered, [
("/iq[@type='error']",),
("/message[@type='chat'][@to='{jid_two}/{resource_one}']",),
]),
]),
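# Same kick, but the kicked user is joined from two resources: each of
# them must receive the unavailable presence with statuses 307 and 110.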
Scenario("multisession_kick",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza, "/message[@type='groupchat']/subject"),
# Second user joins, from two resources
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#foo%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_unordered, [
("/presence/muc_user:x/muc_user:item[@affiliation='none'][@role='participant']",),
("/presence/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",),
("/presence/muc_user:x/muc_user:status[@code='110']",),
("/message/subject",),
]),
partial(send_stanza,
"<presence from='{jid_two}/{resource_two}' to='#foo%{irc_server_one}/{nick_two}' />"),
partial(expect_stanza,
"/presence[@to='{jid_two}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_two}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_two}']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_two}/{resource_two}']/subject[not(text())]"),
# Moderator kicks participant
partial(send_stanza,
"<iq id='kick1' to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set'><query xmlns='http://jabber.org/protocol/muc#admin'><item nick='{nick_two}' role='none'><reason>reported</reason></item></query></iq>"),
partial(expect_unordered, [
("/presence[@type='unavailable'][@to='{jid_two}/{resource_one}']/muc_user:x/muc_user:item[@role='none']/muc_user:actor[@nick='{nick_one}']",
"/presence/muc_user:x/muc_user:item/muc_user:reason[text()='reported']",
"/presence/muc_user:x/muc_user:status[@code='307']",
"/presence/muc_user:x/muc_user:status[@code='110']"
),
("/presence[@type='unavailable'][@to='{jid_two}/{resource_two}']/muc_user:x/muc_user:item[@role='none']/muc_user:actor[@nick='{nick_one}']",
"/presence/muc_user:x/muc_user:item/muc_user:reason[text()='reported']",
"/presence/muc_user:x/muc_user:status[@code='307']",
"/presence/muc_user:x/muc_user:status[@code='110']"
),
("/presence[@type='unavailable']/muc_user:x/muc_user:item[@role='none']/muc_user:actor[@nick='{nick_one}']",
"/presence/muc_user:x/muc_user:item/muc_user:reason[text()='reported']",
"/presence/muc_user:x/muc_user:status[@code='307']",
),
("/iq[@id='kick1'][@type='result']",),
]),
]),
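# Self jabber:iq:version request: as with self-ping, the query is
# reflected to one of our own resources, and the gateway decorates the
# answer, producing 'e2e test (through the biboumi gateway) 1.0 Fedora'.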
Scenario("self_version",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Send a version request to ourselves
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_one}' id='first_version' to='#foo%{irc_server_one}/{nick_one}'><query xmlns='jabber:iq:version' /></iq>"),
# We receive our own request,
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='get'][@to='{jid_one}/{resource_one}']",
after = partial(save_value, "id", partial(extract_attribute, "/iq", 'id'))),
# Respond to the request
partial(send_stanza,
"<iq type='result' to='{lower_nick_one}%{irc_server_one}' id='{id}' from='{jid_one}/{resource_one}'><query xmlns='jabber:iq:version'><name>e2e test</name><version>1.0</version><os>Fedora</os></query></iq>"),
partial(expect_stanza,
"/iq[@from='#foo%{irc_server_one}/{nick_one}'][@type='result'][@to='{jid_one}/{resource_one}'][@id='first_version']/version:query/version:name[text()='e2e test (through the biboumi gateway) 1.0 Fedora']"),
# Now join the same room, from the same bare JID, behind the same nick
partial(send_stanza,
"<presence from='{jid_one}/{resource_two}' to='#foo%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_two}']/subject[not(text())]"),
# And re-send a self version request
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_two}' id='second_version' to='#foo%{irc_server_one}/{nick_one}'><query xmlns='jabber:iq:version' /></iq>"),
# We receive our own request. Note that we don't know the to value, it could be one of our two resources.
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='get'][@to]",
after = (partial(save_value, "to", partial(extract_attribute, "/iq", "to")),
partial(save_value, "id", partial(extract_attribute, "/iq", "id")))),
# Respond to the request, using the extracted 'to' value as our 'from'
partial(send_stanza,
"<iq type='result' to='{lower_nick_one}%{irc_server_one}' id='{id}' from='{to}'><query xmlns='jabber:iq:version'><name>e2e test</name><version>1.0</version><os>Fedora</os></query></iq>"),
partial(expect_stanza,
"/iq[@from='#foo%{irc_server_one}/{nick_one}'][@type='result'][@to='{jid_one}/{resource_two}'][@id='second_version']"),
# And do exactly the same thing, but initiated by the other resource
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_one}' id='second_version' to='#foo%{irc_server_one}/{nick_one}'><query xmlns='jabber:iq:version' /></iq>"),
# We receive our own request. Note that we don't know the to value, it could be one of our two resources.
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='get'][@to]",
after = (partial(save_value, "to", partial(extract_attribute, "/iq", "to")),
partial(save_value, "id", partial(extract_attribute, "/iq", "id")))),
# Respond to the request, using the extracted 'to' value as our 'from'
partial(send_stanza,
"<iq type='result' to='{lower_nick_one}%{irc_server_one}' id='{id}' from='{to}'><query xmlns='jabber:iq:version'><name>e2e test</name><version>1.0</version><os>Fedora</os></query></iq>"),
partial(expect_stanza,
"/iq[@from='#foo%{irc_server_one}/{nick_one}'][@type='result'][@to='{jid_one}/{resource_one}'][@id='second_version']"),
]),
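# Version request addressed directly to a user JID (here our own nick,
# {lower_nick_one}%{irc_server_one}) instead of an in-room occupant JID.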
Scenario("version_on_global_nick",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
partial(send_stanza,
"<iq type='get' from='{jid_one}/{resource_one}' id='first_version' to='{lower_nick_one}%{irc_server_one}'><query xmlns='jabber:iq:version' /></iq>"),
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='get'][@to='{jid_one}/{resource_one}']",
after = partial(save_value, "id", partial(extract_attribute, "/iq", 'id'))),
partial(send_stanza,
"<iq type='result' to='{lower_nick_one}%{irc_server_one}' id='{id}' from='{jid_one}/{resource_one}'><query xmlns='jabber:iq:version'><name>e2e test</name><version>1.0</version><os>Fedora</os></query></iq>"),
partial(expect_stanza,
"/iq[@from='{lower_nick_one}%{irc_server_one}'][@type='result'][@to='{jid_one}/{resource_one}'][@id='first_version']/version:query/version:name[text()='e2e test (through the biboumi gateway) 1.0 Fedora']"),
]),
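# Inviting our own nick: the IRC server's 'already on channel' notice is
# relayed back as a message body.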
Scenario("self_invite",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
partial(send_stanza,
"<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'><x xmlns='http://jabber.org/protocol/muc#user'><invite to='{nick_one}'/></x></message>"),
partial(expect_stanza,
"/message/body[text()='{nick_one} is already on channel #foo']")
]),
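# When a client answers a groupchat message with a stanza error, biboumi
# must make that resource leave the room, while the other resource of the
# same user stays joined.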
Scenario("client_error",
[
handshake_sequence(),
# First resource
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Second resource, same channel
partial(send_stanza,
"<presence from='{jid_one}/{resource_two}' to='#foo%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_two}']/subject[not(text())]"),
# Now the first resource answers with an error
partial(send_stanza,
"<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' type='error'><error type='cancel'><recipient-unavailable xmlns='urn:ietf:params:xml:ns:xmpp-stanzas'/></error></message>"),
# Receive a leave presence, sent only to the erroring resource
partial(expect_stanza,
("/presence[@type='unavailable'][@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}']/muc_user:x/muc_user:status[@code='110']",
"/presence/status[text()='Biboumi note: 1 resources are still in this channel.']")
),
]),
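# Basic XEP-0313 MAM coverage: stanza-id on live messages, retrieval of
# the full archive, empty results for out-of-range start/end dates, and
# an RSM-limited query.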
Scenario("simple_mam",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Send two channel messages
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou</body></message>"),
partial(expect_stanza,
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou']",
"/message/stable_id:stanza-id[@by='#foo%{irc_server_one}'][@id]",)
),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 2</body></message>"),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou 2']"),
# Retrieve the complete archive
partial(send_stanza, "<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id1'><query xmlns='urn:xmpp:mam:2' queryid='qid1' /></iq>"),
partial(expect_stanza,
("/message/mam:result[@queryid='qid1']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='coucou']")
),
partial(expect_stanza,
("/message/mam:result[@queryid='qid1']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='coucou 2']")
),
partial(expect_stanza,
("/iq[@type='result'][@id='id1'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin/rms:set/rsm:last",
"/iq/mam:fin/rsm:set/rsm:first",
"/iq/mam:fin[@complete='true']")),
# Retrieve an empty archive by specifying an early “end” date
partial(send_stanza, """<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id2'>
<query xmlns='urn:xmpp:mam:2' queryid='qid2'>
<x xmlns='jabber:x:data' type='submit'>
<field var='FORM_TYPE' type='hidden'> <value>urn:xmpp:mam:2</value></field>
<field var='end'><value>2000-06-07T00:00:00Z</value></field>
</x>
</query></iq>"""),
partial(expect_stanza,
("/iq[@type='result'][@id='id2'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin[@complete='true']/rsm:set",)),
# Retrieve an empty archive by specifying a late “start” date
# (note that this test will break in ~200 years)
partial(send_stanza, """<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id3'>
<query xmlns='urn:xmpp:mam:2' queryid='qid3'>
<x xmlns='jabber:x:data' type='submit'>
<field var='FORM_TYPE' type='hidden'> <value>urn:xmpp:mam:2</value></field>
<field var='start'><value>2222-06-07T00:00:00Z</value></field>
</x>
</query></iq>"""),
partial(expect_stanza,
("/iq[@type='result'][@id='id3'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin[@complete='true']/rsm:set")),
# Retrieve a limited archive
partial(send_stanza, "<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id4'><query xmlns='urn:xmpp:mam:2' queryid='qid4'><set xmlns='http://jabber.org/protocol/rsm'><max>1</max></set></query></iq>"),
partial(expect_stanza,
("/message/mam:result[@queryid='qid4']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid4']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='coucou']")
),
partial(expect_stanza,
("/iq[@type='result'][@id='id4'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin[@complete='true']/rsm:set")),
]),
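# MAM queries filtered with start/end form fields, built from timestamps
# saved while the messages were being exchanged.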
Scenario("mam_with_timestamps",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Send two channel messages
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou</body></message>"),
partial(expect_stanza,
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou']",
"/message/stable_id:stanza-id[@by='#foo%{irc_server_one}'][@id]",)
),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 2</body></message>"),
# Record the current time
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou 2']",
after = partial(save_current_timestamp_plus_delta, "first_timestamp", datetime.timedelta(seconds=1))),
# Wait two seconds before sending two new messages
partial(sleep_for, 2),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 3</body></message>"),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 4</body></message>"),
partial(expect_stanza, "/message[@type='groupchat']/body[text()='coucou 3']"),
partial(expect_stanza, "/message[@type='groupchat']/body[text()='coucou 4']",
after = partial(save_current_timestamp_plus_delta, "second_timestamp", datetime.timedelta(seconds=1))),
# Retrieve the archive between our two saved timestamps
partial(send_stanza, """<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id8'>
<query xmlns='urn:xmpp:mam:2' queryid='qid16'>
<x type='submit' xmlns='jabber:x:data'>
<field var='FORM_TYPE' xmlns='jabber:x:data'><value xmlns='jabber:x:data'>urn:xmpp:mam:2</value></field>
<field var='start' xmlns='jabber:x:data'><value xmlns='jabber:x:data'>{first_timestamp}</value></field>
<field var='end' xmlns='jabber:x:data'><value xmlns='jabber:x:data'>{second_timestamp}</value></field>
</x>
</query>
</iq>"""),
partial(expect_stanza,
("/message/mam:result[@queryid='qid16']/forward:forwarded/delay:delay",
"/message/mam:result/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='coucou 3']")
),
partial(expect_stanza,
("/message/mam:result[@queryid='qid16']/forward:forwarded/delay:delay",
"/message/mam:result/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='coucou 4']")
),
partial(expect_stanza,
("/iq[@type='result'][@id='id8'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin[@complete='true']/rsm:set")),
]),
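# XEP-0045 history limits on join: maxchars, maxstanzas, since and
# seconds must each restrict the history replayed when rejoining #foo.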
Scenario("join_history_limits",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Send two channel messages
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou</body></message>"),
partial(expect_stanza,
("/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou']",
"/message/stable_id:stanza-id[@by='#foo%{irc_server_one}'][@id]",)
),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 2</body></message>"),
# Record the current time
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou 2']",
after = partial(save_current_timestamp_plus_delta, "first_timestamp", datetime.timedelta(seconds=1))),
# Wait two seconds before sending two new messages
partial(sleep_for, 2),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 3</body></message>"),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou 4</body></message>"),
partial(expect_stanza, "/message[@type='groupchat']/body[text()='coucou 3']"),
partial(expect_stanza, "/message[@type='groupchat']/body[text()='coucou 4']",
after = partial(save_current_timestamp_plus_delta, "second_timestamp", datetime.timedelta(seconds=1))),
# join some other channel, to stay connected to the server even after leaving #foo
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#DUMMY%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza, "/message/subject"),
# Leave #foo
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='unavailable' />"),
partial(expect_stanza, "/presence[@type='unavailable']"),
# Rejoin #foo, asking for no history at all (maxchars='0')
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}'><x xmlns='http://jabber.org/protocol/muc'><history maxchars='0'/></x></presence>"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza, "/message/subject"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='unavailable' />"),
partial(expect_stanza, "/presence[@type='unavailable']"),
# Rejoin #foo, asking for at most 3 stanzas of history (maxstanzas='3')
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}'><x xmlns='http://jabber.org/protocol/muc'><history maxstanzas='3'/></x></presence>"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 2']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 3']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 4']"),
partial(expect_stanza, "/message/subject"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='unavailable' />"),
partial(expect_stanza, "/presence[@type='unavailable']"),
# Rejoin #foo, asking only for history since our saved timestamp (since='{first_timestamp}')
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}'><x xmlns='http://jabber.org/protocol/muc'><history since='{first_timestamp}'/></x></presence>"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 3']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 4']"),
partial(expect_stanza, "/message/subject"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='unavailable' />"),
partial(expect_stanza, "/presence[@type='unavailable']"),
# Rejoin #foo, asking only for the last 1 second of history (seconds='1')
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}'><x xmlns='http://jabber.org/protocol/muc'><history seconds='1'/></x></presence>"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 3']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 4']"),
partial(expect_stanza, "/message/subject"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='unavailable' />"),
partial(expect_stanza, "/presence[@type='unavailable']"),
# Rejoin #foo, asking for the last 5 seconds of history (seconds='5')
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}'><x xmlns='http://jabber.org/protocol/muc'><history seconds='5'/></x></presence>"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence/muc_user:x/muc_user:status[@code='110']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 2']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 3']"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/body[text()='coucou 4']"),
partial(expect_stanza, "/message/subject"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='unavailable' />"),
partial(expect_stanza, "/presence[@type='unavailable']"),
]),
Scenario("mam_on_fixed_server",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}', fixed_irc_server=True),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo@{biboumi_host}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo@{biboumi_host}'][@type='groupchat']/subject[not(text())]"),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}' type='groupchat'><body>coucou</body></message>"),
partial(expect_stanza, "/message[@from='#foo@{biboumi_host}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou']"),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}' type='groupchat'><body>coucou 2</body></message>"),
partial(expect_stanza, "/message[@from='#foo@{biboumi_host}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou 2']"),
# Retrieve the complete archive
partial(send_stanza, "<iq to='#foo@{biboumi_host}' from='{jid_one}/{resource_one}' type='set' id='id1'><query xmlns='urn:xmpp:mam:2' queryid='qid1' /></iq>"),
partial(expect_stanza,
("/message/mam:result[@queryid='qid1']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo@{biboumi_host}/{nick_one}'][@type='groupchat']/client:body[text()='coucou']")
),
partial(expect_stanza,
("/message/mam:result[@queryid='qid1']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo@{biboumi_host}/{nick_one}'][@type='groupchat']/client:body[text()='coucou 2']")
),
], conf="fixed_server"),
Scenario("default_mam_limit",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]",
after = partial(save_value, "counter", lambda x: 0)),
] + [
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>{counter}</body></message>"),
partial(expect_stanza,
"/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='{counter}']",
after = partial(save_value, "counter", lambda stanza: str(1 + int(extract_text("/message/body", stanza))))
),
] * 150 + [
# Retrieve the archive, without any restriction
partial(send_stanza, "<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id1'><query xmlns='urn:xmpp:mam:2' queryid='qid1' /></iq>"),
# A MAM result page is capped at 100 messages;
# this first page should start with message "1"
partial(expect_stanza,
("/message/mam:result[@queryid='qid1']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='1']")
),
] + [
# followed by 98 more messages
partial(expect_stanza,
("/message/mam:result[@queryid='qid1']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body")
),
] * 98 + [
# and finally the message "99"
partial(expect_stanza,
("/message/mam:result[@queryid='qid1']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid1']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='100']"),
after = partial(save_value, "last_uuid", partial(extract_attribute, "/message/mam:result", "id"))
),
# And it should not be marked as complete
partial(expect_stanza,
("/iq[@type='result'][@id='id1'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin/rsm:set/rsm:last[text()='{last_uuid}']",
"!/iq//mam:fin[@complete='true']",
"/iq//mam:fin")),
# Retrieve the next page, using RSM’s “after” element
partial(send_stanza, "<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id2'><query xmlns='urn:xmpp:mam:2' queryid='qid2' ><set xmlns='http://jabber.org/protocol/rsm'><after>{last_uuid}</after></set></query></iq>"),
partial(expect_stanza,
("/message/mam:result[@queryid='qid2']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid2']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='101']")
),
] + 47 * [
partial(expect_stanza,
("/message/mam:result[@queryid='qid2']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid2']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body")
),
] + [
partial(expect_stanza,
("/message/mam:result[@queryid='qid2']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid2']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='149']"),
after = partial(save_value, "last_uuid", partial(extract_attribute, "/message/mam:result", "id"))
),
partial(expect_stanza,
("/iq[@type='result'][@id='id2'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin/rsm:set/rsm:last[text()='{last_uuid}']",
"/iq//mam:fin[@complete='true']",
"/iq//mam:fin")),
# Send a request with a non-existing ID set as the “after” value.
partial(send_stanza, "<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id3'><query xmlns='urn:xmpp:mam:2' queryid='qid3' ><set xmlns='http://jabber.org/protocol/rsm'><after>DUMMY_ID</after></set></query></iq>"),
partial(expect_stanza, "/iq[@id='id3'][@type='error']/error[@type='cancel']/stanza:item-not-found"),
# Request the last page, using an empty “before” element
partial(send_stanza, "<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id3'><query xmlns='urn:xmpp:mam:2' queryid='qid3' ><set xmlns='http://jabber.org/protocol/rsm'><before></before></set></query></iq>"),
partial(expect_stanza,
("/message/mam:result[@queryid='qid3']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid3']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='50']")
),
] + 98 * [
partial(expect_stanza,
("/message/mam:result[@queryid='qid3']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid3']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body")
),
] + [
partial(expect_stanza,
("/message/mam:result[@queryid='qid3']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid3']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='149']"),
after = partial(save_value, "last_uuid", partial(extract_attribute, "/message/mam:result", "id"))
),
partial(expect_stanza,
("/iq[@type='result'][@id='id3'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin/rsm:set/rsm:last[text()='{last_uuid}']",
"!/iq//mam:fin[@complete='true']",
"/iq//mam:fin")),
# Do the same thing, but with a limit value.
partial(send_stanza, "<iq to='#foo%{irc_server_one}' from='{jid_one}/{resource_one}' type='set' id='id4'><query xmlns='urn:xmpp:mam:2' queryid='qid4' ><set xmlns='http://jabber.org/protocol/rsm'><before>{last_uuid}</before><max>2</max></set></query></iq>"),
partial(expect_stanza,
("/message/mam:result[@queryid='qid4']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid4']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='147']")
),
partial(expect_stanza,
("/message/mam:result[@queryid='qid4']/forward:forwarded/delay:delay",
"/message/mam:result[@queryid='qid4']/forward:forwarded/client:message[@from='#foo%{irc_server_one}/{nick_one}'][@type='groupchat']/client:body[text()='148']"),
after = partial(save_value, "last_uuid", partial(extract_attribute, "/message/mam:result", "id"))
),
partial(expect_stanza,
("/iq[@type='result'][@id='id4'][@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}']",
"/iq/mam:fin/rsm:set/rsm:last[text()='{last_uuid}']",
"/iq/mam:fin[@complete='true']",
"/iq/mam:fin")),
]),
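# A second resource joining a channel must receive the channel history,
# wrapped in a delay:delay element; fixed_server variant.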
Scenario("channel_history_on_fixed_server",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}', fixed_irc_server=True),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo@{biboumi_host}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@jid='~nick@localhost'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo@{biboumi_host}'][@type='groupchat']/subject[not(text())]"),
# Send one channel message
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}' type='groupchat'><body>coucou</body></message>"),
partial(expect_stanza, "/message[@from='#foo@{biboumi_host}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou']"),
# The same user joins from a second resource
partial(send_stanza,
"<presence from='{jid_one}/{resource_two}' to='#foo@{biboumi_host}/{nick_one}' />"),
# connection_sequence("irc.localhost", '{jid_one}/{resource_two}'),
# partial(expect_stanza,
# "/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_two}'][@from='#foo@{biboumi_host}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@jid='~nick@localhost'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
# Receive the history message
partial(expect_stanza, ("/message[@from='#foo@{biboumi_host}/{nick_one}']/body[text()='coucou']",
"/message/delay:delay[@from='#foo@{biboumi_host}']")),
partial(expect_stanza, "/message[@from='#foo@{biboumi_host}'][@type='groupchat']/subject[not(text())]"),
], conf="fixed_server"),
Scenario("channel_history",
[
handshake_sequence(),
# First user joins
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@jid='~nick@localhost'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Send one channel message
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' type='groupchat'><body>coucou</body></message>"),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}/{nick_one}'][@to='{jid_one}/{resource_one}'][@type='groupchat']/body[text()='coucou']"),
# The same user joins from a second resource
partial(send_stanza,
"<presence from='{jid_one}/{resource_two}' to='#foo%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@jid='~nick@localhost'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
# Receive the history message
partial(expect_stanza, ("/message[@from='#foo%{irc_server_one}/{nick_one}']/body[text()='coucou']",
"/message/delay:delay[@from='#foo%{irc_server_one}']")),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
]),
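# disco#items on the IRC server JID must list the channels existing on
# that server (#foo and #bar, created above).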
Scenario("simple_channel_list",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#bar%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza,
"/message/body[text()='Mode #bar [+nt] by {irc_host_one}']"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message[@from='#bar%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id1' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'/></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/disco_items:item[@jid='#foo%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#bar%{irc_server_one}']"
))
]),
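# Channel names containing '/' are JID-escaped (XEP-0106): '\2f' in the
# room JID corresponds to '/' in the IRC channel name #true/false.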
Scenario("channel_list_escaping",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#true\\2ffalse%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #true/false [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#true\\2ffalse%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#true\\2ffalse%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
]),
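# RSM paging over the disco#items channel list; the total count is only
# included once biboumi has seen the complete list from the IRC server.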
Scenario("channel_list_with_rsm",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#bar%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza,
"/message/body[text()='Mode #bar [+nt] by {irc_host_one}']"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message[@from='#bar%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#coucou%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza,
"/message/body[text()='Mode #coucou [+nt] by {irc_host_one}']"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message[@from='#coucou%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Ask for 0 items
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id1' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><max>0</max></set></query></iq>"),
# Get 0 items
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
)),
# Ask for 2 (of 3) items. We don’t have the count,
# because biboumi doesn’t have the complete list when
# it sends us the 2 items
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id1' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><max>2</max></set></query></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/disco_items:item[@jid='#bar%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#coucou%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:first[text()='#bar%{irc_server_one}'][@index='0']",
"/iq/disco_items:query/rsm:set/rsm:last[text()='#coucou%{irc_server_one}']"
)),
# Ask for 12 (of 3) items. We get the whole list, and thus we have the count included.
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id1' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><max>12</max></set></query></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/disco_items:item[@jid='#bar%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#coucou%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#foo%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:first[text()='#bar%{irc_server_one}'][@index='0']",
"/iq/disco_items:query/rsm:set/rsm:last[text()='#foo%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:count[text()='3']"
)),
# Ask for 1 item, AFTER the first item (so,
# the second). Since we don’t invalidate the cache
# with this request, we should have the count
# included.
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id1' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><after>#bar%{irc_server_one}</after><max>1</max></set></query></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/disco_items:item[@jid='#coucou%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:first[text()='#coucou%{irc_server_one}'][@index='1']",
"/iq/disco_items:query/rsm:set/rsm:last[text()='#coucou%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:count[text()='3']"
)),
# Ask for 1 item, AFTER the second item (so,
# the third).
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id1' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><after>#coucou%{irc_server_one}</after><max>1</max></set></query></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/disco_items:item[@jid='#foo%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:first[text()='#foo%{irc_server_one}'][@index='2']",
"/iq/disco_items:query/rsm:set/rsm:last[text()='#foo%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:count[text()='3']"
)),
# Ask for 1 item, AFTER the third item (so,
# the fourth). Since it doesn't exist, we get 0 items
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id1' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><after>#foo%{irc_server_one}</after><max>1</max></set></query></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/rsm:set/rsm:count[text()='3']"
)),
]),
Scenario("default_channel_list_limit",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message",
after = partial(save_value, "counter", lambda x: 0)),
] + [
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#{counter}%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence",
after = partial(save_value, "counter", lambda stanza: str(1 + int(chan_name_from_jid(extract_attribute("/presence", "from", stanza)))))),
partial(expect_stanza, "/message")
] * 110 + [
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id1' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'/></iq>"),
# charybdis sends the list in alphabetical order, so #foo sorts last, and #99 sorts after #120
partial(expect_stanza, ("/iq/disco_items:query/disco_items:item[@jid='#0%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#1%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#109%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#9%{irc_server_one}']",
"!/iq/disco_items:query/disco_items:item[@jid='#foo%{irc_server_one}']",
"!/iq/disco_items:query/disco_items:item[@jid='#99%{irc_server_one}']",
"!/iq/disco_items:query/disco_items:item[@jid='#90%{irc_server_one}']")),
]),
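    # The next scenario pages through the channel list using XEP-0059
    # (Result Set Management): <max/> bounds the page size and <after/>
    # selects where the next page starts.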
Scenario("complete_channel_list_with_pages_of_3",
[
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#aaa%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#bbb%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#ccc%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#ddd%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#eee%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#fff%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#ggg%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#hhh%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#iii%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#jjj%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><max>3</max></set></query></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/disco_items:item[@jid='#aaa%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#bbb%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#ccc%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:first[text()='#aaa%{irc_server_one}'][@index='0']",
"/iq/disco_items:query/rsm:set/rsm:last[text()='#ccc%{irc_server_one}']"
)),
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><after>#ccc%{irc_server_one}</after><max>3</max></set></query></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/disco_items:item[@jid='#ddd%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#eee%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#fff%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:first[text()='#ddd%{irc_server_one}'][@index='3']",
"/iq/disco_items:query/rsm:set/rsm:last[text()='#fff%{irc_server_one}']"
)),
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><after>#fff%{irc_server_one}</after><max>3</max></set></query></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/disco_items:item[@jid='#ggg%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#hhh%{irc_server_one}']",
"/iq/disco_items:query/disco_items:item[@jid='#iii%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:first[text()='#ggg%{irc_server_one}'][@index='6']",
"/iq/disco_items:query/rsm:set/rsm:last[text()='#iii%{irc_server_one}']"
)),
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='id' to='{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/disco#items'><set xmlns='http://jabber.org/protocol/rsm'><after>#iii%{irc_server_one}</after><max>3</max></set></query></iq>"),
partial(expect_stanza, (
"/iq[@type='result']/disco_items:query",
"/iq/disco_items:query/disco_items:item[@jid='#jjj%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:first[text()='#jjj%{irc_server_one}'][@index='9']",
"/iq/disco_items:query/rsm:set/rsm:last[text()='#jjj%{irc_server_one}']",
"/iq/disco_items:query/rsm:set/rsm:count[text()='10']"
)),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#aaa%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#bbb%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#ccc%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#ddd%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#eee%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#fff%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#ggg%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#hhh%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#iii%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#jjj%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unavailable']")
]),
Scenario("muc_traffic_info",
[
handshake_sequence(),
partial(send_stanza,
"<iq from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' id='1' type='get'><query xmlns='http://jabber.org/protocol/disco#info' node='http://jabber.org/protocol/muc#traffic'/></iq>"),
partial(expect_stanza, "/iq[@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}'][@type='result']/disco_info:query[@node='http://jabber.org/protocol/muc#traffic']"),
]),
Scenario("muc_disco_info",
[
handshake_sequence(),
partial(send_stanza,
"<iq from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}' id='1' type='get'><query xmlns='http://jabber.org/protocol/disco#info'/></iq>"),
partial(expect_stanza,
("/iq[@from='#foo%{irc_server_one}'][@to='{jid_one}/{resource_one}'][@type='result']/disco_info:query",
"/iq[@type='result']/disco_info:query/disco_info:identity[@category='conference'][@type='irc'][@name='#foo on {irc_host_one}']",
"/iq/disco_info:query/disco_info:feature[@var='jabber:iq:version']",
"/iq/disco_info:query/disco_info:feature[@var='http://jabber.org/protocol/commands']",
"/iq/disco_info:query/disco_info:feature[@var='urn:xmpp:ping']",
"/iq/disco_info:query/disco_info:feature[@var='urn:xmpp:mam:2']",
"/iq/disco_info:query/disco_info:feature[@var='jabber:iq:version']",
)),
]),
Scenario("fixed_muc_disco_info",
[
handshake_sequence(),
partial(send_stanza,
"<iq from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}' id='1' type='get'><query xmlns='http://jabber.org/protocol/disco#info'/></iq>"),
partial(expect_stanza,
("/iq[@from='#foo@{biboumi_host}'][@to='{jid_one}/{resource_one}'][@type='result']/disco_info:query",
"/iq[@type='result']/disco_info:query/disco_info:identity[@category='conference'][@type='irc'][@name='#foo on {irc_host_one}']",
"/iq/disco_info:query/disco_info:feature[@var='jabber:iq:version']",
"/iq/disco_info:query/disco_info:feature[@var='http://jabber.org/protocol/commands']",
"/iq/disco_info:query/disco_info:feature[@var='urn:xmpp:ping']",
"/iq/disco_info:query/disco_info:feature[@var='urn:xmpp:mam:2']",
"/iq/disco_info:query/disco_info:feature[@var='jabber:iq:version']",
)),
], conf='fixed_server'),
Scenario("raw_message",
[
handshake_sequence(),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='{irc_server_one}' type='chat'><body>WHOIS {nick_one}</body></message>"),
partial(expect_stanza, "/message[@from='{irc_server_one}'][@type='chat']/body[text()='irc.localhost: {nick_one} ~{nick_one} localhost * {nick_one}']"),
]),
Scenario("raw_message_fixed_irc_server",
[
handshake_sequence(),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}', fixed_irc_server=True),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='{biboumi_host}' type='chat'><body>WHOIS {nick_one}</body></message>"),
partial(expect_stanza, "/message[@from='{biboumi_host}'][@type='chat']/body[text()='irc.localhost: {nick_one} ~{nick_one} localhost * {nick_one}']"),
], conf='fixed_server'),
Scenario("self_disco_info",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='get1' from='{jid_one}/{resource_one}' to='{biboumi_host}'><query xmlns='http://jabber.org/protocol/disco#info'/></iq>"),
partial(expect_stanza,
("/iq[@type='result']/disco_info:query/disco_info:identity[@category='conference'][@type='irc'][@name='Biboumi XMPP-IRC gateway']",
"/iq/disco_info:query/disco_info:feature[@var='jabber:iq:version']",
"/iq/disco_info:query/disco_info:feature[@var='http://jabber.org/protocol/commands']",
"/iq/disco_info:query/disco_info:feature[@var='urn:xmpp:ping']",
"/iq/disco_info:query/disco_info:feature[@var='urn:xmpp:mam:2']",
"/iq/disco_info:query/disco_info:feature[@var='jabber:iq:version']",
)),
]),
Scenario("invite_other",
[
handshake_sequence(),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<presence from='{jid_two}/{resource_two}' to='#bar%{irc_server_one}@{biboumi_host}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_two}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'><x xmlns='http://jabber.org/protocol/muc#user'><invite to='{nick_two}'/></x></message>"),
partial(expect_stanza, "/message/body[text()='{nick_two} has been invited to #foo']"),
partial(expect_stanza, "/message[@to='{jid_two}/{resource_two}'][@from='#foo%{irc_server_one}']/muc_user:x/muc_user:invite[@from='#foo%{irc_server_one}/{nick_one}']"),
partial(send_stanza, "<message from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'><x xmlns='http://jabber.org/protocol/muc#user'><invite to='bertrand@example.com'/></x></message>"),
partial(expect_stanza, "/message[@to='bertrand@example.com'][@from='#foo%{irc_server_one}']/muc_user:x/muc_user:invite[@from='{jid_one}/{resource_one}']"),
]),
Scenario("global_configure",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='id1' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:title[text()='Configure some global default settings.']",
"/iq/commands:command/dataform:x[@type='form']/dataform:instructions[text()='Edit the form, to configure your global settings for the component.']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='max_history_length']/dataform:value[text()='20']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='boolean'][@var='record_history']/dataform:value[text()='true']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='boolean'][@var='persistent']/dataform:value[text()='false']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id2' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='configure' sessionid='{sessionid}' action='next'><x xmlns='jabber:x:data' type='submit'><field var='record_history'><value>0</value></field><field var='max_history_length'><value>42</value></field></x></command></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='completed']/commands:note[@type='info'][text()='Configuration successfully applied.']"),
partial(send_stanza, "<iq type='set' id='id3' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:title[text()='Configure some global default settings.']",
"/iq/commands:command/dataform:x[@type='form']/dataform:instructions[text()='Edit the form, to configure your global settings for the component.']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='max_history_length']/dataform:value[text()='42']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='boolean'][@var='record_history']/dataform:value[text()='false']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='boolean'][@var='persistent']/dataform:value[text()='false']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id4' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' action='cancel' node='configure' sessionid='{sessionid}' /></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='canceled']"),
]),
Scenario("global_configure_persistent_by_default",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='id1' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:title[text()='Configure some global default settings.']",
"/iq/commands:command/dataform:x[@type='form']/dataform:instructions[text()='Edit the form, to configure your global settings for the component.']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='max_history_length']/dataform:value[text()='20']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='boolean'][@var='record_history']/dataform:value[text()='true']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='boolean'][@var='persistent']/dataform:value[text()='true']",
"/iq/commands:command/commands:actions/commands:next",
),
),
],conf='persistent_by_default'),
Scenario("irc_server_configure",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='id1' from='{jid_one}/{resource_one}' to='{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:title[text()='Configure the IRC server irc.localhost']",
"/iq/commands:command/dataform:x[@type='form']/dataform:instructions[text()='Edit the form, to configure the settings of the IRC server irc.localhost']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-multi'][@var='ports']/dataform:value[text()='6667']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-multi'][@var='tls_ports']/dataform:value[text()='6670']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-multi'][@var='tls_ports']/dataform:value[text()='6697']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='boolean'][@var='verify_cert']/dataform:value[text()='true']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='fingerprint']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-private'][@var='pass']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-multi'][@var='after_connect_commands']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='nick']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='username']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='realname']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_in']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_out']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id2' from='{jid_one}/{resource_one}' to='{irc_server_one}'>"
"<command xmlns='http://jabber.org/protocol/commands' node='configure' sessionid='{sessionid}' action='next'>"
"<x xmlns='jabber:x:data' type='submit'>"
"<field var='ports' />"
"<field var='tls_ports'><value>6697</value><value>6698</value></field>"
"<field var='verify_cert'><value>1</value></field>"
"<field var='fingerprint'><value>12:12:12</value></field>"
"<field var='pass'><value>coucou</value></field>"
"<field var='after_connect_commands'><value>first command</value><value>second command</value></field>"
"<field var='nick'><value>my_nickname</value></field>"
"<field var='username'><value>username</value></field>"
"<field var='realname'><value>realname</value></field>"
"<field var='encoding_out'><value>UTF-8</value></field>"
"<field var='encoding_in'><value>latin-1</value></field>"
"</x></command></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='completed']/commands:note[@type='info'][text()='Configuration successfully applied.']"),
partial(send_stanza, "<iq type='set' id='id3' from='{jid_one}/{resource_one}' to='{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:title[text()='Configure the IRC server irc.localhost']",
"/iq/commands:command/dataform:x[@type='form']/dataform:instructions[text()='Edit the form, to configure the settings of the IRC server irc.localhost']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-multi']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-multi'][@var='tls_ports']/dataform:value[text()='6697']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-multi'][@var='tls_ports']/dataform:value[text()='6698']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='boolean'][@var='verify_cert']/dataform:value[text()='true']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='fingerprint']/dataform:value[text()='12:12:12']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-private'][@var='pass']/dataform:value[text()='coucou']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='nick']/dataform:value[text()='my_nickname']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-multi'][@var='after_connect_commands']/dataform:value[text()='first command']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-multi'][@var='after_connect_commands']/dataform:value[text()='second command']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='username']/dataform:value[text()='username']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='realname']/dataform:value[text()='realname']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_in']/dataform:value[text()='latin-1']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_out']/dataform:value[text()='UTF-8']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id4' from='{jid_one}/{resource_one}' to='{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' action='cancel' node='configure' sessionid='{sessionid}' /></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='canceled']"),
# Same thing, but try to empty some values
partial(send_stanza, "<iq type='set' id='id1' from='{jid_one}/{resource_one}' to='{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, "/iq[@type='result']",
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id2' from='{jid_one}/{resource_one}' to='{irc_server_one}'>"
"<command xmlns='http://jabber.org/protocol/commands' node='configure' sessionid='{sessionid}' action='next'>"
"<x xmlns='jabber:x:data' type='submit'>"
"<field var='pass'><value></value></field>"
"<field var='after_connect_commands'></field>"
"<field var='username'><value></value></field>"
"<field var='realname'><value></value></field>"
"<field var='encoding_out'><value></value></field>"
"<field var='encoding_in'><value></value></field>"
"</x></command></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='completed']/commands:note[@type='info'][text()='Configuration successfully applied.']"),
partial(send_stanza, "<iq type='set' id='id3' from='{jid_one}/{resource_one}' to='{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:title[text()='Configure the IRC server irc.localhost']",
"/iq/commands:command/dataform:x[@type='form']/dataform:instructions[text()='Edit the form, to configure the settings of the IRC server irc.localhost']",
"!/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='pass']/dataform:value",
"!/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='after_connect_commands']/dataform:value",
"!/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='username']/dataform:value",
"!/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='realname']/dataform:value",
"!/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='encoding_in']/dataform:value",
"!/iq/commands:command/dataform:x[@type='form']/dataform:field[@var='encoding_out']/dataform:value",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id4' from='{jid_one}/{resource_one}' to='{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' action='cancel' node='configure' sessionid='{sessionid}' /></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='canceled']"),
]),
Scenario("irc_channel_configure",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='id1' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_in']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_out']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='list-single'][@var='record_history']/dataform:value[text()='unset']",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id2' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'>"
"<command xmlns='http://jabber.org/protocol/commands' node='configure' sessionid='{sessionid}' action='next'>"
"<x xmlns='jabber:x:data' type='submit'>"
"<field var='ports' />"
"<field var='encoding_out'><value>UTF-8</value></field>"
"<field var='encoding_in'><value>latin-1</value></field>"
"<field var='record_history'><value>true</value></field>"
"</x></command></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='completed']/commands:note[@type='info'][text()='Configuration successfully applied.']"),
partial(send_stanza, "<iq type='set' id='id3' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:title[text()='Configure the IRC channel #foo on server irc.localhost']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_in']/dataform:value[text()='latin-1']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_out']/dataform:value[text()='UTF-8']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='list-single'][@var='record_history']/dataform:value[text()='true']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id4' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' action='cancel' node='configure' sessionid='{sessionid}' /></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='canceled']"),
]),
Scenario("irc_channel_configure_xep0045",
[
handshake_sequence(),
partial(send_stanza, "<iq type='get' id='id1' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'><query xmlns='http://jabber.org/protocol/muc#owner'/></iq>"),
partial(expect_stanza, ("/iq[@type='result']/muc_owner:query",
"/iq/muc_owner:query/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_in']",
"/iq/muc_owner:query/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_out']",
),
),
partial(send_stanza, "<iq type='set' id='id2' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'>"
"<query xmlns='http://jabber.org/protocol/muc#owner'>"
"<x xmlns='jabber:x:data' type='submit'>"
"<field var='ports' />"
"<field var='encoding_out'><value>UTF-8</value></field>"
"<field var='encoding_in'><value>latin-1</value></field>"
"</x></query></iq>"),
partial(expect_stanza, "/iq[@type='result']"),
partial(send_stanza, "<iq type='set' id='id3' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}'><query xmlns='http://jabber.org/protocol/muc#owner'> <x xmlns='jabber:x:data' type='cancel'/></query></iq>"),
partial(expect_stanza, "/iq[@type='result']"),
]),
Scenario("irc_channel_configure_fixed",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='id1' from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_in']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_out']",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id2' from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}'>"
"<command xmlns='http://jabber.org/protocol/commands' node='configure' sessionid='{sessionid}' action='next'>"
"<x xmlns='jabber:x:data' type='submit'>"
"<field var='ports' />"
"<field var='encoding_out'><value>UTF-8</value></field>"
"<field var='encoding_in'><value>latin-1</value></field>"
"</x></command></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='completed']/commands:note[@type='info'][text()='Configuration successfully applied.']"),
partial(send_stanza, "<iq type='set' id='id3' from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, ("/iq[@type='result']/commands:command[@node='configure'][@sessionid][@status='executing']",
"/iq/commands:command/dataform:x[@type='form']/dataform:title[text()='Configure the IRC channel #foo on server irc.localhost']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_in']/dataform:value[text()='latin-1']",
"/iq/commands:command/dataform:x[@type='form']/dataform:field[@type='text-single'][@var='encoding_out']/dataform:value[text()='UTF-8']",
"/iq/commands:command/commands:actions/commands:next",
),
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))
),
partial(send_stanza, "<iq type='set' id='id4' from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' action='cancel' node='configure' sessionid='{sessionid}' /></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='canceled']"),
], conf='fixed_server'),
Scenario("irc_tls_connection",
[
handshake_sequence(),
# First, use an ad-hoc command to configure how we connect to the IRC
# server: enable only one TLS port, and disable certificate verification.
partial(send_stanza, "<iq type='set' id='id1' from='{jid_one}/{resource_one}' to='{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' node='configure' action='execute' /></iq>"),
partial(expect_stanza, "/iq[@type='result']",
after = partial(save_value, "sessionid", partial(extract_attribute, "/iq[@type='result']/commands:command[@node='configure']", "sessionid"))),
partial(send_stanza, "<iq type='set' id='id2' from='{jid_one}/{resource_one}' to='{irc_server_one}'>"
"<command xmlns='http://jabber.org/protocol/commands' node='configure' sessionid='{sessionid}' action='next'>"
"<x xmlns='jabber:x:data' type='submit'>"
"<field var='ports' />"
"<field var='tls_ports'><value>7778</value></field>"
"<field var='verify_cert'><value>0</value></field>"
"<field var='nick'><value>my_special_nickname</value></field>"
"</x></command></iq>"),
partial(expect_stanza, "/iq[@type='result']/commands:command[@node='configure'][@status='completed']/commands:note[@type='info'][text()='Configuration successfully applied.']"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_tls_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/my_special_nickname']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
]),
Scenario("get_irc_connection_info",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='command1' from='{jid_one}/{resource_one}' to='{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' node='get-irc-connection-info' action='execute' /></iq>"),
partial(expect_stanza, "/iq/commands:command/commands:note[text()='You are not connected to the IRC server irc.localhost']"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<iq type='set' id='command2' from='{jid_one}/{resource_one}' to='{irc_server_one}'><command xmlns='http://jabber.org/protocol/commands' node='get-irc-connection-info' action='execute' /></iq>"),
partial(expect_stanza, r"/iq/commands:command/commands:note[re:test(text(), 'Connected to IRC server irc.localhost on port 6667 since \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d \(\d+ seconds ago\)\.\n#foo from 1 resource: {resource_one}.*')]"),
]),
Scenario("get_irc_connection_info_fixed",
[
handshake_sequence(),
partial(send_stanza, "<iq type='set' id='command1' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='get-irc-connection-info' action='execute' /></iq>"),
partial(expect_stanza, "/iq/commands:command/commands:note[text()='You are not connected to the IRC server irc.localhost']"),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo@{biboumi_host}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}', fixed_irc_server=True),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<iq type='set' id='command2' from='{jid_one}/{resource_one}' to='{biboumi_host}'><command xmlns='http://jabber.org/protocol/commands' node='get-irc-connection-info' action='execute' /></iq>"),
partial(expect_stanza, r"/iq/commands:command/commands:note[re:test(text(), 'Connected to IRC server irc.localhost on port 6667 since \d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d \(\d+ seconds ago\)\.\n#foo from 1 resource: {resource_one}.*')]"),
], conf='fixed_server'),
Scenario("irc_server_presence_subscription",
[
handshake_sequence(),
partial(send_stanza, "<presence type='subscribe' from='{jid_one}/{resource_one}' to='{irc_server_one}' id='sub1' />"),
partial(expect_stanza, "/presence[@to='{jid_one}'][@from='{irc_server_one}'][@type='subscribed']")
]),
Scenario("fixed_irc_server_presence_subscription",
[
handshake_sequence(),
partial(send_stanza, "<presence type='subscribe' from='{jid_one}/{resource_one}' to='{biboumi_host}' id='sub1' />"),
partial(expect_stanza, "/presence[@to='{jid_one}'][@from='{biboumi_host}'][@type='subscribed']")
], conf='fixed_server'),
Scenario("leave_unjoined_chan",
[
handshake_sequence(),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza, "/message"),
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/message"),
partial(send_stanza, "<presence from='{jid_two}/{resource_two}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_begin_sequence("irc.localhost", '{jid_two}/{resource_two}'),
partial(expect_stanza, "/message[@to='{jid_two}/{resource_two}'][@type='chat']/body[text()='irc.localhost: {nick_one}: Nickname is already in use.']"),
partial(expect_stanza, "/presence[@type='error']/error[@type='cancel'][@code='409']/stanza:conflict"),
partial(send_stanza, "<presence from='{jid_two}/{resource_two}' to='#foo%{irc_server_one}/{nick_one}' type='unavailable' />")
]),
Scenario("basic_subscribe_unsubscribe",
[
handshake_sequence(),
# Mutual subscription exchange
partial(send_stanza, "<presence from='{jid_one}' to='{biboumi_host}' type='subscribe' id='subid1' />"),
partial(expect_stanza, "/presence[@type='subscribed'][@id='subid1']"),
# Get the current presence of the biboumi gateway
partial(expect_stanza, "/presence"),
partial(expect_stanza, "/presence[@type='subscribe']"),
partial(send_stanza, "<presence from='{jid_one}' to='{biboumi_host}' type='subscribed' />"),
# Unsubscribe
partial(send_stanza, "<presence from='{jid_one}' to='{biboumi_host}' type='unsubscribe' id='unsubid1' />"),
partial(expect_stanza, "/presence[@type='unavailable']"),
partial(expect_stanza, "/presence[@type='unsubscribed']"),
partial(expect_stanza, "/presence[@type='unsubscribe']"),
partial(send_stanza, "<presence from='{jid_one}' to='{biboumi_host}' type='unavailable' />"),
partial(send_stanza, "<presence from='{jid_one}' to='{biboumi_host}' type='unsubscribed' />"),
]),
Scenario("resource_is_removed_from_server_when_last_chan_is_left",
[
# Join the channel
handshake_sequence(),
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
connection_sequence("irc.localhost", '{jid_one}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_one}']/subject[not(text())]"),
# Make it persistent
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='conf1' to='#foo%{irc_server_one}' type='get'><query xmlns='http://jabber.org/protocol/muc#owner'/></iq>"),
partial(expect_stanza, "/iq[@type='result']/muc_owner:query/dataform:x/dataform:field[@var='persistent'][@type='boolean']/dataform:value[text()='false']"),
partial(send_stanza, "<iq from='{jid_one}/{resource_one}' id='conf2' to='#foo%{irc_server_one}' type='set'><query xmlns='http://jabber.org/protocol/muc#owner'><x type='submit' xmlns='jabber:x:data'><field var='persistent' xmlns='jabber:x:data'><value>true</value></field></x></query></iq>"),
partial(expect_stanza, "/iq[@type='result']"),
partial(send_stanza, "<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' type='unavailable' />"),
partial(expect_stanza, "/presence[@type='unavailable'][@from='#foo%{irc_server_one}/{nick_one}']"),
# Join the same channel, with the same JID, but a different resource
partial(send_stanza,
"<presence from='{jid_one}/{resource_two}' to='#foo%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_two}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat'][@to='{jid_one}/{resource_two}']/subject[not(text())]"),
# Join some other channel with someone else
partial(send_stanza,
"<presence from='{jid_two}/{resource_one}' to='#bar%{irc_server_one}/{nick_two}' />"),
connection_sequence("irc.localhost", '{jid_two}/{resource_one}'),
partial(expect_stanza,
"/message/body[text()='Mode #bar [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_two}/{resource_one}'][@from='#bar%{irc_server_one}/{nick_two}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#bar%{irc_server_one}'][@type='groupchat'][@to='{jid_two}/{resource_one}']/subject[not(text())]"),
# Send two messages from the second user to the first one
partial(send_stanza, "<message from='{jid_two}/{resource_one}' to='{lower_nick_one}%{irc_server_one}' type='chat'><body>kikoo</body></message>"),
partial(send_stanza, "<message from='{jid_two}/{resource_one}' to='{lower_nick_one}%{irc_server_one}' type='chat'><body>second kikoo</body></message>"),
# We must receive each message only once, no duplicate
partial(expect_stanza, "/message/body[text()='kikoo']"),
partial(expect_stanza, "/message/body[text()='second kikoo']"),
]
),
Scenario("irc_server_presence_in_roster",
[
handshake_sequence(),
# Mutual subscription exchange
partial(send_stanza, "<presence from='{jid_one}' to='{irc_server_one}' type='subscribe' id='subid1' />"),
partial(expect_stanza, "/presence[@type='subscribed'][@id='subid1']"),
partial(expect_stanza, "/presence[@type='subscribe']"),
partial(send_stanza, "<presence from='{jid_one}' to='{irc_server_one}' type='subscribed' />"),
# Join a channel on that server
partial(send_stanza,
"<presence from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
# We must receive the IRC server presence, in the connection sequence
connection_sequence("irc.localhost", '{jid_one}/{resource_one}', expected_irc_presence=True),
partial(expect_stanza,
"/message/body[text()='Mode #foo [+nt] by {irc_host_one}']"),
partial(expect_stanza,
("/presence[@to='{jid_one}/{resource_one}'][@from='#foo%{irc_server_one}/{nick_one}']/muc_user:x/muc_user:item[@affiliation='admin'][@role='moderator']",
"/presence/muc_user:x/muc_user:status[@code='110']")
),
partial(expect_stanza, "/message[@from='#foo%{irc_server_one}'][@type='groupchat']/subject[not(text())]"),
# Leave the channel, and thus the IRC server
partial(send_stanza, "<presence type='unavailable' from='{jid_one}/{resource_one}' to='#foo%{irc_server_one}/{nick_one}' />"),
partial(expect_stanza, "/presence[@type='unavailable'][@from='#foo%{irc_server_one}/{nick_one}']"),
partial(expect_stanza, "/presence[@from='{irc_server_one}'][@to='{jid_one}'][@type='unavailable']"),
])
)
failures = 0
scenar_list = sys.argv[1:]
irc_output = open("irc_output.txt", "w")
irc = IrcServerRunner()
print("Starting irc server…")
asyncio.get_event_loop().run_until_complete(irc.start())
while True:
res = asyncio.get_event_loop().run_until_complete(irc.process.stderr.readline())
irc_output.write(res.decode())
if not res:
print("IRC server failed to start, see irc_output.txt for more details. Exiting…")
sys.exit(1)
if b"now running in foreground mode" in res:
break
print("irc server started.")
checks = len([s for s in scenarios if s.name in scenar_list]) if scenar_list else len(scenarios)
print("Running %s checks for biboumi." % checks)
for s in scenarios:
if scenar_list and s.name not in scenar_list:
continue
test = BiboumiTest(s)
if not test.run():
print("You can check the files slixmpp_%s_output.txt and biboumi_%s_output.txt to help you debug." %
(s.name, s.name))
failures += 1
print("Waiting for irc server to exit…")
irc.stop()
asyncio.get_event_loop().run_until_complete(irc.wait())
if failures:
print("%d test%s failed, please fix %s." % (failures, 's' if failures > 1 else '',
'them' if failures > 1 else 'it'))
sys.exit(1)
else:
print("All tests passed successfully")
|
#
# To start the service:
# python manage.py runserver
#
from app import app
from flask_script import Manager, Server

manager = Manager(app)

@manager.command
def test():
    """Run unit tests."""
    # Minimal discovery-based runner; assumes tests live alongside this script.
    import unittest
    tests = unittest.TestLoader().discover('.')
    unittest.TextTestRunner(verbosity=2).run(tests)

if __name__ == '__main__':
    # flask_script provides a built-in Server command for the dev server.
    manager.add_command("runserver", Server())
    manager.run()
|
#MenuTitle: New Tab with Glyphs Exceeding Zones
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from builtins import str
__doc__="""
Opens a new tab with all glyphs where the extremums do not lie within zones.
"""
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
thisFontMasterID = thisFontMaster.id
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def zoneList( master ):
zoneList = []
for z in master.alignmentZones:
zoneOrigin, zoneSize = int(z.position), int(z.size)
zoneList.append( ( zoneOrigin, zoneOrigin+zoneSize ) )
return zoneList
def isInZones( thisLayer, zones ):
	# ignore empty glyphs:
	if len(thisLayer.paths) == 0 and len(thisLayer.components) == 0:
		return True
	bottom = thisLayer.bounds.origin.y
	top = bottom + thisLayer.bounds.size.height
	isBottomInZone = False
	isTopInZone = False
	for thisZone in zones:
		zoneOrigin, zoneEnd = thisZone[0], thisZone[1]
		if zoneOrigin < zoneEnd:
			# top zone (positive size): the top extremum must fall inside it
			if zoneOrigin <= top <= zoneEnd:
				isTopInZone = True
		elif zoneOrigin > zoneEnd:
			# bottom zone (negative size): the bottom extremum must fall inside it
			if zoneOrigin >= bottom >= zoneEnd:
				isBottomInZone = True
	return isBottomInZone and isTopInZone
tabString = ""
masterZones = zoneList( thisFontMaster )
for thisGlyph in thisFont.glyphs:
thisLayer = thisGlyph.layers[thisFontMasterID]
if not isInZones( thisLayer, masterZones ):
tabString += "/%s" % thisGlyph.name
# opens new Edit tab:
from PyObjCTools.AppHelper import callAfter
callAfter( Glyphs.currentDocument.windowController().addTabWithString_, tabString )
|
import unittest
import string
# from typing import List
class Solution:
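    # Classifies an address string as "IPv4", "IPv6" or "Neither":
    # IPv4 = four decimal fields, each 0-255, no leading zeros;
    # IPv6 = eight fields of 1-4 hex digits (leading zeros allowed,
    # '::' compression is not supported by this validator).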
def validIPAddress(self, IP: str) -> str:
if IP is None:
return "Neither"
if "." in IP:
return self.validIP4Adress(IP)
elif ":" in IP:
return self.validIP6Adress(IP)
return "Neither"
    def validIP4Adress(self, IP: str) -> str:
        nums_list = IP.split('.')
        if len(nums_list) != 4:
            return "Neither"
        for num in nums_list:
            # Each field must be decimal digits only (rejects '', '-1', '1e1'):
            if not all(letter in string.digits for letter in num):
                return "Neither"
            if len(num) < 1 or len(num) > 3:
                return "Neither"
            # No leading zero in a multi-digit field:
            if len(num) > 1 and num[0] == '0':
                return "Neither"
            if int(num) > 255:
                return "Neither"
        return "IPv4"
def validIP6Adress(self, IP: str) -> str:
nums_list = IP.split(':')
if len(nums_list) != 8:
return "Neither"
for num in nums_list:
size_num_list = len(num)
if size_num_list < 1 or size_num_list > 4:
return "Neither"
            # Each field must contain only hex digits:
hex_dig = all(letter in string.hexdigits for letter in num)
if not hex_dig:
return "Neither"
return "IPv6"
class TestValidIPAdress(unittest.TestCase):
def setUp(self):
self.sol = Solution()
def test_valid_ip_address_IP4_valid(self):
IP = "172.16.254.1"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "IPv4")
def test_valid_ip_address_IP4_invalid_basis_e(self):
IP = "1e1.4.5.6"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
    def test_valid_ip_address_IP4_invalid_no_number(self):
IP = "0.0.0.-1"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_IP4_valid_with_zero(self):
IP = "172.0.254.1"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "IPv4")
def test_valid_ip_address_IP4_invalid_lead_zero(self):
IP = "172.16.254.01"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_IP4_invalid_lead_zero_three_nums(self):
IP = "172.016.254.1"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_IP4_invalid_lead_zero_four_nums(self):
IP = "172.0116.254.1"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
    def test_valid_ip_address_IP4_invalid_too_high(self):
IP = "172.16.256.1"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_IP4_invalid_empty_number(self):
IP = "172.16..1"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_IP4_invalid_not_enough(self):
IP = "172.16.01"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_invalid_empty_string(self):
IP = ""
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_invalid_no_separators(self):
IP = "172162561"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_IP6_valid_with_lead_zeros(self):
IP = "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "IPv6")
def test_valid_ip_address_IP6_valid_without_leading_zeros(self):
IP = "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "IPv6")
def test_valid_ip_address_IP6_invalid_with_leading_zeros(self):
IP = "02001:0db8:85a3:0000:0000:8a2e:0370:7334"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_IP6_invalid_no_number(self):
IP = "2001:0db8:85a3::8A2E:0370:7334:0a23"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
def test_valid_ip_address_IP6_invalid_number(self):
IP = "2001:0db8:85a3:451:8G2E:0370:7334:125"
str_ip = self.sol.validIPAddress(IP)
self.assertEqual(str_ip, "Neither")
if __name__ == "__main__":
unittest.main()
|
import sampling_methods
import numpy as np
__all__ = ['Supervised', 'ActiveLearning']
class _Trainer():
def __init__(self, name, epoch, batch_size):
self.name = name
self.epoch = epoch
self.batch_size = batch_size
assert (type(epoch) is int and epoch > 0)
assert (type(batch_size) is int and batch_size > 0)
def train_model(self, model, dataset, verbose='auto', validation=True):
pass
class Supervised(_Trainer):
def __init__(self, epoch=15, batch_size=32):
super().__init__("Supervised Learning", epoch, batch_size)
def train_model(self, model, dataset, verbose='auto', validation=True):
if verbose != 'auto':
assert (type(verbose) is int and verbose in range(3))
if validation:
history = model.model.fit(dataset.train_data, dataset.train_labels,
validation_data=(dataset.test_data, dataset.test_labels),
epochs=self.epoch, batch_size=self.batch_size, verbose=verbose)
else:
history = model.model.fit(dataset.train_data, dataset.train_labels,
epochs=self.epoch, batch_size=self.batch_size, verbose=verbose)
history.history['val_loss'] = [0] * self.epoch
history.history['val_accuracy'] = [0] * self.epoch
model.history = history.history
## More information about this active-learning setup can be found in the link below.
## https://www.datacamp.com/community/tutorials/active-learning
class ActiveLearning(_Trainer):
def __init__(self, epoch=10, batch_size=32,
sampling_method=None, subsample_size=0,
active_learning_rounds=20, num_labels_to_learn=128,
adversary=None):
super().__init__("Active Learning", epoch, batch_size)
## If no sampling method is specified, just label next N unlabeled images
if sampling_method is None:
sampling_method = lambda model, data: np.arange(len(data))
self.sampling_method = sampling_method
self.subsample_size = subsample_size
self.active_learning_rounds = active_learning_rounds
self.num_labels_to_learn = num_labels_to_learn
self.adversary = adversary
assert (type(subsample_size) is int and subsample_size >= 0)
assert (type(active_learning_rounds) is int and active_learning_rounds > 0)
assert (type(num_labels_to_learn) is int and num_labels_to_learn > 0)
def train_model(self, model, dataset, verbose='auto', validation=True):
if verbose != 'auto':
assert (type(verbose) is int and verbose in range(3))
learned_data = dataset.train_data[:0]
learned_labels = dataset.train_labels[:0]
not_learned_data = dataset.train_data[0:]
not_learned_labels = dataset.train_labels[0:]
history = {'loss': [], 'accuracy': [], 'val_loss': [], 'val_accuracy': []}
## Label the first N elements in the 'not-learned' list
def label(n):
nonlocal learned_data, learned_labels, not_learned_data, not_learned_labels
if n > len(not_learned_data):
n = len(not_learned_data)
learned_data = np.concatenate((learned_data, not_learned_data[:n]))
learned_labels = np.concatenate((learned_labels, not_learned_labels[:n]))
not_learned_data = not_learned_data[n:]
not_learned_labels = not_learned_labels[n:]
## Train the model, record the history.
def train(i):
nonlocal self, model, dataset, learned_data, learned_labels, history, verbose
if verbose:
print("\nRound {}\nLearned Samples: {}\n".format(i, len(learned_data)))
if validation:
history_i = model.model.fit(learned_data, learned_labels,
validation_data=(dataset.test_data, dataset.test_labels),
epochs=self.epoch, batch_size=self.batch_size, verbose=verbose)
else:
history_i = model.model.fit(learned_data, learned_labels,
epochs=self.epoch, batch_size=self.batch_size, verbose=verbose)
history_i.history['val_loss'] = [0] * self.epoch
history_i.history['val_accuracy'] = [0] * self.epoch
history['loss'] += history_i.history['loss']
history['accuracy'] += history_i.history['accuracy']
history['val_loss'] += history_i.history['val_loss']
history['val_accuracy'] += history_i.history['val_accuracy']
## Sort the 'not-learned' list with a sampling method.
def pick_samples(n):
nonlocal self, model, not_learned_data, not_learned_labels
if n and n > self.num_labels_to_learn:
n = min(n, len(not_learned_data))
pidx = np.random.permutation(len(not_learned_data))
not_learned_data = not_learned_data[pidx]
not_learned_labels = not_learned_labels[pidx]
pidx = self.sampling_method(model.model, not_learned_data[:n])
not_learned_data[:n] = not_learned_data[pidx]
not_learned_labels[:n] = not_learned_labels[pidx]
else:
pidx = self.sampling_method(model.model, not_learned_data)
not_learned_data = not_learned_data[pidx]
not_learned_labels = not_learned_labels[pidx]
## If an attack is provided, generate artificial samples by adding adversary images with their original label to the 'learned' list
def use_adversary(attack, n):
nonlocal model, learned_data, learned_labels, not_learned_data, not_learned_labels
if n > len(not_learned_data):
n = len(not_learned_data)
adversary_data, _, _, _ = attack(model.model, not_learned_data[:n])
learned_data = np.concatenate((learned_data, adversary_data))
learned_labels = np.concatenate((learned_labels, not_learned_labels[:n]))
for i in range(self.active_learning_rounds - 1):
label(self.num_labels_to_learn)
if len(not_learned_data) == 0:
break
train(i+1)
pick_samples(self.subsample_size)
if self.adversary is not None:
use_adversary(self.adversary, self.num_labels_to_learn)
        label(self.num_labels_to_learn)
        ## train the final round
        train(self.active_learning_rounds)
model.history = history
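## Hedged usage sketch. `np` is already used above, so it is assumed to be
## imported at the top of this module; `model` and `dataset` stand for the
## wrapper objects consumed by the _Trainer subclasses and are illustrative.
def least_confidence(keras_model, data):
    ## Rank unlabeled samples so the lowest top-class probability comes first.
    probs = keras_model.predict(data, verbose=0)
    return np.argsort(np.max(probs, axis=1))
## trainer = ActiveLearning(sampling_method=least_confidence, subsample_size=1024)
## trainer.train_model(model, dataset)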
|
import re
from treelib import Node, Tree
def read():
with open("input.txt", "r") as f:
return [i.strip() for i in f.readlines()]
data = read()
##parse
def parse(s: str):
reg = re.findall(r"\D+bag", s)
return set(i.rstrip("bag").rstrip().lstrip() for i in reg)
def partita(s: str):
    (left, _, right) = s.partition("contain")
    return parse(left), parse(right)
##tree
def check(g):
tree = Tree()
tree.create_node("Root", "root")
flag = True
while flag:
try:
temp = next(g)
tree = check_tree(temp, tree)
except StopIteration:
flag = False
#tree.show()
    return tree
def check_tree(temp, tree):
dad = temp[0].pop()
ID = "{}".format(dad)
all_tree = [*tree.expand_tree()]
if ID in all_tree:
print("???")
else:
tree.create_node(dad, ID, parent="root")
for i in list(temp[1]):
ID_F = "{}/{}".format(dad, i)
tree.create_node(i, ID_F, parent = ID)
return tree
##score
def score(tree):
    # unused stub, superseded by score2/score3/score4 below
    return 0
##main
generator = (partita(s) for s in data)
tree = check(generator)
def score2(l):
names = []
if l:
for s in l:
for i in tree.all_nodes():
if s in i.identifier:
names.append(str(i.identifier))
if names:
return names
else:
return -1
def score3(fils = ["/shiny gold"]):
score = 0
temp = score2(fils)
score += len(temp)
for i in temp:
i = "/" + i.partition("/")[0]
fils.append(i)
return fils
test = score2(["/shiny gold"])
print(test)
print("//////////////////////////")
def score4():
ref = score3()
flag = True
st = set()
while flag:
lon = len(st)
for i in score3(fils = ref):
st.add(i)
ref = list(st)
if len(st) == lon:
flag = False
return st
print(len(score4()))
#print(score2(score2(["/shiny gold"])))
|
# Modern Robotics Descriptions for all eight Interbotix Arms.
# Note that the end-effector is positioned at '/ee_arm_link'
# and that the Space frame is positioned at '/base_link'.
import numpy as np
class px100:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.09305, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.19305, 0.0, -0.035],
[0.0, -1.0, 0.0, 0.19305, 0.0, -0.135]]).T
M = np.array([[1.0, 0.0, 0.0, 0.248575],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.19305],
[0.0, 0.0, 0.0, 1.0]])
class px150:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.10391, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.25391, 0.0, -0.05],
[0.0, -1.0, 0.0, 0.25391, 0.0, -0.2],
[1.0, 0.0, 0.0, 0.0, 0.25391, 0.0]]).T
M = np.array([[1.0, 0.0, 0.0, 0.358575],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.25391],
[0.0, 0.0, 0.0, 1.0]])
class rx150:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.10391, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.25391, 0.0, -0.05],
[0.0, -1.0, 0.0, 0.25391, 0.0, -0.2],
[1.0, 0.0, 0.0, 0.0, 0.25391, 0.0]]).T
M = np.array([[1.0, 0.0, 0.0, 0.358575],
[0.0, 1.0, 0.0, 0.0],
                  [0.0, 0.0, 1.0, 0.25391],
[0.0, 0.0, 0.0, 1.0]])
class rx200:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.10391, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.30391, 0.0, -0.05],
[0.0, -1.0, 0.0, 0.30391, 0.0, -0.25],
[1.0, 0.0, 0.0, 0.0, 0.30391, 0.0]]).T
M = np.array([[1.0, 0.0, 0.0, 0.408575],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.30391],
[0.0, 0.0, 0.0, 1.0]])
class vx250:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.12675, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.37675, 0.0, -0.06],
[0.0, -1.0, 0.0, 0.37675, 0.0, -0.31],
[1.0, 0.0, 0.0, 0.0, 0.37675, 0.0]]).T
M = np.array([[1.0, 0.0, 0.0, 0.468575],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.37675],
[0.0, 0.0, 0.0, 1.0]])
class vx300:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.12675, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.42675, 0.0, -0.05955],
[0.0, -1.0, 0.0, 0.42675, 0.0, -0.35955],
[1.0, 0.0, 0.0, 0.0, 0.42675, 0.0]]).T
M = np.array([[1.0, 0.0, 0.0, 0.536494],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.42675],
[0.0, 0.0, 0.0, 1.0]])
class vx300s:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.12675, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.42675, 0.0, -0.05955],
[1.0, 0.0, 0.0, 0.0, 0.42675, 0.0],
[0.0, -1.0, 0.0, 0.42675, 0.0, -0.35955],
[1.0, 0.0, 0.0, 0.0, 0.42675, 0.0]]).T
M = np.array([[1.0, 0.0, 0.0, 0.536494],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.42675],
[0.0, 0.0, 0.0, 1.0]])
class wx200:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.11025, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.31025, 0.0, -0.05],
[0.0, -1.0, 0.0, 0.31025, 0.0, -0.25],
[1.0, 0.0, 0.0, 0.0, 0.31025, 0.0]]).T
Blist = np.array([[0.0, 0.0, 1.0, 0.0, 0.358, 0.0],
[0.0, 1.0, 0.0, 0.2, 0.0, -0.358],
[0.0, -1.0, 0.0, 0.0, 0.0, 0.308],
[0.0, -1.0, 0.0, 0.0, 0.0, 0.108],
[1.0, 0.0, 0.0, 0.0 ,0.0, 0.0]]).T
M = np.array([[1.0, 0.0, 0.0, 0.408575],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.31025],
[0.0, 0.0, 0.0, 1.0]])
class wx250:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.11025, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.36025, 0.0, -0.04975],
[0.0, -1.0, 0.0, 0.36025, 0.0, -0.29975],
[1.0, 0.0, 0.0, 0.0, 0.36025, 0.0]]).T
M = np.array([[1.0, 0.0, 0.0, 0.458325],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.36025],
[0.0, 0.0, 0.0, 1.0]])
class wx250s:
Slist = np.array([[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.11025, 0.0, 0.0],
[0.0, -1.0, 0.0, 0.36025, 0.0, -0.04975],
[1.0, 0.0, 0.0, 0.0, 0.36025, 0.0],
[0.0, -1.0, 0.0, 0.36025, 0.0, -0.29975],
[1.0, 0.0, 0.0, 0.0, 0.36025, 0.0]]).T
M = np.array([[1.0, 0.0, 0.0, 0.458325],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.36025],
[0.0, 0.0, 0.0, 1.0]])
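# Hedged sanity check (assumes the `modern_robotics` pip package is installed):
# with a zero joint vector, FKinSpace should reproduce the home matrix M.
if __name__ == "__main__":
    import modern_robotics as mr
    for robot in (px100, wx250s):
        thetalist = np.zeros(robot.Slist.shape[1])
        print(np.allclose(mr.FKinSpace(robot.M, robot.Slist, thetalist), robot.M))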
|
import pygame
import random
import numpy as np
from time import sleep
def out_of_bounds(tet, x, y):
    return x >= Tetris.width or y >= Tetris.height or x < 0 or y < 0
class Tetromino:
tetrominos = {
1: { # I
0: [(0,1), (1,1), (2,1), (3,1)],
1: [(2,0), (2,1), (2,2), (2,3)],
2: [(3,2), (2,2), (1,2), (0,2)],
3: [(1,3), (1,2), (1,1), (1,0)],
},
2: { # J
0: [(0,0), (0,1), (1,1), (2,1)],
1: [(2,0), (1,0), (1,1), (1,2)],
2: [(2,2), (2,1), (1,1), (0,1)],
3: [(0,2), (1,2), (1,1), (1,0)],
},
3: { # L
0: [(0,1), (1,1), (2,1), (2,0)],
1: [(1,0), (1,1), (1,2), (2,2)],
2: [(2,1), (1,1), (0,1), (0,2)],
3: [(1,2), (1,1), (1,0), (0,0)],
},
4: { # O
0: [(1,0), (2,0), (1,1), (2,1)],
1: [(1,0), (2,0), (1,1), (2,1)],
2: [(1,0), (2,0), (1,1), (2,1)],
3: [(1,0), (2,0), (1,1), (2,1)],
},
5: { # S
0: [(0,1), (1,1), (1,0), (2,0)],
1: [(1,0), (1,1), (2,1), (2,2)],
2: [(2,1), (1,1), (1,2), (0,2)],
3: [(1,2), (1,1), (0,1), (0,0)],
},
6: { # T
0: [(0,1), (1,1), (1,0), (2,1)],
1: [(1,0), (1,1), (2,1), (1,2)],
2: [(2,1), (1,1), (1,2), (0,1)],
3: [(1,2), (1,1), (0,1), (1,0)],
},
7: { # Z
0: [(0,0), (1,0), (1,1), (2,1)],
1: [(2,0), (2,1), (1,1), (1,2)],
2: [(2,2), (1,2), (1,1), (0,1)],
3: [(0,2), (0,1), (1,1), (1,0)],
}
}
tetromino_colors = {
0: (0, 0, 0),
1: (61, 203, 255),
2: (40, 80, 255),
3: (255, 164, 25),
4: (255, 240, 74),
5: (100, 255, 69),
6: (183, 74, 255),
7: (255, 25, 25),
}
tetromino_rotation = {
1: [(-1,0),(-1,1),(0,-2),(-1,-2)],
2: [(1,0),(1,-1),(0,2),(1,2)],
4: [(1,0),(1,-1),(0,2),(1,2)],
5: [(-1,0),(-1,1),(0,-2),(-1,-2)],
7: [(1,0),(1,1),(0,-2),(1,-2)],
8: [(-1,0),(-1,-1),(0,2),(-1,2)],
6: [(-1,0),(-1,-1),(0,2),(-1,2)],
3: [(1,0),(1,1),(0,-2),(1,-2)]
}
i_rotation = {
1: [(-2,0),(1,0),(-2,-1),(1,2)],
2: [(2,0),(-1,0),(2,1),(-1,-2)],
4: [(-1,0),(2,0),(-1,2),(2,-1)],
5: [(1,0),(-2,0),(1,-2),(-2,1)],
7: [(2,0),(-1,0),(2,1),(-1,-2)],
8: [(-2,0),(1,0),(-2,-1),(1,2)],
6: [(1,0),(-2,0),(1,-2),(-2,1)],
3: [(-1,0),(2,0),(-1,2),(2,-1)]
}
def __init__(self, x = 0, y = 0, r = 0, t = 1):
self.x = x
self.y = y
self.r = r
self.t = t
def __copy__(self):
return Tetromino(self.x,self.y,self.r,self.t)
class Tetris:
width = 10
height = 23
difficult_clears = ["BACK-TO-BACK","TETRIS","tspin triple","tspin double","mini tspin double","tspin single","mini tspin single","tspin no lines", "mini tspin no lines"]
def __init__(self):
self.reset()
def reset(self):
self.board = np.zeros((Tetris.width,Tetris.height))
self.game_over = False
self.new_round(True)
self.score = 0
self.combo = 0
self.award = ""
self.previous_award = ""
self.level = 1
self.lines = 0
self.has_switched = False
self.previous_rotate = False
self.previous_triple_kick = False
def collide(self, tetromino):
for tet in Tetromino.tetrominos[tetromino.t][tetromino.r]:
if(out_of_bounds(tet,tet[0]+tetromino.x,tet[1]+tetromino.y) or self.board[tet[0]+tetromino.x][tet[1]+tetromino.y] > 0):
return True
return False
def add(self, tetromino):
self.has_switched = False
out_count = 0
for tet in Tetromino.tetrominos[tetromino.t][tetromino.r]:
if(tet[1]+tetromino.y<3):
out_count+=1
self.board[tet[0]+tetromino.x][tet[1]+tetromino.y] = tetromino.t
if(out_count == 4):
self.game_over = True
return
#scoring
points = 0
if(self.previous_award in Tetris.difficult_clears):
difficult = True
else:
difficult = False
#check for tspins
full = False
mini = False
if(tetromino.t == 6 and self.previous_rotate):
count = 0
x = tetromino.x
y = tetromino.y
r = tetromino.r
#check for mini tspin
if(self.board[x][y] != 0):
count+=1
elif(r == 0 or r == 3):
mini = True
if(x+2 >= Tetris.width or self.board[x+2][y] != 0):
count+=1
elif(r == 0 or r == 1):
mini = True
            if(y+2 >= Tetris.height or self.board[x][y+2] != 0):
count+=1
elif(r == 2 or r == 3):
mini = True
            if(x+2 >= Tetris.width or y+2 >= Tetris.height or self.board[x+2][y+2] != 0):
count+=1
elif(r == 1 or r == 2):
mini = True
full = count >= 3
if(full):
lines_cleared = self.clear_lines()
if(mini and not self.previous_triple_kick): #mini tspin
if(lines_cleared == 0):
points = self.level * 100
self.award = "mini tspin no lines"
self.previous_award = self.award
elif(lines_cleared == 1):
points = self.level * 200
self.award = "mini tspin single"
self.previous_award = self.award
elif(lines_cleared == 2):
points = self.level * 400
self.award = "mini tspin double"
self.previous_award = self.award
else: #full tspin
if(lines_cleared == 0):
points = self.level * 400
self.award = "tspin no lines"
self.previous_award = self.award
elif(lines_cleared == 1):
points = self.level * 800
self.award = "tspin single"
self.previous_award = self.award
elif(lines_cleared == 2):
points = self.level * 1200
self.award = "tspin double"
self.previous_award = self.award
elif(lines_cleared == 3):
points = self.level * 1600
self.award = "tspin triple"
self.previous_award = self.award
else:
lines_cleared = self.clear_lines()
if(lines_cleared == 1):
points = self.level * 100
self.award = "single"
self.previous_award = self.award
elif(lines_cleared == 2):
points = self.level * 300
self.award = "double"
self.previous_award = self.award
elif(lines_cleared == 3):
points = self.level * 500
self.award = "triple"
self.previous_award = self.award
elif(lines_cleared == 4):
self.score += self.level * 800
if(self.previous_award == "TETRIS" or self.previous_award == "BACK-TO-BACK"):
self.award = "BACK-TO-BACK"
else:
self.award = "TETRIS"
self.previous_award = self.award
else:
self.award = ""
if(lines_cleared >= 1):
self.score += self.level * self.combo * 50
self.combo+=1
else:
self.combo = 0
if(difficult and self.award in Tetris.difficult_clears and self.award != "tspin no lines" and self.award != "mini tspin no lines"):
self.score += 3*points//2
else:
self.score += points
self.lines+=lines_cleared
self.level = self.lines//10 + 1
self.new_round()
def left(self, tetromino):
temp = tetromino.__copy__()
temp.x -= 1
if(not self.collide(temp)):
tetromino.x -= 1
self.previous_rotate = False
return True
return False
def right(self, tetromino):
temp = tetromino.__copy__()
temp.x += 1
if(not self.collide(temp)):
tetromino.x += 1
self.previous_rotate = False
return True
return False
def down(self, tetromino):
temp = tetromino.__copy__()
temp.y += 1
if(not self.collide(temp)):
tetromino.y += 1
self.score+=1
self.previous_rotate = False
return True
else:
self.add(tetromino)
return False
def up(self, tetromino):
while(self.down(tetromino)):
self.score+=1
return True
def hold(self, tetromino):
if(self.has_switched):
return
self.previous_rotate = False
self.has_switched = True
if not hasattr(self, 'held_piece'):
self.held_piece = tetromino.__copy__()
self.new_round()
else:
self.current_piece = Tetromino(3,2,0,self.held_piece.t)
self.held_piece = tetromino.__copy__()
if self.collide(self.current_piece):
self.current_piece.y-=1
if self.collide(self.current_piece):
self.game_over = True
def rotate(self, tetromino, r):
temp = tetromino.__copy__()
temp.r = (temp.r + r) % 4
if(not self.collide(temp)):
tetromino.r = (tetromino.r + r) % 4
self.previous_rotate = True
self.previous_triple_kick = False
return True
else:
if(tetromino.t == 1):
test = tetromino.i_rotation[2*tetromino.r+(tetromino.r+r)%4]
for i in range(len(test)):
temp = tetromino.__copy__()
temp.x += test[i][0]
temp.y -= test[i][1]
temp.r = (temp.r + r) % 4
if(not self.collide(temp)):
tetromino.r = (tetromino.r + r) % 4
tetromino.x += test[i][0]
tetromino.y -= test[i][1]
self.previous_rotate = True
self.previous_triple_kick = (i == 3)
return True
else:
test = tetromino.tetromino_rotation[2*tetromino.r+(tetromino.r+r)%4]
for i in range(len(test)):
temp = tetromino.__copy__()
temp.x += test[i][0]
temp.y -= test[i][1]
temp.r = (temp.r + r) % 4
if(not self.collide(temp)):
tetromino.r = (tetromino.r + r) % 4
tetromino.x += test[i][0]
tetromino.y -= test[i][1]
self.previous_rotate = True
self.previous_triple_kick = (i == 3)
return True
return False
def get_board(self):
tetromino = self.current_piece
board = self.board.__copy__()
for tet in Tetromino.tetrominos[tetromino.t][tetromino.r]:
board[tet[0]+tetromino.x][tet[1]+tetromino.y] = tetromino.t
return board
def get_just_board(self):
return self.board
def get_current_piece(self):
return self.current_piece
def get_held_piece(self):
if hasattr(self, 'held_piece'):
return self.held_piece
else:
return False
def get_ghost_piece(self):
self.ghost_piece = self.current_piece.__copy__()
while(not self.collide(self.ghost_piece)):
self.ghost_piece.y += 1
self.ghost_piece.y -= 1
return self.ghost_piece
def new_round(self, new = False):
#get next piece
if not hasattr(self, 'queue'):
self.queue = list(range(1,len(Tetromino.tetrominos)+1))
random.shuffle(self.queue)
if not hasattr(self, 'bag') or len(self.bag) == 0:
self.bag = list(range(1,len(Tetromino.tetrominos)+1))
random.shuffle(self.bag)
self.current_piece = Tetromino(3,2,0,self.queue.pop())
self.queue.insert(0,self.bag.pop())
self.previous_rotate = False
self.previous_triple_kick = False
        self.board_list = [self.get_board()]  # snapshot of the freshly spawned board
if self.collide(self.current_piece):
self.current_piece.y-=1
if self.collide(self.current_piece):
self.game_over = True
def clear_line(self,line,num):
self.board[:,num:line+num]=self.board[:,0:line]
self.board[:,0:num] = np.zeros_like(self.board[:,0:num])
def clear_lines(self):
lines_to_clear = [i for i in range(Tetris.height) if np.all(self.board[:,i])]
if lines_to_clear:
count = 0
for i,j in enumerate(lines_to_clear):
if(i < len(lines_to_clear)-1 and j == lines_to_clear[i+1] - 1):
count+=1
else:
self.clear_line(lines_to_clear[i-(count)], count+1)
count = 0
return len(lines_to_clear)
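# Hedged console demo (no pygame rendering loop; this only exercises the
# engine API defined above):
if __name__ == "__main__":
    game = Tetris()
    piece = game.get_current_piece()
    game.left(piece)
    game.rotate(piece, 1)
    game.up(piece)  # hard drop: locks the piece and spawns the next one
    print(game.get_just_board().T)  # transpose so rows print as screen rows
    print("score:", game.score)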
|
#!d:\app\python27\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'phply==1.0.0','console_scripts','phplex'
__requires__ = 'phply==1.0.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('phply==1.0.0', 'console_scripts', 'phplex')()
)
|
import env
import cv2
import imutils
import os
import PyAutoMakerHuman as pamh
hand = pamh.HandUtil()
cap = cv2.VideoCapture(0)
try:
while cap.isOpened():
success, frame = cap.read()
if not success:
continue
frame = cv2.flip(frame, 1)
result = hand.detect(frame)
print(result.to_dict())
result.draw(frame)
cv2.imshow("view", frame)
if cv2.waitKey(1) & 0xff == ord('q'):
break
except KeyboardInterrupt:
    pass
cv2.destroyAllWindows()
cap.release()
|
def squared_sum(n):
    # Sum of the first n squares: n(n + 1)(2n + 1) / 6.
    a = (n * (2 * n + 1) * (n + 1)) / 6
    return int(a)
def sum_squared(n):
    # Square of the sum of the first n integers: (n(n + 1) / 2) ** 2.
    a = (n * (n + 1)) / 2
    a = pow(a, 2)
    return int(a)
n1 = squared_sum(100)
n2 = sum_squared(100)
print(n2 - n1)
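# Hedged sanity check: brute force agrees with the closed-form formulas above.
assert squared_sum(100) == sum(i * i for i in range(1, 101))
assert sum_squared(100) == sum(range(1, 101)) ** 2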
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import Any, Callable
from monai.data import CacheDataset
from monai.transforms import Randomizable
from .utils import download_and_extract
class MedNISTDataset(Randomizable, CacheDataset):
"""
The Dataset to automatically download MedNIST data and generate items for training, validation or test.
It's based on `CacheDataset` to accelerate the training process.
Args:
root_dir: target directory to download and load MedNIST dataset.
section: expected data section, can be: `training`, `validation` or `test`.
transform: transforms to execute operations on input data.
        download: whether to download and extract the MedNIST from resource link, default is False.
            if the expected file already exists, downloading is skipped even if this is True.
            user can manually copy `MedNIST.tar.gz` file or `MedNIST` folder to root directory.
        seed: random seed to randomly split training, validation and test datasets, default is 0.
        val_frac: percentage of validation fraction in the whole dataset, default is 0.1.
        test_frac: percentage of test fraction in the whole dataset, default is 0.1.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_workers: the number of worker threads to use.
If 0 a single thread will be used. Default is 0.
"""
resource = "https://www.dropbox.com/s/5wwskxctvcxiuea/MedNIST.tar.gz?dl=1"
md5 = "0bc7306e7427e00ad1c5526a6677552d"
compressed_file_name = "MedNIST.tar.gz"
dataset_folder_name = "MedNIST"
def __init__(
self,
root_dir: str,
section: str,
transform: Callable[..., Any],
download: bool = False,
seed: int = 0,
val_frac: float = 0.1,
test_frac: float = 0.1,
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_workers: int = 0,
):
if not os.path.isdir(root_dir):
raise ValueError("root_dir must be a directory.")
self.section = section
self.val_frac = val_frac
self.test_frac = test_frac
self.set_random_state(seed=seed)
tarfile_name = os.path.join(root_dir, self.compressed_file_name)
dataset_dir = os.path.join(root_dir, self.dataset_folder_name)
if download:
download_and_extract(self.resource, tarfile_name, root_dir, self.md5)
if not os.path.exists(dataset_dir):
raise RuntimeError("can not find dataset directory, please use download=True to download it.")
data = self._generate_data_list(dataset_dir)
super().__init__(data, transform, cache_num=cache_num, cache_rate=cache_rate, num_workers=num_workers)
def randomize(self):
self.rann = self.R.random()
def _generate_data_list(self, dataset_dir):
class_names = sorted([x for x in os.listdir(dataset_dir) if os.path.isdir(os.path.join(dataset_dir, x))])
num_class = len(class_names)
image_files = [
[
os.path.join(dataset_dir, class_names[i], x)
for x in os.listdir(os.path.join(dataset_dir, class_names[i]))
]
for i in range(num_class)
]
num_each = [len(image_files[i]) for i in range(num_class)]
image_files_list = []
image_class = []
for i in range(num_class):
image_files_list.extend(image_files[i])
image_class.extend([i] * num_each[i])
num_total = len(image_class)
data = list()
for i in range(num_total):
self.randomize()
if self.section == "training":
if self.rann < self.val_frac + self.test_frac:
continue
elif self.section == "validation":
if self.rann >= self.val_frac:
continue
elif self.section == "test":
if self.rann < self.val_frac or self.rann >= self.val_frac + self.test_frac:
continue
else:
raise ValueError("section name can only be: training, validation or test.")
data.append({"image": image_files_list[i], "label": image_class[i]})
return data
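# Hedged usage sketch (this module uses a relative import, so run it from code
# that imports the package; the identity transform and "./workspace" directory
# are illustrative assumptions only):
#
#     dataset = MedNISTDataset(root_dir="./workspace", section="training",
#                              transform=lambda x: x, download=True)
#     print(len(dataset), dataset[0]["label"])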
|
while True:
try:
n = int(input())
if n < 90 or n == 360: print('Bom Dia!!')
elif n < 180: print('Boa Tarde!!')
elif n < 270: print('Boa Noite!!')
else: print('De Madrugada!!')
except EOFError: break
|
import librosa
from nlpaug.model.audio import Audio
"""
Reference: https://www.kaggle.com/CVxTz/audio-data-augmentation
A wrapper of librosa.effects.pitch_shift
"""
class Pitch(Audio):
def __init__(self, sampling_rate, pitch_factor):
super(Pitch, self).__init__()
self.sampling_rate = sampling_rate
self.pitch_factor = pitch_factor
def manipulate(self, data):
return librosa.effects.pitch_shift(data, self.sampling_rate, self.pitch_factor)
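# Hedged usage sketch (assumes a local file 'example.wav'; newer librosa
# releases require keyword arguments, i.e. pitch_shift(y, sr=..., n_steps=...),
# so the positional wrapper above targets the older API):
#
#     y, sr = librosa.load('example.wav', sr=None)
#     aug = Pitch(sampling_rate=sr, pitch_factor=4)
#     shifted = aug.manipulate(y)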
|
def no_valid_function():
"""
This is a test function for load_function in util.py.
load_function expects a file some_name.py to contain a function called some_name.
This file has a function with a different name to the filename, and this is used in
test_util.py to verify this case is handled correctly.
"""
return None
|
X_test_num_imputed = X_test_num.fillna(X_train_num.mean())
model.score(X_test_num_imputed, y_test)
|
'''
Narcissistic (Armstrong) numbers
A narcissistic number is a 3-digit number equal to the sum of the cubes of its
digits (for example: 1^3 + 5^3 + 3^3 = 153).
'''
import math
def is_armstrong_num(num):
g = num % 10
s = (num-g) // 10 % 10
b = num // 100
return math.pow(g, 3)+math.pow(s, 3)+math.pow(b, 3) == num
# res = is_armstrong_num(153)
# print(res)
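# Hedged check: the three-digit narcissistic numbers are 153, 370, 371 and 407.
# print([n for n in range(100, 1000) if is_armstrong_num(n)])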
'''
Perfect numbers
A perfect number equals the sum of all of its proper divisors (every divisor
except the number itself).
For example: the first perfect number is 6, with divisors 1, 2, 3, 6; excluding
6 itself, 1 + 2 + 3 = 6. The second perfect number is 28, with divisors
1, 2, 4, 7, 14, 28.
'''
def is_perfect_num(num):
s = 0
for i in range(1, num):
if num % i == 0:
s += i
return s == num
# res=is_perfect_num(6)
# print(res)
# Fibonacci sequence (memoized recursion)
cache = {}
def make_fibonacci(n):
    if n < 3:
        return 1
    if n in cache:
        return cache[n]
    cache[n-1] = make_fibonacci(n-1)
    cache[n-2] = make_fibonacci(n-2)
    return cache[n-1] + cache[n-2]
# res = make_fibonacci(6)
# print(res)
'''
A hundred coins for a hundred chickens
A problem posed by the ancient Chinese mathematician Zhang Qiujian in his
"Suanjing" (Mathematical Classic): a rooster costs 5 coins, a hen 3 coins,
and 3 chicks 1 coin. With 100 coins buy 100 chickens -- how many roosters,
hens and chicks are there?
'''
def hundred_chicken():
arr = []
    # 5 * x + 3 * y + (100 - x - y) / 3 == 100
    # multiply by 3 and simplify: 7 * x + 4 * y == 100
for i in range(0, 26):
z = (100 - 4 * i)
if z % 7 == 0:
x = int(z/7)
arr.append([x, i, 100-x-i])
return arr
res = hundred_chicken()
print(res)
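# Sanity check: the four classic solutions (roosters, hens, chicks) are
# [12, 4, 84], [8, 11, 81], [4, 18, 78] and [0, 25, 75].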
'''
Craps dice game
The player rolls two dice. If the first roll is 7 or 11, the player wins;
if it is 2, 3 or 12, the house wins; otherwise the game continues:
the player rolls again, and rolling a 7 means the house wins,
while rolling the original point again means the player wins;
otherwise the game goes on and the player keeps rolling.
The player enters the game with a 1000-coin bankroll; the game ends when it is all lost.
'''
# todo -- a hedged sketch follows below
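# Hedged sketch of the game described above. The spec does not fix a per-round
# wager, so a flat 100-coin bet (capped by the remaining bankroll) is assumed;
# the function returns the number of rounds survived.
import random
def roll():
    return random.randint(1, 6) + random.randint(1, 6)
def craps(bankroll=1000, bet=100):
    rounds = 0
    while bankroll > 0:
        stake = min(bet, bankroll)
        point = roll()
        if point in (7, 11):
            player_wins = True
        elif point in (2, 3, 12):
            player_wins = False
        else:
            while True:
                current = roll()
                if current == 7:
                    player_wins = False
                    break
                if current == point:
                    player_wins = True
                    break
        bankroll += stake if player_wins else -stake
        rounds += 1
    return rounds
# print(craps())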
|
def fib(n):
    f = [0, 1]
    for i in range(2, n + 1):
        f.append(f[-1] + f[-2])
    return f[n]
for i in range(1, 12):
    print(fib(i))
|
# pylint: disable=invalid-name
import unittest
from test.constants import BUILTIN_TYPE_TUPLES, VALID_USER_TYPE_NAMES, INVALID_USER_TYPE_NAMES
from test.ParserTestUtils import SingleLineParserTestUtils, ParserFactoryTestUtils
from catparser.AliasParser import AliasParserFactory
class AliasParserFactoryTest(unittest.TestCase):
def test_is_match_returns_true_for_positives(self):
# Assert:
ParserFactoryTestUtils(AliasParserFactory, self).assert_positives([
'using A = foo', 'using ^ = $$$', 'using A90zaZa = te$t'
])
def test_is_match_returns_false_for_negatives(self):
# Assert:
ParserFactoryTestUtils(AliasParserFactory, self).assert_negatives([
' using A = foo', 'using A = foo ', 'import A = foo', 'using A = foo bar', 'using A B = foo bar'
])
class AliasParserTest(unittest.TestCase):
def test_can_parse_builtin_as_alias(self):
for builtin_tuple in BUILTIN_TYPE_TUPLES:
# Act + Assert:
SingleLineParserTestUtils(AliasParserFactory, self).assert_parse(
'using Age = {0}'.format(builtin_tuple[0]),
('Age', {'type': 'byte', 'signedness': builtin_tuple[2], 'size': builtin_tuple[1]}))
def test_alias_names_must_have_type_name_semantics(self):
# Assert:
SingleLineParserTestUtils(AliasParserFactory, self).assert_naming(
'using {0} = uint32',
VALID_USER_TYPE_NAMES,
INVALID_USER_TYPE_NAMES)
def test_cannot_parse_invalid_alias(self):
# Arrange:
SingleLineParserTestUtils(AliasParserFactory, self).assert_parse_exceptions([
'using Hash256 = binary_fixed(2x22)', # malformed number
'using Hash256 = binary_fixed(x)', # malformed number
'using Age = uint33', # unknown type
'using Age = FooBar' # user type
])
|
from pytest import approx
def test_my_good_example_test():
assert 1 == 1
def test_my_bad_example_test():
assert 1 == 2
|
__author__ = 'sandeep'
class Error(Exception):
""" Generic Error for client """
pass
class ValidationError(Error):
""" Error in case of invalid input """
pass
class HTTPError(Error):
""" Error Response from API """
pass
|
import logging
import os
import uuid
from datetime import datetime
from typing import Optional, List
import aiofiles.os
from pydantic import BaseModel
from tinydb import TinyDB, Query
from app.library import configuration, conversions
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class Conversion(BaseModel):
id: Optional[str]
input_format: str
output_format: str
status: str
created_on: Optional[str]
modified_on: Optional[str]
def get_database() -> TinyDB:
logger.debug(f"Opening TinyDB '{get_database_filename()}'...")
database = TinyDB(get_database_filename())
return database
def get_database_filename() -> str:
logger.debug("Getting database filename...")
database_directory = configuration.get_configuration().get_database_directory()
if not os.path.isdir(database_directory):
logger.debug(f"Database directory '{database_directory}' does not exist, creating directory...")
os.makedirs(database_directory, exist_ok=True)
return os.path.join(database_directory, "database.json")
async def add(conversion: Conversion) -> Conversion:
logger.debug(f"Adding...")
conversion_id = str(uuid.uuid4())
conversion = conversion.dict() | {
"id": conversion_id,
"created_on": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"modified_on": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
conversion = Conversion(**conversion)
with get_database() as database:
logger.debug(f"Inserting {conversion} in database...")
document_id = database.insert(conversion.dict())
logger.debug(f"Inserted {conversion} in database: doc_id={document_id}")
logger.debug(f"Getting inserted with doc_id={document_id}......")
retrieved_conversion_document = database.get(doc_id=document_id)
logger.debug(f"Got inserted {retrieved_conversion_document}")
logger.debug("Converting document to model...")
retrieved_conversion = Conversion(**retrieved_conversion_document)
logger.debug(f"Got inserted {retrieved_conversion}")
return retrieved_conversion
async def update(conversion: Conversion) -> Conversion:
logger.debug(f"Updating id={conversion.id}...")
conversion = conversion.dict() | {
"modified_on": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
conversion = Conversion(**conversion)
with get_database() as database:
conversion_query = Query()
logger.debug(f"Updating {conversion} in database...")
document_id = database.update(conversion.dict(), conversion_query.id == conversion.id)[0]
logger.debug(f"Updated {conversion} in database: doc_id={document_id}")
logger.debug(f"Getting updated with doc_id={document_id}...")
retrieved_conversion_document = database.get(doc_id=document_id)
logger.debug(f"Got updated {retrieved_conversion_document}")
logger.debug("Converting document to model...")
retrieved_conversion = Conversion(**retrieved_conversion_document)
logger.debug(f"Got updated {retrieved_conversion}")
return retrieved_conversion
async def get(id_: str) -> Conversion:
logger.debug(f"Getting with id={id_}...")
with get_database() as database:
conversion_query = Query()
logger.debug(f"Getting document...")
retrieved_conversion_document = database.get(conversion_query.id == id_)
logger.debug(f"Got {retrieved_conversion_document}")
logger.debug("Converting document to model...")
retrieved_conversion = Conversion(**retrieved_conversion_document)
logger.debug(f"Got {retrieved_conversion}")
return retrieved_conversion
async def get_all() -> List[Conversion]:
logger.debug(f"Getting all...")
with get_database() as database:
logger.debug(f"Getting document...")
retrieved_conversion_documents = database.all()
logger.debug(f"Got {retrieved_conversion_documents}")
logger.debug("Converting documents to models...")
retrieved_conversions = [
Conversion(**retrieved_conversion_document)
for retrieved_conversion_document in retrieved_conversion_documents
]
logger.debug(f"Got {retrieved_conversions}")
logger.debug(f"Got {len(retrieved_conversions)} items...")
return retrieved_conversions
async def delete_files(conversion_: Conversion):
logger.debug(f"Deleting files for id={conversion_.id}...")
for format_ in [conversion_.output_format, conversion_.input_format]:
filename = conversions.get_filename_from_id(conversion_.id, format_)
try:
logger.debug(f"Deleting file '{filename}'...")
await aiofiles.os.remove(filename)
except OSError as e:
logger.warning(f"Error while deleting file '{e.filename}': {e.strerror}")
async def delete(id_: str):
logger.debug(f"Deleting with id={id_}...")
conversion_ = await get(id_)
await delete_files(conversion_)
with get_database() as database:
conversion_query = Query()
logger.debug(f"Removing document...")
database.remove(conversion_query.id == id_)
|
# -*- coding: utf-8 -*-
"""
smallparts.markup.generators
Markup (HTML, XML) generation
"""
from smallparts import constants
from smallparts.markup import elements
from smallparts.namespaces import Namespace
from smallparts.text import join
#
# Constants
#
SUPPORTED_HTML_DIALECTS = {
elements.LABEL_HTML_5: Namespace(
doctype='<!DOCTYPE html>',
xmlns=None),
elements.LABEL_XHTML_1_0_STRICT: Namespace(
doctype='<!DOCTYPE html PUBLIC'
' "-//W3C//DTD XHTML 1.0 Strict//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
xmlns='http://www.w3.org/1999/xhtml'),
elements.LABEL_XHTML_1_0_TRANSITIONAL: Namespace(
doctype='<!DOCTYPE html PUBLIC'
' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">',
xmlns='http://www.w3.org/1999/xhtml'),
}
#
# Functions
#
def css_property(property_name, property_value):
"""Generate a CSS property:
property_name: property_value;
"""
return '{0}: {1};'.format(property_name, property_value)
def css_important_property(property_name, property_value):
"""Generate an 'important' CSS property:
property_name: property_value !important;
"""
return css_property(property_name,
'{0} !important'.format(property_value))
def html_document(dialect=elements.LABEL_HTML_5,
lang='en',
title='Untitled page',
head='',
body=''):
"""Generate an HTML document"""
try:
html_dialect = SUPPORTED_HTML_DIALECTS[dialect]
except KeyError:
raise ValueError(
'Unsupported HTML dialect.'
' Please specify one of {0}!'.format(
constants.COMMA_BLANK.join(SUPPORTED_HTML_DIALECTS)))
#
element = elements.Cache(dialect)
head_fragments = ['']
if dialect == elements.LABEL_HTML_5 and \
'<meta charset' not in head.lower():
head_fragments.append(element.meta(charset=constants.UTF_8))
#
if '<title' not in head.lower():
head_fragments.append(element.title(title))
#
head = head.strip()
if head:
head_fragments.append(head)
#
head_fragments.append('')
body = body.strip()
if body:
body = '\n{0}\n'.format(body)
return join.by_newlines(
html_dialect.doctype,
element.html(
join.by_newlines(
'',
element.head(constants.NEWLINE.join(head_fragments)),
element.body(body),
''),
xmlns=html_dialect.xmlns,
lang=lang))
def js_function_call(function_name, arguments):
"""Generate JavaScript code:
function_name(*arguments)
"""
return '{0}({1})'.format(
function_name,
constants.COMMA_BLANK.join(
"{0!r}".format(single_arg)
for single_arg in arguments))
def js_return(function_name, *arguments):
"""Generate JavaScript code:
return function_name(*arguments);
"""
return 'return {0};'.format(
js_function_call(function_name, arguments))
def wrap_cdata(character_data):
"""Wrap character_data in a CDATA section.
If necessary use multiple CDATA sections as suggested in
<https://en.wikipedia.org/wiki/CDATA#Nesting>
"""
return join.directly(
'<![CDATA[',
character_data.replace(']]>', ']]]]><![CDATA[>'),
']]>')
def xml_declaration(version=constants.XML_1_0,
encoding=constants.UTF_8,
standalone=None):
"""Return an XML declaration.
Omit the 'standalone' attribute if not specified.
"""
if standalone is not None:
if standalone:
standalone = constants.YES
else:
standalone = constants.NO
#
#
return '<?xml{0} ?>'.format(
elements.XmlElement.attributes_string(
dict(version=version,
encoding=encoding,
standalone=standalone).items()))
def xml_document(content,
version=constants.XML_1_0,
encoding=constants.UTF_8,
standalone=None):
"""Return a full XML document.
Strip trailing whitespace from the content.
"""
return join.by_newlines(
xml_declaration(version=version,
encoding=encoding,
standalone=standalone),
content.rstrip())
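# Hedged usage sketch (assumes the smallparts dependencies resolve; output
# shape depends on the elements module):
if __name__ == '__main__':
    print(xml_document('<greeting>Hello</greeting>'))
    print(html_document(title='Demo page', body='<p>Hello</p>'))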
# vim: fileencoding=utf-8 ts=4 sts=4 sw=4 autoindent expandtab syntax=python:
|
import os
from consolemenu import selection_menu, console_menu
from consolemenu.format.menu_borders import MenuBorderStyleType
from consolemenu.items import function_item, submenu_item
from consolemenu.menu_formatter import MenuFormatBuilder
from infra.domain.fileSaver import FileSaver
from infra.domain.consts import getPlanQuestionsFilePathDirectory, getPlansDirectory
from infra.planViewer import PlanViewer
from infra.userQuestioner import UserQuestioner
class TaskerPlannerMenu:
def __getPlansList(self):
return os.listdir(getPlansDirectory())
def __createChoosePlanMenuItem(self) -> submenu_item.SubmenuItem:
showPlansItem = selection_menu.SelectionMenu(self.__getPlansList())
return submenu_item.SubmenuItem("Show all plans", showPlansItem)
def __createMenu(self, userQuestioner: UserQuestioner) -> console_menu.ConsoleMenu:
menu = console_menu.ConsoleMenu("Tasker Planner", "By dorshaar")
        menu.formatter = (MenuFormatBuilder()
                          .set_title_align('center')
                          .set_subtitle_align('center')
                          .set_border_style_type(MenuBorderStyleType.DOUBLE_LINE_BORDER)
                          .show_prologue_top_border(True)
                          .show_prologue_bottom_border(True))
askQuestionsFromJsonFileItem = function_item.FunctionItem(
"Start new plan", userQuestioner.askQuestionsFromJsonFile, [getPlanQuestionsFilePathDirectory()])
editPlanItem = function_item.FunctionItem("Edit exiting plan", userQuestioner.editPlan)
choosePlanItem = self.__createChoosePlanMenuItem()
menu.append_item(askQuestionsFromJsonFileItem)
menu.append_item(editPlanItem)
menu.append_item(choosePlanItem)
menu.add_exit()
return menu
def __init__(self, userQuestioner: UserQuestioner, planViewer: PlanViewer) -> None:
self.userQuestioner = userQuestioner
self.planViewer = planViewer
self.currentPlan = "None"
self.menu = self.__createMenu(userQuestioner)
def __handlePlanWasChosen(self, choosePlanItem: submenu_item.SubmenuItem):
if choosePlanItem.get_return() is not None:
plansList = self.__getPlansList()
self.currentPlan = plansList[choosePlanItem.get_return()]
self.userQuestioner.loadPlan(os.path.join(getPlansDirectory(), self.currentPlan))
def __findIndexOfMenuItem(self, itemText):
index = 0
for item in self.menu.items:
if str(item) == "Tasker Planner " + itemText:
return index
index = index + 1
return -1
def __updateChoosePlanMenuItem(self, choosePlanItem: submenu_item.SubmenuItem):
self.menu.remove_item(choosePlanItem)
choosePlanItem = self.__createChoosePlanMenuItem()
self.menu.append_item(choosePlanItem)
    def __updateViewChosenPlanInformationMenuItem(self):
viewPlanInformationMenuItemIndex = self.__findIndexOfMenuItem("View plan information")
if viewPlanInformationMenuItemIndex != -1:
self.menu.remove_item(self.menu.items[viewPlanInformationMenuItemIndex])
viewPlanTimeInformationMenuItemIndex = self.__findIndexOfMenuItem("View plan time information")
if viewPlanTimeInformationMenuItemIndex != -1:
self.menu.remove_item(self.menu.items[viewPlanTimeInformationMenuItemIndex])
        viewChosenPlanInformationMenuItem = function_item.FunctionItem(
            "View plan information",
            self.planViewer.showPlanInformation,
            [self.currentPlan])
viewPlanTimeInformationMenuItem = function_item.FunctionItem(
"View plan time information",
self.planViewer.showPlanTiming,
[self.currentPlan])
        self.menu.append_item(viewChosenPlanInformationMenuItem)
self.menu.append_item(viewPlanTimeInformationMenuItem)
def runMenu(self):
ShowAllPlansMenuItemIndex = self.__findIndexOfMenuItem("Show all plans")
StartNewPlanMenuItemIndex = self.__findIndexOfMenuItem("Start new plan")
self.menu.prologue_text = f"Current plan: {self.currentPlan}"
while not self.menu.is_selected_item_exit():
if self.menu.selected_option == ShowAllPlansMenuItemIndex:
self.__handlePlanWasChosen(self.menu.items[ShowAllPlansMenuItemIndex])
if self.menu.selected_option == StartNewPlanMenuItemIndex:
startNewPlanItem = self.menu.items[StartNewPlanMenuItemIndex]
self.currentPlan = startNewPlanItem.return_value["taskName"]
self.__updateChoosePlanMenuItem(self.menu.items[ShowAllPlansMenuItemIndex])
if self.currentPlan != "None":
                self.__updateViewChosenPlanInformationMenuItem()
self.menu.prologue_text = f"Current plan: {self.currentPlan}"
self.menu.draw()
self.menu.process_user_input()
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import json
import argparse
from vqa_eval.vqaTools.vqa import VQA
from vqa_eval.vqaEval import VQAEval
def accuracy(taskType, dataType, dataSubType, resFile, dataDir, resultDir):
annFile = "%s/v2_%s_%s_annotations.json" %(dataDir, dataType, dataSubType)
quesFile = "%s/v2_%s_%s_%s_questions.json" %(dataDir, taskType, dataType, dataSubType)
resultType = "real"
fileType = "accuracy"
fileName = resFile.split("/")[-1]
accuracyFile = "%s/%s_%s_%s_%s_%s_%s" % (resultDir, taskType, dataType, dataSubType, resultType, fileType, fileName)
vqa = VQA(annFile, quesFile)
vqaRes = vqa.loadRes(resFile, quesFile)
vqaEval = VQAEval(vqa, vqaRes, n=2)
vqaEval.evaluate()
print("\n")
print("Overall Accuracy is: %.02f\n" %(vqaEval.accuracy["overall"]))
print("Per Question Type Accuracy is the following:")
for quesType in vqaEval.accuracy["perQuestionType"]:
print("%s : %.02f" %(quesType, vqaEval.accuracy["perQuestionType"][quesType]))
print("\n")
print("Per Answer Type Accuracy is the following:")
for ansType in vqaEval.accuracy["perAnswerType"]:
print("%s : %.02f" %(ansType, vqaEval.accuracy["perAnswerType"][ansType]))
print("\n")
json.dump(vqaEval.accuracy, open(accuracyFile, "w"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--task_type", default="OpenEnded")
parser.add_argument("--data_type", default="mscoco")
parser.add_argument("--data_sub_type", default="val2014")
parser.add_argument("--data_dir", default="/ceph/kien/data2.0")
parser.add_argument("--result_file", default="/ceph/kien/VisualQA/analysis/ranknet8_19.json")
parser.add_argument("--result_dir", default="/ceph/kien/VisualQA/result")
args = parser.parse_args()
params = vars(args)
print('parsed input parameters:')
print(json.dumps(params, indent=2))
accuracy(args.task_type, args.data_type, args.data_sub_type, args.result_file, args.data_dir, args.result_dir)
|
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
self.parent = None
def __str__(self):
return str(self.data)
def __iter__(self):
if self.left:
for node in self.left:
yield node
yield self.data
if self.right:
for node in self.right:
yield node
def __getitem__(self, key):
node = self.get(key)
if node:
return node
raise KeyError(key)
def get(self, data):
if data < self.data:
return self.left.get(data) if self.left else None
elif data > self.data:
return self.right.get(data) if self.right else None
return self
def insert(self, data):
if data < self.data:
if self.left is None:
self.left = Node(data)
self.left.parent = self
else:
self.left.insert(data)
elif data > self.data:
if self.right is None:
self.right = Node(data)
self.right.parent = self
else:
self.right.insert(data)
def is_left_child(self):
return self.parent and self is self.parent.left
def is_right_child(self):
return self.parent and self is self.parent.right
def count_children(self):
return bool(self.left) + bool(self.right)
def max(self):
node = self
while node.right:
node = node.right
return node
def min(self):
node = self
while node.left:
node = node.left
return node
def get_successor(self):
if self.right:
return self.right.min()
node = self
while node.is_right_child():
node = node.parent
return node.parent
def get_predecessor(self):
if self.left:
return self.left.max()
node = self
while node.is_left_child():
node = node.parent
return node.parent
def delete(self, data):
node = self.get(data)
if not node:
return
children_count = node.count_children()
if children_count == 0:
# Node has no children: remove it.
# Fix references.
if node.is_left_child():
node.parent.left = None
else:
node.parent.right = None
del node
elif children_count == 1:
# Node has 1 child: replace it by its child.
child = node.left or node.right
if node.is_left_child():
# Remove `node` from the tree by fixing references.
node.parent.left = child
child.parent = node.parent
del node
elif node.is_right_child():
# Remove `node` from the tree by fixing references.
node.parent.right = child
child.parent = node.parent
del node
else:
# If there is no parent, we are at the root.
root = node
root.data = child.data
# Remove `child` from the tree by fixing references.
root.left = child.left
root.right = child.right
if child.left:
child.left.parent = root
if child.right:
child.right.parent = root
del child
else:
# Node has 2 children: replace it by its successor.
# Because the node has 2 children, its successor is guaranteed to
# be somewhere on its right side. If the successor has children,
# they can only be on the right side.
succ = node.get_successor()
node.data = succ.data
# Remove `succ` from the tree by fixing references.
if succ.is_left_child():
succ.parent.left = succ.right
else:
succ.parent.right = succ.right
if succ.right:
succ.right.parent = succ.parent
del succ
def pprint(self, level=0):
if self.right:
self.right.pprint(level + 1)
print(f"{' ' * 4 * level}{self.data}")
if self.left:
self.left.pprint(level + 1)
def get_height(self):
return 1 + max(
self.left.get_height() if self.left else -1,
self.right.get_height() if self.right else -1
)
def _check_balance(self):
left = self.left._check_balance() if self.left else -1
right = self.right._check_balance() if self.right else -1
if abs(left - right) > 1:
raise ValueError('Unbalanced tree.')
return max(left, right) + 1
def is_balanced(self):
try:
self._check_balance()
return True
except ValueError:
return False
def is_valid(self):
prev = None
for data in self:
            if prev is not None and prev > data:
return False
prev = data
return True
bst = Node(12)
bst.insert(6)
bst.insert(14)
bst.insert(3)
bst.insert(8)
bst.insert(20)
bst.insert(5)
bst.insert(1)
bst.insert(150)
bst.insert(99)
bst.insert(7)
bst.insert(9)
bst.insert(10)
bst.insert(11)
bst.insert(13)
bst.pprint()
print(bst.is_valid())
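# Hedged demo of delete(): remove a node with two children, then re-check order.
bst.delete(14)
bst.pprint()
print(bst.is_valid())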
|
from django.db import models
#import uuid
# Create your models here.
class quiz(models.Model):
contest_id=models.UUIDField(default=None)
#question_id=models.AutoField
question=models.TextField(max_length=300,default=None)
option_1=models.CharField(max_length=100,default=None)
option_2=models.CharField(max_length=100,default=None)
option_3=models.CharField(max_length=100,default=None)
option_4=models.CharField(max_length=100,default=None)
answer=models.CharField(max_length=100,default=None)
def __str__(self):
return self.question
class leaderboard(models.Model):
contest_id=models.UUIDField(default=None)
participant_name=models.CharField(max_length=100,default=None)
score=models.IntegerField(default=0)
submit_time=models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return self.participant_name
|
# pylint: disable=no-self-use,invalid-name
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
from allennlp.common.testing import AllenNlpTestCase
class TestSequenceTaggingDatasetReader(AllenNlpTestCase):
def test_default_format(self):
reader = SequenceTaggingDatasetReader()
dataset = reader.read('tests/fixtures/data/sequence_tagging.tsv')
assert len(dataset.instances) == 4
fields = dataset.instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == ["cats", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = dataset.instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == ["dogs", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = dataset.instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == ["snakes", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = dataset.instances[3].fields
assert [t.text for t in fields["tokens"].tokens] == ["birds", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
def test_brown_corpus_format(self):
reader = SequenceTaggingDatasetReader(word_tag_delimiter='/')
dataset = reader.read('tests/fixtures/data/brown_corpus.txt')
assert len(dataset.instances) == 4
fields = dataset.instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == ["cats", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = dataset.instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == ["dogs", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = dataset.instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == ["snakes", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = dataset.instances[3].fields
assert [t.text for t in fields["tokens"].tokens] == ["birds", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
|
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Fabian Sesterhenn <sesterhenn.fabian@gmail.com>
.. affiliation::
Laboratory of Protein Design and Immunoengineering <lpdi.epfl.ch>
Bruno Correia <bruno.correia@epfl.ch>
.. func:: translate_dna_sequence
.. func:: translate_3frames
.. func:: adapt_length
.. func:: sequencing_enrichment
"""
# Standard Libraries
import re
# External Libraries
import pandas as pd
import numpy as np
from six.moves import reduce
# This Library
__all__ = ['translate_dna_sequence', 'translate_3frames',
'adapt_length', 'sequencing_enrichment']
def translate_dna_sequence( sequence ):
"""Translates **DNA** to **protein**.
Assumes always that the codon starts in the first position
of the sequence.
:param str sequence: DNA sequence
:return: :class:`str` - protein sequence
.. seealso::
:func:`.translate_3frames`
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import read_fastq
...: from rstoolbox.utils import translate_dna_sequence
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = read_fastq("../rstoolbox/tests/data/cdk2_rand_001.fasq.gz")
...: df.iloc[0]['sequence_A']
In [1]: translate_dna_sequence(df.iloc[0]['sequence_A'])
"""
codontable = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',
'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W'}
protein = ""
last_codon_start = len(sequence) - 2
for start in range(0, last_codon_start, 3):
codon = sequence[start:start + 3]
aa = codontable.get(codon, 'X')
protein = protein + aa
return protein
def translate_3frames( sequence, matches=None ):
"""Translates **DNA** to **protein** trying all possible frames.
Tests the three possible reading frames. To decide which one to return,
it can follow a double logic: when ``matches`` is :data:`None` it will
return the longest sequence until a stop codon, otherwise it will return
the longest sequence that contains the required match protein sequence. If
none match, it will return an empty :class:`str`.
    All provided matches need to be found.
:param str sequence: DNA sequence
:param matches: sequence pattern to match
:type matches: :func:`list` of :class:`str`
:return: :class:`str`
.. rubric:: Example 1: No matches - Prepend nucleotides to change reading frame
.. ipython::
In [1]: from rstoolbox.io import read_fastq
...: from rstoolbox.utils import translate_3frames
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = read_fastq("../rstoolbox/tests/data/cdk2_rand_001.fasq.gz")
...: df.iloc[0]['sequence_A']
In [1]: translate_3frames('AT' + df.iloc[0]['sequence_A'])
.. rubric:: Example 2: With matches - Prepend nucleotides to change reading frame
.. ipython::
In [1]: from rstoolbox.io import read_fastq
...: from rstoolbox.utils import translate_3frames
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = read_fastq("../rstoolbox/tests/data/cdk2_rand_001.fasq.gz")
...: matches = ['GAS', 'FFG']
...: translate_3frames('AT' + df.iloc[0]['sequence_A'], matches)
In [1]: translate_3frames('AT' + df.iloc[1]['sequence_A'], matches)
"""
protein_frame = []
for i in range(0, 3):
protein_frame.append(translate_dna_sequence(sequence[i:]))
protein_frame.sort(key=lambda s: len(s.split('_')[0]), reverse=True)
if matches is None:
return protein_frame[0]
match_max = len(matches)
match_count = [0, ] * len(protein_frame)
for i, p in enumerate(protein_frame):
for m in matches:
if re.search(m, p):
match_count[i] += 1
try:
i = match_count.index(match_max)
return protein_frame[i]
except ValueError:
return ""
def adapt_length( seqlist, start, stop, inclusive=False ):
"""Pick only the sequence between the provided pattern tags.
When ``inclusive`` is :data:`False` and the boundary tags are
not found, the original sequence is returned, as it is assumed
that the tags were out of the boundary of the retrieved sequence.
When ``inclusive`` is :data:`True` and the boundary tags are
not found, an empty sequence is returned for that position, as
we understand that the interest was of getting them too and we could
not.
:param str seqlist: list of protein sequence
:param str start: start pattern (not included in final sequence)
:param str stop: stop pattern (not included in final sequence)
:param bool inclusive: If :data:`False`, retrieve sequence **between**
the protein tags, otherwise include the protein tags in the
returned sequence.
:return: :func:`list` of :class:`str`
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import read_fastq
...: from rstoolbox.utils import translate_dna_sequence
...: from rstoolbox.utils import adapt_length
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = read_fastq("../rstoolbox/tests/data/cdk2_rand_001.fasq.gz")
...: df['sequence_A'] = df.apply(lambda row: translate_dna_sequence(row['sequence_A']),
...: axis=1)
...: bounds = ['GAS', 'FFG']
...: df['sequence_A'].values[:5]
In [1]: adapt_length(df['sequence_A'].values[:5], bounds[0], bounds[1])
In [1]: adapt_length(df['sequence_A'].values[:5], bounds[0], bounds[1], True)
"""
expression = '{0}(.*){1}'.format(start, stop)
if inclusive:
expression = '({0}.*{1})'.format(start, stop)
outlist = []
for seq in seqlist:
m = re.search(expression, seq)
if m:
outlist.append(m.group(1))
elif inclusive:
outlist.append("")
else:
outlist.append(seq)
return outlist
def sequencing_enrichment( indata, enrichment=None, bounds=None, matches=None, seqID='A' ):
"""Retrieve data from multiple
`NGS <https://www.wikiwand.com/en/DNA_sequencing#/Next-generation_methods>`_ files.
    Allows one to obtain data from multiple files while attaching them to two conditions, a primary
    one (key1) and a secondary one (key2).
For instance, let's assume that one has data obtained through selection of sequences by two
different binders and three different concentration of binder each; we would define a
``indata`` dictionary such as::
{'binder1': {'conc1': 'file1.fastq', 'conc2': 'file2.fastq', 'conc3': 'file3.fastq'},
'binder2': {'conc1': 'file4.fastq', 'conc2': 'file5.fastq', 'conc3': 'file6.fastq'}}
Also, for each binder we could decide to calculate the enrichment between any two
concentrations; we can do that by defining a ``enrichment`` dictionary such as::
{'binder1': ['conc1', 'conc3'],
'binder2': ['conc1', 'conc3']}
:param dict indata: First key is binder, second key is concentration, value is fastq file.
:param dict enrichment: Key is binder, value is list of two concentrations (min,max)
to calculate enrichment.
:param bounds: N and C limit of the sequences. Follow the logic of :func:`adapt_length`
with ``inclusive`` as :data:`False`.
:type bounds: :func:`list` of :class:`str`
:param matches: Sequence pattern to match. Follows the same logic as in
:func:`.translate_3frames`.
:type matches: :func:`list` of :class:`str`
:return: :class:`.DesignFrame` with the sequences, counts (sequence) per fastq file and
enrichment per binder (if requested).
.. rubric:: Example
(We skip printing the sequence column to ease visibility of the differences)
.. ipython::
In [1]: from rstoolbox.io import read_fastq
...: from rstoolbox.utils import sequencing_enrichment
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 20)
...: indat = {'binder1': {'conc1': '../rstoolbox/tests/data/cdk2_rand_001.fasq.gz',
...: 'conc2': '../rstoolbox/tests/data/cdk2_rand_002.fasq.gz',
...: 'conc3': '../rstoolbox/tests/data/cdk2_rand_003.fasq.gz'},
...: 'binder2': {'conc1': '../rstoolbox/tests/data/cdk2_rand_004.fasq.gz',
...: 'conc2': '../rstoolbox/tests/data/cdk2_rand_005.fasq.gz',
...: 'conc3': '../rstoolbox/tests/data/cdk2_rand_006.fasq.gz'}}
...: df = sequencing_enrichment(indat)
...: df[[_ for _ in df.columns if _ != 'sequence_A']].head()
In [1]: enrich = {'binder1': ['conc1', 'conc3'],
...: 'binder2': ['conc1', 'conc3']}
...: df = sequencing_enrichment(indat, enrich)
...: df[[_ for _ in df.columns if _ != 'sequence_A']].head()
"""
from rstoolbox.components import DesignFrame
def condition_reader(jobid, filename, bounds, matches):
from rstoolbox.io import read_fastq
df = read_fastq(filename)
df['sequence_A'] = df.apply(lambda row: translate_3frames(row['sequence_A'], matches),
axis=1)
if bounds is not None:
df['sequence_A'] = adapt_length(df['sequence_A'].values, bounds[0], bounds[1])
df = df.merge(df.groupby('sequence_A').agg('count').reset_index(),
on='sequence_A',
how='left').drop_duplicates('sequence_A').reset_index(drop=True)
df.rename(columns={'description_x': 'description', 'description_y': jobid}, inplace=True)
return df.sort_values(jobid, ascending=False)
def binder_reader(jobid, inputb, bounds, matches):
data = []
for cond in inputb:
data.append(condition_reader(jobid + '_' + cond, inputb[cond], bounds, matches))
df = reduce(lambda left, right: pd.merge(left, right, on='sequence_A', how='outer'),
data).fillna(0)
return df
data = []
for binder in indata:
data.append(binder_reader(binder, indata[binder], bounds, matches))
df = reduce(lambda left, right: pd.merge(left, right, on='sequence_A', how='outer'),
data).fillna(0)
df['len'] = df.apply(lambda row: len(row['sequence_A']), axis=1)
df = df.drop([_ for _ in df.columns if _.startswith('description')], axis=1)
if enrichment is not None:
for binder in enrichment:
eb = enrichment[binder]
id1 = '{0}_{1}'.format(binder, eb[0])
id2 = '{0}_{1}'.format(binder, eb[1])
df['enrichment_{}'.format(binder)] = df[id1] / df[id2]
df = df.replace({np.inf: -1, -np.inf: -1}).fillna(0)
designf = DesignFrame(df.rename(columns={'sequence_A': 'sequence_{}'.format(seqID)}))
designf = designf.reset_index().rename(columns={'index': 'description'})
return designf
|
from typing import Optional, Union
from .basic_ast import Ast
from .errors import ParseError, SyntaxError
from .node import create_binary_node
from .node import create_unary_node
from .node import Node
from .token import Token
class op:
AND = "AND" # noqa: E221
NOT = "NOT" # noqa: E221
OR = "OR" # noqa: E221
AND_OR = [AND, OR] # noqa: E221
LOGICAL = [AND, OR, NOT] # noqa: E221
ARG_AND = "ARG_AND" # noqa: E221
ARG_OR = "ARG_OR" # noqa: E221
ARG_NOT = "ARG_NOT" # noqa: E221
ARG_LOGICAL = [ARG_AND, ARG_OR, ARG_NOT] # noqa: E221
FREETEXT = "FREETEXT" # noqa: E221
FREERGXP = "FREERGXP" # noqa: E221
EXISTS = "EXISTS" # noqa: E221
MISSING = "MISSING" # noqa: E221
UNARY_OPS = [FREETEXT, FREERGXP, EXISTS, MISSING] # noqa: E221
EQUALS = "EQUALS" # noqa: E221
GT = "GT" # noqa: E221
GTE = "GTE" # noqa: E221
LT = "LT" # noqa: E221
LTE = "LTE" # noqa: E221
RANGE_OPS = [GT, GTE, LT, LTE] # noqa: E221
CONTAINS = "CONTAINS" # noqa: E221
STARTSWITH = "STARTSWITH" # noqa: E221
ENDSWITH = "ENDSWITH" # noqa: E221
REGEXP = "REGEXP" # noqa: E221
BINARY_OPS = [
EQUALS,
GT,
GTE,
LT,
LTE,
CONTAINS,
STARTSWITH,
ENDSWITH,
REGEXP,
] # noqa: E221
REGEX_OPS = [CONTAINS, STARTSWITH, ENDSWITH, REGEXP] # noqa: E221
OPS = [ # noqa: E221
CONTAINS,
ENDSWITH,
EQUALS,
EXISTS,
FREERGXP,
FREETEXT,
GT,
GTE,
LT,
LTE,
MISSING,
REGEXP,
STARTSWITH,
]
INT = "INT" # noqa: E221
FLOAT = "FLOAT" # noqa: E221
STRING = "STRING" # noqa: E221
ARGS = [INT, FLOAT, STRING] # noqa: E221
SEP = "||" # noqa: E221
def is_a(x: Union[Node, Token], type_) -> bool:
if isinstance(type_, list):
return x.type in type_
else:
return x.type == type_
def arg_token_any(s) -> Token:
try:
v = int(s)
return Token(op.INT, v)
except ValueError:
pass
try:
v = float(s)
return Token(op.FLOAT, v)
except ValueError:
pass
return Token(op.STRING, s)
def arg_token_string(s) -> Token:
return Token(op.STRING, s)
class KarpTNGLexer:
SEPARATOR_1 = "||" # noqa: E221
SEPARATOR_2 = "|" # noqa: E221
logical = {
"and": op.AND,
"not": op.NOT,
"or": op.OR,
}
arg_logical = {
"and": op.ARG_AND,
"or": op.ARG_OR,
"not": op.ARG_NOT,
}
ops = {
"freetext": op.FREETEXT,
"freergxp": op.FREERGXP,
"equals": op.EQUALS,
"exists": op.EXISTS,
"missing": op.MISSING,
"contains": op.CONTAINS,
"startswith": op.STARTSWITH,
"endswith": op.ENDSWITH,
"regexp": op.REGEXP,
"gt": op.GT,
"gte": op.GTE,
"lt": op.LT,
"lte": op.LTE,
}
arg1 = {
"freetext": arg_token_any,
"freergxp": arg_token_string,
"equals": arg_token_string,
"exists": arg_token_string,
"missing": arg_token_string,
"contains": arg_token_string,
"startswith": arg_token_string,
"endswith": arg_token_string,
"regexp": arg_token_string,
"gt": arg_token_string,
"gte": arg_token_string,
"lt": arg_token_string,
"lte": arg_token_string,
}
arg2 = {
"equals": arg_token_any,
"contains": arg_token_string,
"startswith": arg_token_string,
"endswith": arg_token_string,
"regexp": arg_token_string,
"gt": arg_token_any,
"gte": arg_token_any,
"lt": arg_token_any,
"lte": arg_token_any,
}
def tokenize(self, s: str):
print("Tokenizing {s}".format(s=s))
exprs = s.split(self.SEPARATOR_1)
arg_types = []
for expr in exprs:
logical_type = self.logical.get(expr)
if logical_type:
yield Token(logical_type)
else:
print("Tokenizing {expr}".format(expr=expr))
sub_exprs = expr.split(self.SEPARATOR_2)
if sub_exprs[0] in self.ops:
yield Token(self.ops[sub_exprs[0]])
arg_1 = self.arg1[sub_exprs[0]]
arg_2 = self.arg2.get(sub_exprs[0])
if len(sub_exprs) > 1:
yield arg_1(sub_exprs[1])
arg_1 = None
if len(sub_exprs) > 2:
if arg_2:
yield arg_2(sub_exprs[2])
arg_2 = None
else:
raise SyntaxError(
"Too many arguments to '{op}' in '{expr}'".format(
op=sub_exprs[0], expr=expr
)
)
if arg_2:
arg_types.append(arg_2)
if arg_1:
arg_types.append(arg_1)
else:
if sub_exprs[0] in self.arg_logical:
yield Token(self.arg_logical[sub_exprs[0]])
arg_exprs = sub_exprs[1:]
else:
arg_exprs = sub_exprs
                    if not arg_types:
                        raise ParseError("No arg type is set")
                    arg = arg_types.pop()
for arg_expr in arg_exprs:
yield arg(arg_expr)
yield Token(op.SEP)
def create_node(tok: Token) -> Optional[Node]:
if is_a(tok, op.UNARY_OPS) or is_a(tok, op.NOT):
return create_unary_node(tok.type, tok.value)
elif is_a(tok, op.BINARY_OPS):
return create_binary_node(tok.type, tok.value)
elif is_a(tok, op.ARGS):
return Node(tok.type, 0, tok.value)
elif is_a(tok, op.LOGICAL) or is_a(tok, op.ARG_LOGICAL):
return Node(tok.type, None, tok.value)
else:
return None
class KarpTNGParser:
def parse(self, tokens):
curr = None
stack = []
for tok in tokens:
print("Token({}, {})".format(tok.type, tok.value))
if is_a(tok, op.ARGS):
n = create_node(tok)
if curr:
curr.add_child(n)
elif stack:
stack[-1].add_child(n)
elif is_a(tok, op.SEP):
if curr:
if is_a(curr, op.OPS) and curr.n_children() < curr.arity:
stack.append(curr)
elif is_a(curr, op.ARG_LOGICAL):
n = stack.pop()
if is_a(n, op.OPS):
n.add_child(curr)
stack.append(n)
else:
raise ParseError(
"No OP to add ARG_LOGICAL '{curr}'".format(curr=curr)
)
elif stack:
n1 = stack.pop()
n1.add_child(curr)
if is_a(n1, op.NOT):
if stack:
n2 = stack.pop()
n2.add_child(n1)
stack.append(n2)
else:
stack.append(n1)
else:
stack.append(n1)
else:
stack.append(curr)
curr = None
elif is_a(tok, op.LOGICAL):
stack.append(create_node(tok))
else:
if curr:
raise RuntimeError("")
curr = create_node(tok)
print("curr = {curr}".format(curr=curr))
print("stack = {stack}".format(stack=stack))
root = None
for node in reversed(stack):
if root:
# if is_a(root, op.AND_OR) and is_a(node, op.AND_OR):
node.add_child(root)
root = node
return root
_lexer = KarpTNGLexer()
_parser = KarpTNGParser()
def parse(s: Optional[str]) -> Ast:
if not s:
return Ast()
return Ast(_parser.parse(_lexer.tokenize(s)))
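# A minimal usage sketch for the module entry point above. The query string is
# a hypothetical example of the '||'-separated, '|'-delimited syntax accepted
# by KarpTNGLexer; the field and value names are made up for illustration:
#
#     ast = parse("equals|pos|NN||missing|definition||and")
#
# parse(None) or parse("") returns an empty Ast().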
|
"""Implementation of ceQTL-tools addtfgenes"""
from pathlib import Path
from bioprocs.utils.tsvio2 import TsvReader, TsvWriter
from bioprocs.utils import logger
def read_tfgenes(tgfile):
"""Read tf-gene pairs"""
logger.info('Reading TF-gene pairs in %s ...', tgfile)
reader = TsvReader(tgfile, cnames=False)
    ret = {}  # gene => set of TFs
for row in reader:
ret.setdefault(row[1], set()).add(row[0])
reader.close()
return ret
def main(opts):
"""Main function"""
org_tfgenes = read_tfgenes(opts.origin)
add_tfgenes = read_tfgenes(opts.addition)
writer = TsvWriter(opts.outfile)
logger.info('Writing the union set to %s ...', opts.outfile)
for gene, tfs in org_tfgenes.items():
for tf in (tfs | add_tfgenes.pop(gene, set())):
writer.write([tf, gene])
for gene, tfs in add_tfgenes.items():
for tf in tfs:
writer.write([tf, gene])
writer.close()
logger.info('Done.')
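# Usage sketch (assumption: `opts` is any object exposing the attributes read
# in main() -- origin, addition, outfile -- e.g. an argparse Namespace; the
# file names below are hypothetical):
#
#     from argparse import Namespace
#     main(Namespace(origin='tf_gene.origin.txt',
#                    addition='tf_gene.addition.txt',
#                    outfile='tf_gene.union.txt'))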
|
"""Handles database interaction."""
import logging
from flask import current_app
import pymongo
from pymongo.collation import Collation, CollationStrength
from pymongo.errors import PyMongoError
from api import PicoException
log = logging.getLogger(__name__)
__connection = None
__client = None
def get_conn():
"""
Get a database connection, reusing one if it exists.
Raises:
PicoException if a successful connection cannot be established
"""
global __client, __connection
if not __connection:
conf = current_app.config
if conf["MONGO_USER"] and conf["MONGO_PW"]:
uri = "mongodb://{}:{}@{}:{}/{}?authMechanism=SCRAM-SHA-1".format(
conf["MONGO_USER"],
conf["MONGO_PW"],
conf["MONGO_ADDR"],
conf["MONGO_PORT"],
conf["MONGO_DB_NAME"],
)
if conf["MONGO_REPLICA_SETTINGS"]:
uri = "{}&{}".format(uri, conf["MONGO_REPLICA_SETTINGS"])
if conf["MONGO_TLS_SETTINGS"]:
uri = "{}&{}".format(uri, conf["MONGO_TLS_SETTINGS"])
else:
uri = "mongodb://{}:{}/{}".format(
conf["MONGO_ADDR"], conf["MONGO_PORT"], conf["MONGO_DB_NAME"]
)
try:
__client = pymongo.MongoClient(uri)
__connection = __client[conf["MONGO_DB_NAME"]]
except PyMongoError as error:
raise PicoException(
"Internal server error. Please contact a system adminstrator.",
data={"original_error": error},
)
log.debug("Ensuring mongo is indexed.")
__connection.exceptions.create_index([("time", pymongo.DESCENDING)])
__connection.users.create_index("uid", unique=True, name="unique uid")
__connection.users.create_index(
"username", unique=True, name="unique usernames"
)
__connection.users.create_index(
"username",
unique=True,
collation=Collation(locale="en", strength=CollationStrength.PRIMARY),
name="unique normalized usernames",
)
__connection.users.create_index("tid")
__connection.users.create_index("email")
__connection.users.create_index("demo.parentemail")
__connection.groups.create_index("gid", unique=True, name="unique gid")
__connection.groups.create_index("owner", name="owner")
__connection.groups.create_index("teachers", name="teachers")
__connection.groups.create_index("members", name="members")
__connection.groups.create_index(
[("owner", 1), ("name", 1)], unique=True, name="name and owner"
)
__connection.problems.create_index("pid", unique=True, name="unique pid")
__connection.problems.create_index("disabled")
__connection.problems.create_index(
[("score", pymongo.ASCENDING), ("name", pymongo.ASCENDING)]
)
__connection.scoreboards.create_index(
"sid", unique=True, name="unique scoreboard sid"
)
__connection.shell_servers.create_index(
"sid", unique=True, name="unique shell sid"
)
__connection.submissions.create_index([("pid", 1), ("uid", 1), ("correct", 1)])
__connection.submissions.create_index([("pid", 1), ("tid", 1), ("correct", 1)])
__connection.submissions.create_index([("uid", 1), ("correct", 1)])
__connection.submissions.create_index([("tid", 1), ("correct", 1)])
__connection.submissions.create_index([("pid", 1), ("correct", 1)])
__connection.submissions.create_index("uid")
__connection.submissions.create_index("tid")
__connection.submissions.create_index("suspicious")
__connection.teams.create_index(
"team_name", unique=True, name="unique team_names"
)
__connection.teams.create_index(
"team_name",
unique=True,
collation=Collation(locale="en", strength=CollationStrength.PRIMARY),
name="unique normalized team names",
)
__connection.teams.create_index("tid", unique=True, name="unique tid")
__connection.teams.create_index(
"eligibilities",
name="non-empty eligiblity",
partialFilterExpression={"size": {"$gt": 0}},
)
__connection.teams.create_index(
"size", name="non-empty size", partialFilterExpression={"size": {"$gt": 0}}
)
__connection.tokens.create_index("uid")
__connection.tokens.create_index("gid")
__connection.tokens.create_index("tokens.registration_token")
__connection.tokens.create_index("tokens.email_verification")
__connection.tokens.create_index("tokens.password_reset")
return __connection
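# Usage sketch: get_conn() reads its settings from current_app.config, so it
# must be called inside a Flask application (or request) context:
#
#     with app.app_context():
#         db = get_conn()
#         user = db.users.find_one({"uid": uid})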
|
from django.core.exceptions import ValidationError
class Plugin(object):
verbose_name = None
# Used on select plugin view
class_verbose_name = None
icon = None
change_form = None
form = None
serializer = None
change_readonly_fields = ()
plugin_field = None
def __init__(self, instance=None):
# Related model instance of this plugin
self.instance = instance
if self.form is None:
from .forms import PluginForm
self.form = PluginForm
super().__init__()
@classmethod
def get_name(cls):
return getattr(cls, 'name', cls.__name__)
@classmethod
def get_plugins(cls):
return cls.plugins
@classmethod
def get(cls, name):
if not hasattr(cls, '_registry'):
cls._registry = {
plugin.get_name(): plugin for plugin in cls.get_plugins()
}
return cls._registry[name]
@classmethod
def get_verbose_name(cls):
        # don't evaluate cls.verbose_name if it is a ugettext_lazy proxy
verbose = getattr(cls.verbose_name, '_proxy____args', [cls.verbose_name])
if verbose[0]:
return cls.verbose_name
else:
return cls.get_name()
@classmethod
def get_choices(cls):
choices = []
for plugin in cls.get_plugins():
verbose = plugin.get_verbose_name()
choices.append(
(plugin.get_name(), verbose)
)
return sorted(choices, key=lambda e: e[1])
@classmethod
def get_change_readonly_fields(cls):
return cls.change_readonly_fields
@classmethod
def get_class_path(cls):
return '.'.join((cls.__module__, cls.__name__))
def clean_data(self):
""" model clean, uses cls.serizlier by default """
if self.serializer:
serializer = self.serializer(data=self.instance.data)
if not serializer.is_valid():
raise ValidationError(serializer.errors)
return serializer.data
return {}
def get_directive(self):
raise NotImplementedError
def get_form(self):
self.form.plugin = self
self.form.plugin_field = self.plugin_field
return self.form
def get_change_form(self):
form = self.change_form or self.form
form.plugin = self
form.plugin_field = self.plugin_field
return form
def get_serializer(self):
self.serializer.plugin = self
return self.serializer
class PluginModelAdapter(Plugin):
""" Adapter class for using model classes as plugins """
model = None
name_field = None
form = None
def __init__(self, instance=None):
if self.form is None:
from .forms import PluginModelAdapterForm
self.form = PluginModelAdapterForm
super().__init__(instance)
@classmethod
def get_plugins(cls):
plugins = []
for related_instance in cls.model.objects.filter(is_active=True):
attributes = {
'related_instance': related_instance,
'verbose_name': related_instance.verbose_name
}
plugins.append(type('PluginAdapter', (cls,), attributes))
return plugins
@classmethod
def get(cls, name):
# don't cache, since models can change
for plugin in cls.get_plugins():
if name == plugin.get_name():
return plugin
@classmethod
def get_name(cls):
return getattr(cls.related_instance, cls.name_field)
class PluginMount(type):
def __init__(cls, name, bases, attrs):
if not attrs.get('abstract', False):
if not hasattr(cls, 'plugins'):
# This branch only executes when processing the mount point itself.
# So, since this is a new plugin type, not an implementation, this
# class shouldn't be registered as a plugin. Instead, it sets up a
# list where plugins can be registered later.
cls.plugins = []
else:
# This must be a plugin implementation, which should be registered.
# Simply appending it to the list is all that's needed to keep
# track of it later.
cls.plugins.append(cls)
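# Sketch of the intended mount-point pattern (class names are hypothetical).
# Defining the mount point itself creates the empty `plugins` registry; every
# concrete subclass is then appended to it, while `abstract = True` opts a
# class out of registration:
#
#     class PaymentPlugin(Plugin, metaclass=PluginMount):
#         pass  # mount point: receives plugins = []
#
#     class PayPal(PaymentPlugin):
#         verbose_name = 'PayPal'
#
#     PaymentPlugin.get_plugins()  # -> [PayPal]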
|
#!/usr/bin/env python
def concInts(x, y):
return x*10+y
def ResistorCheck():
    userInput = raw_input("Option 1.) Print all potential resistor band colors.\nOption 2.) Proceed to ResistorCheck program.\nType 1 or 2 to proceed: ")
if(userInput.lower() == "1"):
print("All potential colors for FIRST THREE BANDS are:\nBlack\nBrown\nRed\nOrange\nYellow\nGreen\nBlue\nViolet\nGrey\nWhite")
print("All potential colors for FOURTH BAND are:\nBlack\nBrown\nRed\nOrange\nYellow\nGreen\nBlue\nViolet")
ResistorCheck()
else:
band1 = raw_input("Band 1 color: ")
band2 = raw_input("Band 2 color: ")
band3 = raw_input("Band 3 color: ")
        colorDigits = {"black": 0, "brown": 1, "red": 2, "orange": 3, "yellow": 4,
                       "green": 5, "blue": 6, "violet": 7, "grey": 8, "white": 9}
        outputArray = []
        for band in [band1, band2]:
            digit = colorDigits.get(band.lower())
            if digit is None:
                print("Sorry, your input did not match any known resistor-band colors.")
                print("\n")
                return ResistorCheck()
            outputArray.append(digit)
toMultiply = concInts(outputArray[0], outputArray[1])
        # Band 3 (black through violet only) encodes the power-of-ten multiplier.
        multiplierDigit = colorDigits.get(band3.lower())
        if multiplierDigit is None or multiplierDigit > 7:
            print("Sorry, your input did not match any known resistor-band colors.")
            print("\n")
            return ResistorCheck()
        multiplier = 10 ** multiplierDigit
result = multiplier*toMultiply
print("Your resistor has a resistance of {} ohms.").format(result)
print("Program completed. Would you like to check another resistor?")
donePrompt = raw_input("Yes or No: ")
if donePrompt.lower() == "yes":
ResistorCheck()
else:
print("Program completed")
ResistorCheck()
|
class SolutionOfDynamicPlanning:
    # ~~~~~ Climbing Stairs ~~~~~
    # Link: https://leetcode-cn.com/leetbook/read/top-interview-questions-easy/xn854d/
    # Hint: closed-form (Binet) formula for the n-th Fibonacci number
def ClimbStairs1(n = 3) -> int:
if (n == 1):
return 1
elif (n == 2):
return 2
else:
return int((((1 + 5**0.5)/2)**(n + 1) - ((1 - 5**0.5)/2)**(n + 1))/(5**0.5))
    # Hint: plain iterative computation with a DP array
def ClimbStairs2(n = 3) -> int:
if (n == 1):
return 1
else:
value = [0 for i in range(n + 1)]
value[1] = 1
value[2] = 2
for i in range (3,n + 1):
value[i] = value[i-1] + value[i-2]
return value[n]
    # Hint: avoid the array; rotate three variables instead
def ClimbStairs3(n = 3) -> int:
i = 0
j = 1
for index in range(n):
k = i + j
i = j
j = k
return k
    # ~~~~~ Best Time to Buy and Sell Stock I ~~~~~
    # Link: https://leetcode-cn.com/leetbook/read/top-interview-questions-easy/xn8fsh/
    # Hint: two cursors: one tracks the running minimum price, the other the running maximum profit (starting at 0)
def MaxProfit1(prices = [2,4,5,1]) -> int:
if (len(prices) == 0):
return 0
maxValue = 0
minPrice = prices[0]
for index in range (1, len(prices)):
minPrice = min(minPrice, prices[index])
maxValue = max(maxValue, prices[index] - minPrice)
print(maxValue)
return maxValue
    # Hint: dynamic programming
def MaxProfit2(prices = [2,4,5,1]) -> int:
        # dp[i][0] -- max profit at the end of day i+1 when holding no stock
        # dp[i][1] -- max profit at the end of day i+1 when holding stock
# dp[i][0] = max(dp[i-1][0], dp[i-1][1] + prices[i])
# dp[i][1] = max(dp[i-1][1], -prices[i])
# dp[0][0] = 0
# dp[0][1] = -prices[0]
length = len(prices)
if (length == 0):
return 0
unhold = 0
hold = -prices[0]
for i in range(1,length):
unhold = max(unhold, hold + prices[i])
hold = max(hold, -prices[i])
print(unhold)
return unhold
    # ~~~~~ Maximum Subarray ~~~~~
    # Link: https://leetcode-cn.com/leetbook/read/top-interview-questions-easy/xn3cg3/
    # Hint: dynamic programming
def MaxSubArray(nums = [-2,1,-3,4,-1,2,1,-5,4]) -> int:
        # dp[i] -- max sum of a contiguous subarray ending at index i (i is 0-based)
# dp[i] = max(dp[i - 1], 0) + nums[i]
length = len(nums)
dp = [0] * length
dp[0] = nums[0]
maximum = dp[0]
for i in range(1, length):
if (dp[i - 1] > 0):
dp[i] = dp[i - 1] + nums[i]
else:
dp[i] = nums[i]
print(max(dp))
return max(dp)
    # ~~~~~ House Robber ~~~~~
    # Link: https://leetcode-cn.com/leetbook/read/top-interview-questions-easy/xnq4km/
    # Hint: dynamic programming
def Rob(nums = [2,7,9,3,1]) -> int:
length = len(nums)
        # dp[i][0] -- max loot through house i+1 when house i+1 is not robbed
        # dp[i][1] -- max loot through house i+1 when house i+1 is robbed
# dp = [[0 for col in range(2)] for row in range(length)]
# print(dp)
# dp[0][0] = 0
# dp[0][1] = nums[0]
a = 0
b = nums[0]
for i in range(1, length):
temp = max(a, b)
b = a + nums[i]
a = temp
a = max(a, b)
print(a)
return a
# SolutionOfDynamicPlanning.ClimbStairs1()
# SolutionOfDynamicPlanning.ClimbStairs2()
# SolutionOfDynamicPlanning.ClimbStairs3()
# SolutionOfDynamicPlanning.MaxProfit1()
# SolutionOfDynamicPlanning.MaxProfit2()
# SolutionOfDynamicPlanning.MaxSubArray()
# SolutionOfDynamicPlanning.Rob()
|
# Copyright (c) 2016 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from importlib import import_module
# from valence.flavor.plugins import *
import os
from oslo_log import log as logging
from valence.common.redfish import api as rfs
FLAVOR_PLUGIN_PATH = os.path.dirname(os.path.abspath(__file__)) + '/plugins'
logger = logging.getLogger()
def get_available_criteria():
pluginfiles = [f.split('.')[0]
for f in os.listdir(FLAVOR_PLUGIN_PATH)
if os.path.isfile(os.path.join(FLAVOR_PLUGIN_PATH, f))
and not f.startswith('__') and f.endswith('.py')]
resp = []
for p in pluginfiles:
module = import_module("valence.flavor.plugins." + p)
myclass = getattr(module, p + 'Generator')
inst = myclass([])
resp.append({'name': p, 'description': inst.description()})
return {'criteria': resp}
def create_flavors(criteria):
"""criteria : comma seperated generator names
This should be same as thier file name)
"""
respjson = []
lst_nodes = rfs.nodes_list()
for g in criteria.split(","):
if g:
logger.info("Calling generator : %s ." % g)
module = __import__("valence.flavor.plugins." + g, fromlist=["*"])
classobj = getattr(module, g + "Generator")
inst = classobj(lst_nodes)
respjson.append(inst.generate())
return respjson
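# Usage sketch (generator names are hypothetical; each must match a .py file
# in valence/flavor/plugins that defines a `<name>Generator` class):
#
#     get_available_criteria()   # -> {'criteria': [{'name': ..., 'description': ...}, ...]}
#     create_flavors('default,assettag')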
|
import discord
from discord.ext import commands
class Roles(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(brief='Have Nic write the role selector message')
@commands.has_permissions(administrator=True)
async def roles(self, context):
not_pinned = lambda message: not message.pinned
await context.channel.purge(limit=1, check=not_pinned)
database = self.client.database
selector = await context.send(database[27])
for emoji in ['🎮']:
await selector.add_reaction(emoji)
def setup(client):
client.add_cog(Roles(client))
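# Loading sketch (the extension path is hypothetical, and the bot is assumed
# to expose the `database` mapping read by the command above):
#
#     client.load_extension('cogs.roles')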
|
from flask import Flask, request, send_file, redirect, render_template
from pymongo import MongoClient
from uuid import uuid4
from qrcode import make as create_qr
from io import BytesIO
from PIL.Image import open as open_image, Image
from PIL import ImageDraw, ImageFont
from os import environ as env
app = Flask(__name__)
mongo = MongoClient(env.get("MONGO_URI"))
db = mongo["lagerplass"]
items_table = db["items"]
boxes_table = db["boxes"]
secure = bool(env.get("SECURE"))
def better_redirect(url):
    if secure:
        return redirect("https://%s%s" % (request.headers["host"], url))
    return redirect(url)
@app.route('/boxes', methods=["POST"])
def create_box():
box_id = str(uuid4())
boxes_table.insert_one({
"_id": box_id,
"location": request.args["location"],
"items": []
})
return better_redirect("/boxes/%s" % box_id)
@app.route("/boxes/<string:box_id>/location", methods=["POST"])
def edit_box_location(box_id):
boxes_table.update_one({"_id": box_id}, {"$set": {"location": request.args["location"]}})
return "Ok."
@app.route("/items", methods=["POST"])
def create_item():
item_id = str(uuid4())
items_table.insert_one({
"_id": item_id,
"category": request.args["category"],
"name": request.args["name"]
})
return item_id
@app.route("/items/<string:item_id>", methods=["DELETE"])
def delete_item(item_id):
items_table.delete_one({"_id": item_id})
return "Ok."
@app.route("/boxes/<string:box_id>/items/<string:item_id>", methods=["DELETE"])
def remove_item_from_box(box_id, item_id):
boxes_table.update_one({"_id": box_id}, {"$pull": {"items": item_id}})
return "Ok."
@app.route("/boxes/<string:box_id>/qr")
def render_qr(box_id):
qr_img = BytesIO()
qr = create_qr("http%s://%s/boxes/%s" % (("", "s")[secure], request.headers["host"], box_id))
qr.save(qr_img, format="png")
qr_img.seek(0)
    image: Image = open_image(qr_img)
width, height = image.size
draw = ImageDraw.Draw(image)
text_seat = box_id[:4]
font = ImageFont.truetype('font.ttf', 40)
draw.text((40, height - 45), text_seat, font=font)
out = BytesIO()
image.save(out, format="png")
out.seek(0)
return send_file(out, "image/png")
@app.route("/boxes/<string:box_id>")
def get_box_items(box_id):
box = boxes_table.find_one({"_id": box_id})
location = box["location"]
items = []
for item_id in box["items"]:
item = items_table.find_one({"_id": item_id})
if item:
items.append(item)
all_items_sorted = {}
all_items = items_table.find()
for item in all_items:
if item["_id"] in box["items"]:
continue
if not all_items_sorted.get(item["category"]):
all_items_sorted[item["category"]] = []
all_items_sorted[item["category"]].append(item)
return render_template("boxinfo.jinja2", box_id=box_id, location=location, items=items, all_items=all_items_sorted)
@app.route("/boxes/<string:box_id>/items/<string:item_id>", methods=["POST"])
def add_to_box(box_id, item_id):
boxes_table.update_one({"_id": box_id}, {"$push": {"items": item_id}})
return "Ok."
@app.route("/items")
def create_item_gui():
items = items_table.find()
return render_template("createitem.jinja2", items=items)
@app.route("/")
def render_homepage():
all_items = items_table.find()
categories = []
for item in all_items:
if item["category"] in categories:
continue
categories.append(item["category"])
return render_template("homepage.jinja2", categories=categories)
@app.route("/categories/<string:category_name>")
def render_category(category_name):
items = items_table.find({"category": category_name})
return render_template("category.jinja2", items=items, category=category_name)
@app.route("/items/<string:item_id>")
def render_item(item_id):
boxes = boxes_table.find({"items": item_id})
return render_template("items.jinja2", boxes=boxes)
@app.route("/boxes/<string:box_id>", methods=["DELETE"])
def delete_box(box_id):
boxes_table.delete_one({"_id": box_id})
return "Ok."
@app.route("/boxes")
def render_boxes_gui():
boxes = boxes_table.find()
return render_template("createboxes.jinja2", boxes=boxes)
if __name__ == '__main__':
app.run()
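# Interaction sketch against the routes above (host, IDs and values are
# illustrative only):
#
#     curl -X POST 'http://localhost:5000/boxes?location=attic'
#     curl -X POST 'http://localhost:5000/items?category=tools&name=hammer'
#     curl -X POST 'http://localhost:5000/boxes/<box_id>/items/<item_id>'
#     curl 'http://localhost:5000/boxes/<box_id>/qr' --output label.png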
|
#For 18 points, read this and answer the questions.
#Lists (also known as arrays) are for groups of related values.
#If you have ever been tempted to name your variables like the
#following, what you really needed was a list:
var1 = 'sword'
var2 = 'shield'
var3 = 'armor'
var4 = 'boots'
var5 = 'helmet'
#Creating lists. You have two options:
#Option 1: Empty list:
activities_list = []
#Option 2: list with values already in it
activities_list = ['classes', 'hobbies', 'friends', 'sports']
#The len function can be used to determine how many
#values are in a list.
#1. What does this print?
print(len(activities_list))
#2. What does this print?
activities_list = []
print(len(activities_list))
#But if you create your list like this
activities_list = []
#how do you put things in it?
#The answer is append
activities_list.append('classes')
activities_list.append('hobbies')
activities_list.append('friends')
activities_list.append('sports')
#3. Write a line of code that appends another activity to the list.
#4. Compare what this code prints:
print(activities_list)
#versus what this code prints:
print(len(activities_list))
#The following three pieces of code print the same thing.
#5. Which option is best in your opinion and why?
#Option A:
print(activities_list[0])
print(activities_list[1])
print(activities_list[2])
print(activities_list[3])
#Option B:
for item in activities_list:
print(item)
#Option C:
for i in range(len(activities_list)):
print(activities_list[i])
#Values in a list can be changed individually by accessing the value
#with brackets [] and setting the list position equal to a new value.
#6. How is activities_list changed by activities_list[2] = 'job' ?
print(activities_list)
activities_list[2] = 'job'
print(activities_list)
#7. What will this print?
print(activities_list[1])
#8. What does this code print? Guess before running it.
drawers = ["socks", "underwear", "shirts", "pants"]
n = 2
print("Drawer "+str(n)+" contains "+drawers[n])
n = n+1
print("Drawer "+str(n)+" contains "+drawers[n])
#9. What does this code print? Guess before running it.
x=['python','list',1995,'age']
print(x[0])
print(x[1])
print(x[3])
print(x[2])
print(x[len(x)-1])
print(len(x))
#10. How is this list different after line two?
science_subjects=['chemistry','physics','biology','mathematics']
science_subjects[2]='english'
print(science_subjects)
|
"""Contains configuration parameters for the dashboards."""
DASH_STYLE = {
"backgroundColor": '#212121',
"textAlign": "center",
"color": "#C9D6DF" # font
}
PLOT_TEMPLATE = 'plotly_dark'
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from __future__ import annotations
import abc
from functools import reduce
from typing import Generic, List, Optional, Tuple, Union
import numpy as np
import param
import torch
from InnerEye.Common.generic_parsing import CudaAwareConfig
from InnerEye.Common.type_annotations import T, TupleFloat2
class Transform3DBaseMeta(type(CudaAwareConfig), abc.ABCMeta): # type: ignore
"""
Metaclass to make the hierarchy explicit for Transform3D
"""
pass
class Transform3D(CudaAwareConfig[T], abc.ABC, metaclass=Transform3DBaseMeta):
"""
Class that allows defining a transform function with the possibility of operating on the GPU.
"""
@abc.abstractmethod
def __call__(self, sample: T) -> T:
raise Exception("__call__ function must be implemented by subclasses")
class Compose3D(Generic[T]):
"""
Class that allows chaining multiple transform functions together, and applying them to a sample
"""
def __init__(self, transforms: List[Transform3D[T]]):
self._transforms = transforms
def __call__(self, sample: T) -> T:
# pythonic implementation of the foldl function
# foldl (-) 0 [1,2,3] => (((0 - 1) - 2) - 3) => -6
return reduce(lambda x, f: f(x), self._transforms, sample)
@staticmethod
def apply(compose: Optional[Compose3D[T]], sample: T) -> T:
"""
Apply a composition of transfer functions to the provided sample
:param compose: A composition of transfer functions
:param sample: The sample to apply the composition on
        :return: The transformed sample, or the original sample if ``compose`` is None
"""
if compose:
return compose(sample)
else:
return sample
class CTRange(Transform3D[Union[torch.Tensor, np.ndarray]]):
output_range: TupleFloat2 = param.NumericTuple(default=(0.0, 255.0), length=2,
doc="Desired output range of intensities")
window: float = param.Number(None, doc="Width of window")
level: float = param.Number(None, doc="Mid-point of window")
def __call__(self, data: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]:
return LinearTransform.transform(
data=self.get_gpu_tensor_if_possible(data),
input_range=get_range_for_window_level(self.level, self.window),
output_range=self.output_range,
use_gpu=self.use_gpu
)
@staticmethod
def transform(data: Union[torch.Tensor, np.ndarray],
output_range: TupleFloat2,
window: float, level: float,
use_gpu: bool = False) -> Union[torch.Tensor, np.ndarray]:
# find upper and lower values of input range to linearly map to output range. Values outside range are
# floored and capped at min or max of range.
transform = CTRange(output_range=output_range, window=window, level=level, use_gpu=use_gpu)
return transform(data)
class LinearTransform(Transform3D[Union[torch.Tensor, np.ndarray]]):
input_range: TupleFloat2 = param.NumericTuple(None, length=2, doc="Expected input range of intensities")
output_range: TupleFloat2 = param.NumericTuple(None, length=2, doc="Desired output range of intensities")
def __call__(self, data: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]:
data = self.get_gpu_tensor_if_possible(data)
gradient = (self.output_range[1] - self.output_range[0]) / (self.input_range[1] - self.input_range[0])
c = self.output_range[1] - gradient * self.input_range[1]
_apply_transform = lambda: data * gradient + c
if torch.is_tensor(data):
gradient = self.get_gpu_tensor_if_possible(torch.tensor(gradient))
c = self.get_gpu_tensor_if_possible(torch.tensor(c))
return _apply_transform().clamp(min=self.output_range[0], max=self.output_range[1])
else:
return np.clip(_apply_transform(), a_min=self.output_range[0], a_max=self.output_range[1])
@staticmethod
def transform(data: Union[torch.Tensor, np.ndarray],
input_range: TupleFloat2, output_range: TupleFloat2,
use_gpu: bool = False) -> Union[torch.Tensor, np.ndarray]:
transform = LinearTransform(use_gpu=use_gpu, input_range=input_range, output_range=output_range)
return transform(data)
def get_range_for_window_level(level: float, window: float) -> Tuple[float, float]:
upper = level + window / 2
lower = level - window / 2
return lower, upper
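# Usage sketch (the window/level values are illustrative, not clinical
# recommendations; `ct_volume` is a torch.Tensor or np.ndarray supplied by
# the caller):
#
#     ct_pipeline = Compose3D([
#         CTRange(output_range=(0.0, 1.0), window=400.0, level=40.0, use_gpu=False),
#     ])
#     normalized = Compose3D.apply(ct_pipeline, ct_volume)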
|
import sys
sys.path.append('../')
from util import parameter_number
from sklearn.metrics import accuracy_score
import torch
import torch.optim as optim
import torch.nn as nn
class Manager():
def __init__(self, model, args):
self.args_info = args.__str__()
self.device = torch.device('cuda:{}'.format(args.cuda) if torch.cuda.is_available() else 'cpu')
if args.load:
model.load_state_dict(torch.load(args.load))
self.model = model.to(self.device)
self.epoch = args.epoch
        self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr)
        self.lr_scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=10, gamma=0.5)
self.loss_function = nn.CrossEntropyLoss()
self.save = args.save
self.record_interval = args.interval
self.record_file = None
if args.record:
self.record_file = open(args.record, 'w')
self.best = {"epoch": 0, "acc": 0}
def record(self, info):
print(info)
if self.record_file:
self.record_file.write(info + '\n')
def train(self, train_data, test_data):
self.record("*****************************************")
self.record("Hyper-parameters: {}".format(self.args_info))
self.record("Model parameter number: {}".format(parameter_number(self.model)))
self.record("Model structure:\n{}".format(self.model.__str__()))
self.record("*****************************************")
for epoch in range(self.epoch):
self.model.train()
train_loss = 0
train_label = LabelContainer()
for i, (points, gt) in enumerate(train_data):
points = points.to(self.device)
gt = gt.view(-1,).to(self.device)
out = self.model(points)
self.optimizer.zero_grad()
loss = self.loss_function(out, gt)
loss.backward()
self.optimizer.step()
train_loss += loss.item()
pred = torch.max(out, 1)[1]
train_label.add(gt, pred)
if (i + 1) % self.record_interval == 0:
self.record(' epoch {:3d} step {:5d} | avg loss: {:.5f} | avg acc: {:.5f}'.format(epoch +1, i+1, train_loss/(i + 1), train_label.get_acc()))
train_loss /= (i+1)
train_acc = train_label.get_acc()
test_loss, test_acc = self.test(test_data)
if test_acc > self.best['acc']:
self.best['epoch'] = epoch + 1
self.best['acc'] = test_acc
if self.save:
torch.save(self.model.state_dict(), self.save)
            self.record('= Epoch {} | Train Loss: {:.5f} Train Acc: {:.3f} | Test Loss: {:.5f} Test Acc: {:.3f} | Best Acc: {:.3f}\n'.format(epoch + 1, train_loss, train_acc, test_loss, test_acc, self.best['acc']))
self.lr_scheduler.step()
self.record('* Best result at {} epoch with test acc {}'.format(self.best['epoch'], self.best['acc']))
def test(self, test_data):
self.model.eval()
test_loss = 0
test_label = LabelContainer()
for i, (points, gt) in enumerate(test_data):
points = points.to(self.device)
gt = gt.view(-1,).to(self.device)
out = self.model(points)
loss = self.loss_function(out, gt)
test_loss += loss.item()
pred = torch.max(out, 1)[1]
test_label.add(gt, pred)
test_loss /= (i+1)
test_acc = test_label.get_acc()
return test_loss, test_acc
class LabelContainer():
def __init__(self):
self.has_data = False
self.gt = None
self.pred = None
def add(self, gt, pred):
gt = gt.detach().cpu().view(-1)
pred = pred.detach().cpu().view(-1)
if self.has_data == False:
self.has_data = True
self.gt = gt
self.pred = pred
else:
self.gt = torch.cat([self.gt, gt])
self.pred = torch.cat([self.pred, pred])
def get_acc(self):
return accuracy_score(self.gt, self.pred)
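# Usage sketch (model, args and the DataLoaders come from the caller; args is
# assumed to carry the attributes read in __init__: cuda, load, epoch, lr,
# save, interval and record):
#
#     manager = Manager(model, args)
#     manager.train(train_loader, test_loader)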
|
import sys
if sys.version_info < (3, 6):
raise RuntimeError("This library requires python 3.6+.")
__version__ = '0.3.0.dev0'
# auto-import submodules
from . import colors as colors
from . import data as data
from . import plot as plot
#
# populate common APIs
from .data import Experiment as Experiment
from .data import Hypothesis as Hypothesis
from .data import Run as Run
from .data import RunList as RunList
from .data import get_runs as get_runs
from .data import parse_run as parse_run
|
import re
from synthesis_classifier.multiprocessing_classifier import perform_collection, make_batch
from synthesis_classifier.database.patents import PatentsDBWriter, PatentParagraphsByQuery
def example_paragraphs():
query = {
'path': re.compile(r'.*example.*', re.IGNORECASE),
}
return PatentParagraphsByQuery(query)
if __name__ == "__main__":
batch_size = 16
perform_collection(
PatentsDBWriter,
make_batch(example_paragraphs(), batch_size),
'./job_patents_examples.sh'
)
|
'''
Create Triqler input files by converting the evidence.txt output file from
MaxQuant.
'''
from __future__ import print_function
import sys
import os
import collections
import numpy as np
from .. import parsers
from ..triqler import __version__, __copyright__
from . import helpers
def main():
print('Triqler.convert.maxquant version %s\n%s' % (__version__, __copyright__))
print('Issued command:', os.path.basename(__file__) + " " + " ".join(map(str, sys.argv[1:])))
args, params = parseArgs()
convertMqToTriqler(args.file_list_file, args.in_file, args.out_file, params)
def parseArgs():
import argparse
apars = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
requiredNamed = apars.add_argument_group('required arguments')
apars.add_argument('in_file', default=None, metavar = "IN_FILE",
help='''MaxQuant evidence.txt file.
''')
requiredNamed.add_argument('--file_list_file', metavar='L',
help='Simple text file with spectrum file names in first column and condition in second column.',
required = True)
apars.add_argument('--out_file', default = "triqler_input.tsv", metavar='OUT',
help='''Path to triqler input file (writing in TSV format).
''')
apars.add_argument('--skip_normalization',
help='Skip retention-time based intensity normalization.',
action='store_true')
apars.add_argument('--skip_mbr_rows',
help='Skips the match-between-runs rows in the output.',
action='store_true')
# ------------------------------------------------
args = apars.parse_args()
params = dict()
params['simpleOutputFormat'] = True
params['skipNormalization'] = args.skip_normalization
params['skipMBR'] = args.skip_mbr_rows
params['plotScatter'] = False
return args, params
def convertMqToTriqler(fileListFile, mqEvidenceFile, triqlerInputFile, params):
fileInfoList = parsers.parseFileList(fileListFile)
peptideToFeatureMap = parseMqEvidenceFile(mqEvidenceFile, fileInfoList, params)
rTimeArrays, factorArrays = helpers.getNormalizationFactorArrays(peptideToFeatureMap, fileInfoList, params)
helpers.writeTriqlerInputFile(triqlerInputFile, peptideToFeatureMap, rTimeArrays, factorArrays, params)
def parseMqEvidenceFile(mqEvidenceFile, fileInfoList, params):
fileList, _, _, _ = zip(*fileInfoList)
reader = parsers.getTsvReader(mqEvidenceFile)
headers = next(reader) # save the header
headers = list(map(str.lower, headers))
peptCol = headers.index('modified sequence')
fileCol = headers.index('raw file')
chargeCol = headers.index('charge')
intensityCol = headers.index('intensity')
proteinCol = headers.index('leading proteins')
scoreCol = headers.index('score')
rtCol = headers.index('retention time')
fractionCol = headers.index('fraction') if 'fraction' in headers else -1
experimentCol = headers.index('experiment') if 'experiment' in headers else -1
print("Parsing MaxQuant evidence.txt file")
peptideToFeatureMap = collections.defaultdict(list)
for lineIdx, row in enumerate(reader):
if lineIdx % 500000 == 0:
print(" Reading line", lineIdx)
proteins = row[proteinCol].split(";")
linkPEP = 0.0
key = (row[peptCol], row[chargeCol])
if not row[fileCol] in fileList:
print("Warning: Could not find %s in the specified file list, skipping row" % row[fileCol])
continue
fileIdx = fileList.index(row[fileCol])
run, condition, sample, fraction = fileInfoList[fileIdx]
if fraction == -1 and fractionCol != -1:
sample, fraction = row[experimentCol], row[fractionCol]
if key in peptideToFeatureMap:
featureClusterIdx = peptideToFeatureMap[key][0][0].featureClusterId
else:
featureClusterIdx = len(peptideToFeatureMap)
if row[intensityCol] == "" or float(row[scoreCol]) <= 0:
continue
triqlerRow = parsers.TriqlerInputRow(sample, condition, row[chargeCol], lineIdx, linkPEP, featureClusterIdx, np.log(float(row[scoreCol])), float(row[intensityCol]), row[peptCol], proteins)
peptideToFeatureMap[key].append((triqlerRow, float(row[rtCol]), fraction))
return peptideToFeatureMap
if __name__ == "__main__":
main()
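# Command-line sketch (the converter is intended to run as a module; the
# evidence and file-list names below are hypothetical):
#
#     python -m triqler.convert.maxquant --file_list_file file_list.txt \
#         --out_file triqler_input.tsv evidence.txt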
|