# The same teacher wants to randomize the order in which his students will present. Write a program to help him.
from random import shuffle
aluno1 = input("Name of the first student? ")
aluno2 = input("Name of the second student? ")
aluno3 = input("Name of the third student? ")
aluno4 = input("Name of the fourth student? ")
lista = [aluno1, aluno2, aluno3, aluno4]
shuffle(lista)
print("\033[33mThe presentation order will be {}.".format(lista))
|
import numpy as np
from astropy.wcs import WCS
from astropy.io import fits
def read_spec(filename):
'''Read a UVES spectrum from the ESO pipeline
Parameters
----------
filename : string
name of the fits file with the data
Returns
-------
wavelength : np.ndarray
wavelength (in Ang)
flux : np.ndarray
flux (in erg/s/cm**2)
date_obs : string
time of observation
'''
sp = fits.open(filename)
header = sp[0].header
wcs = WCS(header)
#make index array
index = np.arange(header['NAXIS1'])
wavelength = wcs.wcs_pix2world(index[:,np.newaxis], 0)
wavelength = wavelength.flatten()
flux = sp[0].data
date_obs = header['Date-OBS']
return wavelength, flux, date_obs
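# A minimal usage sketch; the filename below is a placeholder for an actual
# ESO UVES pipeline product, not a file shipped with this snippet.
if __name__ == '__main__':
    wavelength, flux, date_obs = read_spec('uves_spectrum_red.fits')
    print('Observed on {}: {:.1f}-{:.1f} Ang over {} pixels'.format(
        date_obs, wavelength.min(), wavelength.max(), len(flux)))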
|
import token
import tokenize
PREVIOUS_TOKEN_MARKERS = (token.INDENT, token.ENDMARKER, token.NEWLINE)
def _is_docstring(
tokeninfo: tokenize.TokenInfo, previous_token: tokenize.TokenInfo
) -> bool:
"""Check if a token represents a docstring."""
if (
tokeninfo.type == token.STRING
and (
previous_token.type in PREVIOUS_TOKEN_MARKERS
or previous_token.type == token.NL
and not tokeninfo.start[1]
)
and tokeninfo.line.strip().startswith(("'", '"'))
):
return True
return False
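# A minimal sketch (the helper below is illustrative, not part of the original
# module) that drives _is_docstring over a source string via tokenize.
import io

def _demo_find_docstrings(source: str) -> list:
    """Return the string tokens that _is_docstring classifies as docstrings."""
    tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
    return [
        current.string
        for previous, current in zip(tokens, tokens[1:])
        if _is_docstring(current, previous)
    ]

# _demo_find_docstrings('def f():\n    """say hi"""\n    return 1\n')
# -> ['"""say hi"""']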
|
import random
from starter_logic import generate_journal_starter
from eliza_logic import eliza_analyze
from continuation_logic import generate_generic_continuation, generate_specific_continuation
def generate_response(text_input):
# Trim and format input
char_limit = 1000
text = text_input[-char_limit:]
    sentence_list = [sentence.strip() for sentence in text.split('.') if sentence.strip() != '']
if len(text_input) > char_limit:
sentence_list = sentence_list[1:]
sentence_list = sentence_list[-5:]
# If input is empty or on a new paragraph, then return a journal starter
if len(sentence_list) == 0 or sentence_list[-1] == '\n':
return { 'responseType': 'starter', 'response': generate_journal_starter() }
# If there's a hit on the eliza matching, return that
lastSentenceResponse = eliza_analyze(sentence_list[-1])
    if lastSentenceResponse is not None:
return { 'responseType': 'eliza_cont', 'response': lastSentenceResponse }
    # Randomly pick a continuation branch: generic (1/3) or specific (2/3)
    branch = random.randint(0, 2)
    if branch == 0:
        return { 'responseType': 'generic_cont', 'response': generate_generic_continuation() }
    if branch == 1 or branch == 2:
        return { 'responseType': 'specific_cont', 'response': generate_specific_continuation(text) }
|
income = float(input())
gross_pay = income
taxes_owed = income * .12
net_pay = gross_pay - taxes_owed
print(gross_pay)
print(taxes_owed)
print(net_pay)
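# Worked example of the 12% flat tax above: an income of 1000.00 yields
#   gross_pay  = 1000.00
#   taxes_owed =  120.00
#   net_pay    =  880.00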
|
import logging
import psutil
from django.conf import settings
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger
from django.core.management.base import BaseCommand
from django_apscheduler.jobstores import DjangoJobStore
from django_apscheduler.models import DjangoJobExecution
from django_apscheduler import util
from django.utils.datetime_safe import datetime
import datetime as dtime
from uploader.models import ResourceTimestamp
logger = logging.getLogger("web")
CPU_UPPER_BOUND = 90
MEMORY_UPPER_BOUND = 80
def resource_tracker():
"""
This job tracks the current cpu and memory usage and stores Timestamp objects
in the db for further usage.
"""
cpu_percentage = psutil.cpu_percent()
memory_percentage = psutil.virtual_memory().percent
# inform user about high load
if cpu_percentage > CPU_UPPER_BOUND:
logger.warning(f"High CPU usage: {cpu_percentage}%")
if memory_percentage > MEMORY_UPPER_BOUND:
logger.warning(f"High Memory usage: {memory_percentage}%")
# save timestamps in db
res_timestamp = ResourceTimestamp(cpu_percentage=cpu_percentage, memory_percentage=memory_percentage)
res_timestamp.save()
@util.close_old_connections
def delete_old_job_executions(max_age=1_209_600):
"""
This job deletes APScheduler job execution entries older than `max_age` from the database,
as well as deleting all timestamps older than 'max_age'
It helps to prevent the database from filling up with old historical records that are no
longer useful.
:param max_age: The maximum length of time to retain historical job execution records.
Defaults to 14 days.
"""
# delete job execution entries
DjangoJobExecution.objects.delete_old_job_executions(max_age)
# delete entries in db older than 2 weeks
time_delta = datetime.now() - dtime.timedelta(seconds=max_age)
ResourceTimestamp.objects.all().filter(timestamp__lt=time_delta).delete()
class Command(BaseCommand):
"""
Extends Base command to feature another command called runapscheduler by the manage.py script
like this "python3 manage.py runapscheduler"
"""
help = "Runs resource tracker as well as cleanup process."
def add_arguments(self, parser):
# Named optional argument
parser.add_argument(
'--test',
action='store_true',
help='sample every minute for testing purpose',
)
def handle(self, *args, **options):
"""
handler for runapscheduler command
"""
scheduler = BlockingScheduler(timezone=settings.TIME_ZONE)
scheduler.add_jobstore(DjangoJobStore(), "default")
        # configure CronTriggers
        if options['test']:
            # every second, for quick testing
            resource_tracker_trigger = CronTrigger(second="*/1")
            # every 3 minutes, for quick testing
            delete_old_job_executions_trigger = CronTrigger(minute="*/3")
            delete_older_than = 180
        else:
            # every hour
            resource_tracker_trigger = CronTrigger(minute="00")
            # every day at midnight
            delete_old_job_executions_trigger = CronTrigger(hour="00", minute="00")
            delete_older_than = 1_209_600
# start resource_tracker
scheduler.add_job(
resource_tracker,
trigger=resource_tracker_trigger,
id="resource_tracker",
max_instances=1,
replace_existing=True,
)
logger.info(f"Added job '{resource_tracker.__name__}'.")
        # start cleanup job delete_old_job_executions
        scheduler.add_job(
            delete_old_job_executions,
            trigger=delete_old_job_executions_trigger,
            id="delete_old_job_executions",
            max_instances=1,
            replace_existing=True,
            args=(delete_older_than,)
        )
        logger.info("Added daily job: 'delete_old_job_executions'.")
try:
logger.info("Starting scheduler for tracking...")
scheduler.start()
except KeyboardInterrupt:
logger.info("Stopping scheduler for tracking...")
scheduler.shutdown()
logger.info("Scheduler for tracking shut down successfully!")
|
"""Definition of component types"""
import numbers
from sympy import Symbol, sympify
from unyt import unyt_quantity, degC, delta_degC, V
from circuits.common import PortDirection, temperature_difference
class Port:
"""Base class for ports
Concept:
- signals flow through ports
- ports connect to other ports
- name and direction
"""
def __init__(self, name, direction=PortDirection.INOUT):
self._name = name
self._direction = direction
@property
def name(self):
"""Return the port's name"""
return self._name
@property
def direction(self):
"""Return the port's direction"""
return self._direction.name
def __repr__(self):
return f"Port('{self._name}', {self._direction})"
def __eq__(self, other):
if isinstance(other, Port):
if self._name == other._name and self._direction == other._direction:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self._name, self._direction))
class Pin:
"""Component pin
Args:
name (str): pin name
number (int): pin number
direction (PortDirection): signal direction
owner (Component): component pin belongs to
"""
def __init__(self, name, number, owner, direction=PortDirection.INOUT):
self._name = name
self._number = number
if not issubclass(type(owner), Component):
raise TypeError(f"{owner} must be a subclass of Component")
self._owner = owner
self._direction = direction
@property
def name(self):
"""Return the pin's name"""
return self._name
@property
def number(self):
"""Return the pin number"""
return self._number
@property
def owner(self):
"""Return the pin's owner"""
return self._owner
@property
def direction(self):
"""Return the pin's direction"""
return self._direction.name
def __repr__(self):
return f"{self._owner.name}.pin({self._number})"
def __eq__(self, other):
if isinstance(other, Pin):
if self.__dict__ == other.__dict__:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self._name, self._number, self._owner, self._direction))
class PowerTap(Port):
"""Power tap"""
def __init__(self, name):
super().__init__(name)
def __repr__(self):
return f"PowerTap('{self._name}')"
class Component:
"""Base class for components
Parameters
----------
name : str
name of component - follow schematic convention of capital letter
followed by number such as R1, U1, etc.
pins :list
list of Pins
kwargs
"""
def __init__(self, name, pins, **kwargs):
self._name = name
self._symbol = Symbol(name)
self._pins = {}
for pin in pins:
if isinstance(pin, Pin):
self._pins[pin.name] = pin
self._pins[pin.number] = pin
else:
raise TypeError(f"{pin} must be a Pin")
self._parasitic = False
self.parasitics = {}
for k, v in kwargs.items():
setattr(self, k, v)
@property
def name(self):
"""Return the component's name"""
return self._name
@name.setter
def name(self, name):
self._name = name
self._symbol = Symbol(name)
@property
def pins(self):
"""Return the component's pin dict"""
return self._pins
def __repr__(self):
return f"<Component {self._name}>"
def pin(self, name):
"""Return the Pin for pin name/number from the pins dict"""
try:
return self._pins[name]
except KeyError:
raise ValueError(f"unknown pin {name}") from None
@property
def parasitic(self):
"""Whether a component is parasitic
Parameters
----------
value : bool
"""
return self._parasitic
@parasitic.setter
def parasitic(self, value):
self._parasitic = bool(value)
@property
def has_parasitics(self):
"""Whether this component has internally defined parasitics"""
return bool(len(self.parasitics))
class PassiveComponent(Component):
"""Class for passive, two-port resistors, capacitors and inductors
Parameters
----------
name : str
name of passive component
value : float or unyt_quantity
nominal value
"""
def __init__(self, name, value, **kwargs):
try:
pin_names = kwargs.pop("pin_names")
except KeyError:
pin_names = ["1", "2"]
pins = [Pin(name, int(name), self) for name in pin_names]
self._value = value
try:
tol = kwargs.pop("tol")
except KeyError:
tol = 0.0
self._tol = tol
try:
tc = kwargs.pop("tc")
except KeyError:
tc = 0.0 / delta_degC
super().__init__(name, pins, **kwargs)
self._tc = unyt_quantity(tc, "1/K")
self._ref_temp = 20 * degC
self._refs = []
self._laplace_s = Symbol("s")
self._laplace_admittance = None
@property
def value(self):
"""Return value of component"""
return self._value
@value.setter
def value(self, value):
ratio = self._value / value
if not ratio.units.is_dimensionless:
raise ValueError(f"'{value}' must be in unit '{self._value.units}'")
self._value = value
for ref, func in self._refs:
try:
ref.value = func(value)
except NameError:
pass
@property
def tol(self):
"""value (float): tolerance"""
return self._tol
@tol.setter
def tol(self, value):
self._tol = value
@property
def tc(self):
"""value (unyt_quantity or float): temperature coefficient, drift per kelvin"""
return self._tc
@tc.setter
def tc(self, value):
self._tc = unyt_quantity(value, "1/delta_degC")
@property
def reference_temperature(self):
"""value : unyt_quantity
reference temperature for drift calculation
"""
return self._ref_temp
@reference_temperature.setter
def reference_temperature(self, value):
self._ref_temp = unyt_quantity(value, "degC")
@property
def admittance(self):
"""Return the laplace admittance"""
return self._laplace_admittance
def __repr__(self):
return f"<{self.__class__.__name__}:{self._name},{self._value}>"
def __add__(self, other):
if isinstance(other, self.__class__):
Cls = self.__class__
value = self.value + other.value
name = f"{self.name}+{other.name}"
new_component = Cls(name, value)
self._refs.append((new_component, lambda value: value + other.value))
other._refs.append((new_component, lambda value: self.value + value))
elif isinstance(other, unyt_quantity):
Cls = self.__class__
value = self.value + other
name = f"{self.name}+{str(other)}"
new_component = Cls(name, value)
self._refs.append((new_component, lambda value: value + other))
else:
raise TypeError(f"{other} not an appropriate type")
return new_component
def __radd__(self, other):
if isinstance(other, unyt_quantity):
Cls = self.__class__
value = other + self.value
name = f"{str(other)}+{self.name}"
new_component = Cls(name, value)
self._refs.append((new_component, lambda value: other + value))
else:
raise TypeError(f"{other} not an appropriate type")
return new_component
def __sub__(self, other):
if isinstance(other, self.__class__):
Cls = self.__class__
value = self.value - other.value
name = f"{self.name}-{other.name}"
new_component = Cls(name, value)
self._refs.append((new_component, lambda value: value - other.value))
other._refs.append((new_component, lambda value: self.value - value))
elif isinstance(other, unyt_quantity):
Cls = self.__class__
value = self.value - other
name = f"{self.name}-{str(other)}"
new_component = Cls(name, value)
self._refs.append((new_component, lambda value: value - other))
else:
raise TypeError(f"{other} not an appropriate type")
return new_component
def __rsub__(self, other):
if isinstance(other, unyt_quantity):
Cls = self.__class__
value = other - self.value
name = f"{str(other)}-{self.name}"
new_component = Cls(name, value)
self._refs.append((new_component, lambda value: other - value))
else:
raise TypeError(f"{other} not an appropriate type")
return new_component
def __mul__(self, other):
if isinstance(other, numbers.Number):
Cls = self.__class__
value = self.value * other
name = f"{self.name}*{other}"
new_component = Cls(name, value)
self._refs.append((new_component, lambda value: value * other))
else:
raise TypeError(f"{other} not an appropriate type")
return new_component
def __rmul__(self, other):
if isinstance(other, numbers.Number):
Cls = self.__class__
value = other * self.value
name = f"{other}*{self.name}"
new_component = Cls(name, value)
self._refs.append((new_component, lambda value: other * value))
else:
raise TypeError(f"{other} not an appropriate type")
return new_component
def __truediv__(self, other):
if isinstance(other, numbers.Number):
Cls = self.__class__
value = self.value / other
name = f"{self.name}/{other}"
new_component = Cls(name, value)
self._refs.append((new_component, lambda value: value / other))
else:
raise TypeError(f"{other} not an appropriate type")
return new_component
def to(self, unit):
"""Convert component's value to 'unit' expression
Args:
unit (str): SI unit expression
Returns:
self
"""
self._value = self._value.to(unit)
return self
def max(self, temperature=None):
"""Calculate the maximum component value at the given temperature
Parameters
----------
        temperature : unyt_quantity in degree Celsius
component temperature for drift from reference temperature
default of None means to only consider tolerance
"""
if temperature is None:
temperature = self._ref_temp
deltaT = abs(temperature_difference(self._ref_temp, temperature))
return self.value * (1 + self._tol + deltaT * self._tc)
def min(self, temperature=None):
"""Calculate the minimum component value at the given temperature
Parameters
----------
        temperature : unyt_quantity in degree Celsius
component temperature for drift from reference temperature
default of None means to only consider tolerance
"""
if temperature is None:
temperature = self._ref_temp
deltaT = abs(temperature_difference(self._ref_temp, temperature))
return self.value * (1 - (self._tol + deltaT * self._tc))
class Resistor(PassiveComponent):
"""Two-port linear resistor
Parameters
----------
name : str
name such as reference designator
value : float or unyt_quantity
resistance in unit ohm
"""
def __init__(self, name, value=unyt_quantity(1, "Ω"), **kwargs):
if isinstance(value, numbers.Number):
value = unyt_quantity(value, "Ω")
correct_unit = (value / unyt_quantity(1, "Ω")).units.is_dimensionless
if not correct_unit:
raise ValueError(f"{value} must be in unit ohm")
super().__init__(name, value, **kwargs)
self._laplace_admittance = 1 / self._symbol
def parallel(self, other):
"""Compute the parallel resistance with `other`
Parameters
----------
other : Resistor
"""
if not isinstance(other, Resistor):
raise TypeError(f"'{other}' is not a Resistor")
r1 = self.value
r2 = other.value
name = f"{self.name}||{other.name}"
return Resistor(name, (r1 * r2) / (r1 + r2))
class Capacitor(PassiveComponent):
"""Two-port linear capacitor
Parameters
----------
name :str
name such as reference designator
value : float or unyt_quantity
capacitance in unit farad
"""
def __init__(self, name, value=unyt_quantity(1, "F"), **kwargs):
if isinstance(value, numbers.Number):
value = unyt_quantity(value, "F")
correct_unit = (value / unyt_quantity(1, "F")).units.is_dimensionless
if not correct_unit:
raise ValueError(f"{value} must be in unit farad")
super().__init__(name, value, **kwargs)
self._laplace_admittance = self._laplace_s * self._symbol
def series(self, other):
"""Compute the series capacitance with `other`
Parameters
----------
other : Capacitor
"""
if not isinstance(other, Capacitor):
raise TypeError(f"'{other}' is not a Capacitor")
c1 = self.value
c2 = other.value
name = f"{self.name}--{other.name}"
return Capacitor(name, (c1 * c2) / (c1 + c2))
class Inductor(PassiveComponent):
"""Two-port linear inductor
Parameters
----------
name :str
name such as reference designator
value : float or unyt_quantity
inductance in unit henry
"""
def __init__(self, name, value=unyt_quantity(1, "H"), **kwargs):
if isinstance(value, numbers.Number):
value = unyt_quantity(value, "H")
correct_unit = (value / unyt_quantity(1, "H")).units.is_dimensionless
if not correct_unit:
raise ValueError(f"{value} must be in unit henry")
super().__init__(name, value, **kwargs)
self._laplace_admittance = 1 / (self._laplace_s * self._symbol)
def parallel(self, other):
"""Compute the parallel inductance with `other`
Parameters
----------
other : Inductor
"""
if not isinstance(other, Inductor):
raise TypeError(f"'{other}' is not an Inductor")
l1 = self.value
l2 = other.value
name = f"{self.name}||{other.name}"
return Inductor(name, (l1 * l2) / (l1 + l2))
class VoltageSource(Component):
"""A ideal voltage source
Parameters
----------
name : str
name such as reference designator
value : float or unyt_quantity
value in unit volt
"""
def __init__(self, name, value=unyt_quantity(1, "V")):
if isinstance(value, numbers.Number):
value = unyt_quantity(value, "V")
if not isinstance(value, unyt_quantity):
raise TypeError(f"{value} must be a unyt_quantity")
correct_unit = (value / unyt_quantity(1, "V")).units.is_dimensionless
if not correct_unit:
raise ValueError(f"{value} must be in unit volt")
pins = [Pin("1", 1, self), Pin("2", 2, self)]
super().__init__(name, pins)
self._value = value
self._tol = 0.0
self._tc = unyt_quantity(0.0, "1/K")
@property
def value(self):
"""Return value of component"""
return self._value
@value.setter
def value(self, value):
ratio = value / unyt_quantity(1, "V")
if not ratio.units.is_dimensionless:
raise ValueError(f"{value} must be in unit volt")
self._value = value
@property
def tol(self):
"""value (float): tolerance"""
return self._tol
@tol.setter
def tol(self, value):
self._tol = value
@property
def tc(self):
"""value (unyt_quantity or float): temperature coefficient, drift per kelvin"""
return self._tc
@tc.setter
def tc(self, value):
self._tc = unyt_quantity(value, "1/delta_degC")
def __repr__(self):
return f"<VoltageSource:{self._name},{self._value}>"
class Opamp(Component):
"""Opamp
Parameters
----------
name : str
Aol : sympy expression, open-loop transfer function Aol(s)
Pins
----
1 'IN+' positive input
2 'IN-' negative input
3 'OUT' output
"""
def __init__(self, name, Aol, **kwargs):
pins = [
Pin("IN+", 1, self, direction=PortDirection.IN),
Pin("IN-", 2, self, direction=PortDirection.IN),
Pin("OUT", 3, self, direction=PortDirection.OUT),
]
super().__init__(name, pins, **kwargs)
self.Aol = sympify(Aol)
if hasattr(self, "Vos"):
self.Vos = unyt_quantity(self.Vos, V)
vos = VoltageSource(f"{name}_Vos", value=self.Vos)
self.parasitics[vos] = [None, self.pin(2)]
class PassiveComponentNetwork(Component):
"""Passive component network
An `n` element array of passive components such as a resistor network.
Parameters
----------
name : str
name of passive component such as the reference designator
values : list of float or unyt_quantity of length n
nominal values
Keyword Attributes
-------------------
tol : float
absolute tolerance
tc : float or unyt_quantity
absolute temperature drift per Kelvin
rel_tol : float
relative tolerance
rel_tc : float or unyt_quantity
relative temperature drift per Kelvin
reference_temperature : unyt_quantity in unit degree Celsius
Pins - follows Vishay nomenclature
----
1 <element #1> 2*n
2 <element #2> 2*n-1
...
n <element #n> n+1
"""
def __init__(self, name, values, **kwargs):
self._n = len(values)
pins = [Pin(f"{i}", i, self) for i in range(1, 2 * self._n + 1)]
super().__init__(name, pins)
self._values = values
self._tol = kwargs.get("tol", 0.0)
self._tc = kwargs.get("tc", 0.0 / delta_degC)
self._rel_tol = kwargs.get("rel_tol", 0.0)
self._rel_tc = kwargs.get("rel_tc", 0.0 / delta_degC)
self._ref_temp = kwargs.get("reference_temperature", 20 * degC)
self._elements = []
self._symbols = []
@property
def values(self):
"""Return value of component"""
return self._values
@values.setter
def values(self, values):
correct_unit = (self._values[0] / values[0]).units.is_dimensionless
if not correct_unit:
raise ValueError(f"'{values[0]}' must be in unit '{self._values[0].units}'")
self._values = values
@property
def tol(self):
"""value : float
absolute tolerance"""
return self._tol
@tol.setter
def tol(self, value):
self._tol = value
self._elements[0].tol = value
@property
def tc(self):
"""value : unyt_quantity or float
absolute temperature coefficient as drift per kelvin"""
return self._tc
@tc.setter
def tc(self, value):
tc = unyt_quantity(value, "1/delta_degC")
self._tc = tc
self._elements[0].tc = tc
@property
def rel_tol(self):
"""value : float
relative tolerance
"""
return self._rel_tol
@rel_tol.setter
def rel_tol(self, value):
self._rel_tol = value
for element in self._elements[1:]:
element.tol = value
@property
def rel_tc(self):
"""value : float or unyt_quantity
relative temperature coefficient
"""
return self._rel_tc
@rel_tc.setter
def rel_tc(self, value):
rel_tc = unyt_quantity(value, "1/delta_degC")
self._tc = rel_tc
for element in self._elements[1:]:
element.tc = rel_tc
@property
def reference_temperature(self):
"""value : unyt_quantity
reference temperature for temperature drift calculations, default 20 °C
"""
return self._ref_temp
@reference_temperature.setter
def reference_temperature(self, value):
ref_temp = unyt_quantity(value, "degC")
self._ref_temp = ref_temp
for element in self._elements:
element.reference_temperature = ref_temp
def element_at(self, pin):
"""Retrieve the element at `pin` number
Parameters
----------
pin : int
Returns
-------
element : PassiveComponent
"""
i = pin if pin <= self._n else 2 * self._n - pin + 1
return self._elements[i - 1]
def __getitem__(self, item):
try:
return self._elements[item]
except TypeError:
i = int(str(item)[1:]) - 1
return self._elements[i]
class ResistorNetwork(PassiveComponentNetwork):
"""Resistor network
Resistor network consisting of `n` individual elements.
Parameters
----------
name : str
name such as reference designator (e.g. 'RN1')
values : list of float or unyt_quantity of length n
resistances in unit ohm
Pins - follows Vishay nomenclature
----
1 -R1- 2*n
2 -R2- 2*n-1
...
n -Rn- n+1
"""
def __init__(self, name, values, **kwargs):
super().__init__(name, values, **kwargs)
n = len(values)
for i, value in enumerate(values, start=1):
tol, tc = (self._tol, self._tc) if i == 1 else (self._rel_tol, self._rel_tc)
res = Resistor(
f"{name}_R{i}",
value,
tol=tol,
tc=tc,
pin_names=[f"{i}", f"{2*n-i+1}"],
)
self._elements.append(res)
setattr(self, f"R{i}", res)
self._symbols = [f"{name}_R{i}" for i in range(1, len(values) + 1)]
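# A minimal usage sketch of the passive-component API defined above; the part
# names and values are illustrative only.
if __name__ == "__main__":
    r1 = Resistor("R1", unyt_quantity(10e3, "Ω"), tol=0.01)
    r2 = Resistor("R2", unyt_quantity(4.7e3, "Ω"), tol=0.01)
    # Parallel combination R1||R2 returns a new Resistor tracking both parents.
    r_par = r1.parallel(r2)
    print(r_par.name, r_par.value)
    # Worst-case limits from the 1 % tolerance (no temperature drift specified).
    print("R1 max:", r1.max(), "R1 min:", r1.min())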
|
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
VERBOSE = "VERBOSE"
log_levels = [ERROR, WARNING]
def Log(level, msg):
global log_levels
if level in log_levels:
print(level + ": " + msg)
return 0
def SetLevels(levels):
global log_levels
log_levels = levels
return 0
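# A brief usage sketch of the logging helpers above.
if __name__ == "__main__":
    SetLevels([ERROR, WARNING, INFO])
    Log(INFO, "informational messages are now printed")
    Log(DEBUG, "debug messages stay suppressed")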
|
import logging
import os
#pylint: disable=unused-import
import sure # flake8: noqa
import sys
paths = [
'..',
]
for path in paths:
sys.path.append(os.path.abspath(path))
import pyaci
logging.captureWarnings(True)
def testSubtreeClass():
opt = pyaci.options.subtreeClass('fvSubnet')
opt['query-target'].should.equal('subtree')
opt['target-subtree-class'].should.equal('fvSubnet')
def testChildClass():
opt = pyaci.options.childClass('fvSubnet')
opt['query-target'].should.equal('children')
opt['target-subtree-class'].should.equal('fvSubnet')
def testOrderBy():
opt = pyaci.options.orderBy('fvSubnet.addr')
opt['order-by'].should.equal('fvSubnet.addr')
def testPage():
opt = pyaci.options.page(1) & pyaci.options.pageSize(50)
opt['page'].should.equal(1)
opt['page-size'].should.equal(50)
def testFilter():
opt = pyaci.options.filter(pyaci.filters.Eq('fvTenant.name', 'cisco'))
opt['query-target-filter'].should.equal('eq(fvTenant.name,"cisco")')
|
import random
import numpy as np
import networkx as nx
import sys, os, json, argparse, itertools
import grinpy as gp
import time
from glob import glob
from multiprocessing import Pool
from ortools.sat.python import cp_model
"""
This code is based on https://github.com/machine-reasoning-ufrgs/GNN-GCP
"""
def solve_csp(M, n_colors, nmin=25):
model = cp_model.CpModel()
N = len(M)
variables = []
variables = [ model.NewIntVar(0, n_colors-1, '{i}'.format(i=i)) for i in range(N) ]
for i in range(N):
for j in range(i+1,N):
if M[i][j] == 1:
model.Add( variables[i] != variables [j] )
solver = cp_model.CpSolver()
solver.parameters.max_time_in_seconds = int( ((10.0 / nmin) * N) )
status = solver.Solve(model)
if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL :
solution = dict()
for k in range(N):
solution[k] = solver.Value(variables[k])
return solution
    elif status == cp_model.INFEASIBLE:
        return None
    else:
        raise Exception("CSP solver could not determine feasibility within the time limit")
def is_cn(Ma, cn_i):
    # True when the graph is not colorable with cn_i - 1 colors;
    # the caller checks cn_i-colorability separately.
    return solve_csp(Ma, cn_i - 1) is None
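# A small sanity-check sketch (not part of the generation pipeline): a triangle
# graph needs exactly 3 colors, so solve_csp succeeds with 3 colors and is_cn
# confirms that 2 colors are not enough.
def _demo_triangle():
    triangle = np.array([[0, 1, 1],
                         [1, 0, 1],
                         [1, 1, 0]])
    assert solve_csp(triangle, 3) is not None
    assert is_cn(triangle, 3)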
def multiprocessing_dataset_generation(nmin, nmax, ncolors, path, samples, seed, num_workers=8):
if not os.path.exists(path):
os.makedirs(path)
# For increasing sampling speed, we create multiple workers/processes in parallel
samples_per_worker = int(samples//num_workers)
p = Pool()
args_list = [(nmin, nmax, ncolors, path, samples_per_worker, samples_per_worker*i, seed+i) for i in range(num_workers)]
p.map(_create_simple_dataset_tuple, args_list)
def _create_simple_dataset_tuple(args):
nmin, nmax, ncolors, path, samples, start_idx, seed = args
create_simple_dataset(nmin, nmax, ncolors, path, samples, start_idx, seed)
def create_simple_dataset(nmin, nmax, ncolors, path, samples, start_idx=0, seed=123):
if not os.path.exists(path):
os.makedirs(path)
export_pack = 500
all_solutions = {"N": np.zeros((export_pack,), dtype=np.uint8),
"adjacency": -np.ones((export_pack, nmax, nmax), dtype=np.int8),
"nodes": -np.ones((export_pack, nmax), dtype=np.int8),
"graph_idx": -np.ones((export_pack,), dtype=np.int32),
"idx": 0}
def export_solution(Ma, init_sol, z, graph_idx=-1):
N, Ma, sol = write_solution(Ma=Ma, init_sol=init_sol, save_path=None)
sol_idx = all_solutions["idx"]
all_solutions["N"][sol_idx] = N
all_solutions["adjacency"][sol_idx,:N,:N] = Ma.astype(np.uint8)
all_solutions["nodes"][sol_idx,:N] = sol
all_solutions["graph_idx"][sol_idx] = graph_idx
all_solutions["idx"] += 1
if all_solutions["idx"] >= export_pack:
all_solutions.pop("idx")
np.savez_compressed(os.path.join(path, "samples_%s_%s.npz" % (str(z-export_pack+2).zfill(7), str(z+1).zfill(7))),
**all_solutions)
all_solutions["N"].fill(0)
all_solutions["adjacency"].fill(-1)
all_solutions["nodes"].fill(-1)
all_solutions["graph_idx"].fill(-1)
all_solutions["idx"] = 0
# Adjacency density ratio to sample from.
edge_prob_constraints = {3: (0.1, 0.3), 4: (0.15, 0.3)}
np.random.seed(seed)
random.seed(seed)
    z = start_idx
    while z < start_idx + samples:
        N = np.random.randint(nmin, nmax + 1)
save_path = os.path.join(path, "sample_%s.npz" % (str(z).zfill(6)))
found_sol = False
Cn = ncolors
lim_inf, lim_sup = edge_prob_constraints[Cn][0], edge_prob_constraints[Cn][1]
lim_sup = min(lim_sup, nmax/N*(lim_inf+lim_sup)/2.0)
p_connected = random.uniform(lim_inf, lim_sup)
Ma = gen_matrix(N, p_connected)
init_sol = solve_csp(Ma, Cn)
if init_sol is not None and is_cn(Ma,Cn):
export_solution(Ma, init_sol, z)
found_sol = True
if found_sol:
z += 1
if z % 100 == 0:
print("Completed %i (%4.2f%%) in [%i,%i] samples..." % (z-start_idx, (z-start_idx)*100.0/samples, start_idx, start_idx+samples))
def write_solution(Ma, init_sol, save_path=None):
N = Ma.shape[0]
sol = np.zeros(N, dtype=np.uint8)
for i in range(N):
sol[i] = int(init_sol[i])
if save_path is not None:
np.savez_compressed(save_path, adjacency=Ma, nodes=sol)
else:
return (N, Ma, sol)
def combine_solution_files(save_path):
print("Combining solution files...")
sample_files = sorted(glob(os.path.join(save_path, "sample*.npz")))
nodes, adjacency = None, None
for filename in sample_files:
data_arr = np.load(filename)
if nodes is None and adjacency is None:
nodes, adjacency = data_arr["nodes"], data_arr["adjacency"]
else:
nodes = np.concatenate([nodes, data_arr["nodes"]], axis=0)
adjacency = np.concatenate([adjacency, data_arr["adjacency"]], axis=0)
np.savez_compressed(os.path.join(save_path, "samples_combined.npz"), nodes=nodes, adjacency=adjacency)
def gen_matrix(N, prob):
Ma = np.zeros((N,N))
Ma = np.random.choice([0,1], size=(N, N), p=[1-prob,prob])
i_lower = np.tril_indices(N, -1)
Ma[i_lower] = Ma.T[i_lower] # make the matrix symmetric
np.fill_diagonal(Ma, 0)
# Ensuring that every node has at least 1 connection
while np.min(Ma.sum(axis=0)) == 0:
idx = np.argmin(Ma.sum(axis=0))
Ma[idx,:] = np.random.choice([0,1], size=(N,), p=[1-prob,prob])
Ma[:,idx] = Ma[idx,:]
Ma[idx,idx] = 0
# Test that the whole graph is connected
connect = np.zeros((N,))
connect[0] = 1
Ma_diag = np.eye(N) + Ma
while (1 - connect).sum() > 0:
new_connect = ((connect[None,:] * Ma_diag).sum(axis=1) > 0).astype(connect.dtype)
if np.any(new_connect != connect):
connect = new_connect
else:
num_choices = 3
start_nodes = np.random.choice(np.where(connect>0)[0], size=(num_choices,))
end_nodes = np.random.choice(np.where(connect==0)[0], size=(num_choices,))
Ma[start_nodes, end_nodes] = 1
Ma[end_nodes, start_nodes] = 1
Ma_diag = np.eye(N) + Ma
return Ma
if __name__ == '__main__':
# Define argument parser
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='Path to which the files should be saved.', type=str, required=True)
    parser.add_argument('--samples', help='Number of samples to generate', type=int, default=200000)
parser.add_argument('--nmin', default=25, help='Minimum number of nodes in a graph', type=int)
parser.add_argument('--nmax', default=50, help='Maximum number of nodes in a graph', type=int)
parser.add_argument('--ncolor', default=3, help='Number of colors to use for the graph coloring', type=int)
parser.add_argument('--train', help='If train is selected, we use a different seed', action='store_true')
# Parse arguments from command line
args = parser.parse_args()
seed = 1327 if args.train else 3712
random.seed(seed)
np.random.seed(seed)
# Start the generation process
print('Creating {} instances'.format(args.samples))
multiprocessing_dataset_generation(
args.nmin, args.nmax,
ncolors=args.ncolor,
samples=args.samples,
path=args.path,
seed=seed
)
combine_solution_files(args.path)
|
import hashlib # used for password hashing
import requests # (NOT A PYTHON STANDARD LIBRARY, MUST DOWNLOAD)
# this is used to get content from pwnedpasswords api
# go to : http://docs.python-requests.org/en/latest/ ... to get Requests library
my_password = input('\n Enter a Password : ')
# creates the hash object with the SHA-1 algorithm
# by passing in the encoded version of my_password (its string of bytes)
# note: the hashing function (sha1) only accepts bytes as its parameter
hash_object = hashlib.sha1(my_password.encode())
# creates a hex representation of the password
hashed_password = hash_object.hexdigest()
# retrieve the first 5 characters,
# used to pass into the haveibeenpwned address
first_bits = hashed_password[:5]
# pulls the byte version from the API and converts each line to an array
r = requests.get('https://api.pwnedpasswords.com/range/' + first_bits)
r_array = r.content.decode('UTF-8').splitlines()
# the test string does not include the first 5 characters
# because the API does not return those characters as it is not necessary
test_string = hashed_password[5:40].upper()
found_match = False
occurrences = ""
# loop through r_array looking for a matching hash suffix
for line in r_array:
    if line[:35] == test_string:
        found_match = True
        occurrences = line[36:]
# Final print messages
if found_match:
    print('\n ' + my_password + ' was found')
    print(' Hash : ' + hashed_password + ' ... ' + occurrences + ' occurrences\n')
else:
print('\n Password not found!\n')
# Author : Wesley Witter (On github: iamwesley99)
# email : wesleywitter1@gmail.com
|
from django.contrib.auth.views import LogoutView
from django.urls import path
from django_salesforce_oauth.views import oauth, oauth_callback
urlpatterns = [
path("", oauth, {"domain": "login"}, name="oauth"),
path("sandbox/", oauth, {"domain": "test"}, name="oauth-sandbox"),
path("callback/", oauth_callback, name="oauth-callback"),
path("logout/", LogoutView.as_view(), name="logout"),
]
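# A minimal sketch of mounting these routes in a project urlconf; the "oauth/"
# prefix and the dotted module path are illustrative assumptions.
#
#     from django.urls import include, path
#     urlpatterns = [
#         path("oauth/", include("django_salesforce_oauth.urls")),
#     ]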
|
import functools
import queue
import threading
import traceback
class Worker(threading.Thread):
def __init__(self, tasks):
super().__init__(daemon=True)
self.tasks = tasks
self.start()
def run(self):
while True:
func, args, kwargs = self.tasks.get()
try:
func(*args, **kwargs)
except Exception:
traceback.print_exc()
finally:
self.tasks.task_done()
class ThreadPool:
def __init__(self, num_threads):
self.tasks = queue.Queue(num_threads)
for _ in range(num_threads):
Worker(self.tasks)
    def add_task(self, func, *args, **kwargs):
        self.tasks.put((func, args, kwargs))
def wait_completion(self):
self.tasks.join()
class Asynco:
_instance = None
pool = {}
default_pool_size = 10
    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # object.__new__ accepts no extra arguments; forward only cls
            cls._instance = super().__new__(cls)
        return cls._instance
def get_pool(self):
return self.pool
@staticmethod
def get_default_pool_size():
return Asynco.default_pool_size
@staticmethod
def get_object():
if Asynco._instance:
return Asynco._instance
else:
return Asynco()
@staticmethod
def create_pool(pool_name, size = default_pool_size):
pool = Asynco.get_object().get_pool()
if pool_name in pool:
raise Exception(pool_name + ' already exists')
else:
pool[pool_name] = ThreadPool(size)
@staticmethod
def complete_all_task(pool_name):
asynco_object = Asynco.get_object()
pool = asynco_object.get_pool()
pool[pool_name].wait_completion()
# Module-level lock so concurrent calls cannot create the same pool twice
# (a lock created inside the wrapper would be new on every call and guard nothing).
_pool_lock = threading.RLock()
def asynco(function=None, pool_name=None):
    if function is None:
        return functools.partial(asynco, pool_name=pool_name)
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        asynco_object = Asynco.get_object()
        pool = asynco_object.get_pool()
        if pool_name:
            with _pool_lock:
                if pool_name not in pool:
                    pool[pool_name] = ThreadPool(asynco_object.get_default_pool_size())
            pool[pool_name].add_task(function, *args, **kwargs)
        else:
            threading.Thread(target=function, args=args, kwargs=kwargs).start()
    return wrapper
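# A minimal usage sketch of the decorator defined above; the pool name and the
# task are illustrative.
if __name__ == "__main__":
    @asynco(pool_name="demo")
    def greet(name):
        print("hello", name)

    for person in ("ada", "grace"):
        greet(person)
    # Block until every task queued on the "demo" pool has finished.
    Asynco.complete_all_task("demo")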
|
# -------------------------------------------------------------------------
# Copyright (c) PTC Inc. and/or all its affiliates. All rights reserved.
# See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# Datalogger Test - Test to execute various calls for the Datalogger
# parts of the Kepware configuration API
from kepconfig.error import KepError
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import kepconfig
import kepconfig.connectivity
import kepconfig.iot_gateway
from kepconfig import datalogger
import json
import time
import datetime
import pytest
# Datalogger configs to be used
log_group_name = 'LG'
log_item1 = {
"common.ALLTYPES_NAME": "LogItem1",
"datalogger.LOG_ITEM_ID": "_System._Time"
}
log_item2 = {
"common.ALLTYPES_NAME": "LogItem2",
"datalogger.LOG_ITEM_ID": "_System._Date"
}
log_item3 = {
"common.ALLTYPES_NAME": "LogItem3",
"datalogger.LOG_ITEM_ID": "_System._Time_Second"
}
trigger1 = {
"common.ALLTYPES_NAME": "Trigger2",
"datalogger.TRIGGER_TYPE": 0
}
trigger2 = {
"common.ALLTYPES_NAME": "Trigger3",
"datalogger.TRIGGER_TYPE": 1
}
trigger3 = {
"common.ALLTYPES_NAME": "Trigger4",
"datalogger.TRIGGER_TYPE": 1
}
log_group_data1 = {
"common.ALLTYPES_NAME": log_group_name,
"common.ALLTYPES_DESCRIPTION": "",
"datalogger.LOG_GROUP_ENABLED": False,
"datalogger.LOG_GROUP_UPDATE_RATE_MSEC": 100,
"datalogger.LOG_GROUP_UPDATE_RATE_UNITS": 0,
"datalogger.LOG_GROUP_MAP_NUMERIC_ID_TO_VARCHAR": False,
"datalogger.LOG_GROUP_USE_LOCAL_TIME_FOR_TIMESTAMP_INSERTS": True,
"datalogger.LOG_GROUP_STORE_AND_FORWARD_ENABLED": False,
"datalogger.LOG_GROUP_STORE_AND_FORWARD_STORAGE_DIRECTORY": "C:\\ProgramData\\PTC\\ThingWorx Kepware Server\\V6\\DataLogger",
"datalogger.LOG_GROUP_STORE_AND_FORWARD_MAX_STORAGE_SIZE": 10,
"datalogger.LOG_GROUP_MAX_ROW_BUFFER_SIZE": 1000,
"datalogger.LOG_GROUP_DSN": "",
"datalogger.LOG_GROUP_DSN_USERNAME": "",
"datalogger.LOG_GROUP_DSN_PASSWORD": "",
"datalogger.LOG_GROUP_DSN_LOGIN_TIMEOUT": 10,
"datalogger.LOG_GROUP_DSN_QUERY_TIMEOUT": 15,
"datalogger.LOG_GROUP_TABLE_SELECTION": 0,
"datalogger.LOG_GROUP_TABLE_NAME": "",
"datalogger.LOG_GROUP_TABLE_FORMAT": 0,
"datalogger.LOG_GROUP_BATCH_ID_ITEM": "",
"datalogger.LOG_GROUP_BATCH_ID_ITEM_TYPE": "Default",
"datalogger.LOG_GROUP_BATCH_ID_UPDATE_RATE": 1000,
"datalogger.LOG_GROUP_BATCH_ID_UPDATE_RATE_UNITS": 0,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_DSN_CHANGE": True,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_BATCH_ID_CHANGE": True,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_TABLE_NAME_CHANGE": False,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_TABLE_SELECTION_CHANGE": False
}
log_group_data2 = {
"common.ALLTYPES_NAME": log_group_name +'1',
"common.ALLTYPES_DESCRIPTION": "",
"datalogger.LOG_GROUP_ENABLED": False,
"datalogger.LOG_GROUP_UPDATE_RATE_MSEC": 100,
"datalogger.LOG_GROUP_UPDATE_RATE_UNITS": 0,
"datalogger.LOG_GROUP_MAP_NUMERIC_ID_TO_VARCHAR": False,
"datalogger.LOG_GROUP_USE_LOCAL_TIME_FOR_TIMESTAMP_INSERTS": True,
"datalogger.LOG_GROUP_STORE_AND_FORWARD_ENABLED": False,
"datalogger.LOG_GROUP_STORE_AND_FORWARD_STORAGE_DIRECTORY": "C:\\ProgramData\\PTC\\ThingWorx Kepware Server\\V6\\DataLogger",
"datalogger.LOG_GROUP_STORE_AND_FORWARD_MAX_STORAGE_SIZE": 10,
"datalogger.LOG_GROUP_MAX_ROW_BUFFER_SIZE": 1000,
"datalogger.LOG_GROUP_DSN": "",
"datalogger.LOG_GROUP_DSN_USERNAME": "",
"datalogger.LOG_GROUP_DSN_PASSWORD": "",
"datalogger.LOG_GROUP_DSN_LOGIN_TIMEOUT": 10,
"datalogger.LOG_GROUP_DSN_QUERY_TIMEOUT": 15,
"datalogger.LOG_GROUP_TABLE_SELECTION": 0,
"datalogger.LOG_GROUP_TABLE_NAME": "",
"datalogger.LOG_GROUP_TABLE_FORMAT": 0,
"datalogger.LOG_GROUP_BATCH_ID_ITEM": "",
"datalogger.LOG_GROUP_BATCH_ID_ITEM_TYPE": "Default",
"datalogger.LOG_GROUP_BATCH_ID_UPDATE_RATE": 1000,
"datalogger.LOG_GROUP_BATCH_ID_UPDATE_RATE_UNITS": 0,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_DSN_CHANGE": True,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_BATCH_ID_CHANGE": True,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_TABLE_NAME_CHANGE": False,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_TABLE_SELECTION_CHANGE": False
}
log_group_data3 = {
"common.ALLTYPES_NAME": log_group_name +'2',
"common.ALLTYPES_DESCRIPTION": "",
"datalogger.LOG_GROUP_ENABLED": False,
"datalogger.LOG_GROUP_UPDATE_RATE_MSEC": 100,
"datalogger.LOG_GROUP_UPDATE_RATE_UNITS": 0,
"datalogger.LOG_GROUP_MAP_NUMERIC_ID_TO_VARCHAR": False,
"datalogger.LOG_GROUP_USE_LOCAL_TIME_FOR_TIMESTAMP_INSERTS": True,
"datalogger.LOG_GROUP_STORE_AND_FORWARD_ENABLED": False,
"datalogger.LOG_GROUP_STORE_AND_FORWARD_STORAGE_DIRECTORY": "C:\\ProgramData\\PTC\\ThingWorx Kepware Server\\V6\\DataLogger",
"datalogger.LOG_GROUP_STORE_AND_FORWARD_MAX_STORAGE_SIZE": 10,
"datalogger.LOG_GROUP_MAX_ROW_BUFFER_SIZE": 1000,
"datalogger.LOG_GROUP_DSN": "",
"datalogger.LOG_GROUP_DSN_USERNAME": "",
"datalogger.LOG_GROUP_DSN_PASSWORD": "",
"datalogger.LOG_GROUP_DSN_LOGIN_TIMEOUT": 10,
"datalogger.LOG_GROUP_DSN_QUERY_TIMEOUT": 15,
"datalogger.LOG_GROUP_TABLE_SELECTION": 0,
"datalogger.LOG_GROUP_TABLE_NAME": "",
"datalogger.LOG_GROUP_TABLE_FORMAT": 0,
"datalogger.LOG_GROUP_BATCH_ID_ITEM": "",
"datalogger.LOG_GROUP_BATCH_ID_ITEM_TYPE": "Default",
"datalogger.LOG_GROUP_BATCH_ID_UPDATE_RATE": 1000,
"datalogger.LOG_GROUP_BATCH_ID_UPDATE_RATE_UNITS": 0,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_DSN_CHANGE": True,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_BATCH_ID_CHANGE": True,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_TABLE_NAME_CHANGE": False,
"datalogger.LOG_GROUP_REGENERATE_ALIAS_TABLE_ON_TABLE_SELECTION_CHANGE": False
}
def HTTPErrorHandler(err):
if err.__class__ is kepconfig.error.KepHTTPError:
print(err.code)
print(err.msg)
print(err.url)
print(err.hdrs)
print(err.payload)
else:
print('Different Exception Received: {}'.format(err))
def initialize(server):
try:
server._config_get(server.url +"/project/_datalogger")
except Exception as err:
pytest.skip("DataLogger plug-in is not installed", allow_module_level=True)
def complete(server):
try:
lg_left = datalogger.log_group.get_all_log_groups(server)
for x in lg_left:
print(datalogger.log_group.del_log_group(server, x['common.ALLTYPES_NAME']))
except Exception as err:
HTTPErrorHandler(err)
@pytest.fixture(scope="module")
def server(kepware_server):
server = kepware_server
# Initialize any configuration before testing in module
initialize(server)
# Everything below yield is run after module tests are completed
yield server
complete(server)
def test_log_group_add(server):
assert datalogger.log_group.add_log_group(server, log_group_data1)
assert datalogger.log_group.enable_log_group(server,log_group_name)
assert datalogger.log_group.disable_log_group(server,log_group_name)
assert datalogger.log_group.del_log_group(server, log_group_name)
assert datalogger.log_group.add_log_group(server, [log_group_data1, log_group_data2])
# Log Group 2 should fail since it exists
assert type(datalogger.log_group.add_log_group(server, [log_group_data2, log_group_data3])) == list
def test_log_group_modify(server):
assert datalogger.log_group.modify_log_group(server, {"datalogger.LOG_GROUP_USE_LOCAL_TIME_FOR_TIMESTAMP_INSERTS": False},log_group_data1['common.ALLTYPES_NAME'], force=True)
assert datalogger.log_group.modify_log_group(server,{"datalogger.LOG_GROUP_USE_LOCAL_TIME_FOR_TIMESTAMP_INSERTS": True},log_group_data1['common.ALLTYPES_NAME'])
# Fail due to no log_group name provided
with pytest.raises(KepError):
assert datalogger.log_group.modify_log_group(server,{"datalogger.LOG_GROUP_USE_LOCAL_TIME_FOR_TIMESTAMP_INSERTS": True})
assert datalogger.log_group.modify_log_group(server,{"common.ALLTYPES_NAME": log_group_name,"datalogger.LOG_GROUP_USE_LOCAL_TIME_FOR_TIMESTAMP_INSERTS": True})
def test_log_group_get(server):
assert type(datalogger.log_group.get_log_group(server, log_group_name)) == dict
assert type(datalogger.log_group.get_all_log_groups(server)) == list
def test_log_item_add(server):
assert datalogger.log_items.add_log_item(server, log_group_name, log_item1)
assert datalogger.log_items.del_log_item(server, log_group_name, log_item1['common.ALLTYPES_NAME'])
assert datalogger.log_items.add_log_item(server, log_group_name, [log_item1, log_item2])
# Fails for item 2 since it's already existing
assert type(datalogger.log_items.add_log_item(server, log_group_name, [log_item2, log_item3])) == list
def test_log_item_modify(server):
assert datalogger.log_items.modify_log_item(server, log_group_name, {"datalogger.LOG_ITEM_NUMERIC_ID": "1"} ,log_item1['common.ALLTYPES_NAME'], force=True)
assert datalogger.log_items.modify_log_item(server, log_group_name, {"datalogger.LOG_ITEM_NUMERIC_ID": "0"} ,log_item1['common.ALLTYPES_NAME'])
# Fail due to item not identified
with pytest.raises(KepError):
assert datalogger.log_items.modify_log_item(server, log_group_name, {"datalogger.LOG_ITEM_NUMERIC_ID": "0"})
assert datalogger.log_items.modify_log_item(server, log_group_name, {"common.ALLTYPES_NAME": "LogItem1","datalogger.LOG_ITEM_NUMERIC_ID": "0"}, force= True)
def test_log_item_get(server):
assert type (datalogger.log_items.get_log_item(server, log_group_name,log_item1['common.ALLTYPES_NAME'])) == dict
assert type(datalogger.log_items.get_all_log_items(server, log_group_name)) == list
# Execute mapping test before deleting items
# Modify group to wide format
assert datalogger.log_group.modify_log_group(server, {"datalogger.LOG_GROUP_TABLE_FORMAT": 1}, log_group_name, force=True)
def test_mapping_get(server):
mapping_list = []
mapping_list = datalogger.mapping.get_all_mappings(server, log_group_name)
assert type(mapping_list) == list
assert type(datalogger.mapping.get_mapping(server, log_group_name, mapping_list[0]['common.ALLTYPES_NAME'])) == dict
def test_mapping_modify(server):
mapping_list = []
mapping_list = datalogger.mapping.get_all_mappings(server, log_group_name)
assert datalogger.mapping.modify_mapping(server, log_group_name, {"datalogger.TABLE_ALIAS_SQL_LENGTH_QUALITY": 10} , mapping_list[0]['common.ALLTYPES_NAME'], force=True)
assert datalogger.mapping.modify_mapping(server, log_group_name, {"datalogger.TABLE_ALIAS_SQL_LENGTH_QUALITY": 15} , mapping_list[0]['common.ALLTYPES_NAME'])
# Fail due to map not identified
with pytest.raises(KepError):
assert datalogger.mapping.modify_mapping(server, log_group_name, {"datalogger.TABLE_ALIAS_SQL_LENGTH_QUALITY": 1})
assert datalogger.mapping.modify_mapping(server, log_group_name, {"common.ALLTYPES_NAME": mapping_list[0]['common.ALLTYPES_NAME'],"datalogger.TABLE_ALIAS_SQL_LENGTH_QUALITY": 0}, force= True)
def test_log_item_del(server):
# Delete Items
assert datalogger.log_items.del_log_item(server, log_group_name,log_item1['common.ALLTYPES_NAME'])
assert datalogger.log_items.del_log_item(server, log_group_name,log_item2['common.ALLTYPES_NAME'])
def test_trigger_add(server):
assert datalogger.triggers.add_trigger(server, log_group_name, trigger1)
assert datalogger.triggers.del_trigger(server, log_group_name, trigger1['common.ALLTYPES_NAME'])
assert datalogger.triggers.add_trigger(server, log_group_name, [trigger1, trigger2])
# Fails adding trigger 2 since it exists
assert type(datalogger.triggers.add_trigger(server, log_group_name, [trigger2, trigger3])) == list
def test_trigger_modify(server):
assert datalogger.triggers.modify_trigger(server, log_group_name, {"datalogger.TRIGGER_STATIC_INTERVAL": 1000} ,trigger1['common.ALLTYPES_NAME'], force=True)
assert datalogger.triggers.modify_trigger(server, log_group_name, {"datalogger.TRIGGER_STATIC_INTERVAL": 500} ,trigger1['common.ALLTYPES_NAME'])
# Fail due to trigger not identified
with pytest.raises(KepError):
assert datalogger.triggers.modify_trigger(server, log_group_name, {"datalogger.TRIGGER_STATIC_INTERVAL": 500})
assert datalogger.triggers.modify_trigger(server, log_group_name, {"common.ALLTYPES_NAME": trigger1['common.ALLTYPES_NAME'],"datalogger.TRIGGER_STATIC_INTERVAL": 1000}, force= True)
def test_trigger_get(server):
assert type(datalogger.triggers.get_trigger(server, log_group_name,trigger1['common.ALLTYPES_NAME'])) == dict
assert type(datalogger.triggers.get_all_triggers(server, log_group_name)) == list
def test_trigger_del(server):
# Delete triggers
assert datalogger.triggers.del_trigger(server, log_group_name,trigger1['common.ALLTYPES_NAME'])
assert datalogger.triggers.del_trigger(server, log_group_name,trigger2['common.ALLTYPES_NAME'])
def test_log_group_services(server):
# Execute Services
job = datalogger.log_group.reset_column_mapping_service(server,log_group_name)
assert type(job) == kepconfig.connection.KepServiceResponse
job = datalogger.log_group.reset_column_mapping_service(server,log_group_name, 60)
assert type(job) == kepconfig.connection.KepServiceResponse
|
import logging
import random
from babel.numbers import format_currency
from django.conf import settings
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.utils import translation
from django.utils.translation import ugettext as _
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField
from django_iban.fields import IBANField, SWIFTBICField
from djchoices import DjangoChoices, ChoiceItem
from bluebottle.accounts.models import BlueBottleUser
from apps.cowry.models import PaymentStatuses, Payment
from apps.cowry.signals import payment_status_changed
from .fields import DutchBankAccountField
from .mails import mail_new_voucher
logger = logging.getLogger(__name__)
random.seed()
class RecurringDirectDebitPayment(models.Model):
"""
Holds the direct debit account and payment information.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
active = models.BooleanField(default=False)
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
# The amount in the minor unit for the given currency (e.g. for EUR in cents).
amount = models.PositiveIntegerField(_("amount"), default=0)
currency = models.CharField(max_length=3, default='')
# Bank account.
name = models.CharField(max_length=35) # max_length from DocData
city = models.CharField(max_length=35) # max_length from DocData
account = DutchBankAccountField()
# IBAN fields required for DocData recurring donation processing.
# These are not required because we will be filling these in manually (for now) and not presenting them to users.
iban = IBANField(blank=True, default='')
bic = SWIFTBICField(blank=True, default='')
def __unicode__(self):
if self.active:
postfix = ' - active'
else:
postfix = ' - inactive'
return str(self.user) + ' ' + str(self.amount) + postfix
@receiver(post_save, weak=False, sender=BlueBottleUser)
def cancel_recurring_payment_user_soft_delete(sender, instance, created, **kwargs):
if created:
return
if hasattr(instance, 'recurringdirectdebitpayment') and instance.deleted:
recurring_payment = instance.recurringdirectdebitpayment
recurring_payment.active = False
recurring_payment.save()
@receiver(post_delete, weak=False, sender=BlueBottleUser)
def cancel_recurring_payment_user_delete(sender, instance, **kwargs):
if hasattr(instance, 'recurringdirectdebitpayment'):
recurring_payment = instance.recurringdirectdebitpayment
recurring_payment.delete()
class DonationStatuses(DjangoChoices):
new = ChoiceItem('new', label=_("New"))
in_progress = ChoiceItem('in_progress', label=_("In progress"))
pending = ChoiceItem('pending', label=_("Pending"))
paid = ChoiceItem('paid', label=_("Paid"))
failed = ChoiceItem('failed', label=_("Failed"))
class Donation(models.Model):
"""
Donation of an amount from a user to a project. A Donation can have a generic foreign key from OrderItem when
it's used in the order process but it can also be used without this GFK when it's used to cash in a Voucher.
"""
class DonationTypes(DjangoChoices):
one_off = ChoiceItem('one_off', label=_("One-off"))
recurring = ChoiceItem('recurring', label=_("Recurring"))
voucher = ChoiceItem('voucher', label=_("Voucher"))
amount = models.PositiveIntegerField(_("Amount"))
currency = models.CharField(_("currency"), max_length=3)
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("User"), null=True, blank=True)
project = models.ForeignKey('projects.Project', verbose_name=_("Project"))
status = models.CharField(_("Status"), max_length=20, choices=DonationStatuses.choices, default=DonationStatuses.new, db_index=True)
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
donation_type = models.CharField(_("Type"), max_length=20, choices=DonationTypes.choices, default=DonationTypes.one_off, db_index=True)
@property
def payment_method(self):
ctype = ContentType.objects.get_for_model(Donation)
order_ids = OrderItem.objects.filter(content_type__pk=ctype.id,
object_id=self.id).values_list('order_id', flat=True)
payments = Payment.objects.filter(order_id__in=order_ids)
for payment in payments:
if getattr(payment, 'docdata_payments', False):
docdata_payments = payment.docdata_payments.all()
if docdata_payments:
return ", ".join([p.payment_method for p in docdata_payments])
class Meta:
verbose_name = _("donation")
verbose_name_plural = _("donations")
def __unicode__(self):
language = translation.get_language().split('-')[0]
if not language:
language = 'en'
return u'{0} : {1} : {2}'.format(str(self.id), self.project.title,
format_currency(self.amount / 100.0, self.currency, locale=language))
class OrderStatuses(DjangoChoices):
current = ChoiceItem('current', label=_("Current")) # The single donation 'shopping cart' (editable).
recurring = ChoiceItem('recurring', label=_("Recurring")) # The recurring donation 'shopping cart' (editable).
closed = ChoiceItem('closed', label=_("Closed")) # Order with a paid, cancelled or failed payment (not editable).
class Order(models.Model):
"""
Order holds OrderItems (Donations/Vouchers).
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("user"), blank=True, null=True)
status = models.CharField(_("Status"), max_length=20, choices=OrderStatuses.choices, default=OrderStatuses.current, db_index=True)
recurring = models.BooleanField(default=False)
order_number = models.CharField(_("Order Number"), max_length=30, db_index=True, unique=True, help_text="Used to reference the Order from external systems.")
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
@property
def latest_payment(self):
if self.payments.count() > 0:
return self.payments.order_by('-created').all()[0]
return None
@property
def total(self):
""" Calculated total for this Order. """
total = 0
for item in self.orderitem_set.all():
total += item.amount
return total
@property
def total_euro(self):
return "%01.2f" % (self.total / 100)
@property
def donations(self):
content_type = ContentType.objects.get_for_model(Donation)
order_items = self.orderitem_set.filter(content_type=content_type)
return Donation.objects.filter(id__in=order_items.values('object_id'))
@property
def vouchers(self):
content_type = ContentType.objects.get_for_model(Voucher)
order_items = self.orderitem_set.filter(content_type=content_type)
return Voucher.objects.filter(id__in=order_items.values('object_id'))
def __unicode__(self):
description = ''
if self.order_number:
description += self.order_number + " - "
description += "1%Club "
if self.recurring:
# TODO Use English / Dutch based on user primary_language.
description += "MAANDELIJKSE DONATIE"
        elif not self.donations and self.vouchers:
            if len(self.vouchers) > 1:
                description += _("GIFTCARDS")
            else:
                description += _("GIFTCARD")
            description += str(self.id)
elif self.donations and not self.vouchers:
if len(self.donations) > 1:
description += _("DONATIONS")
else:
description += _("DONATION")
else:
description += _("DONATIONS & GIFTCARDS")
description += " - " + _("THANK YOU!")
return description
class Meta:
ordering = ('-created',)
# http://stackoverflow.com/questions/2076838
def save(self, *args, **kwargs):
if not self.order_number:
loop_num = 0
max_number = 1000000000 # 1 billion
order_number = str(random.randint(0, max_number))
while Order.objects.filter(order_number=order_number).exists():
if loop_num > 1000:
raise ValueError(_("Couldn't generate a unique order number."))
else:
order_number = str(random.randint(0, max_number))
loop_num += 1
self.order_number = order_number
super(Order, self).save(*args, **kwargs)
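    # Worked example of the properties above (amounts are stored in minor units,
    # i.e. cents): an Order holding donations of 2500 and 1000 has
    # total == 3500 and total_euro == "35.00".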
class OrderItem(models.Model):
"""
This connects a Donation or a Voucher to an Order. It's generic so that Donations don't have to know about Orders
and so that we can add more Order types easily.
"""
order = models.ForeignKey(Order)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
# Calculate properties for ease of use (e.g. in serializers).
@property
def amount(self):
if self.content_object:
return self.content_object.amount
return 0
@property
def type(self):
return self.content_object.__class__.__name__
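# Illustrative sketch (not part of the original models): how a Donation gets
# attached to an Order through the generic OrderItem relation described above.
# The helper name below is made up purely for demonstration.
def _example_attach_donation_to_order(order, donation):
    """Hypothetical helper: link a Donation to an Order via an OrderItem."""
    return OrderItem.objects.create(order=order, content_object=donation)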
class VoucherStatuses(DjangoChoices):
new = ChoiceItem('new', label=_("New"))
paid = ChoiceItem('paid', label=_("Paid"))
cancelled = ChoiceItem('cancelled', label=_("Cancelled"))
cashed = ChoiceItem('cashed', label=_("Cashed"))
cashed_by_proxy = ChoiceItem('cashed_by_proxy', label=_("Cashed by us"))
class Voucher(models.Model):
class VoucherLanguages(DjangoChoices):
en = ChoiceItem('en', label=_("English"))
nl = ChoiceItem('nl', label=_("Dutch"))
amount = models.PositiveIntegerField(_("Amount"))
currency = models.CharField(_("Currency"), blank=True, max_length=3)
language = models.CharField(_("Language"), max_length=2, choices=VoucherLanguages.choices, default=VoucherLanguages.en)
message = models.TextField(_("Message"), blank=True, default="", max_length=500)
code = models.CharField(_("Code"), blank=True, default="", max_length=100)
status = models.CharField(_("Status"), max_length=20, choices=VoucherStatuses.choices, default=VoucherStatuses.new, db_index=True)
created = CreationDateTimeField(_("Created"))
updated = ModificationDateTimeField(_("Updated"))
sender = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Sender"), related_name="sender", null=True, blank=True)
sender_email = models.EmailField(_("Sender email"))
sender_name = models.CharField(_("Sender name"), blank=True, default="", max_length=100)
receiver = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Receiver"), related_name="receiver", null=True, blank=True)
receiver_email = models.EmailField(_("Receiver email"))
receiver_name = models.CharField(_("Receiver name"), blank=True, default="", max_length=100)
donations = models.ManyToManyField('Donation')
class Meta:
# Note: This can go back to 'Voucher' when we figure out a proper way to do EN -> EN translations for branding.
verbose_name = _("Gift Card")
verbose_name_plural = _("Gift Cards")
class CustomVoucherRequest(models.Model):
class CustomVoucherTypes(DjangoChoices):
card = ChoiceItem('card', label=_("Card"))
digital = ChoiceItem('digital', label=_("Digital"))
unknown = ChoiceItem('unknown', label=_("Unknown"))
class CustomVoucherStatuses(DjangoChoices):
new = ChoiceItem('new', label=_("New"))
in_progress = ChoiceItem('in progress', label=_("In progress"))
finished = ChoiceItem('finished', label=_("Finished"))
value = models.CharField(verbose_name=_("Value"), max_length=100, blank=True, default="")
number = models.PositiveIntegerField(_("Number"))
contact = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Contact member"), null=True)
contact_name = models.CharField(verbose_name=_("Contact email"), max_length=100, blank=True, default="")
contact_email = models.EmailField(verbose_name=_("Contact email"), blank=True, default="")
contact_phone = models.CharField(verbose_name=_("Contact phone"), max_length=100, blank=True, default="")
organization = models.CharField(verbose_name=_("Organisation"), max_length=200, blank=True, default="")
message = models.TextField(_("message"), default="", max_length=500, blank=True)
type = models.CharField(_("type"), max_length=20, choices=CustomVoucherTypes.choices, default=CustomVoucherTypes.unknown)
status = models.CharField(_("status"), max_length=20, choices=CustomVoucherStatuses.choices, default=CustomVoucherStatuses.new, db_index=True)
created = CreationDateTimeField(_("created"))
def process_voucher_order_in_progress(voucher):
def generate_voucher_code():
# Upper case letters without D, O, L and I; Numbers without 0 and 1.
char_set = 'ABCEFGHJKMNPQRSTUVWXYZ23456789'
return ''.join(random.choice(char_set) for i in range(8))
code = generate_voucher_code()
while Voucher.objects.filter(code=code).exists():
code = generate_voucher_code()
voucher.code = code
voucher.status = VoucherStatuses.paid
voucher.save()
mail_new_voucher(voucher)
@receiver(payment_status_changed, sender=Payment)
def process_payment_status_changed(sender, instance, old_status, new_status, **kwargs):
# Payment statuses: new
# in_progress
# pending
# paid
# failed
# cancelled
# refunded
# unknown
order = instance.order
#
# Payment: new -> in_progress
#
if old_status == PaymentStatuses.new and new_status == PaymentStatuses.in_progress:
# Donations.
for donation in order.donations:
donation.status = DonationStatuses.in_progress
donation.save()
# Vouchers.
for voucher in order.vouchers:
process_voucher_order_in_progress(voucher)
#
# Payment: -> cancelled; Order is 'current'
#
if new_status == PaymentStatuses.cancelled and order.status == OrderStatuses.current:
# Donations.
for donation in order.donations:
donation.status = DonationStatuses.new
donation.save()
# Vouchers.
# TODO Implement vouchers.
#
# Payment: -> cancelled; Order is 'closed'
#
elif new_status == PaymentStatuses.cancelled and order.status == OrderStatuses.closed:
if order.status != OrderStatuses.closed:
order.status = OrderStatuses.closed
order.save()
# Donations.
for donation in order.donations:
donation.status = DonationStatuses.failed
donation.save()
# Vouchers.
# TODO Implement vouchers.
#
# Payment: -> cancelled; Order is not 'closed' or 'current'
#
elif new_status == PaymentStatuses.cancelled:
logger.error("PaymentStatuses.cancelled when Order {0} has status {1}.".format(order.id, order.status))
#
# Payment: -> pending
#
if new_status == PaymentStatuses.pending:
if order.status != OrderStatuses.closed:
order.status = OrderStatuses.closed
order.save()
# Donations.
for donation in order.donations:
donation.status = DonationStatuses.pending
donation.save()
# Vouchers.
# TODO Implement vouchers.
#
# Payment: -> paid
#
if new_status == PaymentStatuses.paid:
if order.status != OrderStatuses.closed:
order.status = OrderStatuses.closed
order.save()
# Donations.
for donation in order.donations:
donation.status = DonationStatuses.paid
donation.save()
# Vouchers.
# TODO Implement vouchers.
#
# Payment: -> failed, refunded or chargedback
#
if new_status in [PaymentStatuses.failed, PaymentStatuses.refunded, PaymentStatuses.chargedback]:
if order.status != OrderStatuses.closed:
order.status = OrderStatuses.closed
order.save()
# Donations.
for donation in order.donations:
donation.status = DonationStatuses.failed
donation.save()
# Vouchers.
for voucher in order.vouchers:
voucher.status = VoucherStatuses.cancelled
voucher.save()
|
from django.db import models
from django.utils import timezone
class Course(models.Model):
name = models.CharField(max_length=250, unique=True)
description = models.TextField()
start_date = models.DateTimeField(default=timezone.now)
end_date = models.DateTimeField(null=True)
def __str__(self):
return f'Course "{self.name}"'
@property
def duration(self):
if self.end_date:
return self.end_date - self.start_date
|
# By Mostapha Sadeghipour Roudsari
# Sadeghipour@gmail.com
# Honeybee started by Mostapha Sadeghipour Roudsari is licensed
# under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
"""
Remove Glazing
-
Provided by Honeybee 0.0.55
Args:
_HBZones: List of Honeybee Zones
        srfIndex_: Index of the surface to remove glazing from
pattern_: Pattern to remove glazings
Returns:
readMe!: Information about the Honeybee object
"""
ghenv.Component.Name = "Honeybee_Remove Glazing"
ghenv.Component.NickName = 'remGlz'
ghenv.Component.Message = 'VER 0.0.55\nSEP_11_2014'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "00 | Honeybee"
#compatibleHBVersion = VER 0.0.55\nAUG_25_2014
#compatibleLBVersion = VER 0.0.58\nAUG_20_2014
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
import scriptcontext as sc
import Grasshopper.Kernel as gh
import uuid
def main(HBObjects, srfIndex, pattern):
# check for Honeybee
if not sc.sticky.has_key('honeybee_release'):
msg = "You should first let Honeybee fly..."
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
return
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
"Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return
# call the objects from the lib
hb_hive = sc.sticky["honeybee_Hive"]()
HBObjectsFromHive = hb_hive.callFromHoneybeeHive(HBObjects)
HBObjs = range(len(HBObjectsFromHive))
for count, HBO in enumerate(HBObjectsFromHive):
if HBO.objectType == "HBZone":
for srfCount, surface in enumerate(HBO.surfaces):
if srfCount in srfIndex and surface.hasChild:
                    # remove the glazing
surface.removeAllChildSrfs()
                elif srfCount < len(pattern) and pattern[srfCount] == True and surface.hasChild:
print srfCount
                    # remove the glazing
surface.removeAllChildSrfs()
HBObjs[count] = HBO
return hb_hive.addToHoneybeeHive(HBObjs, ghenv.Component.InstanceGuid.ToString() + str(uuid.uuid4()))
if (_HBZones and srfIndex_!=[]) or (_HBZones and pattern_!=[]):
HBZones = main(_HBZones, srfIndex_, pattern_)
|
#!/usr/bin/env python3
"""
Author: Jakob Beckmann
Copyright 2018 ChainSecurity AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from scripts import controller
if __name__ == "__main__":
ctrler = controller.Controller()
return_code = ctrler.compile_and_report()
sys.exit(return_code)
|
from django.test import TestCase
from django.urls import reverse
from django_marina.test import ExtendedClient
class ExtendedClientTestCase(TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.url_echo = reverse("echo")
def test_get(self):
client = ExtendedClient()
response = client.get(f"{self.url_echo}?foo=bar")
self.assertContains(response, "foo")
self.assertContains(response, "bar")
def test_post(self):
client = ExtendedClient()
response = client.post(self.url_echo, data={"foo": "bar"})
self.assertContains(response, "foo")
self.assertContains(response, "bar")
|
# vim:ts=4 sw=4 sts=4:
import unittest
from igraph import *
from igraph.test.utils import skipIf
try:
import numpy as np
except ImportError:
np = None
class EdgeTests(unittest.TestCase):
def setUp(self):
self.g = Graph.Full(10)
def testHash(self):
data = {}
n = self.g.ecount()
for i in xrange(n):
code1 = hash(self.g.es[i])
code2 = hash(self.g.es[i])
self.assertEqual(code1, code2)
data[self.g.es[i]] = i
for i in xrange(n):
self.assertEqual(i, data[self.g.es[i]])
def testRichCompare(self):
idxs = [2,5,9,13,42]
g2 = Graph.Full(10)
for i in idxs:
for j in idxs:
self.assertEqual(i == j, self.g.es[i] == self.g.es[j])
self.assertEqual(i != j, self.g.es[i] != self.g.es[j])
self.assertEqual(i < j, self.g.es[i] < self.g.es[j])
self.assertEqual(i > j, self.g.es[i] > self.g.es[j])
self.assertEqual(i <= j, self.g.es[i] <= self.g.es[j])
self.assertEqual(i >= j, self.g.es[i] >= self.g.es[j])
self.assertFalse(self.g.es[i] == g2.es[j])
self.assertFalse(self.g.es[i] != g2.es[j])
self.assertFalse(self.g.es[i] < g2.es[j])
self.assertFalse(self.g.es[i] > g2.es[j])
self.assertFalse(self.g.es[i] <= g2.es[j])
self.assertFalse(self.g.es[i] >= g2.es[j])
self.assertFalse(self.g.es[2] == self.g.vs[2])
def testRepr(self):
output = repr(self.g.es[0])
self.assertEqual(output, "igraph.Edge(%r, 0, {})" % self.g)
self.g.es["weight"] = range(10, 0, -1)
output = repr(self.g.es[3])
self.assertEqual(output, "igraph.Edge(%r, 3, {'weight': 7})" % self.g)
def testUpdateAttributes(self):
e = self.g.es[0]
e.update_attributes(a=2)
self.assertEqual(e["a"], 2)
e.update_attributes([("a", 3), ("b", 4)], c=5, d=6)
self.assertEqual(e.attributes(), dict(a=3, b=4, c=5, d=6))
e.update_attributes(dict(b=44, c=55))
self.assertEqual(e.attributes(), dict(a=3, b=44, c=55, d=6))
def testPhantomEdge(self):
e = self.g.es[self.g.ecount()-1]
e.delete()
        # e is now a phantom edge; try to freak igraph out now :)
self.assertRaises(ValueError, e.update_attributes, a=2)
self.assertRaises(ValueError, e.__getitem__, "a")
self.assertRaises(ValueError, e.__setitem__, "a", 4)
self.assertRaises(ValueError, e.__delitem__, "a")
self.assertRaises(ValueError, e.attributes)
self.assertRaises(ValueError, getattr, e, "source")
self.assertRaises(ValueError, getattr, e, "target")
self.assertRaises(ValueError, getattr, e, "tuple")
def testProxyMethods(self):
g = Graph.GRG(10, 0.5)
e = g.es[0]
# - delete() is ignored because it mutates the graph
ignore = "delete"
ignore = set(ignore.split())
# Methods not listed here are expected to return an int or a float
return_types = {
}
for name in Edge.__dict__:
if name in ignore:
continue
func = getattr(e, name)
docstr = func.__doc__
if not docstr.startswith("Proxy method"):
continue
result = func()
self.assertEqual(getattr(g, name)(e.index), result,
msg=("Edge.%s proxy method misbehaved" % name))
return_type = return_types.get(name, (int, float))
self.assertTrue(isinstance(result, return_type),
msg=("Edge.%s proxy method did not return %s" % (name, return_type))
)
class EdgeSeqTests(unittest.TestCase):
def setUp(self):
self.g = Graph.Full(10)
self.g.es["test"] = range(45)
def testCreation(self):
self.assertTrue(len(EdgeSeq(self.g)) == 45)
self.assertTrue(len(EdgeSeq(self.g, 2)) == 1)
self.assertTrue(len(EdgeSeq(self.g, [1,2,3])) == 3)
self.assertTrue(EdgeSeq(self.g, [1,2,3]).indices == [1,2,3])
self.assertRaises(ValueError, EdgeSeq, self.g, 112)
self.assertRaises(ValueError, EdgeSeq, self.g, [112])
self.assertTrue(self.g.es.graph == self.g)
def testIndexing(self):
for i in xrange(self.g.ecount()):
self.assertEqual(i, self.g.es[i].index)
self.assertRaises(IndexError, self.g.es.__getitem__, -1)
self.assertRaises(TypeError, self.g.es.__getitem__, 1.5)
@skipIf(np is None, "test case depends on NumPy")
def testNumPyIndexing(self):
for i in xrange(self.g.ecount()):
arr = np.array([i])
self.assertEqual(i, self.g.es[arr[0]].index)
arr = np.array([-1])
self.assertRaises(IndexError, self.g.es.__getitem__, arr[0])
arr = np.array([1.5])
self.assertRaises(TypeError, self.g.es.__getitem__, arr[0])
def testPartialAttributeAssignment(self):
only_even = self.g.es.select(lambda e: (e.index % 2 == 0))
only_even["test"] = [0]*len(only_even)
expected = [[0,i][i % 2] for i in xrange(self.g.ecount())]
self.assertTrue(self.g.es["test"] == expected)
only_even["test2"] = range(23)
expected = [[i//2, None][i % 2] for i in xrange(self.g.ecount())]
self.assertTrue(self.g.es["test2"] == expected)
def testSequenceReusing(self):
if "test" in self.g.edge_attributes(): del self.g.es["test"]
self.g.es["test"] = ["A", "B", "C"]
self.assertTrue(self.g.es["test"] == ["A", "B", "C"]*15)
self.g.es["test"] = "ABC"
self.assertTrue(self.g.es["test"] == ["ABC"] * 45)
only_even = self.g.es.select(lambda e: (e.index % 2 == 0))
only_even["test"] = ["D", "E"]
expected = ["D", "ABC", "E", "ABC"] * 12
expected = expected[0:45]
self.assertTrue(self.g.es["test"] == expected)
del self.g.es["test"]
only_even["test"] = ["D", "E"]
expected = ["D", None, "E", None] * 12
expected = expected[0:45]
self.assertTrue(self.g.es["test"] == expected)
def testAllSequence(self):
self.assertTrue(len(self.g.es) == 45)
self.assertTrue(self.g.es["test"] == range(45))
def testEmptySequence(self):
empty_es = self.g.es.select(None)
self.assertTrue(len(empty_es) == 0)
self.assertRaises(IndexError, empty_es.__getitem__, 0)
self.assertRaises(KeyError, empty_es.__getitem__, "nonexistent")
self.assertTrue(empty_es["test"] == [])
empty_es = self.g.es[[]]
self.assertTrue(len(empty_es) == 0)
empty_es = self.g.es[()]
self.assertTrue(len(empty_es) == 0)
def testCallableFilteringFind(self):
edge = self.g.es.find(lambda e: (e.index % 2 == 1))
self.assertTrue(edge.index == 1)
self.assertRaises(IndexError, self.g.es.find, lambda e: (e.index % 2 == 3))
def testCallableFilteringSelect(self):
only_even = self.g.es.select(lambda e: (e.index % 2 == 0))
self.assertTrue(len(only_even) == 23)
self.assertRaises(KeyError, only_even.__getitem__, "nonexistent")
self.assertTrue(only_even["test"] == [i*2 for i in xrange(23)])
def testChainedCallableFilteringSelect(self):
only_div_six = self.g.es.select(lambda e: (e.index % 2 == 0),
lambda e: (e.index % 3 == 0))
self.assertTrue(len(only_div_six) == 8)
self.assertTrue(only_div_six["test"] == [0, 6, 12, 18, 24, 30, 36, 42])
only_div_six = self.g.es.select(lambda e: (e.index % 2 == 0)).select(\
lambda e: (e.index % 3 == 0))
self.assertTrue(len(only_div_six) == 8)
self.assertTrue(only_div_six["test"] == [0, 6, 12, 18, 24, 30, 36, 42])
def testIntegerFilteringFind(self):
self.assertEqual(self.g.es.find(3).index, 3)
self.assertEqual(self.g.es.select(2,3,4,2).find(3).index, 2)
self.assertRaises(IndexError, self.g.es.find, 178)
def testIntegerFilteringSelect(self):
subset = self.g.es.select(2,3,4,2)
self.assertTrue(len(subset) == 4)
self.assertTrue(subset["test"] == [2,3,4,2])
self.assertRaises(TypeError, self.g.es.select, 2, 3, 4, 2, None)
subset = self.g.es[2,3,4,2]
self.assertTrue(len(subset) == 4)
self.assertTrue(subset["test"] == [2,3,4,2])
def testIterableFilteringSelect(self):
subset = self.g.es.select(xrange(5,8))
self.assertTrue(len(subset) == 3)
self.assertTrue(subset["test"] == [5,6,7])
def testSliceFilteringSelect(self):
subset = self.g.es.select(slice(5, 8))
self.assertTrue(len(subset) == 3)
self.assertTrue(subset["test"] == [5,6,7])
subset = self.g.es[40:56:2]
self.assertTrue(len(subset) == 3)
self.assertTrue(subset["test"] == [40,42,44])
def testKeywordFilteringSelect(self):
g = Graph.Barabasi(1000, 2)
g.es["betweenness"] = g.edge_betweenness()
g.es["parity"] = [i % 2 for i in xrange(g.ecount())]
self.assertTrue(len(g.es(betweenness_gt=10)) < 2000)
self.assertTrue(len(g.es(betweenness_gt=10, parity=0)) < 2000)
def testSourceTargetFiltering(self):
g = Graph.Barabasi(1000, 2)
es1 = set(e.source for e in g.es.select(_target_in = [2,4]))
es2 = set(v1 for v1, v2 in g.get_edgelist() if v2 in [2, 4])
self.assertTrue(es1 == es2)
def testWithinFiltering(self):
g = Graph.Lattice([10, 10])
vs = [0, 1, 2, 10, 11, 12, 20, 21, 22]
vs2 = (0, 1, 10, 11)
es1 = g.es.select(_within = vs)
es2 = g.es.select(_within = VertexSeq(g, vs))
for es in [es1, es2]:
self.assertTrue(len(es) == 12)
self.assertTrue(all(e.source in vs and e.target in vs for e in es))
es_filtered = es.select(_within = vs2)
self.assertTrue(len(es_filtered) == 4)
self.assertTrue(all(e.source in vs2 and e.target in vs2 for e in es_filtered))
def testBetweenFiltering(self):
g = Graph.Lattice([10, 10])
vs1, vs2 = [10, 11, 12], [20, 21, 22]
es1 = g.es.select(_between = (vs1, vs2))
es2 = g.es.select(_between = (VertexSeq(g, vs1), VertexSeq(g, vs2)))
for es in [es1, es2]:
self.assertTrue(len(es) == 3)
self.assertTrue(all((e.source in vs1 and e.target in vs2) or \
(e.target in vs1 and e.source in vs2) for e in es))
def testIndexOutOfBoundsSelect(self):
g = Graph.Full(3)
self.assertRaises(ValueError, g.es.select, 4)
self.assertRaises(ValueError, g.es.select, 4, 5)
self.assertRaises(ValueError, g.es.select, (4, 5))
self.assertRaises(ValueError, g.es.select, 2, -1)
self.assertRaises(ValueError, g.es.select, (2, -1))
self.assertRaises(ValueError, g.es.__getitem__, (0, 1000000))
def testGraphMethodProxying(self):
idxs = [1, 3, 5, 7, 9]
g = Graph.Barabasi(100)
es = g.es(*idxs)
ebs = g.edge_betweenness()
self.assertEqual([ebs[i] for i in idxs], es.edge_betweenness())
idxs = [1, 3]
g = Graph([(0, 1), (1, 2), (2, 0), (1, 0)], directed=True)
es = g.es(*idxs)
mutual = g.is_mutual(es)
self.assertEqual(mutual, es.is_mutual())
for e, m in zip(es, mutual):
self.assertEqual(e.is_mutual(), m)
def testIsAll(self):
g = Graph.Full(5)
self.assertTrue(g.es.is_all())
self.assertFalse(g.es.select(1,2,3).is_all())
self.assertFalse(g.es.select(_within=[1,2,3]).is_all())
def suite():
edge_suite = unittest.makeSuite(EdgeTests)
es_suite = unittest.makeSuite(EdgeSeqTests)
return unittest.TestSuite([edge_suite, es_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
import pytest
from euclidean.R3 import V3, P3
def test_v3_create():
vector = V3(1, 2, 3)
assert 1 == vector.x
assert 2 == vector.y
assert 3 == vector.z
assert V3(1, 2, 3) == vector
assert P3(1, 2, 3) != vector
for a, b in zip(vector, [1, 2, 3]):
assert a == b
def test_v3_magnitude():
vector = V3(2, 2, 2)
assert 12 ** 0.5 == vector.magnitude()
assert 12 ** 0.5 == abs(vector)
def test_v3_dot():
assert 10 == V3(1, 2, 3).dot(V3(3, 2, 1))
assert 10 == V3(3, 2, 1).dot(V3(1, 2, 3))
with pytest.raises(TypeError):
V3(1, 2, 3).dot(P3(3, 2, 1))
def test_v3_add():
assert V3(2, 3, 4) == V3(1, 2, 3) + V3(1, 1, 1)
with pytest.raises(TypeError):
V3(1, 2, 3) + 5
def test_v3_sub():
assert V3(0, 0, 0) == V3(3, 2, 1) - V3(3, 2, 1)
with pytest.raises(TypeError):
V3(1, 2, 3) - 5
def test_v3_div():
assert V3(1.5, 2.5, 3.5) == V3(3, 5, 7) / 2
with pytest.raises(TypeError):
V3(3, 5, 7) / V3(1, 1, 1)
assert V3(1, 2, 3) == V3(3, 5, 7) // 2
with pytest.raises(TypeError):
V3(3, 5, 7) // V3(1, 1, 1)
def test_v3_neg():
assert V3(-1, -2, -3) == -V3(1, 2, 3)
|
import numpy as np
if __name__ == "__main__":
optdata = np.zeros([900, 1800], dtype=np.float64)
for v in range(8):
isuffix = v // 4
jsuffix = v % 4
grid = np.loadtxt(
f"data\\gpw_v4_population_count_rev11_2020_30_sec_{v+1}.asc", skiprows=6
)
for i in range(450):
for j in range(450):
value = np.float64(0.0)
for m in range(24):
for n in range(24):
tmp = grid[i * 24 + m, j * 24 + n]
if tmp > 0.0:
value += tmp
optdata[899 - (isuffix * 450 + i), jsuffix * 450 + j] = value
np.savetxt("datagrid.asc", optdata, fmt="%.8e")
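    # Vectorized sketch of the same aggregation (an assumption for illustration:
    # each tile is 10800 x 10800, i.e. 450 * 24 cells per side, and only positive
    # cell values are summed, matching the loops above):
    #   blocks = grid.reshape(450, 24, 450, 24)
    #   tile_sum = np.where(blocks > 0.0, blocks, 0.0).sum(axis=(1, 3))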
|
'''
@author : jhhalls
'''
import numpy as np
import matplotlib.pyplot as plt
def plot_stratified_cross_validation():
fig, both_axes = plt.subplots(2, 1, figsize=(12, 5))
# plt.title("cross_validation_not_stratified")
axes = both_axes[0]
axes.set_title("Standard cross-validation with sorted class labels")
axes.set_frame_on(False)
n_folds = 3
n_samples = 150
n_samples_per_fold = n_samples / float(n_folds)
for i in range(n_folds):
colors = ["w"] * n_folds
colors[i] = "grey"
axes.barh(y=range(n_folds), width=[n_samples_per_fold - 1] *
n_folds, left=i * n_samples_per_fold, height=.6,
color=colors, hatch="//", edgecolor='k', align='edge')
axes.barh(y=[n_folds] * n_folds, width=[n_samples_per_fold - 1] *
n_folds, left=np.arange(3) * n_samples_per_fold, height=.6,
color="w", edgecolor='k', align='edge')
axes.invert_yaxis()
axes.set_xlim(0, n_samples + 1)
axes.set_ylabel("CV iterations")
axes.set_xlabel("Data points")
axes.set_xticks(np.arange(n_samples_per_fold / 2.,
n_samples, n_samples_per_fold))
axes.set_xticklabels(["Fold %d" % x for x in range(1, n_folds + 1)])
axes.set_yticks(np.arange(n_folds + 1) + .3)
axes.set_yticklabels(
["Split %d" % x for x in range(1, n_folds + 1)] + ["Class label"])
for i in range(3):
axes.text((i + .5) * n_samples_per_fold, 3.5, "Class %d" %
i, horizontalalignment="center")
ax = both_axes[1]
ax.set_title("Stratified Cross-validation")
ax.set_frame_on(False)
ax.invert_yaxis()
ax.set_xlim(0, n_samples + 1)
ax.set_ylabel("CV iterations")
ax.set_xlabel("Data points")
ax.set_yticks(np.arange(n_folds + 1) + .3)
ax.set_yticklabels(
["Split %d" % x for x in range(1, n_folds + 1)] + ["Class label"])
n_subsplit = n_samples_per_fold / 3.
for i in range(n_folds):
test_bars = ax.barh(
y=[i] * n_folds, width=[n_subsplit - 1] * n_folds,
left=np.arange(n_folds) * n_samples_per_fold + i * n_subsplit,
height=.6, color="grey", hatch="//", edgecolor='k', align='edge')
w = 2 * n_subsplit - 1
ax.barh(y=[0] * n_folds, width=[w] * n_folds, left=np.arange(n_folds)
* n_samples_per_fold + (0 + 1) * n_subsplit, height=.6, color="w",
hatch="//", edgecolor='k', align='edge')
ax.barh(y=[1] * (n_folds + 1), width=[w / 2., w, w, w / 2.],
left=np.maximum(0, np.arange(n_folds + 1) * n_samples_per_fold -
n_subsplit), height=.6, color="w", hatch="//",
edgecolor='k', align='edge')
training_bars = ax.barh(y=[2] * n_folds, width=[w] * n_folds,
left=np.arange(n_folds) * n_samples_per_fold,
height=.6, color="w", hatch="//", edgecolor='k',
align='edge')
ax.barh(y=[n_folds] * n_folds, width=[n_samples_per_fold - 1] *
n_folds, left=np.arange(n_folds) * n_samples_per_fold, height=.6,
color="w", edgecolor='k', align='edge')
for i in range(3):
ax.text((i + .5) * n_samples_per_fold, 3.5, "Class %d" %
i, horizontalalignment="center")
ax.set_ylim(4, -0.1)
plt.legend([training_bars[0], test_bars[0]], [
'Training data', 'Test data'], loc=(1.05, 1), frameon=False)
fig.tight_layout()
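if __name__ == "__main__":
    # Minimal usage sketch (assumes the module is run directly): build the two
    # cross-validation diagrams above and display them.
    plot_stratified_cross_validation()
    plt.show()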
|
# UCI Electronics for Scientists
# https://github.com/dkirkby/E4S
#
# Control the AC relay using a single digital output.
# Pull out the green terminal block and use a small
# slotted screwdriver to fasten two long jumper wires.
# Connect the "-" jumper wire to the M4 GND.
# Connect the "+" jumper wire to the M4 D2.
import time
import board
import digitalio
D2 = digitalio.DigitalInOut(board.D2)
D2.direction = digitalio.Direction.OUTPUT
D2.value = False
while True:
# You should hear a loud click each second from the relay box,
# and the green LED on the relay box will be on when D2 is True.
print(D2.value)
time.sleep(1)
D2.value = not D2.value
|
"""Python configuration file parser."""
from __future__ import unicode_literals
import imp
import sys
_PY2 = sys.version_info < (3,)
if _PY2:
str = unicode
_BASIC_TYPES = (bool, int, float, bytes, str)
"""Python basic types."""
if _PY2:
_BASIC_TYPES += (long,)
_COMPLEX_TYPES = (tuple, list, set, dict)
"""Python complex types."""
_VALID_TYPES = _BASIC_TYPES + _COMPLEX_TYPES
"""Option value must be one of these types."""
class Error(Exception):
"""The base class for all exceptions that the module raises."""
def __init__(self, error, *args, **kwargs):
super(Error, self).__init__(error.format(*args, **kwargs) if args or kwargs else error)
class FileReadingError(Error):
"""Error while reading a configuration file."""
def __init__(self, path, error):
super(FileReadingError, self).__init__(
"Error while reading '{0}' configuration file: {1}.", path, error.strerror)
self.errno = error.errno
class ParsingError(Error):
"""Error while parsing a configuration file."""
def __init__(self, path, error):
super(ParsingError, self).__init__(
"Error while parsing '{0}' configuration file: {1}.", path, error)
class ValidationError(Error):
"""Error during validation of a configuration file."""
def __init__(self, path, error):
super(ValidationError, self).__init__(
"Error while parsing '{0}' configuration file: {1}.", path, error)
self.option_name = error.option_name
class _ValidationError(Error):
"""Same as ValidationError, but for internal usage."""
def __init__(self, option_name, *args, **kwargs):
super(_ValidationError, self).__init__(*args, **kwargs)
self.option_name = option_name
def load(path, contents=None):
"""Loads a configuration file."""
config_module = imp.new_module("config")
config_module.__file__ = path
if contents is None:
try:
with open(path) as config_file:
contents = config_file.read()
except EnvironmentError as e:
raise FileReadingError(path, e)
try:
exec(compile(contents, path, "exec"), config_module.__dict__)
except Exception as e:
raise ParsingError(path, e)
config = {}
for option, value in config_module.__dict__.items():
if not option.startswith("_") and option.isupper():
try:
config[option.lower()] = _validate_value(option, value)
except _ValidationError as e:
raise ValidationError(path, e)
return config
def _validate_value(option, value, valid_types=_VALID_TYPES):
"""Validates an option value."""
value_type = type(value)
if value_type not in valid_types:
raise _ValidationError(option,
"{option} has an invalid value type ({type}). Allowed types: {valid_types}.",
option=option, type=value_type.__name__,
valid_types=", ".join(t.__name__ for t in valid_types))
if value_type is dict:
value = _validate_dict(option, value)
elif value_type is list:
value = _validate_list(option, value)
elif value_type is tuple:
value = _validate_tuple(option, value)
elif value_type is set:
value = _validate_set(option, value)
elif value_type is bytes:
try:
value = value.decode()
except UnicodeDecodeError as e:
raise _ValidationError(option, "{0} has an invalid value: {1}.", option, e)
return value
def _validate_dict(option, dictionary):
"""Validates a dictionary."""
for key, value in tuple(dictionary.items()):
valid_key = _validate_value("A {0}'s key".format(option),
key, valid_types=_BASIC_TYPES)
valid_value = _validate_value("{0}[{1}]".format(option, repr(key)), value)
if valid_key is not key:
del dictionary[key]
dictionary[valid_key] = valid_value
elif valid_value is not value:
dictionary[valid_key] = valid_value
return dictionary
def _validate_list(option, sequence):
"""Validates a list."""
for index, value in enumerate(sequence):
valid_value = _validate_value("{0}[{1}]".format(option, index), value)
if valid_value is not value:
sequence[index] = valid_value
return sequence
def _validate_tuple(option, sequence):
"""Validates a tuple."""
return [
_validate_value("{0}[{1}]".format(option, index), value)
for index, value in enumerate(sequence)
]
def _validate_set(option, sequence):
"""Validates a set."""
return [
_validate_value("A {0}'s key".format(option), value)
for value in sequence
]
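if __name__ == "__main__":
    # Minimal usage sketch; the file name below is hypothetical. Passing
    # `contents` makes load() parse the string instead of reading from disk.
    example = load("example_config.py", contents="DEBUG = True\nWORKERS = 4\n")
    print(example)  # -> {'debug': True, 'workers': 4}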
|
import os
import time
if os.name == "posix":
var = "clear"
elif os.name == "ce" or os.name == "nt" or os.name == "dos":
var = "cls"
def login():
os.system(var)
time.sleep(.1)
print(f"\tIniciar sesión\n")
time.sleep(.1)
global usuario
usuario = input("Introduce tu nombre de usuario: ").strip()
existe = False
try:
with open('usuarios.txt') as f:
for linea in f:
datos = linea.split(":")
if datos[0] == usuario:
existe = True
break
f.close()
except:
print("Ha ocurrido un error")
else:
if existe == True:
contraseña = input(f"Introduce la contraseña para {datos[0]}: ").strip()
if datos[2] == contraseña:
print(f"\nHa iniciado sesión {usuario}\n")
d = open('Otros.txt', 'w')
d.write('iniciadoT')
iniciado = True
datU = [iniciado, datos]
return datU
else:
print(f"Usuario o contraseña erróneo\n")
iniciado = False
return usuario
else:
print(f"\nUsuario no encontrado\n")
iniciado = False
return usuario
return usuario
def registro():
os.system(var)
time.sleep(.1)
print(f"\tRegistrarse\n")
time.sleep(.1)
usuario = input(f"Introduce un nombre de usuario: ").strip()
encontrado = False
try:
with open('usuarios.txt') as f:
for linea in f:
datos = linea.split(":")
if datos[0] == usuario:
encontrado = True
break
except:
print(f"Ha habido un error :(")
else:
if encontrado == True:
print(f"\nYa existe un usuario con ese nombre, inicia sesión o prueba con otro nombre de usuario")
continuar = input()
else:
existe = False
mail = input('Ingresa su correo: ').strip()
try:
with open('usuarios.txt') as f:
for linea in f:
datos = linea.split(":")
if datos[4] == mail:
existe = True
break
except:
print('Ha ocurrido un error.')
else:
if existe == True:
print('Ese correo ya existe, prueba a iniciar sesion o registrarse con otro correo')
continuar = input()
else:
condicion = True
while condicion:
f = open('usuarios.txt','a')
contraseña = input(f"Introduce una contraseña para {usuario}: ").strip()
contraseña2 = input(f'Vuelva a ingresar la contraseña: ').strip()
if contraseña == contraseña2:
f.write(f"{usuario}:CONTRASEÑA:{contraseña}:MAIL:{mail}:PARTIDAS JUGADAS:{0}:PARTIDAS GANADAS:{0}:PAERTIDAS PERDIDAS:{0}:\n")
print(f"\nUsuario registrado con éxito, inicia sesión para empezar a contar.\n")
f.close()
condicion = False
else:
                            print('\nLas contraseñas no coinciden, por favor vuelve a introducirlas.\n')
def ranking():
os.system(var)
time.sleep(.4)
print(f"\tRANKING\n")
time.sleep(.4)
print('Ranking Global\n')
f = open('usuarios.txt', 'r')
lin = f.readlines()
print('Jugadores\tPartidas jugadas\tPartidas ganadas\tPartidas perdidas\t % victoria')
for linea in lin:
datos = linea.split(":")
        PT = int(datos[6])
        PG = int(datos[8])
        porcentaje = round(100 * PG / PT, 2) if PT > 0 else 0
        print(f'{datos[0]}\t\t\t{datos[6]}\t\t\t{datos[8]}\t\t\t{datos[10]}\t\t\t{porcentaje}')
print(' ')
print('Ranking immune\n')
print('Jugadores\tPartidas jugadas\tPartidas ganadas\tPartidas perdidas\t % victoria')
for linea in lin:
datos = linea.split(":")
if datos[11] == 'immune':
            PT = int(datos[6])
            PG = int(datos[8])
            porcentaje = round(100 * PG / PT, 2) if PT > 0 else 0
            print(f'{datos[0]}\t\t\t{datos[6]}\t\t\t{datos[8]}\t\t\t{datos[10]}\t\t\t{porcentaje}')
def marcador0():
os.system(var)
partida = 0
player1 = 0
player2 = 0
condicion0 = True
while condicion0:
print('\t\nMarcador\n')
partida = partida + 1
P1 = 0
P2 = 0
condition = True
PM = 10
while condition:
if P1 == PM:
if P2 == PM:
PM = PM + 1
print('Jugador 1 vs Jugador 2')
print(f' {P1} {P2}\n')
if P1 == (PM + 1):
if P2 < PM:
player1 = player1 + 1
print('\nHa ganado el Jugador 1\n')
print(f'El Jugador 1 lleva ganando {player1} de {partida}')
condition = False
a = input('Desea continuar (S/N): ')
if a == 's' or a == 'S':
if partida < 6:
condicion0 = True
else:
condicion0 = False
if P2 == (PM + 1):
if P1 < PM:
player2 = player2 + 1
print('\nHa ganado el Jugador 2\n')
print(f'El Jugador 2 lleva ganando {player2} de {partida}')
condition = False
a = input('Desea continuar (S/N): ')
if a == 's' or a == 'S':
if partida < 6:
condicion0 = True
else:
condicion0 = False
if condition == True:
puntos = input('Añada punto: ')
for i in '12345qwertasdfgzxcv':
if puntos == i:
P1 = P1 + 1
for j in 'yuiophjklñbnm67890':
if puntos == j:
P2 = P2 + 1
def marcador():
os.system(var)
print('\t\tMARCADOR\n')
jugador1 = input('Que jugador va a jugar: ')
jugador2 = input('Que otro jugador va a jugar: ')
partida = 0
a = 0
player1 = 0
player2 = 0
while partida < 6:
partida = partida + 1
P1 = 0
P2 = 0
condition = True
while condition:
os.system(var)
PM = 10 + a
if P1 == PM:
if P2 == PM:
a += 1
print('\t\tMARCADOR\n')
print(f'\t{jugador1}\t vs \t{jugador2}')
print(f'\t{P1}\t\t{P2}\n')
if P1 == (PM + 1):
if P2 < PM:
a=0
player1 = player1 + 1
print(f'\nHa ganado {jugador1}\n')
print(f'{jugador1} lleva ganando {player1} de {partida}')
condition = False
f = open('usuarios.txt','r')
contenido_linea = []
for linea in f:
datos_usuario = linea.split(":")
if datos_usuario[0] == jugador1:
datos_usuario[8] = int(datos_usuario[8]) + 1
datos_usuario[6] = int(datos_usuario[6]) + 1
insertamos = f"{jugador1}:{datos_usuario[0]}:{datos_usuario[1]}:{datos_usuario[2]}:{datos_usuario[3]}:{datos_usuario[4]}:{datos_usuario[5]}:{datos_usuario[6]}:{datos_usuario[7]}:{datos_usuario[8]}:{datos_usuario[9]}:{datos_usuario[10]}:\n"
User_dates = insertamos.split(":")
else:
insertamos = linea
contenido_linea.append(insertamos)
f.close()
f = open('usuarios.txt','w')
for ind, val in enumerate(contenido_linea):
f.writelines(contenido_linea[ind])
f.close()
f = open('usuarios.txt','r')
contenido_linea = []
for linea in f:
datos_usuario = linea.split(":")
if datos_usuario[0] == jugador2:
datos_usuario[10] = int(datos_usuario[10]) + 1
datos_usuario[6] = int(datos_usuario[6]) + 1
insertamos = f"{datos_usuario[0]}:{datos_usuario[1]}:{datos_usuario[2]}:{datos_usuario[3]}:{datos_usuario[4]}:{datos_usuario[5]}:{datos_usuario[6]}:{datos_usuario[7]}:{datos_usuario[8]}:{datos_usuario[9]}:{datos_usuario[10]}:\n"
User_dates = insertamos.split(":")
else:
insertamos = linea
contenido_linea.append(insertamos)
f.close()
f = open('usuarios.txt','w')
for ind, val in enumerate(contenido_linea):
f.writelines(contenido_linea[ind])
f.close()
return User_dates
if P2 == (PM + 1):
if P1 < PM:
a=0
player2 = player2 + 1
print(f'\nHa ganado {jugador2}\n')
print(f'El {jugador2} lleva ganando {player2} de {partida}')
condition = False
f = open('usuarios.txt','r')
contenido_linea = []
for linea in f:
datos_usuario = linea.split(":")
if datos_usuario[0] == jugador2:
datos_usuario[8] = int(datos_usuario[8]) + 1
datos_usuario[6] = int(datos_usuario[6]) + 1
insertamos = f"{datos_usuario[0]}:{datos_usuario[1]}:{datos_usuario[2]}:{datos_usuario[3]}:{datos_usuario[4]}:{datos_usuario[5]}:{datos_usuario[6]}:{datos_usuario[7]}:{datos_usuario[8]}:{datos_usuario[9]}:{datos_usuario[10]}:\n"
User_dates = insertamos.split(":")
else:
insertamos = linea
contenido_linea.append(insertamos)
f.close()
f = open('usuarios.txt','w')
for ind, val in enumerate(contenido_linea):
f.writelines(contenido_linea[ind])
f.close()
f = open('usuarios.txt','r')
contenido_linea = []
for linea in f:
datos_usuario = linea.split(":")
if datos_usuario[0] == jugador1:
datos_usuario[10] = int(datos_usuario[10]) + 1
datos_usuario[6] = int(datos_usuario[6]) + 1
insertamos = f"{datos_usuario[0]}:{datos_usuario[1]}:{datos_usuario[2]}:{datos_usuario[3]}:{datos_usuario[4]}:{datos_usuario[5]}:{datos_usuario[6]}:{datos_usuario[7]}:{datos_usuario[8]}:{datos_usuario[9]}:{datos_usuario[10]}:\n"
User_dates = insertamos.split(":")
else:
insertamos = linea
contenido_linea.append(insertamos)
f.close()
f = open('usuarios.txt','w')
for ind, val in enumerate(contenido_linea):
f.writelines(contenido_linea[ind])
f.close()
stop = input("Pulsa enter...")
return User_dates
if condition == True:
puntos = input('Añada punto: ')
for i in '12345qwertasdfgzxcv':
if puntos == i:
P1 = P1 + 1
for j in 'yuiophjklñbnm67890':
if puntos == j:
P2 = P2 + 1
def replace_line(file_name, line_num, text):
lines = open(file_name, 'r').readlines()
lines[line_num] = text
out = open(file_name, 'w')
out.writelines(lines)
out.close()
global nuevoGrupo
def crearGrupo():
os.system(var)
encontrado2 = False
global nuevoGrupo
nuevoGrupo = input('\nCrea un nombre de grupo: ')
try:
with open('grupos.txt') as e:
for linea in e:
datos_grupos = linea.split(":")
if datos_grupos[0] == nuevoGrupo:
encontrado2 = True
break
except:
print('Ha ocurrido un error.')
else:
if encontrado2 == True:
print('\nEse nombre de grupo ya existe, por favor elija otro nombre de grupo')
else:
condicion = True
while condicion:
contraseña = input('Crea una contraseña: ')
contraseña2 = input('Ingresa de nuevo la contraseña: ')
if contraseña == contraseña2:
e = open('grupos.txt', 'a')
                    e.write(f'{nuevoGrupo}:CONTRASEÑA:{contraseña}:\n')
e.close()
condicion = False
else:
                    print('\nLas contraseñas no coinciden, por favor vuelve a introducirlas.\n')
def unirseGrupo(usuario):
os.system(var)
existe = False
grupo = input('Ingrese el nombre del grupo: ')
g = open('grupos.txt', 'r')
gr = open('usuarios.txt', 'r')
lin = g.readlines()
a = 0
for linea in lin:
a += 1
datos = linea.split(":")
if datos[0] == grupo:
existe = True
break
if existe == True:
contraseña = input('Ingrese la contraseña: ').strip()
if datos[2] == contraseña:
print(f'\nEstás en el grupo {grupo}')
f = open('usuarios.txt','r')
contenido_linea = []
for linea in f:
datos_usuario = linea.split(":")
if datos_usuario[0] == usuario:
datos_usuario[11] = grupo
insertamos = f"{usuario}:{datos_usuario[0]}:{datos_usuario[1]}:{datos_usuario[2]}:{datos_usuario[3]}:{datos_usuario[4]}:{datos_usuario[5]}:{datos_usuario[6]}:{datos_usuario[7]}:{datos_usuario[8]}:{datos_usuario[9]}:{datos_usuario[10]}:{datos_usuario[11]}:\n"
User_dates = insertamos.split(":")
else:
insertamos = linea
contenido_linea.append(insertamos)
f.close()
f = open('usuarios.txt','w')
for ind, val in enumerate(contenido_linea):
f.writelines(contenido_linea[ind])
f.close()
else:
print('Esa contraseña no coincide con el grupo.')
continuar = input()
else:
print('Ese grupo no existe.')
def main(usuario):
os.system(var)
print('1 - Partida')
time.sleep(.1)
print('2 - Ranking')
time.sleep(.1)
print('3 - Juegos')
time.sleep(.1)
print('4 - Unirse a un grupo')
time.sleep(.1)
print('5 - Crear Grupos')
time.sleep(.1)
opcion = input('\nQue desea hacer: ').lower()
if opcion == '1' or opcion == 'partida':
marcador()
elif opcion == '2' or opcion == 'ranking':
ranking()
elif opcion == '3' or opcion == 'juegos':
pass
elif opcion == '4' or opcion == 'unirse a un grupo':
unirseGrupo(usuario)
elif opcion == '5' or opcion == 'crear grupo':
crearGrupo()
def main1():
    iniciadoS = False
    try:
        with open('Otros.txt') as o:
            for linea in o:
                datos = linea.split(":")
                # login() writes 'iniciadoT' to Otros.txt, so check for that value
                if datos[0] == 'iniciadoT':
                    iniciadoS = True
                    break
    except:
        print("Ha ocurrido un error")
    else:
        if iniciadoS == True:
print('1 - Partida')
time.sleep(.1)
print('2 - Ranking')
time.sleep(.1)
print('3 - Juegos')
time.sleep(.1)
print('4 - Unirse a un grupo')
time.sleep(.1)
print('5 - Crear Grupos')
time.sleep(.1)
opcion = input('\nQue desea hacer: ').lower()
if opcion == '1' or opcion == 'partida':
pass
elif opcion == '2' or opcion == 'ranking':
pass
elif opcion == '3' or opcion == 'juegos':
pass
elif opcion == '4' or opcion == 'unirse a un grupo':
pass
elif opcion == '5' or opcion == 'crear grupo':
pass
|
import os
from datetime import date, datetime
from pathlib import Path
import pytest
import vcr as _vcr
from olog.httpx_client import Client
from olog.util import (UncaughtServerError, ensure_name, ensure_value, ensure_time,
simplify_attr)
# This stashes Olog server responses in JSON files (one per test)
# so that an actual server does not have to be running.
# Authentication
cassette_library_dir = str(Path(__file__).parent / Path('cassettes'))
vcr = _vcr.VCR(
serializer='json',
cassette_library_dir=cassette_library_dir,
record_mode='once',
match_on=['uri', 'method'],
filter_headers=['authorization']
)
RECORDED_URL = "http://10.0.137.22:8080/Olog"
# Only required if we are re-recording for VCR.
url = os.environ.get('OLOG_URL', RECORDED_URL)
user = os.environ.get('OLOG_USER', 'admin')
password = os.environ.get('OLOG_PASSWORD', '')
cli = Client(url, user, password)
# Various test parameters
LOG_ID = 1
LOGBOOKS = ['TEST0', 'TEST1']
LOGBOOK = 'TEST'
INVALID_LOGBOOK = {'name': 'Operations', 'owner': 'invalid_name',
'state': 'Active'}
LOGBOOK_NAME = 'Operations'
PROPERTY = {'name': 'TEST', 'owner': 'admin', 'state': 'Active', 'attributes': {'id': '1', 'url': None}}
PROPERTIES = {'TEST0': {'id': None, 'url': None}, 'TEST1': {'id': None, 'url': None}}
PROPERTY_NAME = 'TEST'
PROPERTY_ATTRIBUTES = {'url': None, 'id': '1'}
INVALID_PROPERTY = {'name': 'Ticket',
'owner': 'invalid_name',
'state': 'Active',
'attributes': [{'name': 'url', 'value': None,
'state': 'Active'},
{'name': 'id', 'value': None,
'state': 'Active'}]}
TAG_NAMES = ['Fault', 'TEST']
TAG = {'name': 'Fault', 'state': 'Active'}
TAG_NAME = 'Fault'
ATTACHMENT_FILE = {'file': open('README.md', 'rb'),
'filename': (None, 'test'),
'fileMetadataDescription': (None, 'This is a attachment')}
ATTACHMENT_NAME = ATTACHMENT_FILE['filename'][1]
DATETIME_OBJ = datetime(2015, 1, 1, 0, 0, 0)
DATETIME_START = '2015-01-01 00:00:00.000123'
DATETIME_END = '2020-01-01 00:00:00.000123'
TIME_INPUTS = [
DATETIME_OBJ,
DATETIME_START,
'2015-01-01 00:00:00.000123',
'2015-01-01 00:00:00',
'2015-01-01 00:00',
'2015-01-01 00',
'2015-01-01',
'2015-01',
'2015',
date(2015, 1, 1),
1420070400.0,
1420070400]
@vcr.use_cassette()
def test_get_logbooks():
cli.get_logbooks()
@vcr.use_cassette()
def test_get_logbook():
cli.get_logbook(LOGBOOK_NAME)
@vcr.use_cassette()
def test_put_logbooks():
cli.put_logbooks(LOGBOOKS)
@vcr.use_cassette()
def test_put_logbook():
cli.put_logbook(LOGBOOK)
@vcr.use_cassette()
def test_put_logbook_with_error():
# extra verification that everything worked correctly.
# vcr will return a wrong logbook because the recorded
# response has been manually edited to be inconsistent
# with the request to exercise this code path
with pytest.raises(UncaughtServerError):
cli.put_logbook(LOGBOOK)
def test_get_logs_by_keyword_only_arguments():
with pytest.raises(TypeError):
cli.get_logs(LOGBOOK_NAME)
@vcr.use_cassette()
def test_get_logs_by_logbooks():
logs = cli.get_logs(logbooks=LOGBOOK_NAME)
for log in logs:
assert LOGBOOK_NAME == log['logbooks'][0]['name']
@vcr.use_cassette()
def test_get_logs_by_time():
cli.get_logs(start=DATETIME_START, end=DATETIME_END)
@vcr.use_cassette()
def test_get_log():
assert LOG_ID == cli.get_log(LOG_ID)['id']
@vcr.use_cassette()
def test_get_attachment():
cli.get_attachment(LOG_ID, ATTACHMENT_NAME)
@vcr.use_cassette()
def test_post_attachment():
cli.post_attachment(1, ATTACHMENT_FILE)
@vcr.use_cassette()
def test_get_tags():
cli.get_tags()
@vcr.use_cassette()
def test_get_tag():
assert TAG == cli.get_tag(TAG_NAME)
@vcr.use_cassette()
def test_put_tags():
cli.put_tags(TAG_NAMES)
@vcr.use_cassette()
def test_put_tag():
cli.put_tag(TAG_NAME)
@vcr.use_cassette()
def test_get_properties():
cli.get_properties()
@vcr.use_cassette()
def test_get_property():
cli.get_property(PROPERTY_NAME)
@vcr.use_cassette()
def test_put_properties():
cli.put_properties(PROPERTIES)
@vcr.use_cassette()
def test_put_property():
assert PROPERTY == cli.put_property(PROPERTY_NAME, PROPERTY_ATTRIBUTES)
@vcr.use_cassette()
def test_put_property_with_error():
# vcr will return a wrong property because the recorded
# response has been manually edited to be inconsistent
# with the request to exercise this code path
with pytest.raises(UncaughtServerError):
cli.put_property(PROPERTY_NAME, PROPERTY_ATTRIBUTES)
def test_ensure_name():
with pytest.raises(TypeError):
ensure_name(1)
assert 'foo' == ensure_name('foo')
def test_ensure_value():
with pytest.raises(TypeError):
ensure_value(1)
assert ensure_value(None) is None
assert 'foo' == ensure_value('foo')
def test_ensure_time():
for time in TIME_INPUTS[:-2]:
assert '2015-01-01 00:00:00.000' == ensure_time(time)
for time in TIME_INPUTS[-2:]:
        # fromtimestamp() returns local time. In this test case, the timestamp and
        # the datetime given match in GMT, which is +5 hours compared to UTC.
        # The code below calculates the difference (in hours) between where the
        # code is being executed and GMT, then corrects for it in the assert.
local = datetime.fromtimestamp(0).hour
diff = local - 24 if local > 12 else local
assert '2015-01-01 00:00:00.000' == ensure_time(time - diff*3600)
with pytest.raises(ValueError):
ensure_time('ABC')
def test_simplify_attr():
before = {'attributes': [{'name': 'id', 'value': 1}]}
after = simplify_attr(before)
real = {'attributes': {'id': 1}}
assert real == after
|
"""
Library for simulating scattering onto a detector according to Klein-Nishina equation
"""
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from scipy.constants import *
from scipy.interpolate import interp1d
from os.path import dirname,realpath
#I know global variables are evil, but these are constants which i really want to keep consistent
mu = electron_mass*c*c/electron_volt
ro = physical_constants['classical electron radius'][0]
ro2= 1e28*(ro**2)
#So tired of reinventing this
def ev2angstrom(energy):
return 1e10*h*speed_of_light/(energy*electron_volt)
#Hint: this is the same function as above 0.o
def angstrom2ev(wavelength):
return ev2angstrom(wavelength)
class interpolator(dict):
def __init__(self, inFN):
self.inFN = inFN
data = np.loadtxt(inFN, skiprows=1, delimiter=',')
keys = map(int, open(inFN).readline().strip().split(',')[1:])
for k,v in zip(keys,data[:,1:].T):
self[k] = interp1d(data[:,0], v, kind='quadratic')
dir_path = dirname(realpath(__file__))
scatteringfunction = interpolator(dir_path + '/scatteringfunction.txt')
formfactor = interpolator(dir_path + '/formfactor.txt')
def transform_cartesian(x, y, z, xpos=0.0, ypos=0.0):
    """
    Convert cartesian detector coordinates to the scattering and azimuthal angles.
    xpos and ypos give the beam centre offset on the detector; the defaults assume
    an on-axis beam (the signature is reconstructed from the body below).
    Returns
    -------
    theta : float or array
    phi : float or array
    """
    theta = np.arctan2(np.hypot(x - xpos, y - ypos), -z)
    phi = np.arctan2(y - ypos, x - xpos)
    return theta, phi
def transform_spherical(x, y, z):
"""
    transform_spherical(x, y, z) converts cartesian coordinates to the scattering and azimuthal angles, theta and phi. Assumes polarization in the y plane.
    We're using the convention of A.L. Hanson, "The calculation of scattering cross sections for polarized x-rays", Nuclear
    Instruments and Methods in Physics Research (1986) 583-598. This means that theta runs from 0 to pi, with 0 referring to
    precise backscatter and pi meaning precise forward scatter.
Parameters
----------
x : float or array
y : float or array
z : float or array
This is the distance from the sample to the detector. positive z direction is in the direction of the incident photon wavevector. So,
negative z values are asking for information about backscatter.
Returns
-------
theta : float or array
phi : float or array
"""
theta = np.arctan2(np.hypot(x, y), -z)
phi = np.arctan2(y, x)
return theta, phi
def compton(theta, phi, ko, Z=None):
"""
compute the compton portion of the differential intensity of scattering for a given theta and phi according to the Klein Nishina equation
Parameters
----------
theta : float or array
phi : float or array
ko : float or array
The energy of the incident photon in eV
element : int (optional)
Atomic number or the element scattering. If none is supplied, this function returns scattering from a free electron
Returns
-------
d : float or array
The differential scattering element for a given phi and theta
"""
k = ko*mu / (mu + ko*(1 - np.cos(np.pi - theta)))
c = 0.25*ro2*np.square(k/ko)*(k/ko + ko/k - 2.*np.square(np.sin(theta)*np.cos(phi)))
if Z is not None:
wavelength = Planck*speed_of_light/(ko*electron_volt)*1e10
x = np.sin((np.pi - theta)/2.)/wavelength
c = c*scatteringfunction[Z](x)
return c
def thomson(theta, phi, ko, Z=None):
"""
compute the thomson portion differential intensity of scattering for a given theta and phi according to the Klein Nishina equation
Parameters
----------
theta : float or array
phi : float or array
ko : float or array
The energy of the incident photon in eV
element : int (optional)
Atomic number or the element scattering. If none is supplied, this function returns scattering from a free electron
Returns
-------
d : float or array
The differential scattering element for a given phi and theta
"""
t = ro2*(1. - np.square(np.sin(theta)*np.cos(phi)))
if Z is not None:
wavelength = Planck*speed_of_light/(ko*electron_volt)*1e10
x = np.sin((np.pi - theta)/2.)/wavelength
t = t*np.square(formfactor[Z](x))
return t
def differential_intensity(theta, phi, ko, Z=None):
"""
compute the total differential intensity of scattering for a given theta and phi according to the Klein Nishina equation
Parameters
----------
theta : float or array
phi : float or array
ko : float or array
The energy of the incident photon in eV
element : int (optional)
Atomic number or the element scattering. If none is supplied, this function returns scattering from a free electron
Returns
-------
d : float or array
The differential scattering element for a given phi and theta
"""
c = compton(theta, phi, ko, Z)
t = thomson(theta, phi, ko, Z)
return c + t
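if __name__ == "__main__":
    # Usage sketch (assumed geometry, not from the original library): evaluate the
    # free-electron differential intensity on a small 5 x 5 detector one metre
    # downstream of the sample for 12 keV incident photons.
    x, y = np.meshgrid(np.linspace(-0.1, 0.1, 5), np.linspace(-0.1, 0.1, 5))
    theta, phi = transform_spherical(x, y, 1.0)
    print(differential_intensity(theta, phi, 12000.))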
|
# Public class representing a fraction
class Fraction:
    # Constructor that requires the numerator and denominator
    # of a fraction
def __init__(self, numerator, denominator):
self.numerator = numerator
self.denominator = denominator
    # Public method for adding a fraction.
    # The fraction passed to the method is added to the current
    # fraction and the result is returned as a separate object.
def add(self, f):
z1 = self.numerator * f.getDenominator()
z2 = self.denominator * f.getNumerator()
return Fraction(z1 + z2, self.denominator * f.getDenominator())
    # Public method for multiplying a fraction.
    # The current fraction is multiplied by the fraction passed
    # to the method and the result is returned as a separate object.
def multiply(self, f):
return Fraction(self.numerator * f.getNumerator(),
self.denominator * f.getDenominator())
    # Public method returning the numerator
def getNumerator(self):
return self.numerator
    # Public method returning the denominator
def getDenominator(self):
return self.denominator
    # Public method for the string representation of a fraction
def __str__(self):
return str(self.numerator) + "/" + str(self.denominator)
# Entry point of the main program.
# The implemented class is instantiated and used here for
# demonstration and testing purposes.
# 1/2
f1 = Fraction(1, 2)
# 1/4
f2 = Fraction(1, 4)
# 1/2 + 1/4 = 6/8
sum = f1.add(f2)
print str(f1) + " + " + str(f2) + " = " + str(sum)
# 1/2 * 1/4 = 1/8
mult = f1.multiply(f2)
print str(f1) + " * " + str(f2) + " = " + str(mult)
|
import collections
from ...keywords import Keyword, Keywords, parsers
def load_psi4_keywords(options: Keywords) -> None:
opts = _query_options_defaults_from_psi()
def p4_validator(val):
try:
nuval = val.upper()
except AttributeError:
nuval = val
return nuval
for m in opts:
for o, v in opts[m].items():
if m == "GLOBALS":
keyword = o
else:
keyword = m + "__" + o
options.add("psi4", Keyword(keyword=keyword, default=v["value"], validator=p4_validator))
options.add(
"psi4",
Keyword(
keyword="function_kwargs_dertype",
default=None,
validator=parsers.intenum("0 1 2", nullable=True),
glossary="User dertype for gradients and Hessians.",
),
)
def _basic_validator(val):
try:
nuval = val.upper()
except AttributeError:
nuval = val
return nuval
def load_cfour_keywords_from_psi4(options: Keywords) -> None:
opts = _query_options_defaults_from_psi()
opts = opts["CFOUR"]
for o, v in opts.items():
if o.startswith("CFOUR_"):
options.add("cfour", Keyword(keyword=o[6:], default=v["value"], validator=_basic_validator))
def _query_options_defaults_from_psi(changedOnly=False):
"""Function to return a string of commands to replicate the
current state of user-modified options. Used to capture C++
options information for distributed (sow/reap) input files.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- Need some option to get either all or changed
- Need some option to either get dict or set string or psimod command list
- command return doesn't revoke has_changed setting for unchanged with changedOnly=False
"""
import psi4
modules = [
# PSI4 Modules
"ADC",
"CCENERGY",
"CCEOM",
"CCDENSITY",
"CCLAMBDA",
"CCHBAR",
"CCRESPONSE",
"CCTRANSORT",
"CCTRIPLES",
"CPHF",
"DCT",
"DETCI",
"DFEP2",
"DFMP2",
"DFOCC",
"DLPNO",
"FINDIF",
"FISAPT",
"FNOCC",
"MCSCF",
"MINTS",
"MRCC",
"OCC",
"OPTKING",
"PSIMRCC",
"SAPT",
"SCF",
"THERMO",
# External Modules
"CFOUR",
# "DMRG",
# "EFP",
# "GDMA",
# "PCM",
# "PE",
]
options = collections.defaultdict(dict)
for opt in psi4.core.get_global_option_list():
hoc = psi4.core.has_global_option_changed(opt)
if hoc or not changedOnly:
if opt in ["DFT_CUSTOM_FUNCTIONAL", "EXTERN"]: # Feb 2017 hack
continue
val = psi4.core.get_global_option(opt)
options["GLOBALS"][opt] = {"value": val, "has_changed": hoc}
for module in modules:
if psi4.core.option_exists_in_module(module, opt):
hoc = psi4.core.has_option_changed(module, opt)
if hoc or not changedOnly:
val = psi4.core.get_option(module, opt)
options[module][opt] = {"value": val, "has_changed": hoc}
return options
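# Usage sketch (an illustration only; requires a working psi4 installation):
#   opts = _query_options_defaults_from_psi(changedOnly=True)
#   print(opts["GLOBALS"])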
|
import re
def handle_extends(content, seen_templates, template_processor):
matches = re.search(r'{%\s*extends\s*"([^"]+)"\s*%}', content)
if not matches:
return content
name = matches.group(1)
if name in seen_templates:
raise Exception("Recursive template in extends")
seen_templates[name] = True
parent_content = template_processor(name, seen_templates)
# Build a hash of block names to content, and then fill them in
# in the parent template
block_values = {}
block_regex = r'{%\s*block\s+([^ ]+)\s*%}(.*?){%\s*endblock\s*(\1|)\s*%}'
for match in re.finditer(block_regex, content, re.DOTALL):
block_name = match.group(1)
full_block = match.group(0)
block_values[block_name] = full_block
# We need to bring up any load tags that aren't in block content.
# Start by getting all content that isn't in a block, then get the load
# tags
outside_of_blocks = re.sub(block_regex, "", content)
load_tags = {}
for match in re.finditer(r'{%\s*load\s+.*?%}', outside_of_blocks):
load_tags[match.group(0)] = True
# Now replace any blocks in the parent content with those blocks, and
# return the parent content
def replace_block(match):
block_name = match.group(1)
if block_name in block_values:
return block_values[block_name]
return match.group(0)
content = re.sub(block_regex, replace_block, parent_content)
# Now we add any loose load tags back in to the top of the page
load_content = "".join(sorted(load_tags.keys()))
return u"%s%s" % (load_content, content)
|
#!/usr/local/bin/python
import psycopg2
import sys
import time
import os
from db_config import config
from db_utils import cursor_pprint
def connect():
DEV = True
if 'ENVIRONMENT' in os.environ:
env = os.environ['ENVIRONMENT']
print('working on %s' % env)
DEV = 'DEV' in env
conn = None
try:
# get connection parameters
params = config()
conn = psycopg2.connect(**params)
conn.autocommit = True
cur = conn.cursor()
sql_script = ''
if len(sys.argv) > 1 and os.path.exists(sys.argv[1]):
sql_script = sys.argv[1]
print('sql script file: ', sql_script)
fd = open(sql_script, 'r')
sql_commands_full = fd.read()
fd.close()
else:
print('no sql script file defined')
sql_commands_full = 'select to_char(current_timestamp, \'YYYY-MM-DD HH12:MI:SS\') as now;\n\nselect version()'
sql_commands = sql_commands_full.split('\n\n')
print('about to run %s sql commands:\n' % str(len(sql_commands)))
print('============================')
if sql_script != '' and sql_script.lower().find('db_sp_') != -1:
# call stored procedure
            print('stored procedure: %s \n' % sql_commands_full)
cur.callproc(sql_commands_full)
else:
SQL_CMD_RESULT = ('select', 'show')
for sql_command in sql_commands:
sql_command_to_run = ''
lines = sql_command.split('\n')
for line in lines:
if not line.startswith('--'):
sql_command_to_run = sql_command_to_run + line + '\n'
if len(sql_command_to_run) > 5:
start = time.perf_counter()
print('%s \n' % sql_command)
cur.execute(sql_command_to_run)
if cur.rowcount > 0:
if sql_command_to_run.lower().startswith(SQL_CMD_RESULT) and sql_command_to_run.lower().find('into') == -1:
rows = cur.fetchall()
print(cursor_pprint(cur, rows, 1), '\n')
else:
conn.commit()
print('rows affected: ', cur.rowcount)
else:
print('#### NO RESULTS ###')
print('elapsed time: {0:.4f} minutes'.format((time.perf_counter() - start) / 60))
print('============================')
print('')
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(f'error: {error}')
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
if __name__ == '__main__':
connect()
|
import argparse
from Tests.test_utils import str2bool, run_command
SERVER_GA = "Demisto-Circle-CI-Content-GA*"
SERVER_MASTER = "Demisto-Circle-CI-Content-Master*"
SERVER_ONE_BEFORE_GA = "Demisto-Circle-CI-Content-OneBefore-GA*"
SERVER_TWO_BEFORE_GA = "Demisto-Circle-CI-Content-TwoBefore-GA*"
AMI_LIST = [SERVER_GA, SERVER_MASTER, SERVER_ONE_BEFORE_GA, SERVER_TWO_BEFORE_GA]
AMI_NAME_TO_READABLE = {
SERVER_GA: "Demisto GA",
SERVER_MASTER: "Server Master",
SERVER_ONE_BEFORE_GA: "Demisto one before GA",
SERVER_TWO_BEFORE_GA: "Demisto two before GA"}
def is_nightly_build():
parser = argparse.ArgumentParser(description='Utility creating an instance for Content build')
parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly build')
options = parser.parse_args()
return options.nightly
def create_instance(ami_name):
print "Creating instance from the AMI image for {}".format(AMI_NAME_TO_READABLE[ami_name])
run_command("./Tests/scripts/create_instance.sh instance.json {}".format(ami_name)) # noqa
with open('./Tests/instance_ids.txt', 'r') as instance_file:
instance_id = instance_file.read()
return instance_id
def main():
instance_ids = []
instance_ids.append("{}:{}".format(AMI_NAME_TO_READABLE[SERVER_GA], create_instance(SERVER_GA)))
# if not is_nightly_build():
# instance_ids.append("{}:{}".format(AMI_NAME_TO_READABLE[SERVER_GA], create_instance(SERVER_GA)))
#
# else:
# for ami_name in AMI_LIST:
# if ami_name == SERVER_TWO_BEFORE_GA: # Skipping this version until new Server version will be released.
# continue
# instance_ids.append("{}:{}".format(AMI_NAME_TO_READABLE[ami_name], create_instance(ami_name)))
with open('./Tests/instance_ids.txt', 'w') as instance_file:
instance_file.write('\n'.join(instance_ids))
if __name__ == "__main__":
main()
|
#
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
##############################################################################
# NOTE: the jvm should have been initialized, or this test will certainly fail
##############################################################################
import sys
import jpy
import numpy
import pandas
from datetime import datetime
from deephaven_legacy import TableTools, tableToDataFrame, dataFrameToTable, createTableFromData
from deephaven_legacy.conversion_utils import NULL_BYTE, NULL_SHORT, NULL_INT, NULL_LONG, \
convertToJavaArray, convertToJavaList, convertToJavaArrayList, convertToJavaHashSet, \
convertToJavaHashMap
if sys.version_info[0] < 3:
int = long # cheap python2/3 compliance - probably only necessary for 32 bit system?
import unittest2 as unittest
# not part of the standard library, installed via pip (or the like)
# it provides backward compatibility with python3 style subTest context manager (handy for complex tests)
else:
import unittest
class TestTableDataframeConversion(unittest.TestCase):
"""
Test cases for table <-> dataframe conversions, really a functionality test
"""
def testTableToDataframeNoNulls(self):
"""
Test for converting a basic table with no null values to a dataframe
"""
tab_reg = TableTools.emptyTable(1).update("boolCol=(boolean)false",
"byteCol=(byte)0",
"shortCol=(short)0",
"intCol=(int)0",
"longCol=(long)0",
"floatCol=(float)0",
"doubleCol=(double)0",
"datetimeCol=new DateTime(0)",
"stringCol=`test`")
# there are no nulls here, so all three conversion options should work, and result in identical dataframes
with self.subTest(msg="convert null when no null values"):
df = tableToDataFrame(tab_reg, convertNulls='ERROR', categoricals=None)
df_reg = tableToDataFrame(tab_reg, convertNulls='PASS', categoricals=None)
df_reg_nc = tableToDataFrame(tab_reg, convertNulls='CONVERT', categoricals=None)
# EQUALITY CHECK
with self.subTest(msg='converted dfs are equal'):
self.assertTrue(df.equals(df_reg)) # equals is transitive
self.assertTrue(df_reg.equals(df_reg_nc))
# DATA TYPE TEST
for col, dtyp in [('boolCol', numpy.bool_),
('byteCol', numpy.int8),
('shortCol', numpy.int16),
('intCol', numpy.int32),
('longCol', numpy.int64),
('floatCol', numpy.float32),
('doubleCol', numpy.float64),
('datetimeCol', numpy.dtype('datetime64[ns]')),
('stringCol', numpy.object)]:
# NB: I'm confident that dtype is not checked for df.equals(), so it's not redundant to do both
with self.subTest(msg='dtype nulls_convert=ERROR for {}'.format(col)):
self.assertEqual(df[col].values.dtype, dtyp)
with self.subTest(msg='dtype nulls_convert=PASS for {}'.format(col)):
self.assertEqual(df_reg[col].values.dtype, dtyp)
with self.subTest(msg='dtype nulls_convert=CONVERT for {}'.format(col)):
self.assertEqual(df_reg_nc[col].values.dtype, dtyp) # there are no nulls -> no dumb type casts
# VALUES TEST
for col, val in [('boolCol', False),
('byteCol', 0),
('shortCol', 0),
('intCol', 0),
('longCol', 0),
('floatCol', 0),
('doubleCol', 0),
('datetimeCol', numpy.datetime64(0, 'ns')),
('stringCol', u'test')]:
# NB: raw unicode string should be simultaneously python2/3 compliant
with self.subTest(msg='entries for {}'.format(col)):
self.assertEqual(df[col].values[0], val)
def testTableToDataframeWithNulls(self):
"""
Test for converting a basic table with null values to a dataframe
"""
tab_nulls = TableTools.emptyTable(2).update("boolCol=((i==0) ? true : null)",
"byteCol=(byte)((i==0) ? 0 : NULL_BYTE)",
"shortCol=(short)((i==0) ? 2 : NULL_SHORT)",
"intCol=(int)((i==0) ? 0 : NULL_INT)",
"longCol=(long)((i==0) ? 0 : NULL_LONG)",
"floatCol=(float)((i==0) ? 2 : NULL_FLOAT)",
"doubleCol=(double)((i==0) ? 2 : NULL_DOUBLE)",
"datetimeCol=((i==0) ? new DateTime(0) : null)")
with self.subTest(msg="Does not convert if convertNulls=ERROR and nulls present"):
self.assertRaises(ValueError, tableToDataFrame,
tab_nulls, convertNulls='ERROR', categoricals=None)
with self.subTest(msg="Converts if convertNulls in [PASS, CONVERT] and nulls present"):
df_nulls = tableToDataFrame(tab_nulls, convertNulls='PASS', categoricals=None)
df_nulls_nc = tableToDataFrame(tab_nulls, convertNulls='CONVERT', categoricals=None)
# EQUALITY CHECK
self.assertFalse(df_nulls.equals(df_nulls_nc))
# DATA TYPES TEST
# verify that the dtypes are as expected when we DO NOT convert the nulls
for col, dtyp in [('boolCol', numpy.bool_),
('byteCol', numpy.int8),
('shortCol', numpy.int16),
('intCol', numpy.int32),
('longCol', numpy.int64),
('floatCol', numpy.float32),
('doubleCol', numpy.float64),
('datetimeCol', numpy.dtype('datetime64[ns]'))
]:
with self.subTest(msg='data type, nulls_convert=False, for {}'.format(col)):
self.assertEqual(df_nulls[col].values.dtype, dtyp) # as before
# verify that the dtypes are as expected when we DO convert the nulls
for col, dtyp in [('boolCol', numpy.object),
('byteCol', numpy.float32),
('shortCol', numpy.float32),
('intCol', numpy.float64),
('longCol', numpy.float64),
('floatCol', numpy.float32),
('doubleCol', numpy.float64),
('datetimeCol', numpy.dtype('datetime64[ns]'))
]:
with self.subTest(msg='data type, nulls_convert=True, for {}'.format(col)):
self.assertEqual(df_nulls_nc[col].values.dtype, dtyp)
# VALUES TEST
# verify that the null entries are as expected when we DO NOT convert the nulls
for col, val in [('boolCol', False),
('byteCol', NULL_BYTE),
('shortCol', NULL_SHORT),
('intCol', NULL_INT),
('longCol', NULL_LONG),
]:
with self.subTest(msg='null entry, nulls_convert=False, for {}'.format(col)):
self.assertEqual(df_nulls[col].values[1], val)
# floating point types & time converted to NaN/T regardless of null conversion
with self.subTest(msg='null entry, nulls_convert=False, for floatCol'):
self.assertTrue(numpy.isnan(df_nulls['floatCol'].values[1]))
with self.subTest(msg='null entry, nulls_convert=False, for doubleCol'):
self.assertTrue(numpy.isnan(df_nulls['doubleCol'].values[1]))
with self.subTest(msg='null entry, nulls_convert=False, for datetimeCol'):
self.assertTrue(numpy.isnat(df_nulls['datetimeCol'].values[1]))
# verify that the null entries are as expected when we DO convert the nulls
with self.subTest(msg='entries nulls_convert=True for bool'):
self.assertIsNone(df_nulls_nc['boolCol'][1])
for col in ['byteCol',
'shortCol',
'intCol',
'longCol',
'floatCol',
'doubleCol']:
with self.subTest(msg='regular entry, nulls_convert=True, for {}'.format(col)):
self.assertFalse(numpy.isnan(df_nulls_nc[col].values[0]))
with self.subTest(msg='null entry, nulls_convert=True, for {}'.format(col)):
self.assertTrue(numpy.isnan(df_nulls_nc[col].values[1]))
with self.subTest(msg='regular entry, nulls_convert=True, for datetimeCol'):
self.assertEqual(df_nulls_nc['datetimeCol'].values[0], numpy.datetime64(0, 'ns'))
        with self.subTest(msg='null entry, nulls_convert=True, for datetimeCol'):
            self.assertTrue(numpy.isnat(df_nulls_nc['datetimeCol'].values[1]))
def testDataframeToTable(self):
"""
Test for converting dataframe to a table
"""
getElement = lambda tab, col: tab.getColumnSource(col).get(0)
df = pandas.DataFrame({'boolCol': numpy.zeros((1, ), dtype=numpy.bool_),
'byteCol': numpy.zeros((1,), dtype=numpy.int8),
'shortCol': numpy.zeros((1,), dtype=numpy.int16),
'intCol': numpy.zeros((1,), dtype=numpy.int32),
'longCol': numpy.zeros((1,), dtype=numpy.int64),
'floatCol': numpy.zeros((1,), dtype=numpy.float32),
'doubleCol': numpy.zeros((1,), dtype=numpy.float64),
'datetimeCol': numpy.zeros((1, ), dtype='datetime64[ns]'),
'stringCol': numpy.array([u'test', ], dtype=numpy.unicode_)
})
        # NB: using a raw unicode string should be simultaneously python2/3 compliant
tab = dataFrameToTable(df)
tabDef = tab.getDefinition() # get the meta-data for the table
# check that the datatypes make sense
for col, typ in [('boolCol', 'class java.lang.Boolean'),
('byteCol', 'byte'),
('shortCol', 'short'),
('intCol', 'int'),
('longCol', 'long'),
('floatCol', 'float'),
('doubleCol', 'double'),
('datetimeCol', 'class io.deephaven.time.DateTime'),
('stringCol', 'class java.lang.String')
]:
with self.subTest(msg="data type for column {}".format(col)):
self.assertEqual(typ, tabDef.getColumn(col).getDataType().toString())
# Checking equality of the entry on the java side, to the best of my ability...
with self.subTest(msg="entry for column boolCol"):
self.assertEqual(getElement(tab, 'boolCol'), False) # I'm guessing that Boolean() -> False
with self.subTest(msg="entry for column byteCol"):
self.assertEqual(getElement(tab, 'byteCol'), 0) # I'm guessing that Byte() -> 0
with self.subTest(msg="entry for column shortCol"):
self.assertEqual(getElement(tab, 'shortCol'), 0) # I'm guessing that Short() -> 0
with self.subTest(msg="entry for column intCol"):
self.assertEqual(getElement(tab, 'intCol'), 0) # I'm guessing that Integer() -> 0
with self.subTest(msg="entry for column longCol"):
self.assertEqual(getElement(tab, 'longCol'), 0) # I'm guessing that Long() -> 0
with self.subTest(msg="entry for column floatCol"):
self.assertEqual(getElement(tab, 'floatCol'), 0) # I'm guessing that Float() -> 0
with self.subTest(msg="entry for column doubleCol"):
self.assertEqual(getElement(tab, 'doubleCol'), 0) # I'm guessing that Double() -> 0
with self.subTest(msg="entry for column datetimeCol"):
cls = jpy.get_type('io.deephaven.time.DateTime')
self.assertEqual(getElement(tab, 'datetimeCol'), cls(0))
with self.subTest(msg="entry for column stringCol"):
self.assertEqual(getElement(tab, 'stringCol'), u'test')
def testUnsupportedPrimitiveTest(self):
"""
Test for behavior of unsupported column type conversion
"""
for dtypename in ['uint8', 'uint16', 'uint32', 'uint64', 'complex64', 'complex128', 'float16']:
with self.subTest(msg="dtype={}".format(dtypename)):
df = pandas.DataFrame({'test': numpy.zeros((1, ), dtype=dtypename)})
self.assertRaises(ValueError, dataFrameToTable, df)
def testArrayColumnConversion(self):
"""
Test for behavior when one of the columns is of array type (in each direction)
"""
firstTable = TableTools.emptyTable(10).update("MyString=new String(`a`+i)",
"MyChar=new Character((char) ((i%26)+97))",
"MyBoolean=new Boolean(i%2==0)",
"MyByte=new java.lang.Byte(Integer.toString(i%127))",
"MyShort=new Short(Integer.toString(i%32767))",
"MyInt=new Integer(i)",
"MyLong=new Long(i)",
"MyFloat=new Float(i+i/10)",
"MyDouble=new Double(i+i/10)"
)
arrayTable = firstTable.update("A=i%3").groupBy("A")
dataFrame = tableToDataFrame(arrayTable, convertNulls='PASS', categoricals=None)
for colName, arrayType in [
('MyString', 'io.deephaven.vector.ObjectVector'),
('MyChar', 'io.deephaven.vector.CharVector'),
('MyBoolean', 'io.deephaven.vector.ObjectVector'), # NB: BooleanVector is deprecated
('MyByte', 'io.deephaven.vector.ByteVector'),
('MyShort', 'io.deephaven.vector.ShortVector'),
('MyInt', 'io.deephaven.vector.IntVector'),
('MyLong', 'io.deephaven.vector.LongVector'),
('MyFloat', 'io.deephaven.vector.FloatVector'),
('MyDouble', 'io.deephaven.vector.DoubleVector'),
]:
with self.subTest(msg="type for original column {}".format(colName)):
self.assertEqual(arrayTable.getColumn(colName).getType().getName(), arrayType)
self.assertEqual(dataFrame[colName].values.dtype, numpy.object)
for colName, dtype in [
('MyBoolean', numpy.bool_),
('MyByte', numpy.int8),
('MyShort', numpy.int16),
('MyInt', numpy.int32),
('MyLong', numpy.int64),
('MyFloat', numpy.float32),
('MyDouble', numpy.float64),
]:
with self.subTest(msg="type of converted array for {}".format(colName)):
self.assertTrue(isinstance(dataFrame[colName].values[0], numpy.ndarray))
self.assertEqual(dataFrame[colName].values[0].dtype, dtype)
with self.subTest(msg="type of converted array for MyString"):
self.assertTrue(isinstance(dataFrame['MyString'].values[0], numpy.ndarray))
self.assertTrue(dataFrame['MyString'].values[0].dtype.name.startswith('unicode') or
dataFrame['MyString'].values[0].dtype.name.startswith('str'))
        # NB: numpy really doesn't have a char type, so it gets treated like an uninterpreted type
with self.subTest(msg="type of converted array for MyChar"):
self.assertTrue(isinstance(dataFrame['MyChar'].values[0], numpy.ndarray))
self.assertTrue(dataFrame['MyChar'].values[0].dtype.name.startswith('unicode') or
dataFrame['MyChar'].values[0].dtype.name.startswith('str'))
# convert back
backTable = dataFrameToTable(dataFrame, convertUnknownToString=True)
for colName, arrayType in [
('MyString', 'io.deephaven.vector.ObjectVectorDirect'),
('MyChar', 'io.deephaven.vector.CharVectorDirect'),
('MyBoolean', 'io.deephaven.vector.ObjectVectorDirect'),
('MyByte', 'io.deephaven.vector.ByteVectorDirect'),
('MyShort', 'io.deephaven.vector.ShortVectorDirect'),
('MyInt', 'io.deephaven.vector.IntVectorDirect'),
('MyLong', 'io.deephaven.vector.LongVectorDirect'),
('MyFloat', 'io.deephaven.vector.FloatVectorDirect'),
('MyDouble', 'io.deephaven.vector.DoubleVectorDirect'),
]:
with self.subTest(msg="type for reverted column for {}".format(colName)):
self.assertEqual(backTable.getColumn(colName).getType().getName(), arrayType)
with self.subTest(msg="element type for reverted column MyBoolean"):
self.assertEqual(backTable.getColumn('MyBoolean').get(0).getComponentType().getName(), 'java.lang.Boolean')
with self.subTest(msg="element type for reverted column MyString"):
self.assertEqual(backTable.getColumn('MyString').get(0).getComponentType().getName(), 'java.lang.String')
def testListColumnVersion(self):
"""
Test for behavior when one of the data frame columns contains tuples or lists
"""
def1 = {('a', 'b'): {('A', 'B'): 1, ('A', 'C'): 2},
('a', 'a'): {('A', 'C'): 3, ('A', 'B'): 4},
('a', 'c'): {('A', 'B'): 5, ('A', 'C'): 6},
('b', 'a'): {('A', 'C'): 7, ('A', 'B'): 8},
('b', 'b'): {('A', 'D'): 9, ('A', 'B'): 10}}
dataframe1 = pandas.DataFrame(def1)
table1 = dataFrameToTable(dataframe1)
print("dataframe1 = \n{}".format(dataframe1))
print("table1 = {}\n".format(TableTools.html(table1)))
def2 = {'one': [(1, 2), (2, 3), (3, ), (4, 5, 6, 7)],
'two': [(4, 5), (6, 5, 3), (7, 6), (8, 7)],
'thing': [None, None, None, None]}
dataframe2 = pandas.DataFrame(def2)
table2 = dataFrameToTable(dataframe2, convertUnknownToString=True)
print("dataframe2 = \n{}".format(dataframe2))
print("table2 = {}\n".format(TableTools.html(table2)))
def3 = {'one': [[1, 2], [2, 3], [3, 4], [4, 5, 6, 7]],
'two': [[4, 5], [6, 5], [7, 6], [8, 7]],
'thing': [None, None, None, None]}
dataframe3 = pandas.DataFrame(def3)
table3 = dataFrameToTable(dataframe3, convertUnknownToString=True)
print("dataframe3 = \n{}".format(dataframe3))
print("table3 = {}\n".format(TableTools.html(table3)))
def testMultidimensionalArray(self):
"""
Test suite for behavior of converting a dataframe with multi-dimensional column
"""
for dtypename, array_type in [('int8', '[[B'), ('int16', '[[S'), ('int32', '[[I'), ('int64', '[[J'),
('float32', '[[F'), ('float64', '[[D'), ('U1', '[[C'),
('U3', '[[Ljava.lang.String;'),
('datetime64[ns]', '[[Lio.deephaven.time.DateTime;')]:
with self.subTest(msg="dtype={}".format(dtypename)):
nparray = numpy.empty((2, ), dtype=numpy.object)
nparray[:] = [numpy.zeros((3, 4), dtype=dtypename) for i in range(2)]
df = pandas.DataFrame({'test': nparray})
tab = dataFrameToTable(df)
                self.assertEqual(tab.getColumn('test').getType().getName(), 'io.deephaven.vector.ObjectVectorDirect')
self.assertEqual(tab.getColumn('test').get(0).getClass().getName(), array_type)
with self.subTest(msg="nested array exception check"):
nparray = numpy.empty((2, ), dtype=numpy.object)
arr1 = numpy.empty((3, 4), dtype=numpy.object)
arr1[:] = 'junk'
nparray[:] = [arr1 for i in range(2)]
df = pandas.DataFrame({'test': nparray})
self.assertRaises(ValueError, dataFrameToTable, df, convertUnknownToString=False)
def testConversionUtility(self):
"""
Test suite for convertToJava* methods in conversion_utils module
"""
# mostly I'm just going to create a coverage test for a couple simple basic cases
with self.subTest(msg="convertToJavaArray for string"):
junk = convertToJavaArray("abc")
with self.subTest(msg="convertToJavaList for string"):
junk = convertToJavaList("abc")
with self.subTest(msg="convertToJavaArrayList for string"):
junk = convertToJavaArrayList("abc")
with self.subTest(msg="convertToJavaHashSet for string"):
junk = convertToJavaHashSet("abc")
with self.subTest(msg="convertToJavaArray for int list"):
junk = convertToJavaArray([0, 1, 2])
with self.subTest(msg="convertToJavaList for int list"):
junk = convertToJavaList([0, 1, 2])
with self.subTest(msg="convertToJavaArrayList for int list"):
junk = convertToJavaArrayList([0, 1, 2])
with self.subTest(msg="convertToJavaHashSet for int list"):
junk = convertToJavaHashSet([0, 1, 2])
with self.subTest(msg="convertToJavaHashMap for dict"):
junk = convertToJavaHashMap({'one': 1, 'two': 2})
with self.subTest(msg="convertToJavaHashMap for two lists"):
junk = convertToJavaHashMap(['one', 'two'], [1, 2])
def testCreateTableFromData(self):
"""
        Test suite for createTableFromData method
"""
data_names = ['intList', 'floatList', 'charList', 'stringList', 'booleanList', 'timeList']
data_list = [[1, 2, None], [1., 2., None], ['A', 'B', None], [u'one', u'two', None],
[True, False, None], [datetime.utcnow(), datetime.utcnow(), datetime.utcnow()]]
with self.subTest(msg="createTableFromData with lists"):
tab = createTableFromData(data_list, columns=data_names)
print("tableFromList = {}\n".format(TableTools.html(tab)))
data_dict = {}
for nm, da in zip(data_names, data_list):
data_dict[nm] = da
with self.subTest(msg="createTableFromData with dict"):
tab = createTableFromData(data_dict, columns=data_names)
print("tableFromDict = {}\n".format(TableTools.html(tab)))
|
from node import Node
from collections import deque
#Methods to implement:
#1 - isBalanced()
class BinaryTree:
def __init__(self, keyElement):
self.root = Node(int(keyElement))
self.size = 1
def getSize(self):
return int(self.size)
def preOrder(self):
print("Pre-Order traversal: ",end='')
self.__preOrder(self.root)
print("")
def __preOrder(self, root):
if(root == None):
return
print(str(root.key) + " ",end='')
self.__preOrder(root.left)
self.__preOrder(root.right)
def inOrder(self):
print("In-Order traversal: ",end='')
self.__inOrder(self.root)
print("")
def __inOrder(self,root):
if(root == None):
return
self.__inOrder(root.left)
        print(str(root.key) + " ", end='')
self.__inOrder(root.right)
def postOrder(self):
print("Post Order traversal: ",end='')
self.__postOrder(self.root)
print("")
def __postOrder(self,root):
if(root == None):
return
self.__postOrder(root.left)
self.__postOrder(root.right)
print(str(root.key) + " ", end='')
def print(self):
if(self.root == None):
print("Tree is empty")
else:
self.__print(self.root,int(0),"")
def __print(self,root,level,tab):
if(root == None):
return
self.__print(root.right,level + 1, tab + "\t")
print(tab + "Level " + str(level) + " : " + str(root.key))
self.__print(root.left,level + 1, tab + "\t")
def levelOrder(self):
if(self.root == None):
return
else:
self.__levelOrder(self.root)
print("")
def __levelOrder(self,root):
queue = deque()
queue.append(root)
print("Level order: ", end ='')
while(len(queue) > 0):
element = queue.popleft()
print(str(element.key) + " ", end = '')
if(element.hasLeft()):
queue.append(element.left)
if(element.hasRight()):
queue.append(element.right)
return
def isBinarySearchTree(self):
if(self.root == None):
return False
return self.__isBinarySearchTree(self.root,None,None)
def __isBinarySearchTree(self,root,rootMin,rootMax):
if(root == None):
return True
if(rootMin != None and root.key < rootMin.key):
return False
if(rootMax != None and root.key > rootMax.key):
return False
return self.__isBinarySearchTree(root.left,rootMin,root) and self.__isBinarySearchTree(root.right,root,rootMax)
    #To return a number that represents an unsuccessful operation, we return "None" so as to avoid using a negative sentinel value
    #Kth element according to inOrder traversal
def findKthElement(self, position):
if(int(position) < 0):
print("Invalid position: Less than 0")
return
elif(self.root.quantityLeft + self.root.quantityRight + 1 < int(position)):
print("Error: Number of elements less than " + str(position))
return
else:
element = self.__findKthElement(self.root,int(position))
print("Element at position " + str(position) + ": " + str(element.key))
def __findKthElement(self,root,position):
if(root == None):
print("There isn't element at this position.")
return
if(root.quantityLeft + 1 == position):
return root
elif(root.quantityLeft + 1 < position):
return self.__findKthElement(root.right,position - (root.quantityLeft + 1))
else:
return self.__findKthElement(root.left,position)
"""
def getSize(self):
if self.root == None:
return 0
else:
return self.__getSize(self.root)
def __getSize(self, root):
if(root == None):
return 0
x = self.__getSize(root.left)
y = self.__getSize(root.right)
return (x + y + 1)
"""
|
from common.test_base.mobile_test_case import MobileTestCase
from ..pages.main_page import MainPage
from ..pages.post_list_page import PostListPage
class BlogMobileTestCase(MobileTestCase):
"""Test case for testing by blog"""
@classmethod
def setup_class(cls):
super().setup_class()
cls.main_page = MainPage(cls._driver, cls.log)
cls.post_list_page = PostListPage(cls._driver, cls.log)
|
# 3rd-party modules
import requests
from bs4 import BeautifulSoup
URL: str = 'https://www.billboard.com/charts/hot-100/' # billboard web info
CLASS_FOR_SONG: str = 'u-line-height-125'
CLASS_FOR_ARTIST: str = 'a-truncate-ellipsis-2line'
class RequestBillboardInfo:
def __init__(self):
        self.date_to_travel: str = input("What year would you like to travel to? Type the date in the format YYYY-MM-DD: ")
self.song_list: list = []
self.artist_list: list = []
self.spotify_headers = None
def request_billboard_info(self):
        response = requests.get(f'{URL}{self.date_to_travel}/')
web_html = response.text
soup = BeautifulSoup(web_html, 'html.parser')
html_song_extracted = soup.find_all(name='h3', id='title-of-a-story', class_=CLASS_FOR_SONG)
html_artist_extracted = soup.find_all(name='span', class_=CLASS_FOR_ARTIST)
self.song_list = [item.getText().replace('\n', '').replace('\t', '') for item in html_song_extracted]
self.artist_list = [item.getText().replace('\n', '').replace('\t', '') for item in html_artist_extracted]
|
# Generated by Django 3.2.13 on 2022-04-13 09:45
import django_extensions.db.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("challenges", "0015_auto_20220412_1038"),
("pages", "0003_historicalpage"),
]
operations = [
migrations.RenameField(
model_name="historicalpage",
old_name="title",
new_name="slug",
),
migrations.RenameField(
model_name="page",
old_name="title",
new_name="slug",
),
migrations.AlterField(
model_name="historicalpage",
name="display_title",
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name="page",
name="display_title",
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name="historicalpage",
name="slug",
field=django_extensions.db.fields.AutoSlugField(
blank=True,
editable=False,
max_length=64,
populate_from="display_title",
),
),
migrations.AlterField(
model_name="page",
name="slug",
field=django_extensions.db.fields.AutoSlugField(
blank=True,
editable=False,
max_length=64,
populate_from="display_title",
),
),
migrations.AlterUniqueTogether(
name="page",
unique_together={("challenge", "slug")},
),
]
|
print("Hello This is Mounika")
|
# write a program to count characters in a string
st = "AmmarAdil"
count = {}
for a in st:
if a in count:
count[a]+=1
else:
count[a] = 1
print('Count', count)
# write a program to print count of vowels in a string
st = "ammaradil"
vowels = ['a', 'e', 'i', 'o', 'u']
count = 0
for s in st:
    if s in vowels:
count = count+1
print("Count", count)
# write program to convert string to upper case
st = "ammar adil"
upper_st = st.upper()
print("Upper Case", upper_st)
# write program to convert string to lower case
st = "AMMAR ADIL"
lower_st = st.lower()
print("Lower Case", lower_st)
# write a program to find union of 2 arrays
a = {1, 2, 3, 4}
b = {3, 4, 5, 6}
union_both = a.union(b)
print("Union", union_both)
# write a program to find intersection
a = {1, 2, 3, 4}
b = {3, 4, 5, 6}
intersection_both = a.intersection(b)
print("Intersection", intersection_both)
# write a program to create print array in beautiful format
a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
for i in a:
row = '|'
for b in i:
row = row + ' ' + str(b)
print(row + ' ' + '|')
# write a program to create zero matrix
rows = 2
cols = 3
M = []
while len(M) < rows:
M.append([])
while len(M[-1]) < cols:
M[-1].append(0.0)
print("Zero Matrix")
for i in range(rows):
row = '|'
for b in range(cols):
row = row + ' ' + str(M[i][b])
print(row + ' ' + '|')
# write a program to create identity matrix with dimension provided
dim = 3
M = []
while len(M) < dim:
M.append([])
while len(M[-1]) < dim:
M[-1].append(0.0)
for i in range(dim):
M[i][i] = 1.0
print('Identity Matrix')
for i in range(dim):
row = '|'
for b in range(dim):
row = row + ' ' + str(M[i][b])
print(row + ' ' + '|')
# Write a program to copy a given array
M = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
rows = len(M)
cols = len(M[0])
MC = []
while len(MC) < rows:
MC.append([])
while len(MC[-1]) < cols:
MC[-1].append(0.0)
for i in range(rows):
for j in range(cols):
MC[i][j] = M[i][j]
print("Copied Array")
for i in range(rows):
row = '|'
for b in range(cols):
row = row + ' ' + str(MC[i][b])
print(row + ' ' + '|')
# write a program to transpose a matrix
M = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
if not isinstance(M[0], list):
M = [M]
rows = len(M)
cols = len(M[0])
MT = []
while len(MT) < cols:
    MT.append([])
    while len(MT[-1]) < rows:
        MT[-1].append(0.0)
for i in range(rows):
    for j in range(cols):
        MT[j][i] = M[i][j]
print("Transpose Array")
for i in range(cols):
    row = '|'
    for b in range(rows):
        row = row + ' ' + str(MT[i][b])
    print(row + ' ' + '|')
# write a program to add two matrix
A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
B = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
rowsA = len(A)
colsA = len(A[0])
rowsB = len(B)
colsB = len(B[0])
if rowsA != rowsB or colsA != colsB:
raise ArithmeticError('Matrices are NOT the same size.')
C = []
while len(C) < rowsA:
C.append([])
while len(C[-1]) < colsB:
C[-1].append(0.0)
for i in range(rowsA):
for j in range(colsB):
C[i][j] = A[i][j] + B[i][j]
print("Added Array")
for i in range(rowsA):
row = '|'
for b in range(colsA):
row = row + ' ' + str(C[i][b])
print(row + ' ' + '|')
# write a program to subtract two matrix
A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
B = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
rowsA = len(A)
colsA = len(A[0])
rowsB = len(B)
colsB = len(B[0])
if rowsA != rowsB or colsA != colsB:
raise ArithmeticError('Matrices are NOT the same size.')
C = []
while len(C) < rowsA:
C.append([])
while len(C[-1]) < colsB:
C[-1].append(0.0)
for i in range(rowsA):
for j in range(colsB):
C[i][j] = A[i][j] - B[i][j]
print("Subtracted Array")
for i in range(rowsA):
row = '|'
for b in range(colsA):
row = row + ' ' + str(C[i][b])
print(row + ' ' + '|')
# write a program to multiply two matrix
A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
B = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
rowsA = len(A)
colsA = len(A[0])
rowsB = len(B)
colsB = len(B[0])
if colsA != rowsB:
raise ArithmeticError('Number of A columns must equal number of B rows.')
C = []
while len(C) < rowsA:
C.append([])
while len(C[-1]) < colsB:
C[-1].append(0.0)
for i in range(rowsA):
for j in range(colsB):
total = 0
for ii in range(colsA):
total += A[i][ii] * B[ii][j]
C[i][j] = total
print("Multiplied Array")
for i in range(rowsA):
row = '|'
    for b in range(colsB):
row = row + ' ' + str(C[i][b])
print(row + ' ' + '|')
# write a program to join all items in a tuple into a string, using a hash character as separator
myTuple = ("John", "Peter", "Vicky")
x = "#".join(myTuple)
print(x)
# write a program to remove spaces at the beginning and at the end of the string
txt = " banana "
x = txt.strip()
print("of all fruits", x, "is my favorite")
# write a program to remove the leading and trailing characters
txt = ",,,,,rrttgg.....banana....rrr"
x = txt.strip(",.grt")
print(x)
# write a program to split a string into a list where each line is a list item
txt = "Thank you for the music\nWelcome to the jungle"
x = txt.splitlines()
print(x)
# write a program to find index of a word in given string
txt = "Hello, welcome to my world."
x = txt.index("welcome")
print(x)
# write a program to find ceil of a number
import math
number = 34.564
ce = math.ceil(number)
print('Ceil', ce)
# write a program to find absolute value of a given number
import math
number = 34.564
fa = math.fabs(number)
print('Fabs', fa)
# write a program to find factorial of a number
import math
number = 8
fa = math.factorial(number)
print('Factorial', fa)
# write a program to find exponential of a number
import math
number = 3
print('Exponential', math.exp(number))
# write a program to find log of a number
import math
num = 5
base = 7
print("Log_x_b", math.log(num, base))
# write a program to find cosine of a number
import math
num = 45
print("Cosine", math.cos(num))
# write a program to find sin of a number
import math
num = 45
print("Sin", math.sin(num))
# write a program to find tangent of a number
import math
num = 45
print("Tangent", math.tan(num))
# Write a program to print bit wise AND of two numbers
a = 60 # 60 = 0011 1100
b = 13 # 13 = 0000 1101
c = a & b # 12 = 0000 1100
print("AND", c)
# Write a program to print bit wise OR of two numbers
a = 60
b = 13
c = a | b
print("OR", c)
# Write a program to print bit wise XOR of two numbers
a = 60
b = 13
c = a ^ b
print("XOR", c)
# Write a program to calculate Binary Ones Complement of a number
a = 60
c = ~a
print("Binary Ones Complement", c)
# write a program to Binary Left Shift a number
c = a << 2
print("Binary Left Shift", c)
# write a program to Binary Right Shift a number
c = a >> 2
print("Binary Right Shift", c)
|
import unittest
from merkle_tree import MerkleTree
from helpers import to_rpc_byte_order
class TestMerkleTree(unittest.TestCase):
class TransactionMock:
def __init__(self, hash_):
self.hash = hash_
def test_root_even_transactions(self):
# based on the block #125552 in the mainchain
transactions = [
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"51d37bdd871c9e1f4d5541be67a6ab62"
"5e32028744d7d4609d0c37747b40cd2d"
))),
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"60c25dda8d41f8d3d7d5c6249e2ea1b0"
"5a25bf7ae2ad6d904b512b31f997e1a1"
))),
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"01f314cdd8566d3e5dbdd97de2d9fbfb"
"fd6873e916a00d48758282cbb81a45b9"
))),
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"b519286a1040da6ad83c783eb2872659"
"eaf57b1bec088e614776ffe7dc8f6d01"
)))
]
tree = MerkleTree(transactions)
root = bytes.fromhex(
"2b12fcf1b09288fcaff797d71e950e71ae42b91e8bdb2304758dfcffc2b620e3"
)
self.assertEqual(root, tree.root)
def test_root_odd_transactions(self):
# based on the block #125553 in the mainchain
transactions = [
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"3cfc035221a3d8eb8cdef98330467dea"
"51ee8f75cf0cfa2fcc1bb1e150191e57"
))),
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"446e1c006bbc0d61f7fe4f6a325d468d"
"6dd6016ace9b611370c9854e57aab0ac"
))),
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"d59ad81b484be54d97f693cfe1a5f450"
"1948ffce13f4a558e6feb3713a1eeff6"
))),
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"6d1e61f43ec0eba4804ad74aaeff1e13"
"7bef9bdf57098c352e6e8aeb27b95c6a"
))),
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"068b30fb5b6989bfbe0c0d5e5bca5dd9"
"f9eb100dda21fbac0b16fed436da8f0b"
))),
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"fd48d02e10d6629d385f642879b0dbe6"
"a12551fd011797f82dc80d56216bfcc4"
))),
TestMerkleTree.TransactionMock(to_rpc_byte_order(bytes.fromhex(
"2ec24502228833d687e9036a047f9d33"
"880f34b237a6703c864de55a6df1013a"
)))
]
tree = MerkleTree(transactions)
root = bytes.fromhex(
"53fb6ea244d5f501a22c95c4c56701d70a6e115c5476ed95280cb22149c171b3"
)
self.assertEqual(root, tree.root)
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from .base_urls import *
from django.urls import include, re_path
urlpatterns += [
re_path(r'^support', include('userservice.urls')),
re_path(r'^restclients/', include('rc_django.urls')),
re_path(r'^logging/', include('django_client_logger.urls')),
re_path(r'^', include('myuw.urls')),
]
|
import os
import shutil
import json
import openpyxl
import PyPDF2
import time
import zipfile
token_json = json.load(open("token/token.json", "r"))
def valid_token(token):
return token_json.get(token) is not None
def list_output(token):
os.makedirs(f"output/{token}", exist_ok=True)
return sorted(os.listdir(f"output/{token}"))
def valid_file_name(file_name, token):
return file_name in list_output(token)
def read_excel(token):
test_file = openpyxl.load_workbook(f"input/{token}/test.xlsx", data_only=True)
test_sheet = test_file["test"]
row_count = test_sheet.max_row - 1
if row_count > 5000:
raise RuntimeError(f"row count limit exceed: {row_count} (> 5000)")
return test_sheet, row_count
def export_pdf(token):
output_path = f"output/{token}"
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.makedirs(output_path)
solution = json.load(open(f"data/solution.json", "r"))
test_sheet, _ = read_excel(token)
name_map = {}
test_result = []
for row in test_sheet.iter_rows(min_row=2):
name = row[0].value
uid = row[1].value
tid = row[2].value
answer = [[f"{tid}{i + 1:02d}", row[i + 3].value] for i in range(25)]
name_map[uid] = name
test_result.append([uid, answer])
for uid, test in test_result:
merger = PyPDF2.PdfFileMerger()
tid = test[0][0][:4]
wrong_answer_count = 0
for qid, answer in test:
if solution[qid]["answer"] != answer:
wrong_answer_count += 1
merger.append(solution[qid]["pdf"])
if wrong_answer_count > 0:
timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime(time.time()))
merge_name = f"{timestamp}-{tid}-{name_map[uid]}.pdf"
merger.write(f"{output_path}/{merge_name}")
merger.close()
print(f"pdf exported: {merge_name}")
else:
print(f"all clear: {tid}-{name_map[uid]}")
print("evaluation finished")
def zip_all(token):
target_path = f"output/{token}"
target_zip = zipfile.ZipFile(f"{target_path}/total.zip", "w")
for root, _, files in os.walk(target_path):
for file in files:
if file.endswith(".pdf"):
target_zip.write(
os.path.join(root, file),
file,
compress_type=zipfile.ZIP_DEFLATED
)
target_zip.close()
print("zip finished")
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from torchmetrics import Metric as _Metric
from torchmetrics.collections import MetricCollection as _MetricCollection
from pytorch_lightning.utilities.deprecation import deprecated
from pytorch_lightning.utilities.distributed import rank_zero_warn
class Metric(_Metric):
r"""
.. deprecated::
Use :class:`torchmetrics.Metric`. Will be removed in v1.5.0.
"""
def __init__(
self,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
rank_zero_warn(
"This `Metric` was deprecated since v1.3.0 in favor of `torchmetrics.Metric`."
" It will be removed in v1.5.0", DeprecationWarning
)
super(Metric, self).__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
class MetricCollection(_MetricCollection):
"""
.. deprecated::
Use :class:`torchmetrics.MetricCollection`. Will be removed in v1.5.0.
"""
@deprecated(target=_MetricCollection, ver_deprecate="1.3.0", ver_remove="1.5.0")
def __init__(self, metrics: Union[List[Metric], Tuple[Metric], Dict[str, Metric]]):
pass
|
import copy
import math
import os
import re
colors_pastel = ["#e0f6e7", "#88aee1", "#eddaac", "#95bbef", "#daf4c5", "#cba9d3", "#b5d7a7", "#dec7f5", "#a1c293",
"#e8a7ba", "#72c8b8", "#e1a48e", "#7cd3eb", "#f1c1a6", "#99ceeb", "#c9aa8c", "#b8cff2", "#bbc49a",
"#b9b4dd", "#d7e0b5", "#9db3d6", "#c8f4d6", "#d59e9a", "#b7f3ed", "#eab2ae", "#8dd2d8", "#efc1d7",
"#98c3a6", "#edddf6", "#86bcb1", "#f6d4c9", "#a7dac9", "#bcadc4", "#d7e7cf", "#d8c8e0", "#cebe9b",
"#c7dfee", "#e0bfb4", "#a0c6d1", "#f2e9d6", "#afb9cb", "#c2d2ba", "#efd5dc", "#a6b79f", "#d2b9c0",
"#c6e1db", "#cab5a7", "#97b1ab", "#d5cdbb", "#abc5bf"]
def generate_brat_conf_files(input_dir: str = None):
"""
Generate brat conf files based on an annotated set of documents
Args:
input_dir (str): input filepath
Returns:
None
"""
regex_ann_filename = re.compile(r'.*\.ann')
regex_entity = re.compile(r"T(\d+)\t([^\s]*)\s(\d+\s\d+;?)+\t([^\t]*)")
regex_attribute = re.compile(r'^A(\d+)\t([^\s]+)\sT(\d+)\s(.*)')
regex_relation = re.compile(r'^R(\d+)\t([^\s]+)\sArg1:T(\d+)\sArg2:T(\d+)')
entities_list = set()
attributes_list = {}
relations_list = set()
for root, dirs, files in os.walk(os.path.abspath(input_dir)):
for filename in files:
if regex_ann_filename.match(filename):
with open(os.path.join(root, filename), "r", encoding="UTF-8") as input_file:
for line in input_file:
entity_match = regex_entity.match(line)
if entity_match:
entities_list.add(entity_match.group(2))
attrib_match = regex_attribute.match(line)
if attrib_match:
if attrib_match.group(2) not in attributes_list:
                                attributes_list[attrib_match.group(2)] = {attrib_match.group(4)}
else:
attributes_list[attrib_match.group(2)].add(attrib_match.group(4))
relation_match = regex_relation.match(line)
if relation_match:
relations_list.add(relation_match.group(2))
write_confs(entities_list, attributes_list, relations_list, input_dir)
def get_last_ids(file_path: str = None):
"""
Return last entity, relation, attribute and annotation IDs from a brat document
Args:
file_path (str): brat document filepath
Returns:
"""
regex_entity = re.compile(r'^T(\d+)\t([^\s]+)\s(.*)\t(.*)')
regex_relation = re.compile(r'^R(\d+)\t([^\s]+)\sArg1:T(\d+)\sArg2:T(\d+)')
regex_attribute = re.compile(r'^A(\d+)\t([^\s]+)\sT(\d+)\s(.*)')
regex_annotation = re.compile(r'#(\d+)\tAnnotatorNotes\s(T|R)(\d+)\t(.*)')
last_entity_id = 0
last_att_id = 0
last_relation_id = 0
last_ann_id = 0
with open(file_path, "r", encoding="UTF-8") as input_file:
for line in input_file:
entity_match = regex_entity.match(line)
if entity_match:
if int(entity_match.group(1)) > last_entity_id:
last_entity_id = int(entity_match.group(1))
relation_match = regex_relation.match(line)
if relation_match:
if int(relation_match.group(1)) > last_relation_id:
last_relation_id = int(relation_match.group(1))
attribute_match = regex_attribute.match(line)
if attribute_match:
if int(attribute_match.group(1)) > last_att_id:
last_att_id = int(attribute_match.group(1))
annotation_match = regex_annotation.match(line)
if annotation_match:
if int(annotation_match.group(1)) > last_ann_id:
last_ann_id = int(annotation_match.group(1))
return last_entity_id, last_att_id, last_relation_id, last_ann_id
def parse_ann_file(ann_filename: str = None):
"""
Parse a brat annotation file and return a dictionary of entities and a list of relations.
Args:
ann_filename (str): brat document filepath
Returns:
(dict, dict): entities and relations
"""
regex_entity = re.compile(r"^T(\d+)\t([^\s]+)\s([^\t]+)\t([^\t]*)$")
regex_attribute = re.compile(r"^A(\d+)\t([^\s]+)\sT(\d+)\s(.*)$")
regex_relation = re.compile(r"^R(\d+)\t([^\s]+)\sArg1:T(\d+)\sArg2:T(\d+)$")
entities = dict()
relations = dict()
# Extraction entity annotations (without attributes)
with open(ann_filename, "r", encoding="UTF-8") as input_file:
for line in input_file:
match_entity = regex_entity.match(line)
if match_entity:
brat_id = int(match_entity.group(1))
current_entity = {
"id": brat_id,
"spans": list(),
"is_split": False,
"type": match_entity.group(2),
"text": match_entity.group(4).rstrip("\n"),
"attributes": dict()
}
spans = match_entity.group(3).split(";")
for span in spans:
begin = int(span.split()[0])
end = int(span.split()[1])
current_entity["spans"].append((begin, end))
if len(current_entity["spans"]) == 1:
current_entity["is_split"] = True
entities[brat_id] = current_entity
# Extracting entity attributes
with open(ann_filename, "r", encoding="UTF-8") as input_file:
for line in input_file:
match_attribute = regex_attribute.match(line)
if match_attribute:
if int(match_attribute.group(3)) in entities:
entities[int(match_attribute.group(3))][
'attributes'][match_attribute.group(2)] = match_attribute.group(4)
# Extracting relations
with open(ann_filename, "r", encoding="UTF-8") as input_file:
for line in input_file:
match_relation = regex_relation.match(line)
if match_relation:
relations[int(match_relation.group(1))] = {
"type": match_relation.group(2),
"arg1": int(match_relation.group(3)),
"arg2": int(match_relation.group(4))
}
return entities, relations
def write_confs(entities_list: list = None,
attributes_list: list = None,
relations_list: list = None,
input_dir: str = None):
"""
Write brat configuration files to disk
Args:
entities_list (list): entity list
attributes_list (list): attribute list
relations_list (list): relation list
input_dir (str): brat directory path
Returns:
None
"""
with open(os.path.join(os.path.abspath(input_dir), "annotation.conf"), "w", encoding="UTF-8") as ann_conf:
# Entities
ann_conf.write("[entities]\n")
for entity in entities_list:
ann_conf.write("{0}\n".format(entity))
# Relations
ann_conf.write("[relations]\n")
ann_conf.write("<OVERLAP> Arg1:<ANY>, Arg2:<ANY>, <OVL-TYPE>:<ANY>\n")
for relation in relations_list:
ann_conf.write("{0}\tArg1:<ANY>, Arg2:<ANY>\n".format(relation))
# Events
ann_conf.write("[events]\n")
# Attributes
ann_conf.write("[attributes]\n")
for attribute in attributes_list:
if attribute not in ["LEMMA", "FORM"]:
ann_conf.write("{0}\tArg:<ANY>, Value:".format(attribute))
for x, value in enumerate(attributes_list[attribute]):
if x < len(attributes_list[attribute])-1:
ann_conf.write(value+"|")
else:
ann_conf.write(value+"\n")
with open(os.path.join(os.path.abspath(input_dir), "visual.conf"), "w", encoding="UTF-8") as visu_conf:
visu_conf.write("[labels]\n")
colors_entities = copy.deepcopy(colors_pastel)
colors_relations = copy.deepcopy(colors_pastel)
for entity in entities_list:
visu_conf.write("{0} | {1}\n".format(entity, entity))
for relation in relations_list:
visu_conf.write("{0} | {1}\n".format(relation, relation))
visu_conf.write("[drawing]\n")
for idx, entity in enumerate(entities_list):
bgcolor = colors_entities.pop(0)
match_rgb = re.match("^#(..)(..)(..)$", bgcolor)
rgb = [int(match_rgb.group(1), 16)/255, int(match_rgb.group(2), 16)/255, int(match_rgb.group(3), 16)/255]
for i, item in enumerate(rgb):
if item < 0.03928:
rgb[i] = item / 12.92
else:
rgb[i] = math.pow((item + 0.055)/1.055, 2.4)
lvl = 0.2126 * rgb[0] + 0.7152 * rgb[1] + 0.0722 * rgb[2]
if lvl > 0.179:
fgcolor = "black"
else:
fgcolor = "white"
visu_conf.write("{}\tfgColor:{}, bgColor:{}, borderColor:darken\n".format(
entity,
fgcolor,
bgcolor
))
for attribute in attributes_list:
visu_conf.write("{0}\tposition:left, glyph:".format(attribute))
for i in range(len(attributes_list[attribute])):
if i < len(attributes_list[attribute])-1:
visu_conf.write("*|")
else:
visu_conf.write("*\n")
for relation in relations_list:
visu_conf.write("{}\tcolor:{}, dashArray:3-3, arrowHead:triangle-5\n".format(
relation,
"#000000"
))
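# Hedged usage sketch (not part of the original module): regenerate the brat
# configuration files for an annotated corpus and inspect one document. The
# directory and file names below are hypothetical.
if __name__ == "__main__":
    generate_brat_conf_files(input_dir="corpus/")
    entities, relations = parse_ann_file("corpus/doc_001.ann")
    for brat_id, entity in entities.items():
        print(brat_id, entity["type"], entity["spans"], entity["attributes"])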
|
# @Author: Mikołaj Stępniewski <maikelSoFly>
# @Date: 2017-12-16T10:40:44+01:00
# @Email: mikolaj.stepniewski1@gmail.com
# @Filename: progressBar.py
# @Last modified by: maikelSoFly
# @Last modified time: 2017-12-16T14:12:14+01:00
# @License: Apache License Version 2.0, January 2004
# @Copyright: Copyright © 2017 Mikołaj Stępniewski. All rights reserved.
from math import ceil
from math import floor
import time
class ProgressBar:
def __init__(self, length=50):
self.__dict__['_l'] = length
self.__dict__['_x'] = None
self.__dict__['_t'] = None
self.__dict__['_mod'] = None
self.__dict__['_times'] = None
self.__dict__['_lineChar'] = '_'
self.__dict__['_char'] = '▋'
self.__dict__['_startTime'] = None
self.__dict__['_elapsedTime'] = None
def start(self, maxVal):
self._x = maxVal
self._t = -1
        self._mod = ceil(self._x / self._l)
self._times = 1 if self._x > self._l else floor(self._l/self._x)
self._restTimes = self._l-floor((self._x-1)/self._mod) if self._x > self._l else self._l-(self._x-1)*self._times
print(self._lineChar * self._l, end = '', flush=True)
print()
self._startTime = time.time()
self._elapsedTime = None
def update(self):
self._t += 1
if self._t >= self._x:
raise Exception('ProgressBar:\titerator is out of bounds.')
if self._t != 0 and self._t % self._mod == 0:
print(self._char * self._times, end='', flush=True)
if self._t == self._x-1:
self._elapsedTime = time.time() - self._startTime
print(self._char * self._restTimes, end = '')
print(' DONE\t({:.3f} s)'.format(self._elapsedTime))
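# Hedged usage sketch (not part of the original module) showing the intended
# start/update cycle: call start() with the number of iterations, then call
# update() exactly once per iteration.
if __name__ == '__main__':
    bar = ProgressBar(length=50)
    items = range(200)
    bar.start(len(items))
    for _ in items:
        time.sleep(0.01)  # stand-in for real work
        bar.update()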
|
N, M = map(int, input().split())
if N - M > 0:
if N - M == 1:
print("Dr. Chaz needs 1 more piece of chicken!")
else:
print("Dr. Chaz needs", N-M, "more pieces of chicken!")
elif N - M < 0:
if abs(N - M) == 1:
print("Dr. Chaz will have 1 piece of chicken left over!")
else:
print("Dr. Chaz will have", abs(N-M), "pieces of chicken left over!")
|
import math
rat = 3/7
d_bound = 1000000
n_bound = math.floor(rat*d_bound)
fractions = {}
for i in range(-100,100):
for j in range(-100, 0):
distance = rat - (n_bound+i)/(d_bound+j)
fractions[distance] = str(n_bound+i)
min1 = 100
str1 = ''
for i in fractions:
if i>0 and i<min1:
min1 = i
str1 = fractions[i]
print(str1) # 428570
"""
We know the answer will be a fraction around the 3/7 ratio,
so we just need to search around there.
"""
|
''' Utility Functions for Tests '''
from pgreaper import Table, read_pg
from pgreaper.postgres import get_table_schema
from pgreaper._globals import import_package, SQLIFY_PATH
from pgreaper.config import PG_DEFAULTS
from os import path
import copy
import unittest
import psycopg2
import os
TEST_DIR = os.path.join(os.path.split(SQLIFY_PATH)[:-1][0], 'tests')
DATA_DIR = os.path.join(TEST_DIR, 'data')
CSV_DATA = os.path.join(TEST_DIR, 'csv-data')
JSON_DATA = os.path.join(TEST_DIR, 'json-data')
FAKE_CSV_DATA = os.path.join(CSV_DATA, 'fake_data')
MIMESIS_CSV_DATA = os.path.join(CSV_DATA, 'mimesis_data')
REAL_CSV_DATA = os.path.join(CSV_DATA, 'real_data')
TEST_DB = 'pgreaper_test'
# Flag for testing optional dependencies
if not import_package('pandas'):
TEST_OPTIONAL_DEPENDENCY = False
else:
TEST_OPTIONAL_DEPENDENCY = True
class PostgresTestCase(unittest.TestCase):
'''
Subclass of unittest.TestCase which:
* Drops tables in class attrib drop_tables after all tests have run
    * Supplies each test_* method with a working connection and closes it after execution
* Provides each instance with a deepcopy of whatever is in class attrib data
'''
data = None
drop_tables = []
def setUp(self):
if type(self).data:
self.data = copy.deepcopy(type(self).data)
try:
self.conn = psycopg2.connect(**PG_DEFAULTS(dbname='pgreaper_test'))
except psycopg2.OperationalError:
''' Test database doesn't exist --> Create it '''
with psycopg2.connect(**PG_DEFAULTS) as conn:
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
conn.cursor().execute('CREATE DATABASE pgreaper_test')
self.conn = psycopg2.connect(**PG_DEFAULTS(dbname='pgreaper_test'))
self.cursor = self.conn.cursor()
def tearDown(self):
self.conn.close()
@classmethod
def tearDownClass(cls):
with psycopg2.connect(**PG_DEFAULTS(dbname='pgreaper_test')) as conn:
cursor = conn.cursor()
for t in cls.drop_tables:
cursor.execute('DROP TABLE IF EXISTS {}'.format(t))
conn.commit()
def assertColumnNames(self, table, col_names):
''' Assert that a table has the specified column names '''
schema = get_table_schema(table, conn=self.conn)
self.assertEqual(schema.col_names, col_names)
def assertColumnTypes(self, table, col_types):
''' Assert that a table has the specified column types '''
schema = get_table_schema(table, conn=self.conn)
self.assertEqual(schema.col_types, col_types)
def assertColumnContains(self, table, col_names):
''' Assert that a table has the column names in any order '''
schema = get_table_schema(table, conn=self.conn)
for col in col_names:
self.assertIn(col, schema.col_names)
def assertCount(self, table, n):
''' Assert that a table has n rows '''
row_count = read_pg(
'SELECT count(*) FROM {} as COUNT'.format(table),
conn=self.conn)
self.assertEqual(row_count['count'][0], n)
def world_countries_cols():
return ['Capital', 'Country', 'Currency', 'Demonym', 'Population']
# def world_countries_types():
# return ['text', 'text', 'text', 'text', 'bigint']
def world_countries():
return [["Washington", "USA", "USD", 'American', 324774000],
["Moscow", "Russia", "RUB", 'Russian', 144554993],
["Ottawa", "Canada", "CAD", 'Canadian', 35151728]]
def world_countries_table():
return Table('Countries',
col_names = world_countries_cols(),
row_values = world_countries()
)
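# Hedged usage sketch (not part of the original module): a test case that leans
# on the per-test connection and automatic table cleanup provided by
# PostgresTestCase above. The table name and SQL are illustrative only.
class ExampleUsageTest(PostgresTestCase):
    drop_tables = ['example_usage']
    def test_row_count(self):
        self.cursor.execute('CREATE TABLE IF NOT EXISTS example_usage (name TEXT)')
        self.cursor.execute("INSERT INTO example_usage VALUES ('USA')")
        self.conn.commit()
        self.assertCount('example_usage', 1)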
|
import io
import pytest
from aws_lambda_builders.workflows.python_pip import utils
@pytest.fixture
def osutils():
return utils.OSUtils()
class TestOSUtils(object):
def test_can_read_unicode(self, tmpdir, osutils):
filename = str(tmpdir.join("file.txt"))
        checkmark = u"\u2713"
with io.open(filename, "w", encoding="utf-16") as f:
f.write(checkmark)
content = osutils.get_file_contents(filename, binary=False, encoding="utf-16")
assert content == checkmark
|
from automancy import Radio
class TestRadio(object):
def test_radio_object_can_be_instantiated(self):
test_object = Radio('//div', 'Test Object', 'test_object')
assert test_object.locator == '//div'
assert test_object.name == 'Test Object'
assert test_object.system_name == 'test_object'
|
from __future__ import print_function
import flat
window = flat.Window(800, 600, "hi")
@window.event("on_draw")
def on_draw(window):
flat.gl.glClearColor(0.5, 0.5, 0.5, 1.0)
flat.gl.glClear(flat.gl.GL_COLOR_BUFFER_BIT)
flat.run()
|
"""Remove Nth Node From End of List
Given a linked list, remove the n-th node from the end of list and return its head.
Example:
Given linked list: 1 -> 2 -> 3 -> 4 -> 5, and n = 2.
After removing the second node from the end, the linked list becomes 1 -> 2 -> 3 -> 5
Note:
Given n will always be valid.
Refer https://leetcode.com/problems/remove-nth-node-from-end-of-list
"""
def generate_linked_list(n):
assert n > 1
head = ListNode(n)
node = head
for i in range(n - 1, 0, -1):
next_node = ListNode(i)
node.next = next_node
next_node.prev = node
node = node.next
return head
def check_exist(head, val):
while True:
if head.val == val:
return True
if head.next is not None:
head = head.next
else:
break
return False
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
self.prev = None
class Solution:
""" 单向链表只能遍历一次后才能知道其长度
"""
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
length = 1
node = head
while node.next is not None:
length += 1
node = node.next
if length < n:
return head
location = length - n
if location == 0:
return head.next
i, node = 1, head
while i < location:
node = node.next
i += 1
node.next = node.next.next
return head
if __name__ == '__main__':
cases = [(generate_linked_list(5), 5), (generate_linked_list(5), 6),
(generate_linked_list(5), 3)]
solutions = [Solution]
for case in cases:
for solution in solutions:
assert not check_exist(solution().removeNthFromEnd(case[0], case[1]), case[1])
|
def cmdissue(toissue, activesession):
ssh_stdin, ssh_stdout, ssh_stderr = activesession.exec_command(toissue)
return ssh_stdout.read().decode('UTF-8')
|
from copy import deepcopy
from functools import partial
import random
import torch
from common.optim import ParamOptim
from dqn.algo import get_td_error
from dqn.sampler import Sampler
def tde_to_prior(x, eta=0.9):
return (eta * x.max(0).values + (1 - eta) * x.mean(0)).detach().cpu()
class Learner:
def __init__(self, model, buffer, predictor, cfg):
num_env = cfg["agent"]["actors"]
model_t = deepcopy(model)
model_t = model_t.cuda().eval()
self.model, self.model_t = model, model_t
self.buffer = buffer
self.predictor = predictor
self.optim = ParamOptim(params=model.parameters(), **cfg["optim"])
self.batch_size = cfg["agent"]["batch_size"]
self.unroll = cfg["agent"]["unroll"]
self.unroll_prefix = (
cfg["agent"]["burnin"]
+ cfg["agent"]["n_step"]
+ cfg["agent"]["frame_stack"]
- 1
)
self.sample_steps = self.unroll_prefix + self.unroll
self.hx_shift = cfg["agent"]["frame_stack"] - 1
num_unrolls = (self.buffer.maxlen - self.unroll_prefix) // self.unroll
if cfg["buffer"]["prior_exp"] > 0:
self.sampler = Sampler(
num_env=num_env,
maxlen=num_unrolls,
prior_exp=cfg["buffer"]["prior_exp"],
importance_sampling_exp=cfg["buffer"]["importance_sampling_exp"],
)
self.s2b = torch.empty(num_unrolls, dtype=torch.long)
self.hxs = torch.empty(num_unrolls, num_env, 512, device="cuda")
self.hx_cursor = 0
else:
self.sampler = None
self.target_tau = cfg["agent"]["target_tau"]
self.td_error = partial(get_td_error, model=model, model_t=model_t, cfg=cfg)
def _update_target(self):
for t, s in zip(self.model_t.parameters(), self.model.parameters()):
t.data.copy_(t.data * (1.0 - self.target_tau) + s.data * self.target_tau)
def append(self, step, hx, n_iter):
self.buffer.append(step)
if self.sampler is not None:
if (n_iter + 1) % self.unroll == self.hx_shift:
self.hxs[self.hx_cursor] = hx
self.hx_cursor = (self.hx_cursor + 1) % len(self.hxs)
k = n_iter - self.unroll_prefix
if k > 0 and (k + 1) % self.unroll == 0:
self.s2b[self.sampler.cursor] = self.buffer.cursor - 1
x = self.buffer.get_recent(self.sample_steps)
hx = self.hxs[self.sampler.cursor]
with torch.no_grad():
loss, _ = self.td_error(x, hx)
self.sampler.append(tde_to_prior(loss))
if len(self.sampler) == self.sampler.maxlen:
idx_new = self.s2b[self.sampler.cursor - 1]
idx_old = self.s2b[self.sampler.cursor]
d = (idx_old - idx_new) % self.buffer.maxlen
assert self.unroll_prefix + self.unroll <= d
assert d < self.unroll_prefix + self.unroll * 2
def loss_sampler(self, need_stat):
idx0, idx1, weights = self.sampler.sample(self.batch_size)
weights = weights.cuda()
batch = self.buffer.query(self.s2b[idx0], idx1, self.sample_steps)
hx = self.hxs[idx0, idx1]
loss_pred, ri, _ = self.predictor.get_error(batch, update_stats=True)
batch["reward"][1:] += ri
td_error, log = self.td_error(batch, hx, need_stat=need_stat)
self.sampler.update_prior(idx0, idx1, tde_to_prior(td_error))
loss = td_error.pow(2).sum(0) * weights[..., None]
loss_pred = loss_pred.sum(0) * weights[..., None]
return loss, loss_pred, ri, log
def loss_uniform(self, need_stat):
if len(self.buffer) < self.buffer.maxlen:
no_prev = set(range(self.sample_steps))
else:
no_prev = set(
(self.buffer.cursor + i) % self.buffer.maxlen
for i in range(self.sample_steps)
)
all_idx = list(set(range(len(self.buffer))) - no_prev)
idx0 = torch.tensor(random.choices(all_idx, k=self.batch_size))
idx1 = torch.tensor(
random.choices(range(self.buffer.num_env), k=self.batch_size)
)
batch = self.buffer.query(idx0, idx1, self.sample_steps)
loss_pred, ri, _ = self.predictor.get_error(batch, update_stats=True)
batch["reward"][1:] += ri
td_error, log = self.td_error(batch, None, need_stat=need_stat)
loss = td_error.pow(2).sum(0)
loss_pred = loss_pred.sum(0)
return loss, loss_pred, ri, log
def train(self, need_stat=True):
loss_f = self.loss_uniform if self.sampler is None else self.loss_sampler
loss, loss_pred, ri, log = loss_f(need_stat)
self.optim.step(loss.mean())
self.predictor.optim.step(loss_pred.mean())
self._update_target()
if need_stat:
log.update(
{
"ri_std": ri.std(),
"ri_mean": ri.mean(),
"ri_run_mean": self.predictor.ri_mean,
"ri_run_std": self.predictor.ri_std,
"loss_predictor": loss_pred.mean(),
}
)
if self.sampler is not None:
log.update(self.sampler.stats())
return log
|
import datetime
from collections import defaultdict
from logs.constant import OrderLogType, MAP_NO_OPERATOR_ORDER_TYPE, OperateLogModule
from logs.models import OrderLog, OperateLogUnify, ConfigLog, PromotionLog, ProductLog, LogBaseModel, StaffLog
def get_all_module_dict():
module_dict = {}
for k, v in vars(OperateLogModule).items():
if not k.startswith("_"):
module_dict[k] = v
return module_dict
def _create_operate_log_unify(log: LogBaseModel):
"""
    Create an operation record in the unified operation log table
:param log:
:return:
"""
log_info = {
"shop_id": log.shop_id,
"operator_id": log.operator.id,
"operate_time": log.operate_time,
"operate_module": log.operate_module,
"log_id": log.id,
}
operate_log = OperateLogUnify(**log_info)
operate_log.save()
def create_order_log(log_info: dict):
"""
    Create an order operation log entry
    :param log_info: {
        "order_id": 1,
        "order_num": "xxxx",
"shop_id": 1,
"operator_id": 1,
"operate_type": 1,
"operate_content": ""
}
:return:
"""
order_log = OrderLog(**log_info)
order_log.save()
_create_operate_log_unify(order_log)
return order_log
def create_config_log(log_info: dict):
"""
    Create a settings (config) module operation log entry
:param log_info: {
"shop_id": shop_id,
"operator_id": user_id,
"operate_type": ConfigLogType.SHOP_NAME,
"operate_content": ""
}
:return:
"""
config_log = ConfigLog(**log_info)
config_log.save()
_create_operate_log_unify(config_log)
return config_log
def create_promotion_log(log_info: dict):
"""
    Create a promotion log entry
:param log_info: {
"shop_id": shop_id,
"operator_id": user_id,
"operate_type": PromotionLogType.ADD_GROUPON,
"operate_content": groupon_name
}
:return:
"""
promotion_log = PromotionLog(**log_info)
promotion_log.save()
_create_operate_log_unify(promotion_log)
return promotion_log
def create_product_log(log_info: dict):
"""
    Create a product module operation log entry
:param log_info: {
"shop_id": shop_id,
"operator_id": user_id,
"operate_type": ProductLogType.ADD_PRODUCT,
"operate_content": ""
}
:return:
"""
product_log = ProductLog(**log_info)
product_log.save()
_create_operate_log_unify(product_log)
return product_log
def create_staff_log(log_info: dict):
"""
    Create a staff operation log entry
:param log_info: {
"shop_id": shop_id,
"operator_id": user_id,
"operate_type": StaffLogType.ADD_STAFF,
"staff_id": staff_id,
"operate_content": ""
}
:return:
"""
staff_log = StaffLog(**log_info)
staff_log.save()
_create_operate_log_unify(staff_log)
return staff_log
def get_order_log_time_by_order_num(order_num: str):
"""
    Get an order's delivery start time (order confirmation time) or delivery completion time (order finish time) from the operation log by order number
:param order_num:
:return:
"""
order_log = OrderLog.objects.filter(
order_num=order_num,
operate_type__in=[
OrderLogType.DIRECT, OrderLogType.CONFIRM, OrderLogType.FINISH
]
).order_by("-operate_time").first()
return order_log.operate_time
def list_order_log_by_shop_id_and_order_num(shop_id: int, order_num: str):
"""
    Get an order's operation log entries by order number, scoped to a shop ID
:param shop_id:
:param order_num:
:return:
"""
log_list = (
OrderLog.objects.filter(shop_id=shop_id, order_num=order_num)
.order_by("-operate_time")
.all()
)
for log in log_list:
        # For automatic operations, the operator id is 0
if not log.operator_id:
operate_type = MAP_NO_OPERATOR_ORDER_TYPE[log.operate_type]
log.operate_type = operate_type
return log_list
def list_one_module_log_by_ids(module_id: int, log_ids: list):
"""
    Query one type of module log by a list of IDs
:param module_id:
:param log_ids:
:return:
"""
Model = OperateLogUnify.get_operate_log_model(module_id)
log_list = Model.objects.filter(id__in=log_ids).order_by("id").all()
return log_list
def list_one_module_log_by_filter(
shop_id: int,
module_id: int,
operator_ids: list,
from_date: datetime,
end_date: datetime,
):
"""
    Query the operation logs for a single module
:param shop_id:
:param module_id:
:param operator_ids:
:param from_date:
:param end_date:
:return:
"""
Model = OperateLogUnify.get_operate_log_model(module_id)
log_list_query = Model.objects.filter(shop_id=shop_id, operate_time__range=[from_date, end_date])
if operator_ids:
log_list_query = log_list_query.filter(operator_id__in=operator_ids)
log_list_query = log_list_query.order_by("-operate_time")
log_list = log_list_query.all()
return log_list
def dict_log_ids_from_operate_log_unify_by_filter(
shop_id: int,
module_ids: list,
operator_ids: list,
from_date: datetime,
end_date: datetime,
):
"""
    Query the IDs of all matching logs in the unified table
:param shop_id:
:param module_ids:
:param operator_ids:
:param from_date:
:param end_date:
:return:
"""
unify_log_list_query = (
OperateLogUnify.objects.filter(shop_id=shop_id, operate_time__range=[from_date, end_date])
.exclude(operator_id=0)
)
if module_ids:
unify_log_list_query = unify_log_list_query.filter(
operate_module__in=module_ids
)
if operator_ids:
unify_log_list_query = unify_log_list_query.filter(
operator_id__in=operator_ids
)
unify_log_list_query = unify_log_list_query.order_by("-operate_time")
unify_log_list = unify_log_list_query.all()
unify_log_dict = defaultdict(list)
for log in unify_log_list:
unify_log_dict[log.operate_module].append(log.log_id)
return unify_log_dict
def dict_more_modules_log_by_filter(
shop_id: int,
module_ids: list,
operator_ids: list,
from_date: datetime,
end_date: datetime,
):
"""
    Query the operation logs for multiple modules
:param shop_id:
:param module_ids:
:param operator_ids:
:param from_date:
:param end_date:
:return:
"""
log_type_2_ids_dict = dict_log_ids_from_operate_log_unify_by_filter(
shop_id, module_ids, operator_ids, from_date, end_date
)
log_type_2_log_list_dict = defaultdict(list)
for k, v in log_type_2_ids_dict.items():
log_list = list_one_module_log_by_ids(k, v)
log_type_2_log_list_dict[k] = log_list
return log_type_2_log_list_dict
|
# -*- coding: utf-8 -*-
#
# This file is part of pywebmachine released under the MIT license.
# See the NOTICE for more information.
import datetime
import types
import webob
import webob.exc
def b03(res, req, rsp):
"Options?"
if req.method == 'OPTIONS':
for (header, value) in res.options(req, rsp):
rsp.headers[header] = value
return True
return False
def b04(res, req, rsp):
"Request entity too large?"
return not res.valid_entity_length(req, rsp)
def b05(res, req, rsp):
"Unknown Content-Type?"
return not res.known_content_type(req, rsp)
def b06(res, req, rsp):
"Unknown or unsupported Content-* header?"
return not res.valid_content_headers(req, rsp)
def b07(res, req, rsp):
"Forbidden?"
return res.forbidden(req, rsp)
def b08(res, req, rsp):
"Authorized?"
resp = res.is_authorized(req, rsp)
if resp is True:
return True
elif isinstance(resp, basestring):
rsp.headers["WWW-Authenticate"] = resp
return False
def b09(res, req, rsp):
"Malformed?"
return res.malformed_request(req, rsp)
def b10(res, req, rsp):
"Is method allowed?"
if req.method in res.allowed_methods(req, rsp):
return True
rsp.allowed = res.allowed_methods(req, rsp)
return False
def b11(res, req, rsp):
"URI too long?"
return res.uri_too_long(req, rsp)
def b12(res, req, rsp):
"Known method?"
return req.method in res.known_methods(req, rsp)
def b13(res, req, rsp):
"Service available?"
return res.ping(req, rsp) and res.service_available(req, rsp)
def c03(res, req, rsp):
"Accept exists?"
return "accept" in req.headers
def c04(res, req, rsp):
"Acceptable media type available?"
ctypes = [ctype for (ctype, func) in res.content_types_provided(req, rsp)]
ctype = req.accept.best_match(ctypes)
if ctype is None:
return False
rsp.content_type = ctype
return True
def d04(res, req, rsp):
"Accept-Language exists?"
return "accept-language" in req.headers
def d05(res, req, rsp):
"Accept-Language available?"
langs = res.languages_provided(req, rsp)
if langs is not None:
lang = req.accept_language.best_match(langs)
if lang is None:
return False
rsp.content_language = lang
return True
def e05(res, req, rsp):
"Accept-Charset exists?"
return "accept-charset" in req.headers
def e06(res, req, rsp):
"Acceptable charset available?"
charsets = res.charsets_provided(req, rsp)
if charsets is not None:
charset = req.accept_charset.best_match(charsets)
if charset is None:
return False
rsp.charset = charset
return True
def f06(res, req, rsp):
"Accept-Encoding exists?"
return "accept-encoding" in req.headers
def f07(res, req, rsp):
"Acceptable encoding available?"
encodings = res.encodings_provided(req, rsp)
if encodings is not None:
encodings = [enc for (enc, func) in encodings]
enc = req.accept_encoding.best_match(encodings)
if enc is None:
return False
rsp.content_encoding = enc
return True
def g07(res, req, rsp):
"Resource exists?"
# Set variances now that conneg is done
hdr = []
if len(res.content_types_provided(req, rsp) or []) > 1:
hdr.append("Accept")
if len(res.charsets_provided(req, rsp) or []) > 1:
hdr.append("Accept-Charset")
if len(res.encodings_provided(req, rsp) or []) > 1:
hdr.append("Accept-Encoding")
if len(res.languages_provided(req, rsp) or []) > 1:
hdr.append("Accept-Language")
hdr.extend(res.variances(req, rsp))
rsp.vary = hdr
return res.resource_exists(req, rsp)
def g08(res, req, rsp):
"If-Match exists?"
return "if-match" in req.headers
def g09(res, req, rsp):
"If-Match: * exists?"
return '*' in req.if_match
def g11(res, req, rsp):
"Etag in If-Match?"
return res.generate_etag(req, rsp) in req.if_match
def h07(res, req, rsp):
"If-Match: * exists?"
    # Need to recheck that If-Match was an actual header
    # because WebOb treats a missing If-Match header as matching '*'.
return 'if-match' in req.headers and '*' in req.if_match
def h10(res, req, rsp):
"If-Unmodified-Since exists?"
return "if-unmodified-since" in req.headers
def h11(res, req, rsp):
"If-Unmodified-Since is a valid date?"
return req.if_unmodified_since is not None
def h12(res, req, rsp):
"Last-Modified > If-Unmodified-Since?"
rsp.last_modified = res.last_modified(req, rsp)
print rsp.headers
return rsp.last_modified > req.if_unmodified_since
def i04(res, req, rsp):
"Apply to a different URI?"
uri = res.moved_permanently(req, rsp)
if not uri:
return False
rsp.location = uri
return True
def i07(res, req, rsp):
"PUT?"
return req.method == "PUT"
def i12(res, req, rsp):
"If-None-Match exists?"
return "if-none-match" in req.headers
def i13(res, req, rsp):
"If-None-Match: * exists?"
return '*' in req.if_none_match
def j18(res, req, rsp):
"GET/HEAD?"
return req.method in ["GET", "HEAD"]
def k05(res, req, rsp):
"Resource moved permanently?"
uri = res.moved_permanently(req, rsp)
if not uri:
return False
rsp.location = uri
return True
def k07(res, req, rsp):
"Resource previously existed?"
return res.previously_existed(req, rsp)
def k13(res, req, rsp):
"Etag in If-None-Match?"
rsp.etag = res.generate_etag(req, rsp)
return rsp.etag in req.if_none_match
def l05(res, req, rsp):
"Resource moved temporarily?"
uri = res.moved_temporarily(req, rsp)
if not uri:
return False
rsp.location = uri
return True
def l07(res, req, rsp):
"POST?"
return req.method == "POST"
def l13(res, req, rsp):
"If-Modified-Since exists?"
return "if-modified-since" in req.headers
def l14(res, req, rsp):
"If-Modified-Since is a valid date?"
return req.if_modified_since is not None
def l15(res, req, rsp):
"If-Modified-Since > Now?"
return req.if_modified_since > datetime.datetime.now(webob.UTC)
def l17(res, req, rsp):
"Last-Modified > If-Modified-Since?"
rsp.last_modified = res.last_modified(req, rsp)
return rsp.last_modified > req.if_modified_since
def m05(res, req, rsp):
"POST?"
return req.method == "POST"
def m07(res, req, rsp):
"Server permits POST to missing resource?"
return res.allow_missing_post(req, rsp)
def m16(res, req, rsp):
"DELETE?"
return req.method == "DELETE"
def m20(res, req, resp):
"""Delete enacted immediayly?
Also where DELETE is forced."""
return res.delete_resource(req, resp)
def m20b(res, req, resp):
""" Delete completed """
return res.delete_completed(req, resp)
def n05(res, req, rsp):
"Server permits POST to missing resource?"
return res.allow_missing_post(req, rsp)
def n11(res, req, rsp):
"Redirect?"
if res.post_is_create(req, rsp):
handle_request_body(res, req, rsp)
else:
if not res.process_post(req, rsp):
raise webob.exc.HTTPInternalServerError("Failed to process POST.")
return False
rsp.location = res.created_location(req, rsp)
if rsp.location:
return True
return False
def n16(res, req, rsp):
"POST?"
return req.method == "POST"
def o14(res, req, rsp):
"Is conflict?"
return res.is_conflict(req, rsp)
def o16(res, req, rsp):
"PUT?"
return req.method == "PUT"
def o18(res, req, rsp):
"Multiple representations? (Build GET/HEAD body)"
if req.method not in ["GET", "HEAD"]:
return res.multiple_choices(req, rsp)
handle_response_body(res, req, rsp)
return res.multiple_choices(req, rsp)
def o20(res, req, rsp):
"Response includes entity?"
return bool(rsp.body)
def p03(res, req, rsp):
"Conflict?"
if res.is_conflict(req, rsp):
return True
handle_request_body(res, req, rsp)
return False
def p11(res, req, rsp):
"New resource?"
return rsp.location is not None
def first_match(func, req, rsp, expect):
for (key, value) in func(req, rsp):
if key == expect:
return value
return None
def handle_request_body(res, req, rsp):
ctype = req.content_type or "application/octet-stream"
mtype = ctype.split(";", 1)[0]
func = first_match(res.content_types_accepted, req, rsp, mtype)
if func is None:
raise webob.exc.HTTPUnsupportedMediaType()
return func(req, rsp)
def handle_response_body(res, req, rsp):
rsp.etag = res.generate_etag(req, rsp)
rsp.last_modified = res.last_modified(req, rsp)
rsp.expires = res.expires(req, rsp)
# Generate the body
func = first_match(res.content_types_provided, req, rsp, rsp.content_type)
if func is None:
raise webob.exc.HTTPInternalServerError()
body = func(req, rsp)
# If we're using a charset, make sure to use unicode_body.
if rsp.charset:
rsp.unicode_body = unicode(body)
else:
rsp.body = body
# Handle our content encoding.
encoding = rsp.content_encoding
if encoding:
func = first_match(res.encodings_provided, req, rsp, encoding)
if func is None:
raise webob.exc.HTTPInternalServerError()
rsp.body = func(rsp.body)
TRANSITIONS = {
b03: (200, c03), # Options?
b04: (413, b03), # Request entity too large?
b05: (415, b04), # Unknown Content-Type?
b06: (501, b05), # Unknown or unsupported Content-* header?
b07: (403, b06), # Forbidden?
b08: (b07, 401), # Authorized?
b09: (400, b08), # Malformed?
b10: (b09, 405), # Is method allowed?
b11: (414, b10), # URI too long?
b12: (b11, 501), # Known method?
b13: (b12, 503), # Service available?
c03: (c04, d04), # Accept exists?
c04: (d04, 406), # Acceptable media type available?
d04: (d05, e05), # Accept-Language exists?
d05: (e05, 406), # Accept-Language available?
e05: (e06, f06), # Accept-Charset exists?
e06: (f06, 406), # Acceptable charset available?
f06: (f07, g07), # Accept-Encoding exists?
f07: (g07, 406), # Acceptable encoding available?
g07: (g08, h07), # Resource exists?
g08: (g09, h10), # If-Match exists?
g09: (h10, g11), # If-Match: * exists?
g11: (h10, 412), # Etag in If-Match?
h07: (412, i07), # If-Match: * exists?
h10: (h11, i12), # If-Unmodified-Since exists?
h11: (h12, i12), # If-Unmodified-Since is valid date?
h12: (412, i12), # Last-Modified > If-Unmodified-Since?
i04: (301, p03), # Apply to a different URI?
i07: (i04, k07), # PUT?
i12: (i13, l13), # If-None-Match exists?
i13: (j18, k13), # If-None-Match: * exists?
j18: (304, 412), # GET/HEAD?
k05: (301, l05), # Resource moved permanently?
k07: (k05, l07), # Resource previously existed?
k13: (j18, l13), # Etag in If-None-Match?
l05: (307, m05), # Resource moved temporarily?
l07: (m07, 404), # POST?
l13: (l14, m16), # If-Modified-Since exists?
l14: (l15, m16), # If-Modified-Since is valid date?
l15: (m16, l17), # If-Modified-Since > Now?
l17: (m16, 304), # Last-Modified > If-Modified-Since?
m05: (n05, 410), # POST?
m07: (n11, 404), # Server permits POST to missing resource?
m16: (m20, n16), # DELETE?
m20: (m20b, 500), # DELETE enacted immediately?
    m20b: (o20, 202), # Delete completed?
n05: (n11, 410), # Server permits POST to missing resource?
n11: (303, p11), # Redirect?
n16: (n11, o16), # POST?
o14: (409, p11), # Conflict?
o16: (o14, o18), # PUT?
o18: (300, 200), # Multiple representations?
o20: (o18, 204), # Response includes entity?
p03: (409, p11), # Conflict?
p11: (201, o20) # New resource?
}
def process(klass, req, rsp):
res = klass(req, rsp)
# Setup some defaults
rsp.charset = None
ctypes = [ct for (ct, func) in (res.content_types_provided(req, rsp) or [])]
if len(ctypes):
rsp.content_type = ctypes[0]
state = b13
while not isinstance(state, int):
#print state
if state(res, req, rsp):
state = TRANSITIONS[state][0]
else:
state = TRANSITIONS[state][1]
if not isinstance(state, (int, types.FunctionType)):
raise webob.exc.HTTPServerError("Invalid state: %r" % state)
rsp.status = state
return rsp
|
from json import JSONDecodeError
import typing
from starlette.requests import Request as _Request, ClientDisconnect as _CD
ClientDisconnect = _CD
class Request(_Request):
"""The request object, passed to HTTP views and typically named `req`.
    This is a subclass of [`starlette.requests.Request`][starlette-request]. As a result, all methods and attributes on Starlette's `Request` are available on this class. Additional or redefined members are documented here.
For usage tips, see [Requests (Guide)](/guide/requests.md).
[starlette-request]: https://www.starlette.io/requests/
# Methods
`__aiter__`:
        shortcut for `.stream()`. Allows processing the request body in
byte chunks using `async for chunk in req: ...`.
"""
async def json(self) -> typing.Any:
"""Parse the request body as JSON.
# Returns
json (dict): the result of `json.loads(await self.body())`.
# Raises
HTTPError(400): if the JSON is malformed.
"""
try:
return await super().json()
except JSONDecodeError:
from .errors import HTTPError # prevent circular imports
raise HTTPError(400, detail="JSON is malformed.")
async def __aiter__(self) -> typing.AsyncGenerator[bytes, None]:
async for chunk in self.stream():
yield chunk
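async def read_body_size(req: Request) -> int:
    """Illustrative helper sketch (an assumption, not part of the public API):
    consume the request body chunk by chunk via ``async for`` and return the total
    number of bytes received, without buffering the whole body in memory.
    """
    total = 0
    async for chunk in req:
        total += len(chunk)
    return total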
|
from shared.args import arg_help, base_parser
def arguments(name):
parser = base_parser(name)
parser.add_argument(
'-u', '--unfollow', default=0, type=int, dest='num_unfollow',
help=arg_help('how many accounts to unfollow at once (0 = unlimited)')
)
parser.add_argument(
'-f', '--follow', default=9, type=int, dest='num_tofollow',
help=arg_help('how many accounts to follow at once (0 = unlimited)')
)
parser.add_argument(
'-n', '--name', default=None, type=str, dest='account_name',
help=arg_help('follow up to --follow accounts from @name followers')
)
return parser.parse_args()
|
import mock
import pytest
from api.domain import user
from api.db import psql
from api.config import cfg
def test_where_sql():
sql = "SELECT id, username, email, contactid, date_joined FROM auth_user"
assert sql == user.where_sql()
sql = ("SELECT id, username, email, contactid, date_joined FROM auth_user "
"WHERE username = %(username)s")
assert sql == user.where_sql({'username': 'davyjones'}, 'auth_user')
@mock.patch.dict('os.environ', {'ESPA_API_CONFIG_PATH': './run/config.ini'})
@pytest.fixture(scope='module')
def db():
db = psql.connection(**cfg.get('db', lower=True))
db.query("set search_path = espa_unit_test;")
return db
@pytest.fixture
def user1():
return {"username": "dave", "email": "y@xyz.com", "contactid": "1"}
def test_insert_user(user1):
sql, params = user.insert_sql(user1)
assert set(params) - set(user1) == {'date_joined', 'last_login'}
assert sql.startswith('INSERT INTO auth_user')
assert 'ON CONFLICT (username) DO UPDATE SET' in sql
assert sql.endswith('RETURNING (id)')
@pytest.mark.integration
def test_db_make_user(db, user1):
user_id = user.insert(user1, db.query)
assert isinstance(user_id[0]['id'], int)
filters = {'id': user_id[0]['id']}
user2 = user.where(filters, db.query)
assert {'contactid', 'username', 'email', 'id', 'last_login', 'date_joined'} == set(user2[0])
|
def add_facebook_link(strategy, details, user=None, backend=None, is_new=None, *args, **kwargs):
if is_new and backend.name == 'facebook':
user.facebook = 'http://www.facebook.com/{}'.format(kwargs['uid'])
user.save()
|
#Problem : 2016 Qualifiers - Uncle's Super Birthday Party
#Language : Python 3
#Compiled Using : py_compile
#Version : Python 3.4.3
#Input for your program will be provided from STDIN
#Print out all output from your program to STDOUT
import sys
data = sys.stdin.read().splitlines()
n = int(data[0])
segs = []
for i in range(n-1):
x,y = data[i+1].split()
x2,y2 = data[i+2].split()
    segs.append((int(x), int(y), int(x2), int(y2)))
|
import argparse
from hyperstyle.src.python.review.common.file_system import Extension
from analysis.src.python.evaluation.qodana.imitation_model.common.util import ModelCommonArgument
from evaluation.common.util import AnalysisExtension
def configure_arguments(parser: argparse.ArgumentParser) -> None:
parser.add_argument('test_dataset_path',
type=str,
help='Path to the dataset received by either'
f' src.python.evaluation.qodana.fragment_to_inspections_list{Extension.PY.value}'
                             ' or src.python.evaluation.qodana.fragment_to_inspections_list_line_by_line'
                             f'{Extension.PY.value} script.')
parser.add_argument('model_weights_directory_path',
type=str,
help='Path to the directory where trained imitation_model weights are stored.')
parser.add_argument('-o', '--output_directory_path',
default=None,
type=str,
help='Path to the directory where labeled dataset will be saved. Default is the parent folder'
'of test_dataset_path.')
parser.add_argument('-sf', '--save_f1_score',
default=None,
action="store_true",
                        help='If enabled, a report with F1 scores by class will be '
                             f'saved as a {AnalysisExtension.CSV.value} file'
                             ' in the parent directory of the labeled dataset. Default is False.')
parser.add_argument(ModelCommonArgument.CONTEXT_LENGTH.value.short_name,
ModelCommonArgument.CONTEXT_LENGTH.value.long_name,
type=int,
default=40,
help=ModelCommonArgument.CONTEXT_LENGTH.value.description)
parser.add_argument(ModelCommonArgument.BATCH_SIZE.value.short_name,
ModelCommonArgument.BATCH_SIZE.value.long_name,
type=int,
default=8,
help=ModelCommonArgument.BATCH_SIZE.value.description)
parser.add_argument(ModelCommonArgument.THRESHOLD.value.short_name,
ModelCommonArgument.THRESHOLD.value.long_name,
type=float,
default=0.5,
help=ModelCommonArgument.THRESHOLD.value.description)
|
# Python modules
import time
import datetime
# 3rd party modules
# Our modules
# DateTime objects have a .strftime() method that accepts a format string.
# Below are some format strings for you to use. Please consider using one
# of these rather than creating a new format so that Vespa is at least
# somewhat consistent about date formatting.
# To use, pass the timestamp format constant to .strftime(), e.g. --
# print my_datetime_instance.strftime(util_time.DISPLAY_DATE_FORMAT)
#
# Note that DateTime objects have a method called .isoformat() which returns
# a string in ISO 8601 format. You can get similar results by using the
# ISO_TIMESTAMP_FORMAT constant below. e.g. if you already have a datetime
# object --
# print my_datetime_instance.isoformat()
# is similar to --
# print my_datetime_instance.strftime(util_time.ISO_TIMESTAMP_FORMAT)
# If you want the current time in ISO format --
# print util_time.now().isoformat()
# or --
# print util_time.now(util_time.ISO_TIMESTAMP_FORMAT)
#
# You can use whichever syntax you prefer. Be aware that .isoformat() includes
# microsecond & time zone info if available which may not be what you want.
ISO_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S"
ISO_DATE_FORMAT = "%Y-%m-%d"
# DISPLAY_DATE_FORMAT is a human-friendly date format. e.g. 21 April, 2010
# It spells out the month so there's no confusion over mm/dd/yy versus
# dd/mm/yy and so forth.
DISPLAY_DATE_FORMAT = "%d %B, %Y"
# DISPLAY_TIMESTAMP_FORMAT is the human-friendly date plus the "locale's
# appropriate time representation".
DISPLAY_TIMESTAMP_FORMAT = "%d %B, %Y %X"
# CLONE_TIMESTAMP_FORMAT is the human-friendly date plus the "locale's
# appropriate time representation" usable as the name of a cloned object
# (experiment, pulse project, metabolite, etc.)
CLONE_TIMESTAMP_FORMAT = "%Y-%m-%dT%H_%M_%S"
def now(format=None):
"""Returns the current local date & time.
By default, the function returns a datetime object. If the format
parameter is a string appropriate for time.strftime(), the function
returns the current time as a string formatted per the format string.
If a datetime object is returned, resolution is accurate only to one
second. (That is, the object's .microsecond attribute is always 0.)
This is by design. If you need sub-second accuracy, then this will
return a datetime object with an accurate microsecond attribute:
current = util_time.now().now()
"""
if format:
return time.strftime(format, time.localtime())
else:
return datetime.datetime(*time.localtime()[:6])
def datetime_from_iso(iso_timestamp):
"""Given an ISO timestamp string, returns an equivalent datetime object.
The string must include a date and time. Microseconds are optional. If
present, they're present in the datetime object.
Examples --
>>> util_time.datetime_from_iso("2010-06-11T16:16:24.387335")
datetime.datetime(2010, 6, 11, 16, 16, 24, 387335)
>>> util_time.datetime_from_iso("2010-06-11T16:16:24")
datetime.datetime(2010, 6, 11, 16, 16, 24)
>>>
"""
# Figure out whether or not usecs are present.
i = iso_timestamp.rfind(".")
if i == -1:
microseconds = 0
else:
# Extract microsecond info and remove it from the string because
# microseconds confuse & upset time.strptime().
microseconds = int(iso_timestamp[i + 1:])
iso_timestamp = iso_timestamp[:i]
params = time.strptime(iso_timestamp, ISO_TIMESTAMP_FORMAT)[:6]
params = list(params) + [microseconds]
return datetime.datetime(*params)
def filename_timestamp():
"""Returns a timestamp appropriate for inclusion as part of a filename.
The timestamp includes microseconds, and so subsequent calls to this
function are guaranteed to return different filenames.
"""
# FILENAME_TIMESTAMP_FORMAT is hidden inside this function because it
# can't be used on its own to create unique filenames due to its lack of
# sub-second information. It's easy enough to create multiple files in
# one second, so without sub-second information appended the returned
# filenames are not guaranteed to be different.
FILENAME_TIMESTAMP_FORMAT = "%Y%m%d.%H%M%S"
current = now().now()
# Python's strftime relies on the underlying C library's strftime and
# so can only guarantee the existence of the strftime formatting codes
# that are guaranteed by C89. Unfortunately, this doesn't include
# any sub-second values so I have to tack the microseconds on myself.
s = current.strftime(FILENAME_TIMESTAMP_FORMAT)
return s + ".%d" % current.microsecond
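if __name__ == "__main__":
    # Minimal self-check sketch (illustrative only): round-trip the current time
    # through the ISO helpers defined above and print a filename-safe timestamp.
    stamp = now().isoformat()
    print(stamp)
    print(datetime_from_iso(stamp))
    print(filename_timestamp())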
|
from datetime import datetime, timedelta
from typing import Optional, Union
from lineage.bigquery_query import BigQueryQuery
from exceptions.exceptions import SerializationError
from lineage.query import Query
from lineage.query_history_stats import QueryHistoryStats
from lineage.snowflake_query import SnowflakeQuery
from utils.env_vars import is_flight_mode_on
import json
import os
class QueryHistory(object):
INFORMATION_SCHEMA_QUERY_HISTORY = None
PLATFORM_TYPE = None
SUCCESS_QUERIES_FILE = './latest_query_history.json'
FAILED_QUERIES_FILE = './failed_queries.json'
def __init__(self, con, dbs: str, should_export_query_history: bool = True, full_table_names: bool = True) -> None:
self._con = con
self._dbs = self.str_to_list(dbs)
self._should_export_query_history = should_export_query_history
self._full_table_names = full_table_names
self._query_history_stats = QueryHistoryStats()
self.success_queries = []
self.failed_queries = []
@staticmethod
def str_to_list(dbs_str: str) -> Union[list, None]:
if dbs_str is None:
return None
return [db.strip() for db in dbs_str.split(',')]
@staticmethod
def _normalize_database_name(db: str) -> str:
return db.lower().replace('-', '_').replace(' ', '').replace('`', '').replace('"', '')
@staticmethod
def serialize_queries_to_file(filename: str, queries: [Query]) -> None:
with open(filename, 'w') as queries_file:
serialized_queries = []
for query in queries:
serialized_queries.append(query.to_dict())
json.dump(serialized_queries, queries_file)
def _serialize_query_history(self) -> None:
if self._should_export_query_history:
self.serialize_queries_to_file(self.SUCCESS_QUERIES_FILE, self.success_queries)
self.serialize_queries_to_file(self.FAILED_QUERIES_FILE, self.failed_queries)
def _deserialize_query_history(self) -> [Query]:
if os.path.exists(self.SUCCESS_QUERIES_FILE):
with open(self.SUCCESS_QUERIES_FILE, 'r') as query_history_file:
queries = json.load(query_history_file)
for query_dict in queries:
platform_type = query_dict.pop('platform_type')
if platform_type == SnowflakeQuery.PLATFORM_TYPE:
query = SnowflakeQuery.from_dict(query_dict)
elif platform_type == BigQueryQuery.PLATFORM_TYPE:
query = BigQueryQuery.from_dict(query_dict)
else:
raise SerializationError(f'Invalid platform type - {platform_type}')
self.add_query(query)
@staticmethod
def _include_end_date(end_date: datetime) -> Optional[datetime]:
if end_date is not None and (end_date.hour, end_date.minute, end_date.second) == (0, 0, 0):
return end_date + timedelta(hours=23, minutes=59, seconds=59)
return end_date
def add_query(self, query: Query):
if query.parse(self._full_table_names):
self.success_queries.append(query)
else:
self.failed_queries.append(query)
self._query_history_stats.update_stats(query.query_context)
def extract_queries(self, start_date: datetime, end_date: datetime) -> [Query]:
if is_flight_mode_on():
self._deserialize_query_history()
else:
self._query_history_table(start_date, end_date)
self._serialize_query_history()
return self.success_queries
def _query_history_table(self, start_date: datetime, end_date: datetime) -> [Query]:
pass
def properties(self) -> dict:
failed_queries_count = len(self.failed_queries)
success_queries_count = len(self.success_queries)
queries_count = success_queries_count + failed_queries_count
query_history_properties = {'query_history_properties': {'failed_queries': failed_queries_count,
'success_queries': success_queries_count,
'queries_count': queries_count,
'platform_type': self.PLATFORM_TYPE}}
query_history_properties.update({'query_stats': self._query_history_stats.to_dict()})
return query_history_properties
|
import logging
import random
import traceback
import warnings
from typing import List
import func_utils
import Pages
from selenium.webdriver.remote.remote_connection import LOGGER
warnings.filterwarnings("ignore")
LOGGER.setLevel(logging.WARNING)
def main() -> None:
"""Start the script."""
profiles_visited = func_utils.get_profiles_visited()
profiles_not_to_visit = func_utils.get_profiles_blacklist()
email, password = func_utils.get_credentials()
with func_utils.get_webdriver() as wd:
Pages.LoginPage(wd).login(email, password)
profiles_to_visit = Pages.RecommendationPage(wd).collect_profiles_to_visit(
number_of_profiles=func_utils.get_n_profile_visits(),
profiles_not_to_visit=set(profiles_not_to_visit + profiles_visited),
mandatory_role_words=func_utils.get_job_titles(),
role_blacklist=func_utils.get_headlines_blacklist(),
)
profiles_to_connect = random.sample(
profiles_to_visit, k=func_utils.get_n_connects()
)
profiles_visited_now: List[str] = []
try:
for profile_visited in Pages.ProfilePage(wd).iterate_profiles_list(
profiles_to_visit, profiles_to_connect
):
profiles_visited_now.append(profile_visited)
except BaseException:
print(traceback.format_exc())
finally:
all_profiles_visited = profiles_visited + profiles_visited_now
func_utils.save_profiles_visited(all_profiles_visited)
print("Exiting...")
if __name__ == "__main__":
main()
|
print("How many time I have left If I live until 90 years old?")
age = input("What is your current age? ")
age = int(age)
years_until_90 = 90
months_until_90 = years_until_90 * 12
weeks_until_90 = years_until_90 * 52
days_until_90 = years_until_90 * 365
current_age = age
current_month = age * 12
current_weeks_lived = age * 52
current_days_lived = age * 365
#Days, weeks, years reminder
year_remind = years_until_90 - current_age
month_remind = months_until_90 - current_month
weeks_remind = weeks_until_90 - current_weeks_lived
days_remind = days_until_90 - current_days_lived
print("------------------------------------------")
print(f"You have {days_remind} days, {weeks_remind} weeks, {month_remind} months, and {year_remind} years left.")
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class AnnotationSentiment(enum.IntEnum):
"""
Attributes:
ANNOTATION_SENTIMENT_UNSPECIFIED (int)
NEGATIVE (int): This annotation describes negatively about the data.
POSITIVE (int): This label describes positively about the data.
"""
ANNOTATION_SENTIMENT_UNSPECIFIED = 0
NEGATIVE = 1
POSITIVE = 2
class AnnotationSource(enum.IntEnum):
"""
Specifies where is the answer from.
Attributes:
ANNOTATION_SOURCE_UNSPECIFIED (int)
OPERATOR (int): Answer is provided by a human contributor.
"""
ANNOTATION_SOURCE_UNSPECIFIED = 0
OPERATOR = 3
class AnnotationType(enum.IntEnum):
"""
Attributes:
ANNOTATION_TYPE_UNSPECIFIED (int)
IMAGE_CLASSIFICATION_ANNOTATION (int): Classification annotations in an image.
IMAGE_BOUNDING_BOX_ANNOTATION (int): Bounding box annotations in an image.
IMAGE_ORIENTED_BOUNDING_BOX_ANNOTATION (int): Oriented bounding box. The box does not have to be parallel to horizontal
line.
IMAGE_BOUNDING_POLY_ANNOTATION (int): Bounding poly annotations in an image.
IMAGE_POLYLINE_ANNOTATION (int): Polyline annotations in an image.
IMAGE_SEGMENTATION_ANNOTATION (int): Segmentation annotations in an image.
VIDEO_SHOTS_CLASSIFICATION_ANNOTATION (int): Classification annotations in video shots.
VIDEO_OBJECT_TRACKING_ANNOTATION (int): Video object tracking annotation.
VIDEO_OBJECT_DETECTION_ANNOTATION (int): Video object detection annotation.
VIDEO_EVENT_ANNOTATION (int): Video event annotation.
TEXT_CLASSIFICATION_ANNOTATION (int): Classification for text.
TEXT_ENTITY_EXTRACTION_ANNOTATION (int): Entity extraction for text.
GENERAL_CLASSIFICATION_ANNOTATION (int): General classification.
"""
ANNOTATION_TYPE_UNSPECIFIED = 0
IMAGE_CLASSIFICATION_ANNOTATION = 1
IMAGE_BOUNDING_BOX_ANNOTATION = 2
IMAGE_ORIENTED_BOUNDING_BOX_ANNOTATION = 13
IMAGE_BOUNDING_POLY_ANNOTATION = 10
IMAGE_POLYLINE_ANNOTATION = 11
IMAGE_SEGMENTATION_ANNOTATION = 12
VIDEO_SHOTS_CLASSIFICATION_ANNOTATION = 3
VIDEO_OBJECT_TRACKING_ANNOTATION = 4
VIDEO_OBJECT_DETECTION_ANNOTATION = 5
VIDEO_EVENT_ANNOTATION = 6
TEXT_CLASSIFICATION_ANNOTATION = 8
TEXT_ENTITY_EXTRACTION_ANNOTATION = 9
GENERAL_CLASSIFICATION_ANNOTATION = 14
class DataType(enum.IntEnum):
"""
Attributes:
DATA_TYPE_UNSPECIFIED (int)
IMAGE (int)
VIDEO (int)
TEXT (int)
GENERAL_DATA (int)
"""
DATA_TYPE_UNSPECIFIED = 0
IMAGE = 1
VIDEO = 2
TEXT = 4
GENERAL_DATA = 6
class StringAggregationType(enum.IntEnum):
"""
Attributes:
STRING_AGGREGATION_TYPE_UNSPECIFIED (int)
MAJORITY_VOTE (int): Majority vote to aggregate answers.
UNANIMOUS_VOTE (int): Unanimous answers will be adopted.
NO_AGGREGATION (int): Preserve all answers by crowd compute.
"""
STRING_AGGREGATION_TYPE_UNSPECIFIED = 0
MAJORITY_VOTE = 1
UNANIMOUS_VOTE = 2
NO_AGGREGATION = 3
class EvaluationJob(object):
class State(enum.IntEnum):
"""
State of the job.
Attributes:
STATE_UNSPECIFIED (int)
SCHEDULED (int)
RUNNING (int)
PAUSED (int)
STOPPED (int)
"""
STATE_UNSPECIFIED = 0
SCHEDULED = 1
RUNNING = 2
PAUSED = 3
STOPPED = 4
class LabelImageRequest(object):
class Feature(enum.IntEnum):
"""
Image labeling task feature.
Attributes:
FEATURE_UNSPECIFIED (int)
CLASSIFICATION (int): Label whole image with one or more of labels.
BOUNDING_BOX (int): Label image with bounding boxes for labels.
ORIENTED_BOUNDING_BOX (int): Label oriented bounding box. The box does not have to be parallel to
horizontal line.
BOUNDING_POLY (int): Label images with bounding poly. A bounding poly is a plane figure that
is bounded by a finite chain of straight line segments closing in a loop.
POLYLINE (int): Label images with polyline. Polyline is formed by connected line segments
which are not in closed form.
SEGMENTATION (int): Label images with segmentation. Segmentation is different from bounding
poly since it is more fine-grained, pixel level annotation.
"""
FEATURE_UNSPECIFIED = 0
CLASSIFICATION = 1
BOUNDING_BOX = 2
ORIENTED_BOUNDING_BOX = 6
BOUNDING_POLY = 3
POLYLINE = 4
SEGMENTATION = 5
class LabelTextRequest(object):
class Feature(enum.IntEnum):
"""
Text labeling task feature.
Attributes:
FEATURE_UNSPECIFIED (int)
TEXT_CLASSIFICATION (int): Label text content to one of more labels.
TEXT_ENTITY_EXTRACTION (int): Label entities and their span in text.
"""
FEATURE_UNSPECIFIED = 0
TEXT_CLASSIFICATION = 1
TEXT_ENTITY_EXTRACTION = 2
class LabelVideoRequest(object):
class Feature(enum.IntEnum):
"""
Video labeling task feature.
Attributes:
FEATURE_UNSPECIFIED (int)
CLASSIFICATION (int): Label whole video or video segment with one or more labels.
OBJECT_DETECTION (int): Label objects with bounding box on image frames extracted from the video.
OBJECT_TRACKING (int): Label and track objects in video.
EVENT (int): Label the range of video for the specified events.
"""
FEATURE_UNSPECIFIED = 0
CLASSIFICATION = 1
OBJECT_DETECTION = 2
OBJECT_TRACKING = 3
EVENT = 4
|
import os, cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
_intensityBGR = 256
_projectDirectory = os.path.dirname(__file__)
_imagesDirectory = os.path.join(_projectDirectory, "images")
_images = []
for _root, _dirs, _files in os.walk(_imagesDirectory):
for _file in _files:
if _file.endswith(".jpg"):
_images.append(os.path.join(_imagesDirectory, _file))
_imageIndex = 0
_imageTotal = len(_images)
_img = cv2.imread(_images[_imageIndex], cv2.IMREAD_UNCHANGED)
_img = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
_fig = plt.figure("Histogram Equalization")
_gs = GridSpec(16, 2)
_fig1 = plt.subplot(_gs[0:7, 0])
_fig1.set_title("Image")
plt.tight_layout()
_fig1ShowIt = plt.imshow(_img, cmap = "gray")
_fig2 = plt.subplot(_gs[0:7, 1])
_fig2.set_title("Histogram")
_hist = cv2.calcHist([_img], [0], None, [256], [0, 256]) / _img.size
plt.xlabel("Intensity")
plt.ylabel("PMF")
plt.tight_layout()
_fig2ShowIt = plt.plot(_hist)
_imgEqu = cv2.equalizeHist(_img)
_fig3 = plt.subplot(_gs[8:15, 0])
_fig3.set_title("Equalized Image")
plt.tight_layout()
_fig3ShowIt = plt.imshow(_imgEqu, cmap = "gray")
_fig4 = plt.subplot(_gs[8:15, 1])
_fig4.set_title("Equalized Histogram")
_histEqu = cv2.calcHist([_imgEqu], [0], None, [256], [0, 256]) / _imgEqu.size
plt.xlabel("Intensity")
plt.ylabel("PMF")
plt.tight_layout()
_fig4ShowIt = plt.plot(_histEqu)
plt.show()
|
import pytest
import qcdb
from qcdb.keywords import AliasKeyword, Keyword, Keywords, parsers, register_kwds
def validator(v):
if v > 5:
return v
else:
raise qcdb.KeywordValidationError
def test_1():
subject = Keyword(keyword="opt1", glossary="does stuff", validator=validator, default=6)
assert subject.value == 6
# assert subject.has_changed is False
assert subject.is_default() is True
assert subject.keyword == "OPT1"
# subject.value = 6
subject.require(6)
# assert subject.has_changed is True
assert subject.is_default() is True
def test_2():
with pytest.raises(qcdb.KeywordValidationError):
subject = Keyword(keyword="opt2", glossary="does stuff", validator=validator, default=4)
def test_3():
subject = Keyword(keyword="opt1", glossary="does stuff", validator=validator, default=6)
# subject.value = 10
subject.require(10)
assert subject.value == 10
# assert subject.has_changed is True
assert subject.is_default() is False
def test_4_conv():
subject = Keyword(keyword="opt1", glossary="does stuff", validator=parsers.parse_convergence, default=6)
assert subject.value == 1.0e-6
# assert subject.has_changed is False
assert subject.is_default() is True
def test_5_conv():
subject = Keyword(keyword="opt1", glossary="does stuff", validator=parsers.parse_convergence, default=6)
with pytest.raises(qcdb.KeywordValidationError):
# subject.value = 5.5
subject.require(5.5)
def test_6_conv():
subject = Keyword(keyword="opt1", glossary="does stuff", validator=parsers.parse_convergence, default=6)
# subject.value = 1
subject.require(1)
assert subject.value == 1.0e-1
# assert subject.has_changed is True
assert subject.is_default() is False
def test_7_conv():
subject = Keyword(keyword="opt1", glossary="does stuff", validator=parsers.parse_convergence, default=6)
with pytest.raises(qcdb.KeywordValidationError):
# subject.value = -1
subject.require(-1)
def test_8_conv():
subject = Keyword(keyword="opt1", glossary="does stuff", validator=parsers.parse_convergence, default=6)
with pytest.raises(qcdb.KeywordValidationError):
# subject.value = -1.0
subject.require(-1.0)
def test_9_mem():
subject = Keyword(keyword="memory", default="700 mb", validator=parsers.parse_memory)
assert subject.value == 700000000
subject.require(800000000)
assert subject.value == 800000000
subject.require(".6 Gb")
assert subject.value == 600000000
with pytest.raises(qcdb.KeywordValidationError):
subject.require("8 dimms")
def test_20():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="memory", default="700 mb", validator=parsers.parse_memory))
subjects.add("qcdb", Keyword(keyword="e_convergence", default=7, validator=parsers.parse_convergence))
subjects.add("dftd3", Keyword(keyword="Opt1", default=4, validator=lambda x: x))
subjects.add("dftd3", Keyword(keyword="Opt1", default="cat", validator=lambda x: x))
print(subjects)
# assert False
def test_21a():
subjects = Keywords()
with pytest.raises(qcdb.ValidationError):
subjects.add("random", Keyword(keyword="memory", default="700 mb", validator=parsers.parse_memory))
def test_21b():
subjects = Keywords()
with pytest.raises(qcdb.ValidationError):
subjects.require("qcdb", "mmry", "4 gb", 1234)
def test_22a():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="memory", default="700 mb", validator=parsers.parse_memory))
subjects.require("qcdb", "memory", 9000000000, 22342345)
subjects.suggest("qcdb", "memory", 4000000000, 12342345)
subjects.require("qcdb", "memory", "9 gb", "00000000")
assert subjects.scroll["QCDB"]["MEMORY"].value == 9000000000
def test_22b():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="memory", default="700 mb", validator=parsers.parse_memory))
subjects.require("qcdb", "memory", 9000000000, 22342345)
subjects.suggest("qcdb", "memory", 4000000000, 12342345)
subjects.require("qcdb", "memory", "8 gb", "00000000")
with pytest.raises(qcdb.KeywordReconciliationError):
assert subjects.scroll["QCDB"]["MEMORY"].value == 8000000000
def test_22c():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="memory", default="700 mb", validator=parsers.parse_memory))
subjects.require("qcdb", "memory", 9000000000, 22342345)
subjects.suggest("qcdb", "memory", 4000000000, 12342345)
subjects.require("qcdb", "memory", "8 gb", 555) # no user signal so trumps 2234
assert subjects.scroll["QCDB"]["MEMORY"].value == 8000000000
assert subjects.scroll["QCDB"]["MEMORY"].is_default() is False
import json
s = json.dumps(subjects, sort_keys=True, indent=2, default=lambda x: x.__dict__)
print(s) # all but validator
def test_22d():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="memory", default="700 mb", validator=parsers.parse_memory))
subjects.suggest("qcdb", "memory", 4000000000, 12342345)
assert subjects.scroll["QCDB"]["MEMORY"].value == 4000000000
assert subjects.scroll["QCDB"]["MEMORY"].is_default() is False
def test_22e():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="memory", default="700 mb", validator=parsers.parse_memory))
assert subjects.scroll["QCDB"]["MEMORY"].value == 700000000
assert subjects.scroll["QCDB"]["MEMORY"].is_default() is True
def test_23a():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="scf_e_conv", default=5, validator=parsers.parse_convergence))
subjects.suggest("qcdb", "scf_e_conv", 1.0e-6, 1234)
assert subjects.scroll["QCDB"]["SCF_E_CONV"].value == 1.0e-6
@pytest.mark.xfail(True, reason="have not yet healed namespaced options", run=True, strict=True)
def test_23b():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="scf_e_conv", default=5, validator=parsers.parse_convergence))
subjects.suggest("qcdb", "e_conv", 1.0e-6, 1234)
assert subjects.scroll["QCDB"]["SCF_E_CONV"].value == 1.0e-5
@pytest.mark.xfail(True, reason="have not yet healed namespaced options", run=True, strict=True)
def test_23c():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="scf_e_conv", default=5, validator=parsers.parse_convergence))
subjects.require("qcdb", "e_conv", 1.0e-6, 1234)
assert subjects.scroll["QCDB"]["SCF_E_CONV"].value == 1.0e-6
@pytest.mark.xfail(True, reason="have not yet healed namespaced options", run=True, strict=True)
def test_23d():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="scf_e_conv", default=5, validator=parsers.parse_convergence))
subjects.require("qcdb", "scf_e_conv", 7, 1234)
subjects.require("qcdb", "e_conv", 1.0e-6, 1234)
assert subjects.scroll["QCDB"]["SCF_E_CONV"].value == 1.0e-7
def test_24():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="scf_e_conv", default=5, validator=parsers.parse_convergence))
@register_kwds(subjects)
def energy(count=1, **kwargs):
if count > 3:
assert subjects.scroll["QCDB"]["SCF_E_CONV"].value == 1.0e-4
return
else:
count += 1
# subjects.require('qcdb', 'scf_c_conv', count, accession=kwargs['accession'])
subjects.require("qcdb", "scf_e_conv", count, accession=kwargs["accession"])
proc(count)
@register_kwds(subjects)
def proc(count, **kwargs):
energy(count)
assert subjects.scroll["QCDB"]["SCF_E_CONV"].value == 1.0e-5
energy()
assert subjects.scroll["QCDB"]["SCF_E_CONV"].value == 1.0e-5
@pytest.fixture
def alias_setup():
subjects = Keywords()
subjects.add("qcdb", Keyword(keyword="freeze__core", default=0, validator=parsers.nonnegative_integer))
subjects.add_alias("qcdb", AliasKeyword(alias="freeze", target="freeze__core"))
return subjects
def test_alias_a(alias_setup):
alias_setup.require("qcdb", "freeze__core", 4, Keywords.mark_of_the_user)
assert alias_setup.scroll["QCDB"]["FREEZE__CORE"].value == 4
assert alias_setup.scroll["QCDB"]["FREEZE__CORE"].is_default() is False
def test_alias_b(alias_setup):
alias_setup.require("qcdb", "freeze", 4, Keywords.mark_of_the_user)
assert alias_setup.scroll["QCDB"]["FREEZE__CORE"].value == 4
assert alias_setup.scroll["QCDB"]["FREEZE__CORE"].is_default() is False
def test_alias_c(alias_setup):
with pytest.raises(qcdb.KeywordValidationError) as e:
alias_setup.require("qcdb", "freeze", -1, Keywords.mark_of_the_user)
assert "Keyword (FREEZE__CORE) value (-1) does not pass" in str(e.value)
def test_alias_d(alias_setup):
with pytest.raises(qcdb.ValidationError) as e:
alias_setup.add_alias("qcdb", AliasKeyword(alias="melt", target="melt__core"))
assert "Keyword alias must point to existing keyword proper" in str(e.value)
def test_alias_e(alias_setup):
with pytest.raises(qcdb.ValidationError) as e:
alias_setup.add_alias("qcdb", AliasKeyword(alias="freeze__core", target="melt__core"))
assert "Keyword alias must not share a name with keyword proper" in str(e.value)
|
from data.data_set_home import DataSetHome, create_random_source
from train_ctc import build_model
from api import CompilationHome
from data.example_adapters import CTCAdapter
from data.generators import MiniBatchGenerator
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type=str, default='ds1')
parser.add_argument('--cuda', type=bool, default=False)
args = parser.parse_args()
location = CompilationHome(args.dataset_name).root_dir
home = DataSetHome(location, create_random_source)
encoding_table = home.get_encoding_table()
ctc_model = build_model(args.cuda, encoding_table=encoding_table)
train_source, val_source, test_slice = home.get_slices()
sentinel = encoding_table.sentinel
adapter = CTCAdapter(y_padding=sentinel)
test_gen = MiniBatchGenerator(test_slice, adapter, batch_size=1)
model = ctc_model.compile_model(0.001)
res = model.evaluate_generator(test_gen.get_examples(), steps=len(test_slice))
print('evaluation on test data:', res)
|
from pathlib import Path
from image import load
from feature_extraction import OrientationField
if __name__ == '__main__':
a = "database/UPEK/1_2.png"
b = "database/FVC2004/DB1_B/101_1.tif"
c = "database/AVA2017/AS.png"
d = "database/AVA2017/city_test2.jpg"
image_path = Path(__file__).parents[1] / a
# Create image
fingerprint_image = load(image_path)
orientation_field = OrientationField(fingerprint_image, 10)
orientation_field.show()
|
from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
import sys
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from django.template.defaulttags import register
from pdfminer.converter import XMLConverter, HTMLConverter, TextConverter
from pdfminer.layout import LAParams
from io import StringIO
from .utilityFunctions import *
def pdfparser(data):
fp = open(data, 'rb')
rsrcmgr = PDFResourceManager()
retstr = StringIO()
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(fp):
interpreter.process_page(page)
data = retstr.getvalue()
text_file = open("Output.txt", "w", encoding="utf-8")
text_file.write(data)
text_file = open("Output.txt",'r', encoding="utf-8")
a = ""
for x in text_file:
if len(x)>2:
b = x.split()
for i in b:
a += i + " "
return a
def analysis(request):
return render(request,'realworld/analysis.html')
def get_clean_text(text):
text = removeLinks(text)
text = stripEmojis(text)
text = removeSpecialChar(text)
text = stripPunctuations(text)
text = stripExtraWhiteSpaces(text)
return text
def input(request):
if request.method=='POST':
name = request.POST.get("Name", "")
file = request.FILES['document']
fs = FileSystemStorage()
fs.save(file.name,file)
pathname = "media/"
value = pdfparser(pathname+file.name)
# Sentiment Analysis
text = get_clean_text(value)
result = sentiment_scores(text)
print(result)
return render(request, 'realworld/sentiment_graph.html', {'sentiment': result})
else:
note = "Please Enter the file you want it to be uploaded"
return render(request, 'realworld/home.html', {'note': note})
def productanalysis(request):
if request.method=='POST':
blogname = request.POST.get("blogname", "")
text_file = open("ProductAnalysis.txt", "w")
text_file.write(blogname)
else:
note = "Please Enter the product blog name for analysis"
return render(request, 'realworld/productanalysis.html', {'note': note})
# Custom template filter to retrieve a dictionary value by key.
@register.filter(name='get_item')
def get_item(dictionary, key):
return dictionary.get(key, 0)
|
import argparse
import json
import random
import requests
import time
from generate.idioms.score import Score
from generate.midi_export import MIDIFile
import generate.voices.chorale as chorale
from generate.voices.melody import Melody
from generate.voices.voice import Voice
def make_lily_file():
"""Generate Lilypond file from musical sequence"""
if Voice.mode == "ionian":
mode = "major"
elif Voice.mode == "aeolian":
mode = "minor"
else:
mode = Voice.mode
if Voice.beat_division == 3:
time_sig = f"{Voice.measure_length * 3}/8"
elif Voice.beat_division == 2:
time_sig = f"{Voice.measure_length}/4"
title = f"Medley in {Voice.tonic} {mode}"
with open("logs/old_layout.txt", 'r') as f:
sheet_code = f.read()
for lily_part in Voice.lily_score:
sheet_code = sheet_code.replace(
"PART_SLOT", " ".join([
"\\key", Voice.tonic.replace('#', 'is').replace('b', "es").lower(),
f"\\{mode} \\time {time_sig} {lily_part}",
]), 1
)
sheet_code = sheet_code.replace("PART_SLOT", "")
sheet_code = sheet_code.replace("Medley", title)
with open("logs/new_layout.txt", 'w') as f:
f.write(sheet_code)
try:
make_score_pdf(sheet_code)
except PermissionError:
print("You must close out the previous pdf to overwrite it.")
except requests.exceptions.ConnectionError:
rejection_message = "An error occured with the API and/or internet connection. "
rejection_message += "Check your internet connection and try again."
print(rejection_message)
def make_score_pdf(sheet_code):
"""Generate sheet music pdf from lilyPond format"""
payload = {
"version": "stable", "code": sheet_code, "id": ""
}
# AWS can't parse python dictionaries
with open("payload.json", 'w') as f:
json.dump(payload, f)
with open("payload.json", 'rb') as f:
sheet_music_response = requests.post(
"https://7icpm9qr6a.execute-api.us-west-2.amazonaws.com/prod/prepare_preview/stable", data=f)
time.sleep(1)
response_id = sheet_music_response.json()["id"]
pdf_response = requests.get(
f"https://s3-us-west-2.amazonaws.com/lilybin-scores/{response_id}.pdf"
)
with open("final_score.pdf", "wb") as f:
f.write(pdf_response.content)
def reset_score_settings(score_args):
"""Reset parameters of score to allow creation of a new piece"""
Score.reset(score_args.tonic, score_args.mode, score_args.style)
Voice.chord_sequence = []
Voice.all_midi_pitches = []
Voice.midi_score = []
Voice.lily_score = []
Voice.chorale_scale_degrees = []
Voice.pickup = False
Voice.pickup_duration = 0
Voice.bass_motion = []
Voice.tenor_motion = []
Voice.alto_motion = []
Voice.soprano_motion = []
with open("logs/chorale.log", 'w') as f:
pass
with open("logs/melody.log", 'w') as f:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A pseudo-random music generator"
)
parser.add_argument('-t', "--tonic")
parser.add_argument('-m', "--mode")
parser.add_argument("-s", "--style", default="Mm")
score_args = parser.parse_args()
track = 0
current_time = 0
channel = 0
tempo = 60 # In BPM
# volume 0-127, as per the MIDI standard
MyMIDI = MIDIFile(5, eventtime_is_ticks=True)
# defaults to format 1 (tempo track automatically created)
MyMIDI.addProgramChange(track, channel, current_time, 73)
MyMIDI.addProgramChange(1, 1, current_time, 32)
MyMIDI.addProgramChange(2, 2, current_time, 32)
MyMIDI.addProgramChange(3, 3, current_time, 32)
MyMIDI.addProgramChange(4, 3, current_time, 32)
while True:
try:
reset_score_settings(score_args)
Melody().make_melody()
chorale.Chorale().create_parts()
chorale.Bass().create_part()
chorale.Tenor().create_part()
chorale.Alto().create_part()
chorale.Soprano().create_part()
break
except AssertionError:
print("Restarting...\n")
continue
for new_note in Voice.midi_score[0]:
if isinstance(new_note.pitch, int):
MyMIDI.addNote(track, channel, *new_note, 100)
strum_ending = random.choice((True, True, True, False))
print(f"Strum ending: {strum_ending}")
if strum_ending:
time_shift = 0
for voice_index, part in enumerate(Voice.midi_score[2:], 2):
time_shift += 90
old_midi_obj = Voice.midi_score[voice_index][-2]
new_midi_obj = Voice.Note(
old_midi_obj.pitch, old_midi_obj.time + time_shift,
old_midi_obj.duration,
)
Voice.midi_score[voice_index][-2] = new_midi_obj
for voice_index, part in enumerate(Voice.midi_score[1:]):
track += 1
channel += 1
volume = Voice.voice_volumes[voice_index]
for new_note in part:
if isinstance(new_note.pitch, int):
MyMIDI.addNote(track, channel, *new_note, volume)
# 3/4 time sig feels slower at same tempo because
# beats are in groups of 3 instead of 2
if Voice.time_sig == (3, 2):
MOD_SPEED = 1.5
else:
MOD_SPEED = 1
if Voice.mode == "aeolian":
tempo = random.choice(range(85, 101)) * MOD_SPEED
else:
tempo = random.choice(range(85, 111)) * MOD_SPEED
MyMIDI.addTempo(track, current_time, tempo)
slow_ending = random.choice((True, False))
if slow_ending:
if Voice.repeat_ending:
measure_mark = 16
else:
measure_mark = 13
MyMIDI.addTempo(
0, Voice.pickup_duration + Voice.max_note_duration * measure_mark,
tempo * 0.93
)
print(f"Slow ending? {slow_ending}")
print(f"Tempo: {tempo}")
try:
with open("song0.mid", "wb") as output_file:
MyMIDI.writeFile(output_file)
except PermissionError:
print("You must close the previous midi file to overwrite it.")
make_lily_file()
|
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from models import Confirmation
class ConfirmationAdmin(admin.ModelAdmin):
list_display = ('shopper_id', 'ret_transdatetime', 'addr_name', 'trx_amount',
'trx_currency', 'ret_status')
fieldsets = (
(None, {
'fields': ('shopper_id', 'ret_status', 'vendor_comment')
}),
(_('Payment details'), {
'fields': ('ret_transdatetime', 'trx_paymentmethod', 'addr_name',
'trx_amount', 'trx_currency', 'trx_paymenttyp')
}),
(_('Customer identification'), {
'fields': ('ret_ip', 'trx_remoteip_country')
}),
(_('Transaction information'), {
'fields': ('ret_booknr', 'ret_errorcode', 'ret_trx_number',
'ret_authcode', 'trx_typ', 'trxuser_id')
}),
)
readonly_fields = ('shopper_id', 'ret_booknr', 'ret_errorcode',
'trx_paymentmethod', 'ret_trx_number', 'ret_transdatetime', 'ret_ip',
'trx_typ', 'addr_name', 'trx_amount', 'trx_remoteip_country',
'trx_currency', 'ret_authcode', 'trx_paymenttyp', 'ret_status',
'trxuser_id'
)
admin.site.register(Confirmation, ConfirmationAdmin)
|
import logging
from django.utils.translation import gettext as _
from outpost.django.campusonline import models as co
from rest_framework import authentication, permissions, status
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework.views import APIView
from outpost.django.campusonline.serializers import AuthenticatedStudentSerializer
from . import models, serializers
logger = logging.getLogger(__name__)
class ClockView(APIView):
permission_classes = [permissions.IsAuthenticated]
def initial(self, request, terminal_id, card_id, *args, **kwargs):
super().initial(request, *args, **kwargs)
logger.debug(f"Incoming request for terminal {terminal_id}")
try:
self.terminal = models.Terminal.objects.get(
pk=terminal_id, online=True, enabled=True
)
except models.Terminal.DoesNotExist:
            logger.warning(f"Unknown terminal {terminal_id}")
raise NotFound(_("Unknown terminal identification"))
try:
self.student = co.Student.objects.get(cardid=card_id)
except co.Student.DoesNotExist:
            logger.warning(f"No student found for cardid {card_id}")
raise NotFound(_("Unknown student identification"))
def get(self, request, **kwargs):
logger.debug(f"Preflight request for {self.terminal}:{self.student}")
try:
data = self.terminal.plugins.hook.preflight(
terminal=self.terminal, student=self.student
)
except Exception as e:
return Response(str(e), status.HTTP_400_BAD_REQUEST)
return Response(
{
"terminal": serializers.TerminalSerializer(self.terminal).data,
"student": AuthenticatedStudentSerializer(self.student).data,
"cardid": self.student.cardid,
"data": [entry for entry in data if entry],
}
)
def post(self, request, **kwargs):
logger.debug(f"Clock request for {self.terminal}:{self.student}")
entry = models.Entry.objects.create(
student=self.student, terminal=self.terminal
)
data = self.terminal.plugins.hook.clock(entry=entry, payload=request.data)
return Response(
{
"terminal": serializers.TerminalSerializer(self.terminal).data,
"student": AuthenticatedStudentSerializer(self.student).data,
"cardid": self.student.cardid,
"entry": entry.pk,
"data": [entry for entry in data if entry],
}
)
|
# https://leetcode.com/explore/interview/card/top-interview-questions-medium/103/array-and-strings/780/
# https://leetcode.com/problems/longest-palindromic-substring/description/
# Longest Palindromic Substring
#
# Given a string s, find the longest palindromic substring in s. You
# may assume that the maximum length of s is 1000.
import collections
import unittest
class Solution:
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
if not s or len(s) < 2:
return s
return self.longest2(s)
# Note: try to grow 2 palindrome using each pos as center.
#
# for each pos, there are 2 cases: (i, i) for odd length
# palindrome, and (i, i+1) for even length palindrome.
# came up with this after looking at the hints.. expand around center
# 94 / 94 test cases passed.
# Status: Accepted
# Runtime: 1004 ms (beats 63.56% py3)
def longest2(self, s):
# ok, new idea, for each pos, 'grow' as big as we can
n = len(s)
max_so_far = ''
for i in range(0, n-1):
# 2 cases, left/right point to same, or left/right point to
max1 = self.grow_palindrome(s, i, i+1) # even length palindrome, s[i], s[i+1] is center
max2 = self.grow_palindrome(s, i, i) # odd length, s[i] is center
local_max = max(max1, max2, key=len)
max_so_far = max(max_so_far, local_max, key=len)
return max_so_far
def grow_palindrome(self, s, x, y):
"""Grow palindrome from s[x...y] to as large as possible"""
n = len(s)
while x >= 0 and y < n and s[x] == s[y]:
x -= 1; y += 1
# one size smaller, either mismatch, or out of bound
return s[x+1:y]
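    # Worked example (illustrative): for s = "babad" and center i = 2,
    # grow_palindrome(s, 2, 2) expands to x=1, y=3 ("aba"), then stops at
    # x=0, y=4 because s[0] == 'b' != 'd' == s[4], so it returns s[1:4] == "aba".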
# what I came up with, build a char table and optimize search a bit
class SolutionMe:
# 94 / 94 test cases passed.
# Status: Accepted
# Runtime: 7464 ms (beats 8.76%, haha)
def longest1(self, s):
n = len(s)
chars = collections.defaultdict(list) # map of chars to their positions
for i in range(n):
chars[s[i]].append(i) # list of pos is sorted, since i goes up
longest = 1 # shortest palindrome by default
pa_pos = [0, 0] # default answer (1st char)
for k, v in chars.items():
if len(v) < 2: # single char, can't be start of longest
continue
if v[-1] - v[0] + 1 <= longest: # if range is too small, ignore
continue
pos = self.check_positions(s, v, longest)
if pos:
cur_size = pos[1] - pos[0] + 1
if cur_size > longest:
longest = cur_size
pa_pos = pos
return s[pa_pos[0]:pa_pos[1]+1]
def is_palindrome(self, s, i, j):
"""Is string from pos [i...j] a palindrome"""
while i < j: # while point to distinct position
if s[i] != s[j]:
return False
i += 1; j -= 1
return True
def check_positions(self, s, pos_list, longest):
"""Check possible palindrome starting from both ends"""
n = len(pos_list)
local_max = 0
local_range = []
for i in range(n-1): # i from beginning to 2nd last
for j in reversed(range(i+1, n)): # j from end to before i
a, b = pos_list[i], pos_list[j]
cur_size = b - a + 1
if cur_size <= longest or cur_size <= local_max:
break # no need to continue this loop
if self.is_palindrome(s, a, b):
local_max = cur_size
local_range = [a, b]
return local_range
class TestLongest(unittest.TestCase):
def setUp(self):
self.sol = Solution()
def test1(self):
self.assertIn(self.sol.longestPalindrome('babad'),
['bab', 'aba'])
self.assertIn(self.sol.longestPalindrome('cbbd'),
['bb'])
self.assertIn(self.sol.longestPalindrome('babadada'),
['adada'])
self.assertIn(self.sol.longestPalindrome('bb'),
['bb'])
if __name__ == '__main__':
unittest.main()
|
"""Defines the ObservationModel for the 2D Multi-Object Search domain.
Origin: Multi-Object Search using Object-Oriented POMDPs (ICRA 2019)
(extensions: action space changes, different sensor model, gridworld instead of
topological graph)
Observation: {objid : pose(x,y) or NULL}. The sensor model could vary;
it could be a fan-shaped model as the original paper, or
it could be something else. But the resulting observation
should be a map from object id to observed pose or NULL (not observed).
Observation Model
The agent can observe its own state, as well as object poses
that are within its sensor range. We only need to model object
observation.
"""
import pomdp_py
import math
import random
import numpy as np
from pomdp_problems.multi_object_search.domain.state import *
from pomdp_problems.multi_object_search.domain.action import *
from pomdp_problems.multi_object_search.domain.observation import *
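# Example observation produced by the models below (illustrative): after a Look
# action, an agent that sees object 3 at grid cell (6, 3) but cannot see object 5
# receives the factored observation {3: (6, 3), 5: NULL}.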
#### Observation Models ####
class MosObservationModel(pomdp_py.OOObservationModel):
    """Object-oriented observation model"""
def __init__(self,
dim,
sensor,
object_ids,
sigma=0.01,
epsilon=1):
self.sigma = sigma
self.epsilon = epsilon
observation_models = {objid: ObjectObservationModel(objid, sensor, dim,
sigma=sigma, epsilon=epsilon)
for objid in object_ids}
pomdp_py.OOObservationModel.__init__(self, observation_models)
def sample(self, next_state, action, argmax=False, **kwargs):
if not isinstance(action, LookAction):
return MosOOObservation({})
# return MosOOObservation({objid: ObjectObservationModel.NULL
# for objid in next_state.object_states
# if objid != next_state.object_states[objid].objclass != "robot"})
factored_observations = super().sample(next_state, action, argmax=argmax)
return MosOOObservation.merge(factored_observations, next_state)
class ObjectObservationModel(pomdp_py.ObservationModel):
def __init__(self, objid, sensor, dim, sigma=0, epsilon=1):
"""
sigma and epsilon are parameters of the observation model (see paper),
dim (tuple): a tuple (width, length) for the dimension of the world"""
        self._objid = objid
        self._sensor = sensor
        self._dim = dim
        self.sigma = sigma
        self.epsilon = epsilon
def _compute_params(self, object_in_sensing_region):
if object_in_sensing_region:
# Object is in the sensing region
alpha = self.epsilon
beta = (1.0 - self.epsilon) / 2.0
gamma = (1.0 - self.epsilon) / 2.0
else:
# Object is not in the sensing region.
alpha = (1.0 - self.epsilon) / 2.0
beta = (1.0 - self.epsilon) / 2.0
gamma = self.epsilon
return alpha, beta, gamma
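        # Worked example (illustrative): with epsilon = 0.9, an object inside the
        # sensing region gets (alpha, beta, gamma) = (0.9, 0.05, 0.05), while an
        # object outside it gets (0.05, 0.05, 0.9).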
def probability(self, observation, next_state, action, **kwargs):
"""
Returns the probability of Pr (observation | next_state, action).
Args:
observation (ObjectObservation)
next_state (State)
action (Action)
"""
if not isinstance(action, LookAction):
# No observation should be received
if observation.pose == ObjectObservation.NULL:
return 1.0
else:
return 0.0
if observation.objid != self._objid:
raise ValueError("The observation is not about the same object")
# The (funny) business of allowing histogram belief update using O(oi|si',sr',a).
next_robot_state = kwargs.get("next_robot_state", None)
if next_robot_state is not None:
assert next_robot_state["id"] == self._sensor.robot_id,\
"Robot id of observation model mismatch with given state"
robot_pose = next_robot_state.pose
if isinstance(next_state, ObjectState):
assert next_state["id"] == self._objid,\
"Object id of observation model mismatch with given state"
object_pose = next_state.pose
else:
object_pose = next_state.pose(self._objid)
else:
robot_pose = next_state.pose(self._sensor.robot_id)
object_pose = next_state.pose(self._objid)
# Compute the probability
zi = observation.pose
alpha, beta, gamma = self._compute_params(self._sensor.within_range(robot_pose, object_pose))
# Requires Python >= 3.6
event_occured = random.choices(["A", "B", "C"], weights=[alpha, beta, gamma], k=1)[0]
if event_occured == "A":
# object in sensing region and observation comes from object i
if zi == ObjectObservation.NULL:
# Even though event A occurred, the observation is NULL.
# This has 0.0 probability.
return 0.0 * alpha
else:
gaussian = pomdp_py.Gaussian(list(object_pose),
[[self.sigma**2, 0],
[0, self.sigma**2]])
return gaussian[zi] * alpha
elif event_occured == "B":
return (1.0 / self._sensor.sensing_region_size) * beta
else: # event_occured == "C":
prob = 1.0 if zi == ObjectObservation.NULL else 0.0 # indicator zi == NULL
return prob * gamma
def sample(self, next_state, action, **kwargs):
"""Returns observation"""
if not isinstance(action, LookAction):
# Not a look action. So no observation
return ObjectObservation(self._objid, ObjectObservation.NULL)
robot_pose = next_state.pose(self._sensor.robot_id)
object_pose = next_state.pose(self._objid)
# Obtain observation according to distribution.
alpha, beta, gamma = self._compute_params(self._sensor.within_range(robot_pose, object_pose))
# Requires Python >= 3.6
event_occured = random.choices(["A", "B", "C"], weights=[alpha, beta, gamma], k=1)[0]
zi = self._sample_zi(event_occured, next_state)
return ObjectObservation(self._objid, zi)
    def argmax(self, next_state, action, **kwargs):
        """Returns the most likely observation"""
        robot_pose = next_state.pose(self._sensor.robot_id)
        object_pose = next_state.pose(self._objid)
        # Obtain observation according to distribution.
        alpha, beta, gamma = self._compute_params(self._sensor.within_range(robot_pose, object_pose))
event_probs = {"A": alpha,
"B": beta,
"C": gamma}
event_occured = max(event_probs, key=lambda e: event_probs[e])
zi = self._sample_zi(event_occured, next_state, argmax=True)
return ObjectObservation(self._objid, zi)
def _sample_zi(self, event, next_state, argmax=False):
if event == "A":
object_true_pose = next_state.object_pose(self._objid)
gaussian = pomdp_py.Gaussian(list(object_true_pose),
[[self.sigma**2, 0],
[0, self.sigma**2]])
if not argmax:
zi = gaussian.random()
else:
zi = gaussian.mpe()
zi = (int(round(zi[0])), int(round(zi[1])))
        elif event == "B":
            width, height = self._dim
            zi = (random.randint(0, width),   # x axis
                  random.randint(0, height))  # y axis
else: # event == C
zi = ObjectObservation.NULL
return zi
### Unit test ###
def unittest():
from ..env.env import make_laser_sensor,\
make_proximity_sensor, equip_sensors,\
interpret, interpret_robot_id
# Test within search region check,
# and the observation model probability and
# sampling functions.
worldmap =\
"""
..........
....T.....
......x...
..T.r.T...
..x.......
....T.....
..........
"""
#0123456789
# 10 x 8
worldstr = equip_sensors(worldmap,
{"r": make_laser_sensor(90, (1,5), 0.5, False)})
env = interpret(worldstr)
robot_id = interpret_robot_id("r")
robot_pose = env.state.pose(robot_id)
# within_range test
sensor = env.sensors[robot_id]
assert sensor.within_range(robot_pose, (4,3)) == False
assert sensor.within_range(robot_pose, (5,3)) == True
assert sensor.within_range(robot_pose, (6,3)) == True
assert sensor.within_range(robot_pose, (7,2)) == True
assert sensor.within_range(robot_pose, (7,3)) == True
assert sensor.within_range(robot_pose, (4,3)) == False
assert sensor.within_range(robot_pose, (2,4)) == False
assert sensor.within_range(robot_pose, (4,1)) == False
assert sensor.within_range(robot_pose, (4,5)) == False
assert sensor.within_range(robot_pose, (0,0)) == False
print(env.state)
# observation model test
O0 = ObjectObservationModel(0, sensor, (env.width, env.length), sigma=0.01, epsilon=1)
O2 = ObjectObservationModel(2, sensor, (env.width, env.length), sigma=0.01, epsilon=1)
O3 = ObjectObservationModel(3, sensor, (env.width, env.length), sigma=0.01, epsilon=1)
O5 = ObjectObservationModel(5, sensor, (env.width, env.length), sigma=0.01, epsilon=1)
z0 = O0.sample(env.state, Look)
assert z0.pose == ObjectObservation.NULL
z2 = O2.sample(env.state, Look)
assert z2.pose == ObjectObservation.NULL
z3 = O3.sample(env.state, Look)
assert z3.pose == (6, 3)
z5 = O5.sample(env.state, Look)
assert z5.pose == ObjectObservation.NULL
assert O0.probability(z0, env.state, Look) == 1.0
assert O2.probability(z2, env.state, Look) == 1.0
assert O3.probability(z3, env.state, Look) >= 1.0
assert O3.probability(ObjectObservation(3, ObjectObservation.NULL),
env.state, Look) == 0.0
assert O5.probability(z5, env.state, Look) == 1.0
if __name__ == "__main__":
unittest()
|
from manimlib import *
class ImplictIntro(Scene):
def construct(self):
eq1 = Tex("x^2+y-1=0", color=YELLOW, isolate=list("x^2+y-1=0")).scale(1.2).to_edge(UP, buff=1)
xy1 = Tex("x=1", "\\quad\\Longrightarrow\\quad ", "y=0").next_to(eq1, DOWN, buff=.5)
implicit_label = VGroup(TexText("Implicit Equation"), TexText("隐式方程")).arrange(DOWN).scale(.8).next_to(eq1,
DOWN)
what_we_cares = TexText("是否给出了函数$y=f(x)$?")
another_way = TexText("给定$x$,", "确定了唯一的$y$.", color=BLUE)
eq1_ex = Tex(r"y=1-x^2", isolate=list("y=1-x^2"), color=YELLOW).scale(1.2).move_to(eq1)
eq2 = Tex(r"\sin x+\ln y-xy^3=0", color=YELLOW).scale(1.2).next_to(what_we_cares, DOWN, buff=.5)
xy2 = Tex("x=1", "\\quad\\Longrightarrow\\quad ", "y=0.48").next_to(eq2, DOWN, buff=.5)
for i, j in zip(xy1, xy2):
j.align_to(i, LEFT)
y_add = Tex("y=0.91").next_to(xy2[-1], DOWN)
v = VGroup(xy2, y_add)
cross = Cross(v, stroke_width=[6, 6, 6])
self.play(Write(eq1))
self.wait()
self.play(Write(implicit_label))
self.wait()
self.play(Write(what_we_cares))
self.wait()
self.play(TransformMatchingShapes(eq1, eq1_ex, path_arc=PI / 2))
self.wait()
self.play(Write(eq2))
self.wait()
self.play(FadeTransform(eq1_ex, eq1), FadeOut(implicit_label))
self.wait()
self.play(FadeOut(what_we_cares))
self.wait()
self.play(Write(another_way[0]), Write(xy1[0]))
self.wait()
self.play(Write(another_way[1]), Write(xy1[1:]))
self.wait()
self.play(Write(xy2))
self.play(Write(y_add))
self.wait()
self.play(ShowCreation(cross))
self.wait()
class RoughTheorem(Scene):
def construct(self):
title = Title(r"\heiti 隐函数定理(大概?)", color=YELLOW, underline_color=YELLOW)
theorem = VGroup(TexText("$F(x,y)=0$", r"局部", "确定了函数", "$y=y(x)$", ",且"),
Tex(r"{\d y \over \d x}", r"=-{F'_x \over F'_y}")).arrange(DOWN).next_to(title, DOWN, buff=.5)
partial = Tex(r"F'_x\coloneqq{\partial F\over\partial x}", color=BLUE).next_to(theorem[1], RIGHT, buff=1)
issues = VGroup(*[TexText(i, color=YELLOW) for i in ["什么样的?", "哪儿?", "什么样的?"]])
arrows = VGroup()
for i, (issue, index) in enumerate(zip(issues, [0, 1, 3])):
issue.next_to(theorem[0][index], DOWN, buff=1)
if i > 0:
issue.align_to(issues[0], DOWN)
arrows.add(Arrow(theorem[0][index].get_bottom(), issue.get_top()).set_color(YELLOW))
self.add(title)
self.wait()
self.play(Write(theorem[0]))
self.wait()
self.play(Write(theorem[1]))
self.wait()
self.play(Write(partial))
self.wait()
self.play(FadeOut(partial))
self.wait()
self.play(Indicate(theorem[1]))
self.wait()
# seems to be easy
method = TexText(r"对$F\left(x,y(x)\right)$求导:", color=BLUE).next_to(theorem, DOWN, buff=.5)
Fx = Tex(r"F'_x+F'_y\cdot y'=0", r"~\Longrightarrow~", r"y'=-{F'_x\over F'_y}", color=BLUE).next_to(method,
DOWN)
self.play(Write(method))
self.wait()
self.play(Write(Fx[0]))
self.wait()
self.play(Write(Fx[1:]))
self.wait()
box1 = SurroundingRectangle(theorem[1][0])
box2 = SurroundingRectangle(theorem[0][-2])
local_box = SurroundingRectangle(theorem[0][1])
self.play(ShowCreation(box1))
self.wait()
self.play(ShowCreation(box2))
self.wait()
self.play(FadeOut(VGroup(method, Fx)))
self.play(ShowCreation(local_box), FadeOut(VGroup(box1, box2)))
self.wait()
# draw the circle
axes = Axes(
x_range=[-1.5, 1.5],
y_range=[-1.5, 1.5],
axis_config={"include_tip": True, "include_ticks": False},
height=5,
width=5
).to_corner(DL)
circle = Circle(radius=axes.x_axis.get_unit_size(), color=RED).move_to(axes.get_origin())
self.play(FadeIn(axes), ShowCreation(circle))
circle_label = Tex("x^2+y^2=1", color=RED).scale(.8).next_to(circle, UL, buff=-.4).shift(UP * .6 + RIGHT)
F_label = Tex("F(x,y)=x^2+y^2-1", color=RED).next_to(axes, buff=1)
self.play(Write(circle_label))
self.wait()
self.play(Write(F_label))
self.wait()
# locally
point1 = Dot(axes.c2p(np.cos(PI / 3), np.sin(PI / 3))).set_color(YELLOW)
point0 = Dot(axes.c2p(-1, 0)).set_color(YELLOW)
point00 = Dot(axes.c2p(1, 0)).set_color(BLUE)
self.play(FadeIn(point1, scale=.5))
self.wait()
graph1 = axes.get_graph(lambda t: np.sqrt(1 - t ** 2), x_range=[0.3, 0.7], color=YELLOW)
graph0 = axes.get_graph(lambda t: np.sqrt(1 - t ** 2), x_range=[-0.3, 0.3], color=YELLOW) \
.rotate(angle=PI / 2, about_point=axes.get_origin())
line = Line(axes.c2p(-0.97, -np.sqrt(1 - 0.97 ** 2)), axes.c2p(-0.97, np.sqrt(1 - 0.97 ** 2)),
color=BLUE).set_stroke(width=3)
self.play(GrowFromCenter(graph1))
self.wait()
# but not every point
self.play(FadeIn(point0, scale=0.5))
self.wait()
self.play(GrowFromCenter(graph0))
self.wait()
frame = self.camera.frame
frame.save_state()
self.play(frame.animate.move_to(point0.get_center() + RIGHT * .5).scale(.2), run_time=2)
self.play(GrowFromCenter(line))
self.wait()
self.play(Restore(frame), run_time=2)
self.play(FadeOut(line))
# partial y
Fynot0 = Tex(r"F'_y\neq0", color=BLUE).next_to(theorem[1][-1][-2:], buff=1)
arrow = Arrow(Fynot0.get_left(), theorem[1][-1][-2:].get_right(), color=BLUE)
self.play(FadeIn(Fynot0, LEFT), GrowArrow(arrow))
self.wait()
Fy = Tex("F'_y=2y", color=BLUE).next_to(F_label, DOWN, aligned_edge=LEFT)
Fyis0 = Tex(r"F'_y(\pm1,0)=0", color=BLUE).next_to(Fy, DOWN, aligned_edge=LEFT)
self.play(Write(Fy))
self.wait()
self.play(FadeToColor(VGroup(graph0, point0), BLUE))
self.play(FadeIn(point00))
self.play(Write(Fyis0))
self.wait()
self.play(FadeOut(
VGroup(
axes, circle, circle_label, point0, point1, point00, graph1, graph0,
F_label, Fy, Fyis0,
arrow, Fynot0,
local_box
)
))
self.play(theorem[1].animate.shift(DOWN * 2))
self.wait()
# conditions
for issue, arrow in zip(issues, arrows):
self.play(FadeIn(issue, UP), GrowArrow(arrow))
self.wait()
class Theorem(Scene):
def construct(self):
boxes = VGroup()
m = {"x_0": RED, "F": BLUE}
title = TexText("\\underline{\\heiti 隐函数定理}", color=YELLOW).to_corner(UL)
self.add(title)
pre = Tex(r"F\colon D\to\R, ~ D\subset\R^2", r"\text{ 是开集}", color=BLUE).scale(.8).align_to(title, DOWN)
self.play(Write(pre))
arrow = Arrow(ORIGIN, UP).set_color(BLUE).next_to(pre[-1], DOWN)
self.wait()
self.play(GrowArrow(arrow))
self.wait()
self.play(FadeOut(arrow))
self.wait()
# conditions
conditions = VGroup(
Tex(r"(1)~", "F", r"\in C^1(D)"),
Tex(r"(2)~", "F", "(x_0,y_0)", r"=0,\text{ 其中 }", "(x_0,y_0)", r"\in D").tm(m),
Tex(r"(3)~", r"F'_y", "(x_0,y_0)", r"\neq0").tm(m)
).arrange(DOWN, aligned_edge=LEFT, buff=.3).next_to(pre, DOWN, buff=.3, aligned_edge=LEFT)
boxes.add(
SurroundingRectangle(conditions[1][2][1:3]),
SurroundingRectangle(conditions[1][-2][1:3]),
SurroundingRectangle(conditions[2][2][1:3]),
)
comments = VGroup(
TexText("(光滑)").next_to(conditions[0], LEFT, buff=.8),
TexText("(上面有个点)").next_to(conditions[1], LEFT),
TexText("(偏导的要求)").next_to(conditions[2], LEFT)
).set_color(YELLOW)
comments[1].align_to(comments[0], RIGHT)
comments[2].align_to(comments[0], RIGHT)
self.play(Write(conditions[0]))
self.wait()
self.play(Write(comments[0]))
self.wait()
self.play(Write(conditions[1]))
self.wait()
self.play(Write(comments[1]))
self.wait()
self.play(Write(conditions[2]))
self.wait()
self.play(Write(comments[2]))
self.wait()
# longbar = DashedLine(color=GREEN).set_width(FRAME_WIDTH).next_to(comments, DOWN, buff=.3).set_x(0)
# self.play(GrowFromCenter(longbar))
# self.wait()
local = TexText(r"那么存在一个包含$(x_0,y_0)$的开矩形,$I\times J\subset D$,使得:", color=BLUE) \
.scale(.8).next_to(conditions, DOWN, buff=.7).set_x(1.5)
# self.add(Debug(local[0]))
boxes.add(SurroundingRectangle(local[0][9:11]))
local_label = TexText("(局部)", color=YELLOW).next_to(local, LEFT, buff=.8).align_to(comments, LEFT)
self.play(Write(local))
self.wait()
self.play(Write(local_label))
self.wait()
conclusions = VGroup(
TexText(r"(1)~给定$x\in I$, $F(x,y)=0$确定唯一$f(x)\in J$"),
TexText(r"(2)~$y_0=f(x_0)$"),
TexText(r"(3)~$f\in C^1(I)$, 且$f'(x)=-{F_x'\over F_y'}$"),
).arrange(DOWN, aligned_edge=LEFT).next_to(local, DOWN, aligned_edge=LEFT)
# self.add(Debug(conclusions[0][0]),Debug(conclusions[1][0]),Debug(conclusions[2][0]))
boxes.add(
SurroundingRectangle(conclusions[0][0][5]),
SurroundingRectangle(conclusions[0][0][11]),
SurroundingRectangle(conclusions[0][0][23]),
SurroundingRectangle(conclusions[1][0][8:10]),
# SurroundingRectangle(conclusions[2][0][15]),
)
comments2 = VGroup(
TexText("(存在性)", color=YELLOW).next_to(conclusions[0], LEFT).align_to(local_label, LEFT),
TexText("(过点)", color=YELLOW).next_to(conclusions[1], LEFT),
TexText("(连续可导)", color=YELLOW).next_to(conclusions[2], LEFT)
)
comments2[1].align_to(local_label, LEFT)
comments2[2].align_to(local_label, LEFT)
for conclusion, comment in zip(conclusions, comments2):
self.play(Write(conclusion))
self.wait()
self.play(Write(comment))
self.wait()
Rn = Tex("\\R^{n+1}", color=YELLOW).next_to(conditions[0], buff=.5)
self.play(Write(Rn))
self.wait()
partial = Tex(r"{\partial f\over \partial x_i}=-{F'_{x_i}\over F'_y}", color=YELLOW)\
.next_to(conclusions[2], buff=.5)
self.play(ShowCreation(boxes))
self.wait()
self.play(Write(partial))
self.wait()
class CircleInterpretation(Scene):
def construct(self):
axes = Axes(
x_range=[-1.5, 1.5, 1],
y_range=[-1.5, 1.5, 1],
height=8,
axis_config={"include_ticks": False},
width=8,
)
# circle = Circle(radius=, color=BLUE)
# f_always(circle.move_to, lambda:)
circle = always_redraw(lambda: Circle(
arc_center=axes.get_origin(),
radius=(axes.c2p(1, 0) - axes.c2p(0, 0))[0],
color=BLUE)
)
self.add(axes, circle)
F = Tex("F(x,y)=x^2+y^2-1", color=BLUE).to_corner(UL)
self.play(Write(F))
self.wait()
point = Dot(color=RED)
f_always(point.move_to, lambda: axes.c2p(np.cos(PI / 3), np.sin(PI / 3)))
point_label = Tex("(x_0,y_0)", color=point.get_color())
always(point_label.next_to, point)
self.play(FadeIn(point, scale=.5))
self.play(Write(point_label))
self.wait()
rect = DashedRectangle(dash_length=0.15, width=1.5).move_to(point)
# always(rect.move_to, point)
self.play(ShowCreation(rect))
self.wait()
# print(axes.p2c(point))
# graph =
graph = axes.get_graph(
lambda t: np.sqrt(1 - t ** 2),
x_range=[
np.cos(PI / 3) - (rect.get_width() / 2) / axes.x_axis.unit_size,
np.cos(PI / 3) + (rect.get_width() / 2) / axes.x_axis.unit_size,
],
color=YELLOW
)
self.play(ShowCreation(graph))
self.wait()
# graph.add_updater(lambda t: t.become(axes.get_graph(
# lambda t: np.sqrt(1 - t ** 2),
# x_range=[
# np.cos(PI / 3) - (rect.get_width() / 2) / axes.x_axis.unit_size,
# np.cos(PI / 3) + (rect.get_width() / 2) / axes.x_axis.unit_size,
# ],
# color=YELLOW
# )))
graph_label = Tex("y=f(x)", color=graph.get_color())
passing = Tex("f(x_0)=y_0", color=graph.get_color())
derivative = Tex("f'(x)", r"=-{F'_x\over F'_y}", color=graph.get_color())
result = Tex(r"=-{x\over y}", color=graph.get_color())
v = VGroup(graph_label, passing, derivative, result) \
.arrange(DOWN, aligned_edge=LEFT).next_to(F, DOWN, buff=.5, aligned_edge=LEFT)
result.align_to(derivative[-1], LEFT)
for i in v:
self.play(Write(i))
self.wait()
self.play(
FadeOut(VGroup(F, v, rect, graph)),
axes.animate.shift(DOWN * 3 + LEFT * 5), run_time=2
)
self.wait()
# ====== proof start =========
assume = TexText("不妨设", "$F'_y(x_0,y_0)>0$", r"~$\Longrightarrow$~", "邻域内", "$F'_y(x,y)>0$", ".") \
.tm({"x_0": RED, "(x,y)": BLUE}).to_edge(UP).shift(LEFT)
IJ_ = DashedRectangle(dash_length=0.15).move_to(point)
self.play(Write(assume[:2]))
self.wait()
self.play(Write(assume[2:4]), ShowCreation(IJ_))
self.wait()
self.play(Write(assume[4:]))
self.wait()
# we have to shrink the I interval
arrow = Arrow(ORIGIN, UP * 2).set_color(GREEN).next_to(IJ_, DOWN).shift(RIGHT * 1.5)
self.play(GrowArrow(arrow))
self.wait()
self.play(FadeOut(arrow))
self.wait()
# increasing w.r.t y
increasing_y = Arrow(ORIGIN, IJ_.get_height() * UP, buff=0).set_color(YELLOW).next_to(IJ_, buff=.5)
increasing_ylabel = TexText("$F(x,y)$关于$y$递增", color=YELLOW).next_to(increasing_y)
self.play(GrowArrow(increasing_y))
self.wait()
self.play(Write(increasing_ylabel))
self.wait()
# setting x to x_0
passing = Line(ORIGIN, IJ_.get_height() * UP, color=YELLOW).move_to(point)
self.play(ShowCreation(passing))
self.wait()
# closed interval [c,d]\in J
dline = Line(ORIGIN, IJ_.get_width() * RIGHT).move_to(IJ_[0])
dlabel = Tex("d").next_to(dline, LEFT)
cline = Line(ORIGIN, IJ_.get_width() * RIGHT).move_to(IJ_[2])
clabel = Tex("c").next_to(cline, LEFT)
self.play(FadeTransform(IJ_[2], cline))
self.play(Write(clabel))
self.wait()
self.play(FadeTransform(IJ_[0], dline))
self.play(Write(dlabel))
self.wait()
# F(x_0, c) and F(x_0, d)
F_point_label = Tex("F(x_0,y_0)=0", color=RED).scale(.7).next_to(point).add_background_rectangle()
cpoint = Dot(passing.get_start(), color=BLUE)
cpoint_label = Tex("F(x_0,c)<0", color=BLUE).next_to(cpoint, DOWN).add_background_rectangle()
cinterval_label = Tex("F(x,c)<0", color=YELLOW).next_to(cpoint, DOWN).add_background_rectangle()
dpoint = Dot(passing.get_end(), color=BLUE)
dpoint_label = Tex("F(x_0,d)>0", color=BLUE).next_to(dpoint, UP).add_background_rectangle()
dinterval_label = Tex("F(x,d)>0", color=YELLOW).next_to(dpoint, UP).add_background_rectangle()
self.play(FadeTransform(point_label, F_point_label))
self.wait()
self.play(GrowFromCenter(cpoint), Write(cpoint_label))
self.wait()
self.play(GrowFromCenter(dpoint), Write(dpoint_label))
self.wait()
# continuous w.r.t x
continuous_x = Arrow(ORIGIN, IJ_.get_width() * RIGHT, buff=0).set_color(YELLOW).next_to(IJ_, buff=2)
continuous_x_label = TexText("$F(x,y)$关于$x$连续", color=YELLOW).next_to(continuous_x, DOWN)
self.play(
FadeOut(increasing_y),
FadeOut(increasing_ylabel),
FadeTransform(F_point_label, point_label)
)
self.play(GrowArrow(continuous_x))
self.wait()
self.play(Write(continuous_x_label))
self.wait()
# from point to open interval
self.play(FadeToColor(VGroup(cpoint, cpoint_label[1]), YELLOW))
copen = VGroup(Tex("("), Tex(")")).set_color(YELLOW).arrange(buff=1).move_to(cpoint)
dopen = VGroup(Tex("("), Tex(")")).set_color(YELLOW).arrange(buff=1.5).move_to(dpoint)
self.wait()
self.play(FadeIn(copen))
self.wait()
self.play(FadeToColor(VGroup(dpoint, dpoint_label[1]), YELLOW))
self.play(FadeIn(dopen))
self.wait()
IJ = DashedRectangle(dash_length=0.15, width=copen.get_width()).set_color(YELLOW).move_to(point)
self.play(FadeIn(IJ))
self.wait()
self.play(FadeOut(VGroup(copen, dopen, passing, cpoint, dpoint)))
self.play(
FadeOut(VGroup(IJ_[1], IJ_[3], cline, dline))
)
self.wait()
self.play(
FadeTransform(cpoint_label, cinterval_label),
FadeTransform(dpoint_label, dinterval_label),
)
self.play(FadeOut(VGroup(point, point_label, continuous_x, continuous_x_label)))
zero = TexText("零点定理", "、严格单调", color=YELLOW).move_to(continuous_x)
self.wait()
self.play(Write(zero[0]))
self.wait()
# fix x
xval = ValueTracker(np.cos(PI / 3))
xmin = np.cos(PI / 3) - (IJ.get_width() / 2) / axes.x_axis.unit_size
xmax = np.cos(PI / 3) + (IJ.get_width() / 2) / axes.x_axis.unit_size
passing.set_color(WHITE)
f_always(passing.set_x, lambda: axes.c2p(xval.get_value(), 0)[0])
self.play(ShowCreation(passing))
self.wait()
dot = point.copy().set_color(YELLOW)
self.play(GrowFromCenter(dot))
self.wait()
self.play(Write(zero[1]))
self.wait()
self.remove(dot)
xval.set_value(xmin + 0.01)
graph_part = axes.get_graph(np.cos)
graph_part.add_updater(lambda m: m.become(
axes.get_graph(
lambda t: np.sqrt(1 - t ** 2),
x_range=[xmin, xval.get_value(), 0.01],
color=YELLOW,
use_smoothing=False
)
))
self.add(graph_part)
self.play(xval.animate.set_value(xmax - 1e-3), run_time=3)
self.play(FadeOut(passing))
self.wait()
# locally exists
local = TexText("$y=f(x)$局部存在,", "且满足$y_0=f(x_0)$.").next_to(assume, DOWN, aligned_edge=LEFT)
self.play(Write(local[0]))
self.wait()
self.play(FadeIn(VGroup(point, point_label)))
self.wait()
self.play(Write(local[1]))
self.wait()
# what we want next
self.play(FadeOut(VGroup(
cinterval_label, dinterval_label, zero,
clabel, dlabel
)))
IJlabel = VGroup(
Tex("I").next_to(IJ, DOWN),
Tex("J").next_to(IJ, LEFT)
).set_color(YELLOW)
self.play(Write(IJlabel))
self.wait()
differentiable = TexText("还需证$f(x)$在$I$上有连续的导函数", ",且", "$f'(x)=-{F'_x\\over F'_y}$.") \
.next_to(local, DOWN, aligned_edge=LEFT)
self.play(Write(differentiable[0]))
self.wait()
self.play(Write(differentiable[1:]))
self.wait()
# only need the second part
self.play(FadeToColor(differentiable[-1], YELLOW))
FxyContinuous = VGroup(
TexText("连续").scale(.7).next_to(differentiable[-1], UP).shift(RIGHT),
TexText("连续,非零").scale(.7).next_to(differentiable[-1], DOWN).shift(RIGHT)
) # .arrange(DOWN, aligned_edge=LEFT).next_to(differentiable)
self.wait()
self.play(Write(FxyContinuous[0]))
self.wait()
self.play(Write(FxyContinuous[1]))
self.wait()
# scene transformation
self.play(FadeOut(FxyContinuous),
FadeOut(VGroup(IJ, IJlabel)))
self.play(axes.animate.shift(LEFT * 3 + DOWN * 5.5).scale(3),
VGroup(assume, local, differentiable).animate.shift(UP * 2),
run_time=2)
self.wait()
# continuous
continuous = TexText("先证", "$f(x)$在$I$连续", ":").next_to(differentiable, DOWN, aligned_edge=LEFT)
self.play(Write(continuous))
self.wait()
# two steps
cont_at_x0 = TexText(r"\textbullet~$f$在$x_0$处连续:", "收缩$x$范围.")
cont_at_x1 = TexText(r"\textbullet~$f$在$x_1\in I$处连续:", "存在$g$在$x_1$处连续", ",而$g=f$.")
VGroup(cont_at_x0, cont_at_x1) \
.arrange(DOWN, aligned_edge=LEFT) \
.next_to(continuous, DOWN, aligned_edge=LEFT).shift(RIGHT)
VGroup(cont_at_x0[0], cont_at_x1[0]).set_color(BLUE)
VGroup(cont_at_x0[1:], cont_at_x1[1:]).set_color(YELLOW)
self.play(Write(cont_at_x0[0]))
self.wait()
self.play(Write(cont_at_x1[0]))
self.wait()
# at x_0
IJ_2 = DashedRectangle(dash_length=0.15, height=1.2).move_to(point)
IJ2 = DashedRectangle(dash_length=0.15, height=1.2, width=1).move_to(point).set_color(RED)
cont_def = Tex("|x-x_0|<\\delta", "~\\Longrightarrow~", "|f(x)-y_0|<\\varepsilon", color=YELLOW) \
.next_to(cont_at_x0[0])
self.play(Write(cont_def))
self.wait()
self.play(ShowCreation(IJ_2))
self.wait()
self.play(RT(IJ_2.copy(), IJ2), FadeOut(point_label))
self.wait()
self.play(cont_def[-1].animate.next_to(IJ_2, buff=.5), run_time=2)
self.wait()
self.play(cont_def[0].animate.next_to(IJ2, DOWN), FadeOut(cont_def[1]), run_time=2)
self.wait()
self.play(Write(cont_at_x0[1:]))
self.wait()
self.play(
FadeOut(VGroup(
IJ_2, IJ2, cont_def[0], cont_def[2]
)),
FadeIn(point_label))
self.wait()
# at x1
point1 = Dot(axes.c2p(np.cos(.9), np.sin(.9)), color=PINK)
point1_label = Tex("(x_1,y_1)", color=point1.get_color()).next_to(point1)
self.play(FadeIn(VGroup(point1, point1_label), scale=0.5))
self.wait()
same_condition = VGroup(TexText("$F(x_1,y_1)=0$"),
TexText("$F'_y(x_1,y_1)>0$")) \
.arrange(DOWN).next_to(point1_label, buff=1)
self.play(Write(same_condition[0]))
self.wait()
self.play(Write(same_condition[1]))
self.wait()
self.play(FadeOut(VGroup(point, point_label)))
self.wait()
graph1 = axes.get_graph(
lambda t: np.sqrt(1-t**2),
x_range=[np.cos(.9)-.1, np.cos(.9)+.1, .01],
use_smoothing=False,
color=point1.get_color()
)
self.play(ShowCreation(graph1))
self.wait()
graph1_label = Tex("y=g(x)", color=point1.get_color()).next_to(graph1, LEFT).shift(DR+UP*.5)
self.play(Write(graph1_label))
self.wait()
self.play(Indicate(cont_at_x0[0]))
self.wait()
self.play(Write(cont_at_x1[1]))
self.wait()
self.play(Write(cont_at_x1[2]))
self.wait()
self.play(FadeOut(VGroup(graph1, graph1_label)))
self.wait()
self.play(FadeOut(VGroup(point1, point1_label, same_condition)))
self.wait()
# derivative
cm = {'k': RED, 'h': RED, 'right': WHITE, 'alpha': WHITE}
point_x = Dot(axes.c2p(np.cos(1.2), np.sin(1.2)), color=WHITE)
x_label = Tex("(x,","f(x)",")", color=point_x.get_color()).next_to(point_x)
point_xh = Dot(axes.c2p(np.cos(0.9), np.sin(0.9)), color=WHITE)
xh_label = Tex("\\left(x+","h",",","f(x+","h",")","\\right)").next_to(point_xh).tm(cm)
xh_label2 = Tex("\\left(x+","h",",","f(x)","+","k","\\right)") \
.next_to(point_xh).tm(cm)
self.play(GrowFromCenter(point_x))
self.play(Write(x_label))
self.wait()
self.play(GrowFromCenter(point_xh))
self.play(Write(xh_label))
self.wait()
self.play(TransformMatchingTex(xh_label, xh_label2, key_map={"f(x+h)": "f(x)+k"}))
self.wait()
xh = Dot().set_x(point_x.get_x()).set_y(point_xh.get_y())
vert = Line(point_x, xh.get_center(), color=RED)
vert_label = Tex("k", color=RED).next_to(vert, LEFT)
horizon = Line(point_xh, xh.get_center(), color=RED)
horizon_label = Tex("h", color=RED).next_to(horizon, DOWN)
self.play(ShowCreation(vert), ShowCreation(horizon))
self.play(Write(vert_label), Write(horizon_label))
self.wait()
# formula begins
derivative_def = Tex(r"\lim_{h\to0}{f(x+h)-f(x) \over h}", color=YELLOW).next_to(xh_label2, buff=.5).shift(UP)
derivative_defk = Tex(r"\lim_{h\to0}",r"{k \over h}", color=YELLOW).move_to(derivative_def)
self.play(Write(derivative_def))
self.wait()
self.play(TransformMatchingTex(derivative_def, derivative_defk))
self.wait()
self.play(derivative_defk.animate.to_corner(DL),
VGroup(differentiable, continuous, cont_at_x0, cont_at_x1).animate.shift(UP*3.3),
run_time=2)
# Frechet
self.wait()
frechet = TexText(r"$F$可微:", ) \
.next_to(cont_at_x1, DOWN).align_to(continuous, LEFT)
frechet1 = Tex(
r"F(x+h,y+k)-F(x,y)=F'_x h+F'_y k+o(\sqrt{h^2+k^2})",
) \
.next_to(frechet, DOWN).set_x(0)
hk20 = Tex("(h\\to0,k\\to0)", color=YELLOW).next_to(frechet1, DOWN, aligned_edge=RIGHT)
frechet2 = Tex(
r"F(x+h,y+k)-F(x,y)=F'_x h+F'_y k+\alpha h+\beta k",
).next_to(hk20, DOWN).set_x(0)
ab20 = Tex("(h\\to0,k\\to0\\text{~时~}\\alpha\\to0, \\beta\\to0)", color=YELLOW)\
.next_to(frechet2, DOWN).align_to(hk20, RIGHT)
for i in [4,8,21,26]+list(range(30,37)):
frechet1[0][i].set_color(RED)
for i in [4,8,21,26, -1, -4]:
frechet2[0][i].set_color(RED)
self.play(Write(frechet))
self.wait()
self.play(Write(frechet1))
self.wait()
# self.add(Debug(frechet1[0]))
self.play(Write(hk20))
self.wait()
self.play(RT(frechet1[0][:28].copy(), frechet2[0][:28]))
self.wait()
self.play(FadeTransform(frechet1[0][28:].copy(), frechet2[0][28:]))
self.wait()
self.play(Write(ab20))
self.wait()
self.play(FadeOut(VGroup(frechet1, hk20)), VGroup(frechet2, ab20).animate.shift(UP*1.6))
# self.add(Debug(frechet2[0], scale=.5))
self.wait()
VGroup(frechet2[0][12:17], point_x, x_label).set_color(BLUE)
self.play(Indicate(VGroup(frechet2[0][12:17], x_label), color=BLUE,),
# FadeToColor(frechet2[0][12:17], BLUE), FadeToColor(VGroup(point_x, x_label), BLUE)
)
# self.play(FadeToColor(frechet2[0][12:17], BLUE), FadeToColor(VGroup(point_x, x_label), BLUE))
self.wait()
VGroup(frechet2[0][1:10], point_xh, xh_label2).set_color(BLUE)
self.play(Indicate(VGroup(frechet2[0][1:10], xh_label2), color=BLUE,),
# FadeToColor(frechet2[0][1:10], BLUE), FadeToColor(VGroup(point_xh, xh_label2), BLUE)
)
# self.play(FadeToColor(frechet2[0][1:10], BLUE), FadeToColor(VGroup(point_xh, xh_label2), BLUE))
self.wait()
zero = VGroup(
Tex(r"0-0","=",r"F'_x ","h",r"+F'_y ",r"k",r"+\alpha ",r"h",r"+\beta",r" k").tm(cm),
Tex(r"0","=",r"F'_x ","h",r"+F'_y ",r"k",r"+\alpha ",r"h",r"+\beta",r" k").tm(cm),
Tex(r"0","=",r"F'_x ",r"+F'_y ",r"{k \over h}",r"+\alpha ",r"+\beta",r" {k \over h}").tm(cm),
)\
.next_to(ab20, DOWN, buff=.5)
for i in range(3):
zero[i].align_to(frechet2, RIGHT)
zero[0][0].set_color(BLUE)
zero[1][0].set_color(BLUE)
self.play(Write(zero[0]))
self.wait()
self.play(TransformMatchingTex(zero[0], zero[1], key_map={'0-0': '0'}))
self.wait()
# mention k
k = VGroup(Tex("k=f(x+h)-f(x):"), TexText("$h\\to0$时$k\\to0$"))\
.arrange(DOWN).set_color(YELLOW).next_to(zero, DOWN)
ab202 = Tex("(h\\to0\\text{~时~}k\\to0, \\alpha\\to0, \\beta\\to0)", color=YELLOW) \
.next_to(zero, DOWN).align_to(hk20, RIGHT)
self.play(Write(k[0]))
self.wait()
self.play(Write(k[1:]))
self.wait()
self.play(FadeTransform(k, ab202))
self.wait()
# derive k/h
self.play(TransformMatchingTex(zero[1], zero[2], key_map={'k': r'{k \over h}'}))
self.wait()
derivative_defk.generate_target()
derivative_defk.target[1].set_color(RED)
derivative_defk.target[0].set_color(WHITE)
limk = VGroup(
derivative_defk.target,
Tex(r"=\lim_{h\to0}\left(-{F'_x+\alpha\over F'_y+\beta}\right)"),
Tex(r"=-{F'_x\over F'_y}"),
).arrange().next_to(ab202, DOWN)
self.play(FadeOut(VGroup(x_label, xh_label2)), MoveToTarget(derivative_defk))
self.wait()
self.play(Write(limk[1]))
self.wait()
self.play(Write(limk[2]))
self.wait()
fp = Tex("f'(x)").next_to(limk[1], LEFT)
self.play(FadeTransform(derivative_defk, fp))
self.wait()
qed = TexText("Q.E.D.", color=YELLOW).to_corner(DR, buff=.5)
self.play(Write(qed))
self.wait()
class Multi(Scene):
def construct(self):
func = TexText(r"$\sin z-xyz=0$","局部确定了隐函数","$z=z(x,y).$").to_edge(UP)
func[0].set_color(YELLOW)
self.add(func)
self.wait(2)
zx = Tex(r"{\partial z\over\partial x}",r"=",r"-{F'_x\over F'_z}").next_to(func, DOWN, buff=1)
res0 = Tex("=",r"-{-yz\over \cos z-xy}").next_to(zx, DOWN).align_to(zx[1], LEFT)
res = Tex("=",r"{yz\over \cos z-xy}").next_to(res0, DOWN, aligned_edge=LEFT)
zx[0].set_color(YELLOW)
self.play(Write(zx))
self.wait()
self.play(Write(res0))
self.wait()
self.play(Write(res))
self.wait()
class pic(Scene):
def construct(self):
axes = Axes(
x_range=[-1.5, 1.5, 1],
y_range=[-1.5, 1.5, 1],
height=8,
axis_config={"include_ticks": False},
width=8,
).to_edge(LEFT, buff=-2)
# circle = Circle(radius=, color=BLUE)
# f_always(circle.move_to, lambda:)
circle = always_redraw(lambda: Circle(
arc_center=axes.get_origin(),
radius=(axes.c2p(1, 0) - axes.c2p(0, 0))[0],
color=BLUE,
stroke_width=15
),
)
point = Dot(color=RED)
f_always(point.move_to, lambda: axes.c2p(np.cos(PI / 3), np.sin(PI / 3)))
rect = DashedRectangle(dash_length=0.15, width=1.5).move_to(point)
graph = axes.get_graph(
lambda t: np.sqrt(1 - t ** 2),
x_range=[
np.cos(PI / 3) - (rect.get_width() / 2) / axes.x_axis.unit_size,
np.cos(PI / 3) + (rect.get_width() / 2) / axes.x_axis.unit_size,
],
color=YELLOW,
stroke_width=15
)
implicit = VGroup(TexText("\\heiti 隐函数").scale(3),
Tex(r"{\d y\over\d x}=-{F'_x\over F'_y}", color=YELLOW).scale(3))\
.arrange(DOWN, buff=.5).to_edge(RIGHT, buff=1)
self.add(axes, circle, rect, graph, implicit)
|
import numpy as np
from scipy.special import logsumexp
from scipy.stats import multivariate_normal
class GaussianMixture:
def __init__(self, n_components, max_iter=100, n_restart=10, tol=1e-3):
self.n_components = n_components
self.max_iter = max_iter
self.n_restart = n_restart
self._loss = -np.inf
self._w = None
self._mu = None
self._sigma = None
self._tol = tol
def fit(self, X, y=None):
for i in range(self.n_restart):
w, mu, sigma, loss = self._fit(X, y)
if loss > self._loss:
self._loss = loss
self._w = w
self._mu = mu
self._sigma = sigma
return self
def predict(self, X):
return self._e_step(X, self._w, self._mu, self._sigma)
def fit_predict(self, X, y=None):
return self.fit(X).predict(X)
def _fit(self, X, y=None):
loss = -np.inf
w, mu, sigma = self._init_params(X)
for i in range(self.max_iter):
loss_prev = loss
probs = self._e_step(X, w, mu, sigma)
w, mu, sigma = self._m_step(X, probs)
loss = self._compute_loss(X, w, mu, sigma)
if np.abs(loss_prev - loss) < self._tol:
break
return w, mu, sigma, loss
def _init_params(self, X):
w = np.ones((self.n_components)) / self.n_components
mu = (X[np.random.randint(X.shape[0], size=self.n_components)]).T
sigma = np.dstack([np.cov(X.T)] * self.n_components)
return w, mu, sigma
def _gaussian_prob(self, x, mu, sigma, log=False):
if log:
return multivariate_normal.logpdf(x, mean=mu, cov=sigma)
else:
return multivariate_normal.pdf(x, mean=mu, cov=sigma)
    def _e_step(self, X, w, mu, sigma):
        """E-step: responsibilities p(z = k | x) for every sample and component."""
probs = np.zeros((X.shape[0], self.n_components))
for k in range(self.n_components):
probs[:, k] = w[k] * self._gaussian_prob(X, mu[:, k], sigma[:, :, k])
probs /= probs.sum(axis=1).reshape(-1, 1)
return probs
    def _m_step(self, X, probs):
        """M-step: re-estimate weights, means and covariances from the responsibilities."""
n_k, n = probs.sum(axis=0), X.shape[0]
w = n_k / n
mu = (X.T @ probs) / n_k
sigma = np.zeros((X.shape[1], X.shape[1], self.n_components))
for k in range(self.n_components):
sigma[:, :, k] = (probs[:, k].reshape(X.shape[0], 1) * (X - mu[:, k])).T @ (X - mu[:, k]) / (probs[:, k].sum())
return w, mu, sigma
    def _compute_loss(self, X, w, mu, sigma):
        """Log-likelihood of the data: sum_i log sum_k w_k * N(x_i | mu_k, sigma_k)."""
        log_probs = np.zeros((X.shape[0], self.n_components))
        for k in range(self.n_components):
            log_probs[:, k] = np.log(w[k]) + self._gaussian_prob(X, mu[:, k], sigma[:, :, k], log=True)
        return logsumexp(log_probs, axis=1).sum()
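# Minimal usage sketch (not part of the original class): fit the mixture to two
# synthetic, well-separated Gaussian blobs and inspect the fitted parameters.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = np.vstack([
        rng.multivariate_normal([0.0, 0.0], np.eye(2), size=200),
        rng.multivariate_normal([5.0, 5.0], np.eye(2), size=200),
    ])
    gmm = GaussianMixture(n_components=2, max_iter=50, n_restart=3)
    responsibilities = gmm.fit_predict(X_demo)  # shape (400, 2); rows sum to 1
    print("log-likelihood:", gmm._loss)
    print("mixing weights:", gmm._w)
    print("component means:\n", gmm._mu.T)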
|
# Copyright (c) 2018, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
from rdkit.Chem import AllChem
import os, numpy, sys
from . import raw
import logging, shutil, pickle
import csv
from io import StringIO
import functools
def SDFNameGetter(buffer):
return buffer.split("\n")[0].strip()
namefxns = {None: None, "molfile": SDFNameGetter}
def nameOptFile(indexdir):
return os.path.join(indexdir, "__opts__")
class MolFileIter:
def __init__(self, raw):
self.raw = raw
self.i = -1
def __next__(self):
self.i += 1
try:
if self.i >= len(self.raw):
raise StopIteration()
return self.raw.get(self.i)
except IndexError:
raise StopIteration()
def next(self): return self.__next__()
def reader(s, dialect=None):
return csv.reader(StringIO(s), dialect=dialect)
def whitespace_reader(s):
return [s.split()]
class MolFileIndex:
"""Index for a molecule file to provide random access to the internal molecules.
"""
def __init__(self, indexDirectory, mode=raw.Mode.READONLY):
"""Fast random access to a smiles file by row index
indexDirectory = directory of the molfile index
Example
-----------
        See MakeSmilesIndex and MakeSDFIndex to make indexed files
"""
optfile = nameOptFile(indexDirectory)
if os.path.exists(optfile):
with open(optfile, 'rb') as f:
options = pickle.load(f)
else:
raise IOError("Not a molfile index")
self.db = raw.RawStore(indexDirectory, mode=mode)
self._nameGetter = None
self.filename = options['filename']
self.hasHeader = options['hasHeader']
self.smilesColumn = options['smilesColumn']
self.nameColumn = options['nameColumn']
self.sep = options['sep']
self.nameFxnName = options['nameFxnName']
try:
            if self.sep is None:
self.sep = ' '
if self.sep == 'excel':
self.reader = reader
elif self.sep == 'excel_tab':
self.reader = functools.partial(reader, dialect=csv.excel_tab)
elif self.sep == 'whitespace':
self.reader = whitespace_reader
else:
# assume a seperator
csv.register_dialect('custom_dialect', delimiter=self.sep, skipinitialspace=True)
self.reader = functools.partial(reader, dialect='custom_dialect')
except:
logging.exception("Can't initialize delimiter: %s", self.sep)
        self.nameFxn = self._nameGetter = namefxns[self.nameFxnName]
if self.hasHeader:
self.N = self.db.N - 3
else:
self.N = self.db.N - 2
# mmap?
self.filename = os.path.join(indexDirectory, self.filename)
self.f = open(self.filename, 'r')
if self.hasHeader:
colnames = self.colnames = self._get(None)
else:
colnames = self.colnames = ["column_%d"%x for x in range(len(self._get(0)))]
# get the first entry
row = self._get(0) # takes header into account internally
if self.smilesColumn != -1:
try:
self.smilesColIdx = int(self.smilesColumn)
except ValueError:
try:
self.smilesColIdx = colnames.index(self.smilesColumn)
except ValueError:
                    raise IndexError("Specified smiles column %r name not in header\n"
                                     "\tHeader is %r\n"
                                     "\tPerhaps the separator is misspecified (currently %r)" % (
self.smilesColumn,
self.colnames,
self.sep)
)
if len(row) <= self.smilesColIdx:
                raise IndexError("Smiles Column %d greater than rowsize %s\n"
                                 "Perhaps the separator is misspecified (currently %r)" % (
self.smilesColIdx,
len(row),
self.sep))
self.nameidx = -1
if self.nameColumn is not None:
try:
self.nameidx = int(self.nameColumn)
except ValueError:
try:
self.nameidx = colnames.index(self.nameColumn)
except ValueError:
                    raise IndexError("Specified name column %r name not in header\n"
                                     "\tHeader is %r\n"
                                     "\tPerhaps the separator is misspecified (currently %r)" % (
self.smilesColumn,
self.colnames,
self.sep)
)
if len(row) <= self.nameidx:
                raise IndexError("Name Column %d greater than rowsize %s\n"
                                 "Perhaps the separator is misspecified (currently %r)" % (
                                     self.nameidx,
len(row),
self.sep))
def __del__(self):
self.close()
def close(self):
self.db.close()
self.f.close()
def __len__(self):
return self.N
def __iter__(self):
return MolFileIter(self)
def _get(self, idx):
if idx is None:
idx = 0
elif self.hasHeader:
idx += 1
start = self.db.get(idx)[0]
end = self.db.get(idx+1)[0]
self.f.seek(start,0)
buf = self.f.read(end-start-1)
try:
if self.smilesColumn != -1:
return list(self.reader(buf))[0]#buf.split(self.sep)
except:
logging.exception("Whups, can't split")
raise
return buf
def header(self):
"""Return header column (throws ValueError if no header column is available)"""
if self.hasHeader:
return self._get(None)
raise ValueError("Datastore doesn't have a header")
def get(self, idx):
"""idx -> gets the data at row idx
return a list if the data is a smiles like file
returns a string buffer otherwise
"""
v = self._get(idx)
if self.smilesColIdx != -1:
moldata = v[self.smilesColIdx]
if self.nameidx != -1:
name = v[self.nameidx]
return moldata, name
return moldata
if self._nameGetter:
return v, self._nameGetter(v)
return v
def getMol(self, idx):
"""Returns input data for the molecule"""
if self.smilesColIdx != -1:
return self._get(idx)[self.smilesColIdx]
return self._get(idx)
def getRDMol(self, idx):
"""Returns the RDKit molecular representation of the input data"""
data = self._get(idx)
if self.smilesColIdx != -1:
            m = AllChem.MolFromSmiles(data[self.smilesColIdx])
            if m:
                if self.nameidx != -1 and len(data) > self.nameidx:
                    m.SetProp("_Name", data[self.nameidx])
return m
return AllChem.MolFromMolBlock(data)
def getName(self, idx):
if self.nameidx == -1:
if self._nameGetter:
return self._nameGetter(self._get(idx))
raise ValueError("SmilesIndex does not have a name column or a name retriever")
return self._get(idx)[self.nameidx]
def simplecount(filename):
lines = 0
with open(filename) as f:
for line in f:
lines += 1
return lines
def index(fname, word):
    """Yield the byte offset of every occurrence of ``word`` in the file ``fname``."""
    fsize = os.path.getsize(fname)
bsize = 2**16
with open(fname, 'rb') as f:
buffer = None
overlap = len(word) - 1
while True:
if (f.tell() >= overlap and f.tell() < fsize):
f.seek(f.tell() - overlap)
buffer = f.read(bsize)
if buffer:
pos = buffer.find(word)
while pos != -1:
yield f.tell() - (len(buffer) - pos)
pos = buffer.find(word, pos+1)
else:
break
def MakeSmilesIndex(filename, dbdir, hasHeader, smilesColumn, nameColumn=-1, sep=None,
reIndex=False):
"""Make smiles index -> index a smiles file for random access
filename: filename to index
dbdir: name of the index store
hasHeader: do we have a header
smilesColumn: column for the smiles string
nameColumn: column for the name string
    sep: separator for the file, e.g. '\t'
    reIndex: re-index the existing file in place;
      otherwise the file is copied into the index store"""
targetFilename = os.path.join(dbdir, os.path.basename(filename))
if reIndex and os.path.abspath(filename) != os.path.abspath(targetFilename):
raise ValueError("If reindex is set, filename must be the storage filename (this is a sanity check\n%s\n%s"%(filename, targetFilename))
sz = os.path.getsize(filename)
N = simplecount(filename)
if sz < 2**8:
dtype = numpy.uint8
elif sz < 2**16:
dtype = numpy.uint16
elif sz < 2**32:
dtype = numpy.uint32
else:
dtype = numpy.uint64
db = raw.MakeStore([("index", dtype)], N+2, dbdir,
checkDirectoryExists=(not reIndex))
cpfile = targetFilename
if not reIndex:
logging.info("Copying molecule file to index...")
shutil.copy(filename, cpfile)
logging.info("Done copying")
else:
logging.info("Reindexing existing smiles file...")
options = {'filename': os.path.basename(filename),
'hasHeader': hasHeader,
'smilesColumn': smilesColumn,
'nameColumn': nameColumn,
'nameFxnName': None,
'sep': sep}
# save the options
optfile = nameOptFile(dbdir)
with open(optfile, 'wb') as f:
pickle.dump(options, f)
# first row
# TODO sniff newline...
logging.info("Indexing...")
db.putRow(0, [0])
for i,pos in enumerate(index(cpfile, b"\n")):
db.putRow(i+1, [pos+1])
db.close()
return MolFileIndex(dbdir)
#, os.path.basename(filename), smilesColumn,
# nameColumn=nameColumn, hasHeader=hasHeader,
# sep=sep)
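# Minimal usage sketch (illustrative only; "mols.smi" and "mols_index" are
# hypothetical paths, not files shipped with this module):
#
#   idx = MakeSmilesIndex("mols.smi", "mols_index", hasHeader=True,
#                         smilesColumn="smiles", nameColumn="name",
#                         sep="whitespace")
#   len(idx)          # number of indexed molecules
#   idx.get(0)        # -> (smiles, name) for row 0
#   idx.getRDMol(0)   # -> RDKit molecule with its _Name property set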
def MakeSDFIndex(filename, dbdir):
    """Make SDF index -> index an SD file for random access"""
    # The builder below is incomplete (no options file is written), so fail fast.
    raise NotImplementedError("MakeSDFIndex is not implemented yet")
sz = os.path.getsize(filename)
N = simplecount(filename)
if sz < 2**8:
dtype = numpy.uint8
elif sz < 2**16:
dtype = numpy.uint16
elif sz < 2**32:
dtype = numpy.uint32
else:
dtype = numpy.uint64
# TODO sniff newline ...
indices = list(index(filename, b"$$$$\n"))
db = raw.MakeStore([("index", dtype)], N+1, dbdir)
# first row
db.putRow(0, [0])
for i, idx in enumerate(indices):
        db.putRow(i+1, [idx+1])
return MolFileIndex(filename, dbdir, nameFxn=SDFNameGetter)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PYTHON_ARGCOMPLETE_OK
from __future__ import print_function, unicode_literals
import argcomplete
import argparse
import os
import sys
import logging
import hashlib
import glob
import tempfile
import zipfile
import shutil
import platform
import subprocess
import appdirs
import serial
import requests
from distutils.version import LooseVersion
from bcf.firmware.FirmwareList import FirmwareList
from bcf import flasher
from bcf.log import log
__version__ = '@@VERSION@@'
SKELETON_URL_ZIP = 'https://codeload.github.com/bigclownlabs/bcf-skeleton/zip/master'
SDK_URL_ZIP = 'https://codeload.github.com/bigclownlabs/bcf-sdk/zip/master'
SDK_GIT = 'https://github.com/bigclownlabs/bcf-sdk.git'
VSCODE_GIT = 'https://github.com/bigclownlabs/bcf-vscode.git'
VSCODE_URL_ZIP = 'https://codeload.github.com/bigclownlabs/bcf-vscode/zip/master'
pyserial_34 = LooseVersion(serial.VERSION) >= LooseVersion("3.4.0")
user_cache_dir = appdirs.user_cache_dir('bcf')
user_config_dir = appdirs.user_config_dir('bcf')
def print_table(labels, rows):
if not labels and not rows:
return
max_lengths = [0] * (len(rows[0]) if rows else len(labels))
for i, label in enumerate(labels):
max_lengths[i] = len(label)
for row in rows:
for i, v in enumerate(row):
if len(v) > max_lengths[i]:
max_lengths[i] = len(v)
row_format = "{:<" + "} {:<".join(map(str, max_lengths)) + "}"
if labels:
print(row_format.format(*labels))
print("=" * (sum(max_lengths) + len(labels) * 2))
for row in rows:
print(row_format.format(*row))
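# Example (illustrative):
#   print_table(["port", "desc"], [["/dev/ttyUSB0", "FT230X"], ["/dev/ttyUSB1", "CP2102"]])
# prints two left-aligned columns with an "=" rule under the header row.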
def print_progress_bar(title, progress, total, length=20):
filled_length = int(length * progress // total)
if filled_length < 0:
filled_length = 0
bar = '#' * filled_length
bar += '-' * (length - filled_length)
percent = 100 * (progress / float(total))
if percent > 100:
percent = 100
elif percent < 0:
percent = 0
sys.stdout.write('\r\r')
sys.stdout.write(title + ' [' + bar + '] ' + "{:5.1f}%".format(percent))
sys.stdout.flush()
if percent == 100:
sys.stdout.write('\n')
sys.stdout.flush()
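# Example (illustrative): print_progress_bar('Download', 512, 1024) redraws the
# current line as "Download [##########----------]  50.0%".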
def try_run(fce, *args, **kwargs):
try:
fce(*args, **kwargs)
except KeyboardInterrupt as e:
sys.exit(1)
except Exception as e:
print()
print(e)
if os.getenv('DEBUG', False):
raise e
sys.exit(1)
def download_url_reporthook(count, blockSize, totalSize):
print_progress_bar('Download', count * blockSize, totalSize)
def download_url(url, use_cache=True):
if url.startswith("https://github.com/bigclownlabs/bcf-"):
filename = url.rsplit('/', 1)[1]
else:
filename = hashlib.sha256(url.encode()).hexdigest()
filename_bin = os.path.join(user_cache_dir, filename)
if use_cache and os.path.exists(filename_bin):
return filename_bin
print('download firmware from', url)
print('save as', filename_bin)
try:
response = requests.get(url, stream=True, allow_redirects=True)
total_length = response.headers.get('content-length')
with open(filename_bin, "wb") as f:
if total_length is None: # no content length header
f.write(response.content)
else:
dl = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
download_url_reporthook(1, dl, total_length)
except Exception as e:
print("Firmware download problem:", e.args[0])
sys.exit(1)
return filename_bin
class FirmwareChoicesCompleter(object):
def __init__(self, find_bin):
self._find_bin = find_bin
def __call__(self, **kwargs):
fwlist = FirmwareList(user_cache_dir)
firmwares = fwlist.get_firmware_list()
if self._find_bin:
firmwares += glob.glob('*.bin')
return firmwares
def command_devices(verbose=False, include_links=False):
if os.name == 'nt' or sys.platform == 'win32':
from serial.tools.list_ports_windows import comports
elif os.name == 'posix':
from serial.tools.list_ports_posix import comports
if pyserial_34:
ports = comports(include_links=include_links)
else:
ports = comports()
    ports = sorted(ports)
for port, desc, hwid in ports:
sys.stdout.write("{:20}\n".format(port))
if verbose:
sys.stdout.write(" desc: {}\n".format(desc))
sys.stdout.write(" hwid: {}\n".format(hwid))
def command_flash(args, fwlist):
if args.what.startswith('http'):
filename_bin = download_url(args.what)
elif os.path.exists(args.what) and os.path.isfile(args.what):
filename_bin = args.what
else:
firmware = fwlist.get_firmware(args.what)
if not firmware:
print('Firmware not found, try updating first')
sys.exit(1)
filename_bin = download_url(firmware['url'])
try:
device = 'dfu' if args.dfu else args.device
flasher.flash(filename_bin, device, reporthook=print_progress_bar, run=not args.log, erase_eeprom=args.erase_eeprom)
if args.log:
log.run_args(args, reset=True)
except KeyboardInterrupt as e:
print("")
sys.exit(1)
except Exception as e:
print(e)
if isinstance(e, flasher.serialport.error.ErrorLockDevice):
print("TIP: Maybe the bcg service is running - you need to stop it first.")
if os.path.exists("/etc/init.d/bcg-ud"):
print("Try this command:")
print("/etc/init.d/bcg-ud stop")
else:
try:
process = subprocess.Popen(['pm2', '-m', 'list'], stdout=subprocess.PIPE)
out, err = process.communicate()
for line in out.splitlines():
if line.startswith(b"+---"):
name = line[5:].decode()
if 'bcg' in name and name != 'bcg-cm':
print("Try this command:")
print("pm2 stop %s" % name)
except Exception as e:
pass
if os.getenv('DEBUG', False):
raise e
sys.exit(1)
def command_reset(args):
try:
if args.log:
log.run_args(args, reset=True)
else:
flasher.reset(args.device)
except KeyboardInterrupt as e:
sys.exit(1)
except Exception as e:
print(e)
if os.getenv('DEBUG', False):
raise e
sys.exit(1)
def command_eeprom(args):
device = 'dfu' if args.dfu else args.device
if args.erase:
try_run(flasher.eeprom_erase, device, reporthook=print_progress_bar)
def test_log_arguments(args, parser):
    if not args.log and (args.time or args.no_color or args.raw or args.record):
        parser.error('--log is required when using --time, --no-color, --raw or --record.')
def main():
parser = argparse.ArgumentParser(description='BigClown Firmware Tool')
subparsers = {}
subparser = parser.add_subparsers(dest='command', metavar='COMMAND')
subparsers['update'] = subparser.add_parser('update', help="update list of available firmware")
subparsers['list'] = subparser.add_parser('list', help="list firmware")
subparsers['list'].add_argument('--all', help='show all releases', action='store_true')
subparsers['list'].add_argument('--description', help='show description', action='store_true')
subparsers['list'].add_argument('--show-pre-release', help='show pre-release version', action='store_true')
subparsers['flash'] = subparser.add_parser('flash', help="flash firmware",
usage='%(prog)s\n %(prog)s <firmware>\n %(prog)s <file>\n %(prog)s <url>')
subparsers['flash'].add_argument('what', help=argparse.SUPPRESS, nargs='?',
default="firmware.bin").completer = FirmwareChoicesCompleter(True)
subparsers['flash'].add_argument('--device', help='device', required='--dfu' not in sys.argv)
group = subparsers['flash'].add_mutually_exclusive_group()
group.add_argument('--dfu', help='use dfu mode', action='store_true')
group.add_argument('--log', help='run log', action='store_true')
group_log = subparsers['flash'].add_argument_group('optional for --log arguments')
log.add_arguments(group_log)
subparsers['flash'].add_argument('--erase-eeprom', help='erase eeprom', action='store_true')
subparsers['devices'] = subparser.add_parser('devices', help="show devices")
subparsers['devices'].add_argument('-v', '--verbose', action='store_true', help='show more messages')
subparsers['devices'].add_argument('-s', '--include-links', action='store_true', help='include entries that are symlinks to real devices' if pyserial_34 else argparse.SUPPRESS)
subparsers['search'] = subparser.add_parser('search', help="search in firmware names and descriptions")
subparsers['search'].add_argument('pattern', help='search pattern')
subparsers['search'].add_argument('--all', help='show all releases', action='store_true')
subparsers['search'].add_argument('--description', help='show description', action='store_true')
subparsers['search'].add_argument('--show-pre-release', help='show pre-release version', action='store_true')
subparsers['pull'] = subparser.add_parser('pull', help="pull firmware to cache",
usage='%(prog)s <firmware>\n %(prog)s <url>')
subparsers['pull'].add_argument('what', help=argparse.SUPPRESS).completer = FirmwareChoicesCompleter(False)
subparsers['clean'] = subparser.add_parser('clean', help="clean cache")
subparsers['create'] = subparser.add_parser('create', help="create new firmware")
subparsers['create'].add_argument('name', help=argparse.SUPPRESS)
subparsers['create'].add_argument('--no-git', help='disable git', action='store_true')
subparsers['read'] = subparser.add_parser('read', help="download firmware to file")
subparsers['read'].add_argument('filename', help=argparse.SUPPRESS)
subparsers['read'].add_argument('--device', help='device', required=True)
subparsers['read'].add_argument('--length', help='length', default=196608, type=int)
subparsers['log'] = subparser.add_parser('log', help="show log")
subparsers['log'].add_argument('--device', help='device', required=True)
log.add_arguments(subparsers['log'])
subparsers['reset'] = subparser.add_parser('reset', help="reset core module, not work for r1.3")
subparsers['reset'].add_argument('--device', help='device', required=True)
subparsers['reset'].add_argument('--log', help='run log', action='store_true')
group_log = subparsers['reset'].add_argument_group('optional for --log arguments')
log.add_arguments(group_log)
subparsers['eeprom'] = subparser.add_parser('eeprom', help="eeprom")
subparsers['eeprom'].add_argument('--device', help='device', required='--dfu' not in sys.argv)
subparsers['eeprom'].add_argument('--dfu', help='use dfu mode', action='store_true')
group = subparsers['eeprom'].add_mutually_exclusive_group()
group.add_argument('--erase', help='erase', action='store_true')
subparser_help = subparser.add_parser('help', help="show help")
subparser_help.add_argument('what', help=argparse.SUPPRESS, nargs='?', choices=subparsers.keys())
subparser_help.add_argument('--all', help='show help for all commands', action='store_true')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
argcomplete.autocomplete(parser)
args = parser.parse_args()
if not args.command:
parser.print_help()
sys.exit()
if args.command == 'help':
if args.what:
subparsers[args.what].print_help()
else:
parser.print_help()
print(" --all show help for all commands")
if args.all:
print("=" * 60 + os.linesep)
                for name in subparser.choices:
                    if name in subparsers:
                        subparsers[name].print_help()
print(os.linesep)
sys.exit()
fwlist = FirmwareList(user_cache_dir)
if args.command == 'list' or args.command == 'search':
# labels = ['Name:Bin:Version']
# if args.description:
# labels.append('description')
rows = fwlist.get_firmware_table(search=args.pattern if args.command == 'search' else None,
all=args.all,
description=args.description,
show_pre_release=args.show_pre_release)
if rows:
print_table([], rows)
elif args.command == 'list':
print('Nothing found, try updating first')
else:
print('Nothing found')
elif args.command == 'flash':
        test_log_arguments(args, subparsers['flash'])
command_flash(args, fwlist)
elif args.command == 'update':
fwlist.update()
elif args.command == 'devices':
command_devices(verbose=args.verbose, include_links=args.include_links)
elif args.command == 'pull':
if args.what == 'last':
for name in fwlist.get_firmware_list():
firmware = fwlist.get_firmware(name)
print('pull', name)
download_url(firmware['url'], True)
print()
elif args.what.startswith('http'):
download_url(args.what, True)
else:
firmware = fwlist.get_firmware(args.what)
if not firmware:
print('Firmware not found, try updating first, command: bcf update')
sys.exit(1)
download_url(firmware['url'], True)
elif args.command == 'clean':
fwlist.clear()
for filename in os.listdir(user_cache_dir):
os.unlink(os.path.join(user_cache_dir, filename))
elif args.command == 'create':
name = args.name
if os.path.exists(name):
print('Directory already exists')
sys.exit(1)
skeleton_zip_filename = download_url(SKELETON_URL_ZIP)
print()
tmp_dir = tempfile.mkdtemp()
zip_ref = zipfile.ZipFile(skeleton_zip_filename, 'r')
zip_ref.extractall(tmp_dir)
zip_ref.close()
skeleton_path = os.path.join(tmp_dir, os.listdir(tmp_dir)[0])
shutil.move(skeleton_path, name)
os.rmdir(os.path.join(name, 'sdk'))
os.rmdir(os.path.join(name, '.vscode'))
os.unlink(os.path.join(name, '.gitmodules'))
os.chdir(name)
if args.no_git:
sdk_zip_filename = download_url(SDK_URL_ZIP)
zip_ref = zipfile.ZipFile(sdk_zip_filename, 'r')
zip_ref.extractall(tmp_dir)
zip_ref.close()
sdk_path = os.path.join(tmp_dir, os.listdir(tmp_dir)[0])
shutil.move(sdk_path, 'sdk')
sdk_zip_filename = download_url(VSCODE_URL_ZIP)
zip_ref = zipfile.ZipFile(sdk_zip_filename, 'r')
zip_ref.extractall(tmp_dir)
zip_ref.close()
sdk_path = os.path.join(tmp_dir, os.listdir(tmp_dir)[0])
shutil.move(sdk_path, '.vscode')
else:
os.system('git init')
os.system('git submodule add --depth 1 "' + SDK_GIT + '" sdk')
os.system('git submodule add --depth 1 "' + VSCODE_GIT + '" .vscode')
os.rmdir(tmp_dir)
elif args.command == 'read':
flasher.uart.clone(args.device, args.filename, args.length, reporthook=print_progress_bar)
elif args.command == 'log':
log.run_args(args)
elif args.command == 'reset':
        test_log_arguments(args, subparsers['reset'])
command_reset(args)
elif args.command == 'eeprom':
command_eeprom(args)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
"""
git style args
----------------
Using sub-commands (subparser)
https://docs.python.org/2.7/library/argparse.html#sub-commands
add_argument
name : name, -n, --name
type : type to which to convert this
int, file,
(most builtins) - add a path type
required
help :
metavar : name for argument in usage message
dest : name of attribute to be added to return object
@todo: Create subparsers for each command (list|open|create|delete)
@todo: Combine with positional argument parsers
Advanced:
@todo: setdir - for setting the user's project directory
"""
import sublp
import argparse
parser = argparse.ArgumentParser(prog="sublp")
# parser.add_argument(
# "--projdir", type=str, dest="project_directory"
# )
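# Illustrative sketch only (not part of this module): the add_argument options
# listed in the docstring above, combined in one hypothetical call. The flag
# name, metavar and dest below are assumptions for the example.
#
#   parser.add_argument(
#       "--projdir",                      # name: positional name or --optional flag
#       type=str,                         # type: conversion applied to the value
#       required=False,                   # required: only meaningful for optionals
#       metavar="DIR",                    # metavar: name shown in the usage message
#       dest="project_directory",         # dest: attribute set on the parsed namespace
#       help="User's project directory")  # help: text shown by --help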
subparsers = parser.add_subparsers(help="List of commands: ", dest="command")
_list = subparsers.add_parser('list', help="List projects in user's project directory.")
_create = subparsers.add_parser('create',
help="Create a new project file in user's project directory."
)
_create.add_argument('name', type=str, help="Name of .sublime-project")
_create.add_argument('path', type=str, help="Path to directory of project files.")
_open = subparsers.add_parser('open', help="Open an existing project.")
_open.add_argument('name', type=str, help="Name of .sublime-project")
_delete = subparsers.add_parser('delete')
def create(name, path):
print("I see name: {0} and path: {1}".format(name, path))
def _callit(namespace, function):
return function(**vars(namespace))
def main(argv):
pass
if __name__ == "__main__":
arguments = parser.parse_args()
print()
print("arguments:", type(arguments), arguments)
print()
import pdb
pdb.set_trace()
print()
|
import unittest
from parsers import ConsurfParser
from utils.exceptions import InvalidFormat
class ConsurfParserTestCase(unittest.TestCase):
def test_1(self):
dummy_prediction = """ Amino Acid Conservation Scores
===============================
- POS: The position of the AA in the SEQRES derived sequence.
- SEQ: The SEQRES derived sequence in one letter code.
- SCORE: The normalized conservation scores.
- COLOR: The color scale representing the conservation scores (9 - conserved, 1 - variable).
- CONFIDENCE INTERVAL: When using the bayesian method for calculating rates, a confidence interval is assigned to each of the inferred evolutionary conservation scores.
- CONFIDENCE INTERVAL COLORS: When using the bayesian method for calculating rates. The color scale representing the lower and upper bounds of the confidence interval.
- B/E: Burried (b) or Exposed (e) residue.
- FUNCTION: functional (f) or structural (s) residue (f - highly conserved and exposed, s - highly conserved and burried).
- MSA DATA: The number of aligned sequences having an amino acid (non-gapped) from the overall number of sequences at each position.
- RESIDUE VARIETY: The residues variety at each position of the multiple sequence alignment.
POS SEQ SCORE COLOR CONFIDENCE INTERVAL CONFIDENCE INTERVAL COLORS B/E FUNCTION MSA DATA RESIDUE VARIETY
(normalized)
1 M -0.743 7* -1.183,-0.492 9,6 e 5/150 V,M
2 S -0.971 8* -1.398,-0.769 9,7 e f 4/150 S
3 L 0.790 3* -0.115, 1.537 5,1 e 6/150 L,K,V,I
4 E 0.170 4 -0.492, 0.493 6,4 e 10/150 V,A,E,Q,K
5 A 0.689 3 -0.115, 1.051 5,2 e 12/150 G,D,S,E,N,A,T
6 T 2.476 1 1.537, 2.816 1,1 b 18/150 V,I,A,S,K,H,T,Y,L,Q,E
7 V -0.163 5 -0.568, 0.143 7,5 b 29/150 M,L,F,V,I
8 L 0.532 3 0.006, 0.733 5,3 b 38/150 I,V,F,Y,T,M,N,Q,L,D
9 D 0.732 3 0.143, 1.051 5,2 e 38/150 R,A,E,Q,N,D,S
10 L 2.200 1 1.051, 2.816 2,1 b 42/150 N,M,L,Y,W,T,H,F,A,V,I
11 L -0.341 6 -0.639,-0.115 7,5 b 52/150 V,I,T,A,L,S,F
12 S 0.266 4 -0.115, 0.493 5,4 e 62/150 P,D,Q,N,E,V,A,R,S,K,G,F
13 S 0.936 2 0.493, 1.051 4,2 e 71/150 T,E,N,M,L,D,A,V,I,G,F,K,S
14 F 0.092 5 -0.223, 0.302 6,4 b 73/150 L,F,V,I,W
15 P 0.003 5 -0.321, 0.143 6,5 e 76/150 S,K,G,F,H,V,A,N,Q,P,T,W
16 H 2.481 1 1.537, 2.816 1,1 e 83/150 S,H,G,K,I,V,R,A,N,E,Q,D,L,T,P
17 W -0.322 6 -0.568,-0.115 7,5 e 99/150 Y,W,Q,E,R,C,V,F,G,S
18 L -0.101 5 -0.321, 0.006 6,5 b 114/150 V,I,A,W,M,L,F
19 A 0.961 2 0.493, 1.051 4,2 b 123/150 Q,E,N,M,L,D,Y,T,H,F,K,S,A,I,V
20 T -0.950 8 -1.090,-0.885 8,8 b 130/150 A,V,I,T,M
21 M -0.237 6 -0.492,-0.115 6,5 b 135/150 L,M,F,I,V,T,A
22 V 0.340 4 0.006, 0.493 5,4 b 136/150 S,G,F,V,I,A,M,L,T
23 I -0.281 6 -0.492,-0.115 6,5 b 137/150 M,L,W,T,G,F,A,C,I,V
24 G -1.012 8 -1.137,-0.940 8,8 b 138/150 A,T,G,S
25 A -0.826 8 -0.940,-0.769 8,7 b 138/150 S,G,F,V,I,C,A,L,M,T
26 M 0.282 4 0.006, 0.493 5,4 b 146/150 T,M,L,A,V,I,G,F,S
27 P -1.464 9 -1.518,-1.442 9,9 b s 146/150 P
28 I -0.424 6 -0.639,-0.321 7,6 b 147/150 V,I,T,W,A,L,F
29 F 0.192 4 -0.115, 0.302 5,4 b 147/150 L,T,G,F,S,A,V,I
30 E -1.483 9 -1.518,-1.489 9,9 e f 148/150 E
31 L -1.184 9 -1.270,-1.137 9,8 b s 148/150 S,L,Q,A,V,I
32 R -1.459 9 -1.518,-1.442 9,9 e f 148/150 R,K
33 G -0.645 7 -0.829,-0.568 8,7 e 148/150 I,V,Y,A,L,G,F
34 A -0.914 8 -1.042,-0.829 8,8 b 148/150 I,V,T,A,S,M,G
35 I -1.251 9 -1.356,-1.227 9,9 b s 148/150 L,I,V
36 P -1.190 9 -1.313,-1.137 9,8 b s 148/150 S,L,I,V,P
37 I 0.378 4 0.006, 0.493 5,4 b 148/150 F,I,V,A,M,L,Y,W
*Below the confidence cut-off - The calculations for this site were performed on less than 6 non-gaped homologue sequences,
or the confidence interval for the estimated score is equal to- or larger than- 4 color grades.
"""
expected = [7, 8, 3, 4, 3, 1, 5, 3, 3, 1, 6, 4, 2, 5, 5, 1, 6, 5, 2,
8, 6, 4, 6, 8, 8, 4, 9, 6, 4, 9, 9, 9, 7, 8, 9, 9, 4]
output = ConsurfParser(dummy_prediction)
self.assertEqual(37, len(output))
self.assertListEqual(expected, output)
def test_2(self):
dummy_prediction = """ Amino Acid Conservation Scores
===============================
- POS: The position of the AA in the SEQRES derived sequence.
- SEQ: The SEQRES derived sequence in one letter code.
- SCORE: The normalized conservation scores.
- COLOR: The color scale representing the conservation scores (9 - conserved, 1 - variable).
- CONFIDENCE INTERVAL: When using the bayesian method for calculating rates, a confidence interval is assigned to each of the inferred evolutionary conservation scores.
- CONFIDENCE INTERVAL COLORS: When using the bayesian method for calculating rates. The color scale representing the lower and upper bounds of the confidence interval.
- B/E: Burried (b) or Exposed (e) residue.
- FUNCTION: functional (f) or structural (s) residue (f - highly conserved and exposed, s - highly conserved and burried).
- MSA DATA: The number of aligned sequences having an amino acid (non-gapped) from the overall number of sequences at each position.
- RESIDUE VARIETY: The residues variety at each position of the multiple sequence alignment.
"""
with self.assertRaises(InvalidFormat):
output = ConsurfParser(dummy_prediction)
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Rahul Handay <rahulha@saltstack.com>
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
mock_open,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.nfs3 as nfs3
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NfsTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.nfs3
'''
def setup_loader_modules(self):
return {nfs3: {}}
def test_list_exports(self):
'''
Test for List configured exports
'''
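        # The mocked /etc/exports content 'A B1(23' is parsed as path 'A',
        # host 'B1' and option list ['23'] (matching the assertion below).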
with patch('salt.utils.files.fopen', mock_open(read_data='A B1(23')):
exports = nfs3.list_exports()
assert exports == {'A': [{'hosts': 'B1', 'options': ['23']}]}, exports
def test_del_export(self):
'''
Test for Remove an export
'''
list_exports_mock = MagicMock(return_value={
'A': [
{'hosts': ['B1'],
'options': ['23']},
],
})
with patch.object(nfs3, 'list_exports', list_exports_mock), \
patch.object(nfs3, '_write_exports', MagicMock(return_value=None)):
result = nfs3.del_export(path='A')
assert result == {}, result
|
#!/usr/bin/python3
import requests
import json
from bs4 import BeautifulSoup
token = 'Your token'
owner_id = 415577518
v = 5.63
def write_json(data, filename):
with open(filename, 'w') as file:
json.dump(data, file, indent=2, ensure_ascii=False)
def download_file(url):
r = requests.get(url, stream=True)
filename = url.split('/')[-1]
with open(filename, 'bw') as file:
for chunk in r.iter_content(1024000):
file.write(chunk)
def parse_playlist():
return requests.get('https://api.vk.com/method/video.getAlbums?', params={'owner_id': owner_id, 'need_system': True,'count': 100, 'access_token': token, 'v': v})
def parse_videos(album_id):
return requests.get('https://api.vk.com/method/video.get?', params={'owner_id': owner_id, 'album_id': album_id, 'count': 1, 'access_token': token, 'v': v})
def get_url(url):
html = requests.get(url).text
soup = BeautifulSoup(html, 'lxml')
video_url = soup.find('div', id='page_wrap').find('source').get('src').split('?')[0]
download_file(video_url)
def main():
playlist = parse_playlist()
write_json(playlist.json()['response'], 'video_playlists.json')
videos = parse_videos(-2).json()['response']['items']
write_json(videos, 'videos.json')
for video in videos:
if 'vk.com' in video['player']:
url = video['player']
get_url(url)
if __name__ == '__main__':
main()
|
from dojson.contrib.marc21.utils import create_record, split_stream
from scoap3.hep.model import hep
from invenio_records import Record
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from scoap3.modules.pidstore.minters import scoap3_recid_minter
recs = [hep.do(create_record(data)) for data in split_stream(open('../data/scoap3export.xml', 'r'))]
for i, obj in enumerate(recs, start=1):
print("Creating record {}/{}".format(i, len(recs)))
    record = Record.create(obj, id_=None)
    print(record)
# Create persistent identifier.
pid = scoap3_recid_minter(str(record.id), record)
print(pid.object_uuid)
# Commit any changes to record
record.commit()
# Commit to DB before indexing
db.session.commit()
# Index record
indexer = RecordIndexer()
indexer.index_by_id(pid.object_uuid)
|
# (request_status , db_acronym)
DEFAULT_STATUS_VALUES = {'waiting':'WAIT','accept':'ACCP','decline':'DECL'}
DEFAULT_CHANGE_APPOINTMENT_REQ_SIZE = 2
KEY_REQ_USERNAME = 'username'
KEY_REQ_PASSWORD = 'password'
KEY_REQ_FIRSTNAME = 'firstName'
KEY_REQ_LASTNAME = 'lastName'
KEY_REQ_PHONE = 'phone'
KEY_REQ_EMAIL = 'email'
KEY_APP_DATE = 'date'
SALT_LEN = 10
KEY_FOR_JWT_RETURN_ID = 'id'
KEY_REQ_WORKHOURS_DICT_NAME = 'workHours'
KEY_FOR_HAIRDR_ID = 'hairdr_id'
KEY_FOR_CLIENT_ID = 'client_id'
KEY_FOR_STARTHOUR = 'startHour'
KEY_FOR_ENDHOUR = 'endHour'
KEY_APP_STATUS_CHANGE = 'status'
#Duration of the haircut (minutes)
DEFAULT_HAIRCUT_DURATION=30
|
#!/usr/bin/env python2
__author__ = 'pgmillon'
import sys
import json
def main():
if len(sys.argv) == 2:
infile = sys.stdin
outfile = sys.stdout
path = sys.argv[1]
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
path = sys.argv[2]
else:
raise SystemExit(sys.argv[0] + " [infile] 'property'")
with infile:
try:
obj = json.load(infile)
except ValueError, e:
raise SystemExit(e)
with outfile:
outfile.write(eval(path, {}, {"this": obj}) + '\n')
if __name__ == '__main__':
main()
|
from logging import getLogger
import chainer
from chainer import functions as F
from chainer.initializers import LeCunNormal
from chainer import links as L
from chainerrl import distribution
from chainerrl.functions.bound_by_tanh import bound_by_tanh
from chainerrl.links.mlp import MLP
from chainerrl.links.mlp_bn import MLPBN
from chainerrl.policy import Policy
from chainerrl.recurrent import RecurrentChainMixin
logger = getLogger(__name__)
class ContinuousDeterministicPolicy(
chainer.Chain, Policy, RecurrentChainMixin):
"""Continuous deterministic policy.
Args:
model (chainer.Link):
Link that is callable and outputs action values.
model_call (callable or None):
Callable used instead of model.__call__ if not None
action_filter (callable or None):
Callable applied to the outputs of the model if not None
"""
def __init__(self, model, model_call=None, action_filter=None):
super().__init__(model=model)
self.model_call = model_call
self.action_filter = action_filter
def __call__(self, x):
# Model
if self.model_call is not None:
h = self.model_call(self.model, x)
else:
h = self.model(x)
# Action filter
if self.action_filter is not None:
h = self.action_filter(h)
# Wrap by Distribution
return distribution.ContinuousDeterministicDistribution(h)
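# Illustrative sketch only (not part of chainerrl): wrapping a small MLP as a
# deterministic policy. The observation/action sizes, bounds and batch below
# are assumptions for the example.
#
#   policy = ContinuousDeterministicPolicy(
#       model=MLP(4, 2, (32, 32)),
#       action_filter=lambda a: bound_by_tanh(a, min_action, max_action))
#   dist = policy(obs_batch)   # a ContinuousDeterministicDistribution
#   action = dist.sample()     # deterministic: returns the (filtered) model output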
class FCDeterministicPolicy(ContinuousDeterministicPolicy):
"""Fully-connected deterministic policy.
Args:
n_input_channels (int): Number of input channels.
n_hidden_layers (int): Number of hidden layers.
n_hidden_channels (int): Number of hidden channels.
action_size (int): Size of actions.
        min_action (ndarray or None): Minimum action. Used only if bound_action
            is set to True.
        max_action (ndarray or None): Maximum action. Used only if bound_action
            is set to True.
bound_action (bool): If set to True, actions are bounded to
[min_action, max_action] by tanh.
nonlinearity (callable): Nonlinearity between layers. It must accept a
Variable as an argument and return a Variable with the same shape.
Nonlinearities with learnable parameters such as PReLU are not
supported. It is not used if n_hidden_layers is zero.
last_wscale (float): Scale of weight initialization of the last layer.
"""
def __init__(self, n_input_channels, n_hidden_layers,
n_hidden_channels, action_size,
min_action=None, max_action=None, bound_action=True,
nonlinearity=F.relu,
last_wscale=1.):
self.n_input_channels = n_input_channels
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.action_size = action_size
self.min_action = min_action
self.max_action = max_action
self.bound_action = bound_action
if self.bound_action:
def action_filter(x):
return bound_by_tanh(
x, self.min_action, self.max_action)
else:
action_filter = None
super().__init__(
model=MLP(n_input_channels,
action_size,
(n_hidden_channels,) * n_hidden_layers,
nonlinearity=nonlinearity,
last_wscale=last_wscale,
),
action_filter=action_filter)
class FCBNDeterministicPolicy(ContinuousDeterministicPolicy):
"""Fully-connected deterministic policy with Batch Normalization.
Args:
n_input_channels (int): Number of input channels.
n_hidden_layers (int): Number of hidden layers.
n_hidden_channels (int): Number of hidden channels.
action_size (int): Size of actions.
        min_action (ndarray or None): Minimum action. Used only if bound_action
            is set to True.
        max_action (ndarray or None): Maximum action. Used only if bound_action
            is set to True.
bound_action (bool): If set to True, actions are bounded to
[min_action, max_action] by tanh.
normalize_input (bool): If set to True, Batch Normalization is applied
to inputs as well as hidden activations.
nonlinearity (callable): Nonlinearity between layers. It must accept a
Variable as an argument and return a Variable with the same shape.
Nonlinearities with learnable parameters such as PReLU are not
supported. It is not used if n_hidden_layers is zero.
last_wscale (float): Scale of weight initialization of the last layer.
"""
def __init__(self, n_input_channels, n_hidden_layers,
n_hidden_channels, action_size,
min_action=None, max_action=None, bound_action=True,
normalize_input=True,
nonlinearity=F.relu,
last_wscale=1.):
self.n_input_channels = n_input_channels
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.action_size = action_size
self.min_action = min_action
self.max_action = max_action
self.bound_action = bound_action
self.normalize_input = normalize_input
if self.bound_action:
def action_filter(x):
return bound_by_tanh(
x, self.min_action, self.max_action)
else:
action_filter = None
super().__init__(
model=MLPBN(n_input_channels,
action_size,
(n_hidden_channels,) * n_hidden_layers,
normalize_input=self.normalize_input,
nonlinearity=nonlinearity,
last_wscale=last_wscale,
),
action_filter=action_filter)
class FCLSTMDeterministicPolicy(ContinuousDeterministicPolicy):
"""Fully-connected deterministic policy with LSTM.
Args:
n_input_channels (int): Number of input channels.
n_hidden_layers (int): Number of hidden layers.
n_hidden_channels (int): Number of hidden channels.
action_size (int): Size of actions.
        min_action (ndarray or None): Minimum action. Used only if bound_action
            is set to True.
        max_action (ndarray or None): Maximum action. Used only if bound_action
            is set to True.
bound_action (bool): If set to True, actions are bounded to
[min_action, max_action] by tanh.
nonlinearity (callable): Nonlinearity between layers. It must accept a
Variable as an argument and return a Variable with the same shape.
Nonlinearities with learnable parameters such as PReLU are not
supported.
last_wscale (float): Scale of weight initialization of the last layer.
"""
def __init__(self, n_input_channels, n_hidden_layers,
n_hidden_channels, action_size,
min_action=None, max_action=None, bound_action=True,
nonlinearity=F.relu,
last_wscale=1.):
self.n_input_channels = n_input_channels
self.n_hidden_layers = n_hidden_layers
self.n_hidden_channels = n_hidden_channels
self.action_size = action_size
self.min_action = min_action
self.max_action = max_action
self.bound_action = bound_action
if self.bound_action:
def action_filter(x):
return bound_by_tanh(
x, self.min_action, self.max_action)
else:
action_filter = None
model = chainer.Chain(
fc=MLP(self.n_input_channels,
n_hidden_channels,
(self.n_hidden_channels,) * self.n_hidden_layers,
nonlinearity=nonlinearity,
),
lstm=L.LSTM(n_hidden_channels, n_hidden_channels),
out=L.Linear(n_hidden_channels, action_size,
initialW=LeCunNormal(last_wscale)),
)
def model_call(model, x):
h = nonlinearity(model.fc(x))
h = model.lstm(h)
h = model.out(h)
return h
super().__init__(
model=model,
model_call=model_call,
action_filter=action_filter)
|
from django.contrib import admin
from web_app.models import Location, Posts
admin.site.register(Location)
admin.site.register(Posts)
|
'''
Workflow Serialization Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_indicator.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_indicator.py
'''
import warnings
import pandas as pd
import unittest
import pathlib
import cudf
import os
from gquant.dataframe_flow.task import load_modules
load_modules(os.getenv('MODULEPATH')+'/rapids_modules/')
import rapids_modules.cuindicator as gi
from . import technical_indicators as ti
from .utils import make_orderer, error_function
import numpy as np
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestIndicator(unittest.TestCase):
def setUp(self):
# ignore importlib warnings.
path = pathlib.Path(__file__)
self._pandas_data = pd.read_csv(str(path.parent)+'/testdata.csv.gz')
self._pandas_data['Volume'] /= 1000.0
self._cudf_data = cudf.from_pandas(self._pandas_data)
warnings.simplefilter('ignore', category=ImportWarning)
warnings.simplefilter('ignore', category=DeprecationWarning)
def tearDown(self):
pass
@ordered
def test_rate_of_return(self):
        '''Test rate of change calculation'''
r_cudf = gi.rate_of_change(self._cudf_data['Close'], 2)
r_pandas = ti.rate_of_change(self._pandas_data, 2)
err = error_function(r_cudf, r_pandas.ROC_2)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_trix(self):
""" test the trix calculation"""
r_cudf = gi.trix(self._cudf_data['Close'], 3)
r_pandas = ti.trix(self._pandas_data, 3)
err = error_function(r_cudf, r_pandas.Trix_3)
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_bollinger_bands(self):
""" test the bollinger_bands """
r_cudf = gi.bollinger_bands(self._cudf_data['Close'], 20)
r_pandas = ti.bollinger_bands(self._pandas_data, 20)
err = error_function(r_cudf.b1, r_pandas['BollingerB_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.b2, r_pandas['Bollinger%b_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_macd(self):
""" test the macd """
n_fast = 10
n_slow = 20
r_cudf = gi.macd(self._cudf_data['Close'], n_fast, n_slow)
r_pandas = ti.macd(self._pandas_data, n_fast, n_slow)
err = error_function(r_cudf.MACD, r_pandas['MACD_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.MACDdiff, r_pandas['MACDdiff_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.MACDsign, r_pandas['MACDsign_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_average_true_range(self):
""" test the average true range """
r_cudf = gi.average_true_range(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.average_true_range(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['ATR_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_ppsr(self):
""" test the ppsr """
r_cudf = gi.ppsr(self._cudf_data['High'], self._cudf_data['Low'],
self._cudf_data['Close'])
r_pandas = ti.ppsr(self._pandas_data)
err = error_function(r_cudf.PP, r_pandas['PP'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.R1, r_pandas['R1'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.S1, r_pandas['S1'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.R2, r_pandas['R2'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.S2, r_pandas['S2'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.R3, r_pandas['R3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.S3, r_pandas['S3'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_stochastic_oscillator_k(self):
""" test the stochastic oscillator k """
r_cudf = gi.stochastic_oscillator_k(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'])
r_pandas = ti.stochastic_oscillator_k(self._pandas_data)
err = error_function(r_cudf, r_pandas['SO%k'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_stochastic_oscillator_d(self):
""" test the stochastic oscillator d """
r_cudf = gi.stochastic_oscillator_d(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.stochastic_oscillator_d(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['SO%d_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_average_directional_movement_index(self):
""" test the average_directional_movement_index """
r_cudf = gi.average_directional_movement_index(
self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'],
10, 20)
r_pandas = ti.average_directional_movement_index(self._pandas_data,
10, 20)
err = error_function(r_cudf, r_pandas['ADX_10_20'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_vortex_indicator(self):
""" test the vortex_indicator """
r_cudf = gi.vortex_indicator(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.vortex_indicator(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Vortex_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_kst_oscillator(self):
""" test the kst_oscillator """
r_cudf = gi.kst_oscillator(self._cudf_data['Close'],
3, 4, 5, 6, 7, 8, 9, 10)
r_pandas = ti.kst_oscillator(self._pandas_data,
3, 4, 5, 6, 7, 8, 9, 10)
err = error_function(r_cudf, r_pandas['KST_3_4_5_6_7_8_9_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_relative_strength_index(self):
""" test the relative_strength_index """
r_cudf = gi.relative_strength_index(self._cudf_data['High'],
self._cudf_data['Low'], 10)
r_pandas = ti.relative_strength_index(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['RSI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_mass_index(self):
""" test the mass_index """
r_cudf = gi.mass_index(self._cudf_data['High'],
self._cudf_data['Low'], 9, 25)
r_pandas = ti.mass_index(self._pandas_data)
err = error_function(r_cudf, r_pandas['Mass Index'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_true_strength_index(self):
""" test the true_strength_index """
r_cudf = gi.true_strength_index(self._cudf_data['Close'], 5, 8)
r_pandas = ti.true_strength_index(self._pandas_data, 5, 8)
err = error_function(r_cudf, r_pandas['TSI_5_8'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_chaikin_oscillator(self):
""" test the chaikin_oscillator """
r_cudf = gi.chaikin_oscillator(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'],
self._cudf_data['Volume'], 3, 10)
r_pandas = ti.chaikin_oscillator(self._pandas_data)
err = error_function(r_cudf, r_pandas['Chaikin'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_money_flow_index(self):
""" test the money_flow_index """
r_cudf = gi.money_flow_index(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'],
self._cudf_data['Volume'], 10)
r_pandas = ti.money_flow_index(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['MFI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_on_balance_volume(self):
""" test the on_balance_volume """
r_cudf = gi.on_balance_volume(self._cudf_data['Close'],
self._cudf_data['Volume'], 10)
r_pandas = ti.on_balance_volume(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['OBV_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_force_index(self):
""" test the force index """
r_cudf = gi.force_index(self._cudf_data['Close'],
self._cudf_data['Volume'], 10)
r_pandas = ti.force_index(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Force_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_ease_of_movement(self):
""" test the ease_of_movement """
r_cudf = gi.ease_of_movement(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Volume'], 10)
r_pandas = ti.ease_of_movement(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['EoM_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_ultimate_oscillator(self):
""" test the ultimate_oscillator """
r_cudf = gi.ultimate_oscillator(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'])
r_pandas = ti.ultimate_oscillator(self._pandas_data)
err = error_function(r_cudf, r_pandas['Ultimate_Osc'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_donchian_channel(self):
""" test the donchian_channel """
r_cudf = gi.donchian_channel(self._cudf_data['High'],
self._cudf_data['Low'], 10)
r_pandas = ti.donchian_channel(self._pandas_data, 10)
err = error_function(r_cudf[:-1], r_pandas['Donchian_10'][:-1])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_keltner_channel(self):
""" test the keltner_channel """
r_cudf = gi.keltner_channel(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.keltner_channel(self._pandas_data, 10)
err = error_function(r_cudf.KelChD, r_pandas['KelChD_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.KelChM, r_pandas['KelChM_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
err = error_function(r_cudf.KelChU, r_pandas['KelChU_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_coppock_curve(self):
""" test the coppock_curve """
r_cudf = gi.coppock_curve(self._cudf_data['Close'], 10)
r_pandas = ti.coppock_curve(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Copp_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_accumulation_distribution(self):
""" test the accumulation_distribution """
r_cudf = gi.accumulation_distribution(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'],
self._cudf_data['Volume'], 10)
r_pandas = ti.accumulation_distribution(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Acc/Dist_ROC_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_commodity_channel_index(self):
""" test the commodity_channel_index """
r_cudf = gi.commodity_channel_index(self._cudf_data['High'],
self._cudf_data['Low'],
self._cudf_data['Close'], 10)
r_pandas = ti.commodity_channel_index(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['CCI_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_momentum(self):
""" test the momentum """
r_cudf = gi.momentum(self._cudf_data['Close'], 10)
r_pandas = ti.momentum(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['Momentum_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_moving_average(self):
""" test the moving average """
r_cudf = gi.moving_average(self._cudf_data['Close'], 10)
r_pandas = ti.moving_average(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['MA_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
@ordered
def test_exponential_moving_average(self):
""" test the exponential moving average """
r_cudf = gi.exponential_moving_average(self._cudf_data['Close'], 10)
r_pandas = ti.exponential_moving_average(self._pandas_data, 10)
err = error_function(r_cudf, r_pandas['EMA_10'])
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
|
import pygame
from pygame.locals import *
import sys
import random
screen = pygame.display.set_mode((400, 700))
bird = pygame.Rect(65, 50, 50, 50)
background = pygame.image.load("pics/background.png").convert()
pics = [pygame.image.load("pics/1.png").convert_alpha(), pygame.image.load("pics/2.png").convert_alpha(), pygame.image.load("pics/dead.png")]
top_pipe = pygame.image.load("pics/bottom.png").convert_alpha()
bottom_pipe = pygame.image.load("pics/top.png").convert_alpha()
gap = 170
wallx = 400
birdY = 350
birdX = 65
jump = 0
jumpSpeed = 10
gravity = 5
alive = True
sprite = 0
score = 0
high_score = 0
orig_high_score = 0
space = random.randint(-110, 110)
wall_speed = 2
with open("high_score.txt", "r") as f:
high_score = int(f.read())
orig_high_score = high_score
def updateWalls():
global wallx, score, space, gap, high_score, alive, wall_speed
wallx-=wall_speed
if wallx < -80:
wallx = 400
if alive:
score += 1
if(score > high_score):
high_score = score
space = random.randint(-110, 110)
if score % 5 == 0:
gap-=20
if score % 3 == 0:
wall_speed+=1
def birdUpdate():
global jump, jumpSpeed, birdY, gravity, wallx, space, alive, score, orig_high_score, wall_speed, gap
if jump:
jumpSpeed -= 1
birdY -= jumpSpeed
jump -= 1
else:
birdY += gravity
gravity += 0.2
bird[1] = birdY
upRect = pygame.Rect(wallx, 360 + gap - space + 10, top_pipe.get_width() - 10, top_pipe.get_height())
downRect = pygame.Rect(wallx, 0 - gap - space - 10, bottom_pipe.get_width() - 10, bottom_pipe.get_height())
if upRect.colliderect(bird):
alive = False
if downRect.colliderect(bird):
alive = False
if not 0 < bird[1] < 720:
if orig_high_score < high_score:
with open("high_score.txt", "w") as f:
f.write(str(high_score))
wall_speed = 2
bird[1] = 50
birdY = 50
gap = 170
alive = True
score = 0
wallx = 400
space = random.randint(-110, 110)
gravity = 5
def runner():
global jump, sprite, gravity, jumpSpeed, gap, high_score, orig_high_score, alive, birdX, wallx
clock = pygame.time.Clock()
pygame.font.init()
score_font = pygame.font.SysFont("Arial", 50)
high_score_font = pygame.font.SysFont("Arial", 20)
running = True
state = True
count = 0
while running:
clock.tick(60)
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN and alive == True:
jump = 17
gravity = 5
jumpSpeed = 10
# if(count == 10):
# state = False
if state:
print(wallx-birdX)
screen.fill((255, 255, 255))
screen.blit(background, (0, 0))
screen.blit(top_pipe, (wallx, 360 + gap - space))
screen.blit(bottom_pipe, (wallx, 0 - gap - space))
if state:
print()
screen.blit(score_font.render(str(score), -1, (255, 255, 255)), (200, 50))
screen.blit(high_score_font.render("High Score: " + str(high_score), -1, (255, 255, 255)), (100, 10))
if alive == False:
sprite = 2
elif jump:
sprite = 1
screen.blit(pics[sprite], (70, birdY))
if alive == True:
sprite = 0
updateWalls()
birdUpdate()
pygame.display.update()
runner()
|
# Generated from Documents\THESE\pycropml_pheno\src\pycropml\antlr_grammarV4\python\python3-py\Python3.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .Python3Parser import Python3Parser
else:
from Python3Parser import Python3Parser
# This class defines a complete generic visitor for a parse tree produced by Python3Parser.
class Python3Visitor(ParseTreeVisitor):
# Visit a parse tree produced by Python3Parser#single_input.
def visitSingle_input(self, ctx:Python3Parser.Single_inputContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#file_input.
def visitFile_input(self, ctx:Python3Parser.File_inputContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#eval_input.
def visitEval_input(self, ctx:Python3Parser.Eval_inputContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#decorator.
def visitDecorator(self, ctx:Python3Parser.DecoratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#decorators.
def visitDecorators(self, ctx:Python3Parser.DecoratorsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#decorated.
def visitDecorated(self, ctx:Python3Parser.DecoratedContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#async_funcdef.
def visitAsync_funcdef(self, ctx:Python3Parser.Async_funcdefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#funcdef.
def visitFuncdef(self, ctx:Python3Parser.FuncdefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#parameters.
def visitParameters(self, ctx:Python3Parser.ParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#typedargslist.
def visitTypedargslist(self, ctx:Python3Parser.TypedargslistContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#tfpdef.
def visitTfpdef(self, ctx:Python3Parser.TfpdefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#varargslist.
def visitVarargslist(self, ctx:Python3Parser.VarargslistContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#vfpdef.
def visitVfpdef(self, ctx:Python3Parser.VfpdefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#stmt.
def visitStmt(self, ctx:Python3Parser.StmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#simple_stmt.
def visitSimple_stmt(self, ctx:Python3Parser.Simple_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#small_stmt.
def visitSmall_stmt(self, ctx:Python3Parser.Small_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#expr_stmt.
def visitExpr_stmt(self, ctx:Python3Parser.Expr_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#annassign.
def visitAnnassign(self, ctx:Python3Parser.AnnassignContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#testlist_star_expr.
def visitTestlist_star_expr(self, ctx:Python3Parser.Testlist_star_exprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#augassign.
def visitAugassign(self, ctx:Python3Parser.AugassignContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#del_stmt.
def visitDel_stmt(self, ctx:Python3Parser.Del_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#pass_stmt.
def visitPass_stmt(self, ctx:Python3Parser.Pass_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#flow_stmt.
def visitFlow_stmt(self, ctx:Python3Parser.Flow_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#break_stmt.
def visitBreak_stmt(self, ctx:Python3Parser.Break_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#continue_stmt.
def visitContinue_stmt(self, ctx:Python3Parser.Continue_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#return_stmt.
def visitReturn_stmt(self, ctx:Python3Parser.Return_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#yield_stmt.
def visitYield_stmt(self, ctx:Python3Parser.Yield_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#raise_stmt.
def visitRaise_stmt(self, ctx:Python3Parser.Raise_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#import_stmt.
def visitImport_stmt(self, ctx:Python3Parser.Import_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#import_name.
def visitImport_name(self, ctx:Python3Parser.Import_nameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#import_from.
def visitImport_from(self, ctx:Python3Parser.Import_fromContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#import_as_name.
def visitImport_as_name(self, ctx:Python3Parser.Import_as_nameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#dotted_as_name.
def visitDotted_as_name(self, ctx:Python3Parser.Dotted_as_nameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#import_as_names.
def visitImport_as_names(self, ctx:Python3Parser.Import_as_namesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#dotted_as_names.
def visitDotted_as_names(self, ctx:Python3Parser.Dotted_as_namesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#dotted_name.
def visitDotted_name(self, ctx:Python3Parser.Dotted_nameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#global_stmt.
def visitGlobal_stmt(self, ctx:Python3Parser.Global_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#nonlocal_stmt.
def visitNonlocal_stmt(self, ctx:Python3Parser.Nonlocal_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#assert_stmt.
def visitAssert_stmt(self, ctx:Python3Parser.Assert_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#compound_stmt.
def visitCompound_stmt(self, ctx:Python3Parser.Compound_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#async_stmt.
def visitAsync_stmt(self, ctx:Python3Parser.Async_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#if_stmt.
def visitIf_stmt(self, ctx:Python3Parser.If_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#while_stmt.
def visitWhile_stmt(self, ctx:Python3Parser.While_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#for_stmt.
def visitFor_stmt(self, ctx:Python3Parser.For_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#try_stmt.
def visitTry_stmt(self, ctx:Python3Parser.Try_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#with_stmt.
def visitWith_stmt(self, ctx:Python3Parser.With_stmtContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#with_item.
def visitWith_item(self, ctx:Python3Parser.With_itemContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#except_clause.
def visitExcept_clause(self, ctx:Python3Parser.Except_clauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#suite.
def visitSuite(self, ctx:Python3Parser.SuiteContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#test.
def visitTest(self, ctx:Python3Parser.TestContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#test_nocond.
def visitTest_nocond(self, ctx:Python3Parser.Test_nocondContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#lambdef.
def visitLambdef(self, ctx:Python3Parser.LambdefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#lambdef_nocond.
def visitLambdef_nocond(self, ctx:Python3Parser.Lambdef_nocondContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#or_test.
def visitOr_test(self, ctx:Python3Parser.Or_testContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#and_test.
def visitAnd_test(self, ctx:Python3Parser.And_testContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#not_test.
def visitNot_test(self, ctx:Python3Parser.Not_testContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#comparison.
def visitComparison(self, ctx:Python3Parser.ComparisonContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#comp_op.
def visitComp_op(self, ctx:Python3Parser.Comp_opContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#star_expr.
def visitStar_expr(self, ctx:Python3Parser.Star_exprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#expr.
def visitExpr(self, ctx:Python3Parser.ExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#xor_expr.
def visitXor_expr(self, ctx:Python3Parser.Xor_exprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#and_expr.
def visitAnd_expr(self, ctx:Python3Parser.And_exprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#shift_expr.
def visitShift_expr(self, ctx:Python3Parser.Shift_exprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#arith_expr.
def visitArith_expr(self, ctx:Python3Parser.Arith_exprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#term.
def visitTerm(self, ctx:Python3Parser.TermContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#factor.
def visitFactor(self, ctx:Python3Parser.FactorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#power.
def visitPower(self, ctx:Python3Parser.PowerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#atom_expr.
def visitAtom_expr(self, ctx:Python3Parser.Atom_exprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#atom.
def visitAtom(self, ctx:Python3Parser.AtomContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#testlist_comp.
def visitTestlist_comp(self, ctx:Python3Parser.Testlist_compContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#trailer.
def visitTrailer(self, ctx:Python3Parser.TrailerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#subscriptlist.
def visitSubscriptlist(self, ctx:Python3Parser.SubscriptlistContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#subscript.
def visitSubscript(self, ctx:Python3Parser.SubscriptContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#sliceop.
def visitSliceop(self, ctx:Python3Parser.SliceopContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#exprlist.
def visitExprlist(self, ctx:Python3Parser.ExprlistContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#testlist.
def visitTestlist(self, ctx:Python3Parser.TestlistContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#dictorsetmaker.
def visitDictorsetmaker(self, ctx:Python3Parser.DictorsetmakerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#classdef.
def visitClassdef(self, ctx:Python3Parser.ClassdefContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#arglist.
def visitArglist(self, ctx:Python3Parser.ArglistContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#argument.
def visitArgument(self, ctx:Python3Parser.ArgumentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#comp_iter.
def visitComp_iter(self, ctx:Python3Parser.Comp_iterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#comp_for.
def visitComp_for(self, ctx:Python3Parser.Comp_forContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#comp_if.
def visitComp_if(self, ctx:Python3Parser.Comp_ifContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#encoding_decl.
def visitEncoding_decl(self, ctx:Python3Parser.Encoding_declContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#yield_expr.
def visitYield_expr(self, ctx:Python3Parser.Yield_exprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Python3Parser#yield_arg.
def visitYield_arg(self, ctx:Python3Parser.Yield_argContext):
return self.visitChildren(ctx)
del Python3Parser
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.test.utils import override_settings
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class GlanceApiTests(test.APITestCase):
@override_settings(API_RESULT_PAGE_SIZE=2)
def test_image_list_detailed_no_pagination(self):
# Verify that all images are returned even with a small page size
api_images = self.images.list()
filters = {}
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
glanceclient = self.stub_glanceclient()
glanceclient.images = self.mox.CreateMockAnything()
glanceclient.images.list(page_size=limit,
limit=limit,
filters=filters,
sort_dir='desc',
sort_key='created_at',) \
.AndReturn(iter(api_images))
self.mox.ReplayAll()
images, has_more, has_prev = api.glance.image_list_detailed(
self.request)
self.assertItemsEqual(images, api_images)
self.assertFalse(has_more)
self.assertFalse(has_prev)
@override_settings(API_RESULT_PAGE_SIZE=2)
def test_image_list_detailed_sort_options(self):
# Verify that sort_dir and sort_key work
api_images = self.images.list()
filters = {}
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
sort_dir = 'asc'
sort_key = 'min_disk'
glanceclient = self.stub_glanceclient()
glanceclient.images = self.mox.CreateMockAnything()
glanceclient.images.list(page_size=limit,
limit=limit,
filters=filters,
sort_dir=sort_dir,
sort_key=sort_key) \
.AndReturn(iter(api_images))
self.mox.ReplayAll()
images, has_more, has_prev = api.glance.image_list_detailed(
self.request,
sort_dir=sort_dir,
sort_key=sort_key)
self.assertItemsEqual(images, api_images)
self.assertFalse(has_more)
self.assertFalse(has_prev)
@override_settings(API_RESULT_PAGE_SIZE=2)
def test_image_list_detailed_pagination_more_page_size(self):
# The total image count exceeds the page size, so only page_size images
# should be returned.
filters = {}
page_size = settings.API_RESULT_PAGE_SIZE
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
api_images = self.images.list()
images_iter = iter(api_images)
glanceclient = self.stub_glanceclient()
glanceclient.images = self.mox.CreateMockAnything()
# Pass back all images, ignoring filters
glanceclient.images.list(limit=limit,
page_size=page_size + 1,
filters=filters,
sort_dir='desc',
sort_key='created_at',).AndReturn(images_iter)
self.mox.ReplayAll()
images, has_more, has_prev = api.glance.image_list_detailed(
self.request,
marker=None,
filters=filters,
paginate=True)
expected_images = api_images[:page_size]
self.assertItemsEqual(images, expected_images)
self.assertTrue(has_more)
self.assertFalse(has_prev)
# Ensure that only the needed number of images are consumed
# from the iterator (page_size + 1).
self.assertEqual(len(list(images_iter)),
len(api_images) - len(expected_images) - 1)
@override_settings(API_RESULT_PAGE_SIZE=20)
def test_image_list_detailed_pagination_less_page_size(self):
# The total image count is less than the page size; all images should be
# returned, and has_more and has_prev should both be False.
filters = {}
page_size = settings.API_RESULT_PAGE_SIZE
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
api_images = self.images.list()
images_iter = iter(api_images)
glanceclient = self.stub_glanceclient()
glanceclient.images = self.mox.CreateMockAnything()
# Pass back all images, ignoring filters
glanceclient.images.list(limit=limit,
page_size=page_size + 1,
filters=filters,
sort_dir='desc',
sort_key='created_at',).AndReturn(images_iter)
self.mox.ReplayAll()
images, has_more, has_prev = api.glance.image_list_detailed(
self.request,
filters=filters,
paginate=True)
expected_images = api_images[:page_size]
self.assertItemsEqual(images, expected_images)
self.assertFalse(has_more)
self.assertFalse(has_prev)
@override_settings(API_RESULT_PAGE_SIZE=9)
def test_image_list_detailed_pagination_equal_page_size(self):
# The total image count equals the page size; page_size images should be
# returned, and has_more and has_prev should both be False.
filters = {}
page_size = settings.API_RESULT_PAGE_SIZE
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
api_images = self.images.list()
images_iter = iter(api_images)
glanceclient = self.stub_glanceclient()
glanceclient.images = self.mox.CreateMockAnything()
glanceclient.images.list(limit=limit,
page_size=page_size + 1,
filters=filters,
sort_dir='desc',
sort_key='created_at',).AndReturn(images_iter)
self.mox.ReplayAll()
images, has_more, has_prev = api.glance.image_list_detailed(
self.request,
filters=filters,
paginate=True)
expected_images = api_images[:page_size]
self.assertItemsEqual(images, expected_images)
self.assertFalse(has_more)
self.assertFalse(has_prev)
self.assertEqual(len(expected_images), len(images))
@override_settings(API_RESULT_PAGE_SIZE=2)
def test_image_list_detailed_pagination_marker(self):
# Tests getting a second page with a marker.
filters = {}
page_size = settings.API_RESULT_PAGE_SIZE
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
marker = 'nonsense'
api_images = self.images.list()[page_size:]
images_iter = iter(api_images)
glanceclient = self.stub_glanceclient()
glanceclient.images = self.mox.CreateMockAnything()
# Pass back all images, ignoring filters
glanceclient.images.list(limit=limit,
page_size=page_size + 1,
filters=filters,
marker=marker,
sort_dir='desc',
sort_key='created_at',) \
.AndReturn(images_iter)
self.mox.ReplayAll()
images, has_more, has_prev = api.glance.image_list_detailed(
self.request,
marker=marker,
filters=filters,
paginate=True)
expected_images = api_images[:page_size]
self.assertItemsEqual(images, expected_images)
self.assertTrue(has_more)
self.assertTrue(has_prev)
self.assertEqual(len(list(images_iter)),
len(api_images) - len(expected_images) - 1)
@override_settings(API_RESULT_PAGE_SIZE=2)
def test_image_list_detailed_pagination_marker_prev(self):
# Tests getting previous page with a marker.
filters = {}
page_size = settings.API_RESULT_PAGE_SIZE
limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
marker = 'nonsense'
api_images = self.images.list()[page_size:]
images_iter = iter(api_images)
glanceclient = self.stub_glanceclient()
glanceclient.images = self.mox.CreateMockAnything()
# Pass back all images, ignoring filters
glanceclient.images.list(limit=limit,
page_size=page_size + 1,
marker=marker,
filters=filters,
sort_dir='asc',
sort_key='created_at',) \
.AndReturn(images_iter)
self.mox.ReplayAll()
images, has_more, has_prev = api.glance.image_list_detailed(
self.request,
marker=marker,
filters=filters,
sort_dir='asc',
paginate=True)
expected_images = api_images[:page_size]
self.assertItemsEqual(images, expected_images)
self.assertTrue(has_more)
self.assertTrue(has_prev)
self.assertEqual(len(list(images_iter)),
len(api_images) - len(expected_images) - 1)
def test_get_image_empty_name(self):
glanceclient = self.stub_glanceclient()
glanceclient.images = self.mox.CreateMockAnything()
glanceclient.images.get('empty').AndReturn(self.empty_name_image)
self.mox.ReplayAll()
image = api.glance.image_get(self.request, 'empty')
self.assertIsNone(image.name)
|
"""
This module contains code for transition-based decoding. "Transition-based decoding" is where you
start in some state, iteratively transition between states, and have some kind of supervision
signal that tells you which end states, or which transition sequences, are "good".
If you want to do decoding for a vocabulary-based model, where the allowable outputs are the same
at every timestep of decoding, this code is not what you are looking for, and it will be quite
inefficient compared to other things you could do.
The key abstractions in this code are the following:
- ``DecoderState`` represents the current state of decoding, containing a list of all of the
actions taken so far, and a current score for the state. It also has methods around
determining whether the state is "finished" and for combining states for batched computation.
- ``DecoderStep`` is a ``torch.nn.Module`` that models the transition function between states.
Its main method is ``take_step``, which generates a ranked list of next states given a
current state.
- ``DecoderTrainer`` is an algorithm for training the transition function with some kind of
supervision signal. There are many options for training algorithms and supervision signals;
this is an abstract class that is generic over the type of the supervision signal.
The module also has some classes to help represent the ``DecoderState``, including ``RnnState``,
which you can use to keep track of a decoder RNN's internal state, ``GrammarState``, which
keeps track of what actions are allowed at each timestep of decoding, if your outputs are
production rules from a grammar, and ``ChecklistState``, which keeps track of coverage information if
you are training a coverage-based parser.
There is also a generic ``BeamSearch`` class for finding the ``k`` highest-scoring transition
sequences given a trained ``DecoderStep`` and an initial ``DecoderState``.
"""
from allennlp.nn.decoding.beam_search import BeamSearch
from allennlp.nn.decoding.checklist_state import ChecklistState
from allennlp.nn.decoding.constrained_beam_search import ConstrainedBeamSearch
from allennlp.nn.decoding.decoder_state import DecoderState
from allennlp.nn.decoding.decoder_step import DecoderStep
from allennlp.nn.decoding.decoder_trainers.decoder_trainer import DecoderTrainer
from allennlp.nn.decoding.grammar_state import GrammarState
from allennlp.nn.decoding.rnn_state import RnnState
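# A minimal illustrative sketch of how these abstractions fit together. The
# concrete ``MyState``/``MyStep`` classes and the exact argument names below are
# assumptions for illustration only; see the individual classes for the real
# signatures.
#
#     initial_state = MyState(...)   # some DecoderState subclass
#     step = MyStep(...)             # some DecoderStep (a torch.nn.Module)
#
#     # Greedy decoding: repeatedly keep the single best-ranked next state.
#     state = initial_state
#     while not state.is_finished():
#         state = step.take_step(state, max_actions=1)[0]
#
#     # Or use BeamSearch to get the k highest-scoring finished sequences:
#     best_states = BeamSearch(beam_size=5).search(
#         num_steps=20, initial_state=initial_state, decoder_step=step)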
|
from __future__ import absolute_import
import sys
try:
import v8eval
except ImportError:
sys.tracebacklimit = 0
raise RuntimeError('Please install the python module v8eval either via pip or download it from https://github.com/sony/v8eval')
from . import JavaScriptInterpreter
from .encapsulated import template
# ------------------------------------------------------------------------------- #
class ChallengeInterpreter(JavaScriptInterpreter):
def __init__(self):
super(ChallengeInterpreter, self).__init__('v8')
# ------------------------------------------------------------------------------- #
def eval(self, body, domain):
try:
return v8eval.V8().eval(template(body, domain))
except (TypeError, v8eval.V8Error):
raise RuntimeError('We encountered an error running the V8 Engine.')
# ------------------------------------------------------------------------------- #
ChallengeInterpreter()
|
from __future__ import unicode_literals
from django.conf.urls import url
from extras.views import ObjectChangeLogView
from . import views
from .models import Tenant, TenantGroup, Package
app_name = 'tenancy'
urlpatterns = [
# Tenant groups
url(r'^service-providers/$', views.TenantGroupListView.as_view(), name='tenantgroup_list'),
url(r'^service-providers/add/$', views.TenantGroupCreateView.as_view(), name='tenantgroup_add'),
url(r'^service-providers/import/$', views.TenantGroupBulkImportView.as_view(), name='tenantgroup_import'),
url(r'^service-providers/delete/$', views.TenantGroupBulkDeleteView.as_view(), name='tenantgroup_bulk_delete'),
url(r'^service-providers/(?P<slug>[\w-]+)/edit/$', views.TenantGroupEditView.as_view(), name='tenantgroup_edit'),
url(r'^service-providers/(?P<slug>[\w-]+)/changelog/$', ObjectChangeLogView.as_view(), name='tenantgroup_changelog', kwargs={'model': TenantGroup}),
# Tenants
url(r'^customers/$', views.TenantListView.as_view(), name='tenant_list'),
url(r'^customers/add/$', views.TenantCreateView.as_view(), name='tenant_add'),
url(r'^customers/import/$', views.TenantBulkImportView.as_view(), name='tenant_import'),
url(r'^customers/edit/$', views.TenantBulkEditView.as_view(), name='tenant_bulk_edit'),
url(r'^customers/delete/$', views.TenantBulkDeleteView.as_view(), name='tenant_bulk_delete'),
url(r'^customers/(?P<slug>[\w-]+)/$', views.TenantView.as_view(), name='tenant'),
url(r'^customers/(?P<slug>[\w-]+)/edit/$', views.TenantEditView.as_view(), name='tenant_edit'),
url(r'^customers/(?P<slug>[\w-]+)/delete/$', views.TenantDeleteView.as_view(), name='tenant_delete'),
url(r'^customers/(?P<slug>[\w-]+)/changelog/$', ObjectChangeLogView.as_view(), name='tenant_changelog', kwargs={'model': Tenant}),
# Packages
url(r'^packages/$', views.PackageListView.as_view(), name='package_list'),
url(r'^packages/add/$', views.PackageCreateView.as_view(), name='package_add'),
url(r'^packages/import/$', views.PackageBulkImportView.as_view(), name='package_import'),
url(r'^packages/edit/$', views.PackageBulkEditView.as_view(), name='package_bulk_edit'),
url(r'^packages/delete/$', views.PackageBulkDeleteView.as_view(), name='package_bulk_delete'),
url(r'^packages/(?P<slug>[\w-]+)/$', views.PackageView.as_view(), name='package'),
url(r'^packages/(?P<slug>[\w-]+)/edit/$', views.PackageEditView.as_view(), name='package_edit'),
url(r'^packages/(?P<slug>[\w-]+)/delete/$', views.PackageDeleteView.as_view(), name='package_delete'),
url(r'^packages/(?P<slug>[\w-]+)/changelog/$', ObjectChangeLogView.as_view(), name='package_changelog', kwargs={'model': Package}),
]
|
import meep as mp
from meep import mpb
import numpy as np
import matplotlib.pyplot as plt
# Compute modes of a rectangular Si strip waveguide on top of oxide.
# Note that you should only pay attention, here, to the guided modes,
# which are the modes whose frequency falls under the light line --
# that is, frequency < beta / 1.45, where 1.45 is the SiO2 index.
# Since there's no special lengthscale here, I'll just
# use microns. In general, if you use units of x, the frequencies
# output are equivalent to x/lambda; so here, the frequencies will be
# output as um/lambda, e.g. 1.5um would correspond to the frequency
# 1/1.5 = 0.6667.
w = 0.5 # Si width (um)
h = 0.22 # Si height (um)
Si = mp.Medium(index=3.4757)
SiO2 = mp.Medium(index=1.444)
# Define the computational cell. We'll make x the propagation direction.
# the other cell sizes should be big enough so that the boundaries are
# far away from the mode field.
sc_y = 2 # supercell width (um)
sc_z = 2 # supercell height (um)
geometry_lattice = mp.Lattice(size=mp.Vector3(0, sc_y, sc_z))
# define the 2d blocks for the strip and substrate
width = 0.5
height = 0.22
thickness = 1e20
waveguide = mp.Block(
size=mp.Vector3(thickness,width,height),
center=mp.Vector3(0,0,0),
material=Si)
geometry = [waveguide]
# The k (i.e. beta, i.e. propagation constant) points to look at, in
# units of 2*pi/um. We'll look at num_k points from k_min to k_max.
num_k = 9
k_min = 0.1
k_max = 3.0
k_points = mp.interpolate(num_k, [mp.Vector3(k_min), mp.Vector3(k_max)])
k_points = [mp.Vector3(1/1.55)]
resolution = 64 # pixels/um
# Increase this to see more modes. (The guided ones are the ones below the
# light line, i.e. those with frequencies < kmag / 1.45, where kmag
# is the corresponding column in the output if you grep for "freqs:".)
filename_prefix = 'strip-' # use this prefix for output files
ms = mpb.ModeSolver(
geometry_lattice=geometry_lattice,
geometry=geometry,
resolution=resolution,
filename_prefix=filename_prefix,
default_material=SiO2,
)
# compute num_bands lowest frequencies as a function of k. Also display
# "parities", i.e. whether the mode is symmetric or anti_symmetric
# through the y=0 and z=0 planes.
#ms.run()
# Collect the E-field for num_bands bands at omega via the addField callback
# (the x-component Poynting output further below is left commented out).
num_bands = 1
efields = []
def addField(tr_ms, band):
efields.append(tr_ms.get_efield(band))
lam = 1.55
omega = 1 / 1.55 # frequency corresponding to 1.55um
foundk = ms.find_k(mp.EVEN_Z,
omega,
1,
num_bands,
mp.Vector3(1),
1e-3,
omega * 3.45,
omega * 0.1,
omega * 4,
addField)
foundk = np.array(foundk)
n = foundk * lam
print(n)
first_band = 1
#temp = ms.get_poynting(first_band,num_bands)
efield = ms.get_efield(1)
#field2 = np.squeeze(efields[2])
#print(field1.shape)
temp = efield[:,:,0,:]
#temp = field2
Ex = temp[:,:,0]
Ey = temp[:,:,1]
Ez = temp[:,:,2]
I = (abs(Ex)**2 + abs(Ey)**2 + abs(Ez)**2)
Ix = abs(Ex)**2
Iy = abs(Ey)**2
Iz = abs(Ez)**2
eps_data = ms.get_epsilon()
plt.figure(dpi=100)
plt.subplot(221)
plt.imshow(np.abs(eps_data.transpose()),interpolation='spline36',cmap='binary')
plt.imshow(np.abs(Ix.transpose()),interpolation='spline36',alpha=0.7)
plt.title('$I_x$')
plt.colorbar()
plt.subplot(222)
plt.imshow(np.abs(eps_data.transpose()),interpolation='spline36',cmap='binary')
plt.imshow(np.abs(Iy.transpose()),interpolation='spline36',alpha=0.7)
plt.title('$I_y$')
plt.colorbar()
plt.subplot(223)
plt.imshow(np.abs(eps_data.transpose()),interpolation='spline36',cmap='binary')
plt.imshow(np.abs(Iz.transpose()),interpolation='spline36',alpha=0.7)
plt.title('$I_z$')
plt.colorbar()
plt.subplot(224)
plt.imshow(np.abs(eps_data.transpose()),interpolation='spline36',cmap='binary')
plt.imshow(np.abs(I.transpose()),interpolation='spline36',alpha=0.7)
plt.title('$I$')
plt.colorbar()
plt.show()
###########################################################################
# Above, we outputted the dispersion relation: frequency (omega) as a
# function of wavevector kx (beta). Alternatively, you can compute
# beta for a given omega -- for example, you might want to find the
# modes and wavevectors at a fixed wavelength of 1.55 microns. You
# can do that using the find_k function:
'''
'''
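# A sketch of such a call, mirroring the find_k invocation earlier in this
# script (the parity, tolerance, and |k| guess/bounds here are illustrative):
#
#     omega = 1 / 1.55                    # operating frequency (1/um)
#     k_found = ms.find_k(mp.EVEN_Z,      # parity constraint
#                         omega,          # frequency to solve for
#                         1, num_bands,   # first and last band
#                         mp.Vector3(1),  # direction of the propagation constant
#                         1e-3,           # convergence tolerance
#                         omega * 3.45,   # initial guess for |k|
#                         omega * 0.1,    # lower bound on |k|
#                         omega * 4)      # upper bound on |k|
#     # The effective index follows as n_eff = k_found * 1.55.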
|
# -*- coding: utf-8 -*-
import pickle
import os
import matplotlib.pyplot as plt
def get_ints(fid_list):
for fid in fid_list:
jar = pickle.load(open(dir + fid, 'rb'))  # pickle files should be opened in binary mode
obs = jar['observations'][0]
for o in obs.resolution_filter(d_max=1.81,d_min=1.78):
yield o[1]
dir = "/dls/x02-1/data/2017/mx15722-8/processing/danny/dials/berlin-0-completed/"
#'/dls/i24/data/2017/nt14493-63/processing/scripts/experiment_refinement_process_test_folder/waylinCD/'
file_list = [fid for fid in os.listdir(dir) if fid.startswith('int-') and fid.endswith('.pickle')]
int_bin_dict = {}
list_of_ints = list(get_ints(file_list))
plt.hist(list_of_ints, bins=300)
plt.show()
|