blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9ef74f70e4c745e37b3f4f71db31e27d2cb4df84 | Python | nogicoder/sorting-algorithm | /Algorithms/bubble_sort.py | UTF-8 | 380 | 3.609375 | 4 | [] | no_license | n = [23, 9, -8, 7, 3]
n1 = [1, 2, 3, 6, 90]
def bubble_sort(lst):
    for t in range(len(lst)):
        for i in range(len(lst) - 1):
            # Swap adjacent elements that are out of order
            if lst[i + 1] < lst[i]:
                lst[i], lst[i + 1] = lst[i + 1], lst[i]
                # Show the list after every swap
                print(" ".join(str(item) for item in lst))
bubble_sort(n)
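# n1 is already sorted, so sorting it performs no swaps and prints nothing
bubble_sort(n1)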
| true |
6db58fdc04f3570c16077f0a9e49614e7ac76ba1 | Python | SonjaGrusche/LPTHW | /EX03/ex3.py | UTF-8 | 2,098 | 4.53125 | 5 | [] | no_license | # + "plus" does addition
# - "minus" does subtraction
# / "slash" does division
# * "asterisk" does multiplication
# % "percent" does modulus calculation (divides and displays the remainder)
# < "less-than" says if the number before the character < is smaller than the number behind it by giving the statement "True" or "False"
# > "greater-than" says if the number before the character > is greater than the number behind it by giving the statement "True" or "False"
# <= "less-than-equal" says if the number before the characters <= is smalleror equal than the number behind it by giving the statement "True" or "False"
# >= "greater-than-equal" says if the number before the characters >= is greater or equal than the number behind it by giving the statment "True" or "False"
# Remember that Python operations follow the PEMDAS (US) order.
# That stands for: Parentheses Exponents Multiplication Division Addition Subtraction
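# A short worked example of the precedence rules above:
# 3 + 2 * 4 is evaluated as 3 + (2 * 4) = 11, not (3 + 2) * 4 = 20
print "Order of operations example:", 3 + 2 * 4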
# prints what's inside the quotes
print "I will now count my chickens:"
# prints "Hens" and calculates 30
print "Hens", 25 + 30 / 6
# prints "Roosters" and calculates 97
print "Roosters", 100 - 25 * 3 % 4
# prints what's inside the quotes
print "Now I will count the eggs:"
# only prints the solution "7"
print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6
# prints what's inside the quotes
print "Is it true that 3 + 2 < 5 - 7?"
# prints that the calculus is "False"
print 3 + 2 < 5 - 7
# prints what's inside the quotes and prints "5"
print "What is 3 + 2?", 3 + 2
# prints what's inside the quotes and prints "-2"
print "What is 5 - 7?", 5 - 7
# prints what's inside the quotes
print "Oh, that's why it's False."
# prints what's inside the quotes
print "How about some more."
# prints what's inside the quotes and gives a Boolean statement about the calculation "True"
print "Is it greater?", 5 > -2
# prints what's inside the quotes and gives a Boolean statement about the calculation "True"
print "Is it greater or equal?", 5 >= -2
# prints what's inside the quotes and gives a Boolean statement about the calculation "False"
print "Is it less or equal?", 5 <= -2
| true |
4fa166a889d750377fb3bcb9d0c73a7974d9f50f | Python | jshcrm/wallet | /wallet/accounts/tests.py | UTF-8 | 726 | 2.703125 | 3 | [] | no_license | from django.test import TestCase
from accounts.models import User, Wallet
class WalletTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create(username='test')
cls.wallet = Wallet.objects.create(user=cls.user, savings=100.00)
def test_str(self):
assert str(self.wallet) == 'test'
def test_update_savings(self):
assert self.wallet.savings == 100.00
self.wallet.update_savings(500.00)
assert self.wallet.savings == 500.00
class UserTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create(username='test')
def test_str(self):
assert str(self.user) == self.user.username
| true |
d4e608a9f381e9923091958862a896619d034a28 | Python | wing603/python3 | /02_分支/01_判断年龄.py | UTF-8 | 215 | 3.296875 | 3 | [] | no_license | # 1. Define an integer variable to record the age
age = 15
if age >= 18 :
    # 3. If the person is 18 or older, they may enter the internet cafe
print("可以进网吧")
print("欢迎欢迎")
print("看看执行什么")
# 2. Check whether the person is at least 18
| true |
fda5d8fb385381fd9915cf11e9eea57164b9e37e | Python | RajaAyyanar/Computational_Intelligence_Optimization | /ArtificialBeeColony.py | UTF-8 | 3,518 | 3.078125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 31 12:46:23 2017
@author: Raja Ayyanar
"""
def Sphere(Colony):
    # Sphere benchmark: sum of squares along the last axis, one objective value per solution
    S=Colony*Colony;
    ObjVal=np.atleast_1d(S.sum(axis=-1));
    return ObjVal
def calculateFitness(fObjV):
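    # Standard ABC fitness mapping: fitness = 1/(1+f) for f >= 0, and 1 + |f| for f < 0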
import numpy as np
fFitness=np.zeros(np.max(np.shape(fObjV)));
ind=np.nonzero(fObjV>=0);
fFitness[ind]=1/(fObjV[ind]+1);
ind=np.nonzero(fObjV<0);
fFitness[ind]=1+abs(fObjV[ind]);
return fFitness
import numpy as np
import time
import random as random
# Control Parameters of ABC algorithm
population=30; #The colony size (employed bees + onlooker bees)
FoodNumber=population//2; #The number of food sources equals half of the colony size
limit=100; # A food source which could not be improved through "limit" trials is abandoned by its employed bee
max_iterations=200; #The number of cycles for foraging
d=2; #The number of parameters of the problem to be optimized
ub=np.ones((1,d))*5.12; #upper bounds of the parameters.
lb=np.ones((1,d))*(-5.12);#lower bounds of the parameters.
runtime=10;#No of runs in order to see its robustness
Global_gbest=np.zeros((1,runtime));
for r in range(0,runtime):
# All food sources are initialized
# Variables are initialized in the range [lb,ub]
Range=np.tile((ub-lb), (round(FoodNumber), 1))
Lower=np.tile(lb, (round(FoodNumber), 1))
Foods=np.random.uniform(0,1,(round(FoodNumber),d))*Range + Lower;
# Foods is the population of food sources.
# Each row of Foods matrix is a vector holding d parameters to be optimized.
# The number of rows of Foods matrix equals to the FoodNumber
Fun_Cost = Sphere(Foods); # Result from the function
Fitness=calculateFitness(Fun_Cost); # Fitness of cost
# reset trial counters
# trial vector holds trial numbers through which solutions can not be improved
trial=np.zeros((1,FoodNumber));
#The best food source is memorized
BestInd=np.nonzero(Fun_Cost==Fun_Cost.min());
BestInd=BestInd[-1];
gbest=Fun_Cost[BestInd]; # Optimal solution
gbest_Params=Foods[BestInd,:]; # Parameters of Optimal Solution
iter=1;
start=time.time()
while iter<=max_iterations:
######### EMPLOYED BEE PHASE ########################
for i in range(0,FoodNumber):
#The parameter to be changed is determined randomly
            k=int(np.floor(random.random()*d));
# A randomly chosen solution is used in producing a mutant solution of the solution i
            j=int(np.floor(random.random()*(FoodNumber)));
#Randomly selected solution must be different from the solution i
while(j==i):
                j=int(np.floor(random.random()*(FoodNumber)));
# Generate a new solution
            new_sol=Foods[i,:].copy();  # copy so the trial solution does not overwrite the original
# v_{ij}=x_{ij}+\phi_{ij}*(x_{kj}-x_{ij})
new_sol[k]=Foods[i,k]+(Foods[i,k]-Foods[j,k])*(random.random()-0.5)*2;
# if generated parameter value is out of boundaries, it is shifted onto the boundaries
            ind=np.nonzero(new_sol<lb[0]);
            new_sol[ind]=lb[0][ind];
            ind=np.nonzero(new_sol>ub[0]);
            new_sol[ind]=ub[0][ind];
#evaluate new solution
Sol_cost = Sphere(new_sol);
# Fitness value of new solution
FitnessSol=calculateFitness(Sol_cost);
| true |
7c4060db4bfa377d4995a4ea6362b57762a45aaa | Python | jonovik/cgptoolbox | /cgp/virtexp/elphys/examples.py | UTF-8 | 16,295 | 2.828125 | 3 | [] | no_license | """
Virtual experiments for cellular electrophysiology.
These protocols assume that the :wiki:`transmembrane potential` is a variable
named *V* in the model. (If the transmembrane potential is named differently,
use the *rename* argument to the
:meth:`~cgp.physmod.cellmlmodel.Cellmlmodel` constructor.)
Many models of cellular electrophysiology, such as the
:cellml:`Bondarenko
<11df840d0150d34c9716cd4cbdd164c8/bondarenko_szigeti_bett_kim_rasmusson_2004_apical>`
and
:cellml:`Ten Tusscher
<e946a72663bdf17ef6752980a0232351/tentusscher_noble_noble_panfilov_2004_a>`
models, have hardcoded a protocol of regular
pacing, which must be disabled to apply other protocols such as regular pacing
(:doi:`Cooper et al. 2011 <10.1016/j.pbiomolbio.2011.06.003>`).
Here, regular pacing is assumed to be governed by the following parameters:
* *stim_amplitude* : Magnitude of the stimulus current.
* *stim_period* : Interval between the beginnings of successive stimuli.
Some models have a parameter called *stim_start*, running the model unpaced
for some time before the stimulus starts. Here it is assumed that any such
delay is set to zero.
The implementation of voltage clamp experiments assumes that the model object
has a
:meth:`~cgp.cvodeint.namedcvodeint.Namedcvodeint.clamp` method.
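
As a minimal sketch (the value below is only illustrative), regular pacing can
be adjusted through these parameters before eliciting an action potential::

    bond = Bond()                     # any of the model classes defined below
    bond.pr.stim_period = 1000.0      # interval between stimulus onsets
    t, y, stats = bond.ap()           # one paced action potential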
""" # pylint: disable=C0301,E1002,W0611
from __future__ import division # 7 / 4 = 1.75 rather than 1
import numpy as np
from ...physmod.cellmlmodel import Cellmlmodel
from . import Paceable, Clampable
from ...utils.ordereddict import OrderedDict
class Hodgkin(Cellmlmodel, Paceable, Clampable):
"""
Hodgkin-Huxley model of action potential.
I have made some hacks to make this model more commensurable with the
Bondarenko (2004) model. The current
:cellml:`CellML version <5d116522c3b43ccaeb87a1ed10139016/hodgkin_huxley_1952>`
has stimulus duration and amplitude hardcoded, and the stimulus is not
periodic. I have hacked this in the CellML source for now.
I've also fixed a 0/0 bug in alpha_m for V == -50.
.. plot::
from cgp.virtexp.elphys.examples import Hodgkin
hh = Hodgkin()
t, y, stats = hh.ap()
plt.plot(t, y.V)
"""
def __init__(self, localfile="hodgkin_huxley_1952", **kwargs):
super(Hodgkin, self).__init__(localfile=localfile, **kwargs)
class Tentusscher(Cellmlmodel, Paceable, Clampable):
"""
Example for class :class:`Paceable` - Ten Tusscher heart M-cell model.
Reference: :doi:`Ten Tusscher et al. 2004 <10.1152/ajpheart.00794.2003>`.
.. plot::
:width: 300
from cgp.virtexp.elphys.examples import Tentusscher
tt = Tentusscher()
t, y, stats = tt.ap()
plt.plot(t, y.V, '.-')
i = stats["i"]
plt.plot(t[i], y.V[i], 'ro')
This model's `main CellML page
<http://models.cellml.org/exposure/c7f7ced1e002d9f0af1b56b15a873736>`_
links to three versions of the model corresponding to different cell types:
:cellml:`a (midmyocardial)
<c7f7ced1e002d9f0af1b56b15a873736/tentusscher_noble_noble_panfilov_2004_a>`,
:cellml:`b (epicardial)
<c7f7ced1e002d9f0af1b56b15a873736/tentusscher_noble_noble_panfilov_2004_b>`,
:cellml:`c (endocardial)
<c7f7ced1e002d9f0af1b56b15a873736/tentusscher_noble_noble_panfilov_2004_c>`.
Other keyword arguments are passed through to
:meth:`~cgp.physmod.cellmlmodel.Cellmlmodel`.
In particular, the "rename" argument changes some state and parameter
names to follow the conventions of a :class:`Paceable` object.
"""
def __init__(self, # pylint: disable=W0102
workspace="tentusscher_noble_noble_panfilov_2004",
rename={"y": {"Na_i": "Nai", "Ca_i": "Cai", "K_i": "Ki"}, "p": {
"IstimStart": "stim_start",
"IstimEnd": "stim_end",
"IstimAmplitude": "stim_amplitude",
"IstimPeriod": "stim_period",
"IstimPulseDuration": "stim_duration"
}}, **kwargs):
kwargs["rename"] = rename
super(Tentusscher, self).__init__(workspace=workspace, **kwargs)
self.pr.stim_start = 0
class Bond(Cellmlmodel, Paceable, Clampable):
"""
:mod:`cgp.virtexp.elphys` example: Bondarenko et al. 2004 model.
Please **see the source code** for how this class uses the
    :class:`Paceable` and :class:`Clampable` mixins to add experimental
    protocols to a :class:`~cgp.physmod.cellmlmodel.Cellmlmodel`.
.. inheritance-diagram:: cgp.physmod.cellmlmodel.Cellmlmodel Paceable Clampable Bond
:parts: 1
.. todo:: Add voltage clamping.
Note: The redundant parameter *stim_start* is set to zero.
Once defined, the :class:`Bond` class can be used as follows:
.. plot::
:include-source:
:width: 300
from cgp.virtexp.elphys.examples import Bond
bond = Bond()
t, y, stats = bond.ap()
plt.plot(t, y.V)
References:
* :doi:`original paper <10.1152/ajpheart.00185.2003>`
* :cellml:`CellML implementation
<11df840d0150d34c9716cd4cbdd164c8/bondarenko_szigeti_bett_kim_rasmusson_2004_apical>`
"""
def __init__(self, workspace="bondarenko_szigeti_bett_kim_rasmusson_2004",
*args, **kwargs):
"""Constructor for the :class:`Bond` class."""
super(Bond, self).__init__(workspace=workspace, *args, **kwargs)
if "stim_start" in self.dtype.p.names:
self.pr.stim_start = 0.0
# Mapping None to an empty dict, and letting the scenario name default
# to None, makes self.scenario() equivalent to self.autorestore().
self.scenarios = OrderedDict({None: {}})
kwargs = kwargs.copy()
kwargs.update(workspace=self.workspace, exposure=self.exposure)
for variant in self.get_variants():
name = variant.replace(self.workspace, "").replace("_", "")
self.scenarios[name] = dict(workspace=self.workspace,
exposure=self.exposure, changeset=self.changeset,
variant=variant, y=self.y0r.copy(), p=self.pr.copy())
kwargs["variant"] = variant
m = Cellmlmodel(**kwargs)
m.pr.stim_start = 0.0
for k in set(m.dtype.p.names) & set(self.dtype.p.names):
self.scenarios[name]["p"][k] = m.pr[k]
for k in set(m.dtype.y.names) & set(self.dtype.y.names):
self.scenarios[name]["y"][k] = m.y0r[k]
def scenario(self, name=None, **kwargs):
"""
Context manager to set parameters and initial state to a named scenario.
This is just a wrapper for
:meth:`~cvodeint.namedcvodeint.Namedcvodeint.autorestore`,
and defaults to a plain :meth:`autorestore` if *name* = None.
A Bond object has scenarios representing "apex" and "septum" cells,
with apex as the default. The two scenarios are available as separate
models at cellml.org.
Subclasses may define additional scenarios. Note that scenarios will
need adaptation to work with subclasses that have different parameter
names.
``**kwargs`` are passed to
:meth:`~cvodeint.namedcvodeint.Namedcvodeint.autorestore`.
.. plot::
from cgp.virtexp.elphys.examples import Bond
bond = Bond()
for k in "apical", "septal":
with bond.scenario(k):
t, y, stats = bond.ap()
plt.plot(t, y.V, label=k)
plt.legend()
>>> bond = Bond()
>>> bond.scenarios
OrderedDict([(None, {}),
('apical', {'changeset': '99f4fd6804311c571a7143515003691ab2e430fb',
'workspace': 'bondarenko_szigeti_bett_kim_rasmusson_2004',
'p': rec.array([ (1.0, ...)], dtype=[('Cm', '<f8'), ...)]),
'y': rec.array([ (-82.4202, ...)], dtype=[('V', '<f8'), ...]),
'variant': 'bondarenko_szigeti_bett_kim_rasmusson_2004_apical',
'exposure': '11df840d0150d34c9716cd4cbdd164c8'}),
('septal', {'changeset': '99f4fd6804311c571a7143515003691ab2e430fb',
'workspace': 'bondarenko_szigeti_bett_kim_rasmusson_2004',
'p': rec.array([ (1.0, ...)], dtype=[('Cm', '<f8'), ...)]),
'y': rec.array([ (-82.4202, ...)], dtype=[('V', '<f8'), ...]),
'variant': 'bondarenko_szigeti_bett_kim_rasmusson_2004_septal',
'exposure': '11df840d0150d34c9716cd4cbdd164c8'})])
"""
return self.autorestore(_y=self.scenarios[name].get("y"),
_p=self.scenarios[name].get("p"), **kwargs)
class Fitz(Bond):
r"""
CellML implementation of the FitzHugh-Nagumo nerve axon model.
References:
* Nagumo, J., Animoto, S., Yoshizawa, S. (1962)
:doi:`An active pulse transmission line simulating nerve axon
<10.1109/JRPROC.1962.288235>`.
Proc. Inst. Radio Engineers, 50, 2061-2070.
* FitzHugh R (1961)
:doi:`Impulses and physiological states in theoretical models of nerve
membrane <10.1016/S0006-3495(61)86902-6>`.
Biophysical J. 1:445-466
In Fitzhugh (1961), the definition of the transmembrane potential is
such that it decreases during depolarization, so that the action potential
starts with a downstroke, contrary to the convention used in FitzHugh 1969
and in most other work. The equations are also somewhat rearranged.
However, figure 1 of FitzHugh 1961 gives a very good overview of the phase
plane of the model.
The nullclines of the model are:
.. math::
\dot{v} = 0 &\Leftrightarrow& w = v (v-\alpha) (1-v) + I \\
\dot{w} = 0 &\Leftrightarrow& w = (1/\gamma) v
A high gamma makes w change slowly relative to v, making the system more
stiff and the action potentials more "square".
In the absence of a stimulus current, the model has a limit cycle if
:math:`\alpha \gamma < -1`
I have made some hacks to make this model more commensurable with the
Bondarenko (2004) model. The current
:cellml:`CellML version <cf32346a9e5c4b2cdb559b11da5f1ae1/fitzhugh_1961>`
has stimulus duration and amplitude hardcoded, and the stimulus is not
periodic. I have hacked this in the CellML source for now.
Also, I rename the state variable *v* to *V* for compatibility with the
pacing and clamping protocols.
The hardcoded stimulus protocol in the CellML version is strange in that
the stimulus *decreases* the transmembrane potential, and with a magnitude
far beyond that of the model's action potential. My guess is that
*stim_duration* and *stim_amplitude* were copied directly from the
Bondarenko model.
.. plot::
:include-source:
:width: 300
from cgp.virtexp.elphys.examples import Fitz
fitz = Fitz(reltol=1e-10)
for t, y, stats in fitz.aps(n=3):
plt.plot(t, y.view(float))
>>> fitz = Fitz(reltol=1e-10)
>>> with fitz.autorestore():
... t, y, stats = fitz.ap()
>>> [float(stats[k]) for k in "base peak ttp".split()]
[0.0, 0.984..., 28.2...]
In fact, this parameter scenario is self-exciting even without a stimulus
current. :math:`(V=0, w=0)` is an equilibrium (though unstable), so we
choose a different initial value.
.. plot::
:include-source:
:width: 300
from cgp.virtexp.elphys.examples import Fitz
fitz = Fitz()
with fitz.autorestore(stim_amplitude=0, V=0.01):
t, y, flag = fitz.integrate(t=[0, 700])
plt.plot(t, y.view(float))
plt.legend(fitz.dtype.y.names)
The constructor defines a "paced" :meth:`~Bond.scenario` where small
periodic stimuli elicit action potentials. To hack this, we impose a
negative stimulus most of the time, removing it briefly to elicit the
action potential.
"""
def __init__(self, localfile="fitzhugh_1961", # pylint: disable=W0102
rename={"y": {"v": "V"}}, **kwargs):
"""
Return a Fitzhugh (1961) model object.
Keyword arguments are passed through to
:class:`~cellmlmodels.cellmlmodel.Cellmlmodel`.
In particular, the *rename* argument changes some state and parameter
names to match those of the Bondarenko model, from which this class is
derived.
See the class docstring for examples.
"""
kwargs["rename"] = rename
super(Fitz, self).__init__(localfile=localfile, **kwargs)
pr = self.pr.copy()
pr.stim_period = 200
pr.stim_duration = 190
pr.stim_amplitude = -0.1
self.scenarios["paced"] = dict(p=pr, y=(0.01, 0.01))
class Li(Cellmlmodel, Paceable, Clampable):
"""
CellML implementation of the LNCS modified Bondarenko model by Li et al.
.. seealso:: :doi:`10.1152/ajpheart.00219.2010`
"""
def __init__(self, localfile="BL6WT_260710", **kwargs):
"""
Return a Li-Niederer-Casadei-Smith (LNCS) model object.
"exposure_workspace" will eventually refer to the cellml.org repository.
Currently, I work with code generated from the CellML file using
OpenCell 0.8.
Other keyword arguments are passed through to
:mod:`cellmlmodels.cellmlmodel.Cellmlmodel`.
This constructor sets *stim_offset* = 0.0, overriding the default.
This is because the stepwise integration assumes that the stimulus is
at the start of the action potential.
"""
super(Li, self).__init__(localfile=localfile, **kwargs)
# Assume AP starts with stimulus
self.pr.stim_offset = 0.0
# Disable caffeine injection unless specifically requested
if "prepulses_number" in self.pr.dtype.names:
self.pr.prepulses_number = np.inf
class Bond_uhc(Bond):
"""
Bondarenko model with most constants unhardcoded.
Comparing the details of the original and unhardcoded Bond models.
>>> b = Bond(reltol=1e-10)
>>> bu = Bond_uhc(reltol=1e-10)
Increased number of parameters.
>>> len(b.dtype.p), len(bu.dtype.p)
(73, 204)
Verify that the original parameters still have the same value.
>>> for k in np.intersect1d(b.dtype.p.names, bu.dtype.p.names):
... if b.pr[k] != bu.pr[k]:
... print k, b.pr[k], bu.pr[k]
The variable iKss was dropped because it was really a constant.
>>> list(np.setdiff1d(b.dtype.y.names, bu.dtype.y.names))
['iKss']
The sodium-calcium exchanger current was renamed.
>>> list(np.setxor1d(b.dtype.a.names, bu.dtype.a.names))
['i_NCX', 'i_NaCa']
"""
def __init__(self, localfile="bond_uhc", *args, **kwargs):
super(Bond_uhc, self).__init__(localfile=localfile, *args, **kwargs)
class Li_uhc(Li):
"""
LNCS model with most constants unhardcoded.
>>> li = Li()
>>> liu = Li_uhc()
>>> len(li.dtype.p), len(liu.dtype.p)
(86, 188)
>>> for k in np.intersect1d(li.dtype.p.names, liu.dtype.p.names):
... if li.pr[k] != liu.pr[k]:
... print k, li.pr[k], liu.pr[k]
>>> list(np.setdiff1d(li.dtype.y.names, liu.dtype.y.names)) # dropped variable
['iKss']
>>> li.dtype.a == liu.dtype.a
True
"""
def __init__(self, localfile="li_uhc", *args, **kwargs):
super(Li_uhc, self).__init__(localfile=localfile, *args, **kwargs)
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS |
doctest.NORMALIZE_WHITESPACE)
| true |
2a02fa94187c064e90be7eafd4b4130b4735ccc7 | Python | geniscuadrado/Crafting-Test-Driven-Software-with-Python | /Chapter07/tests/unit/test_persistence.py | UTF-8 | 735 | 2.75 | 3 | [
"MIT"
] | permissive | import os
import json
from contacts import Application
class TestLoading:
def test_load(self):
app = Application()
with open("./contacts.json", "w+") as f:
json.dump({"_contacts": [("NAME SURNAME", "3333")]}, f)
app.load()
assert app._contacts == [
("NAME SURNAME", "3333")
]
class TestSaving:
def test_save(self):
app = Application()
app._contacts = [
("NAME SURNAME", "3333")
]
try:
os.unlink("./contacts.json")
except FileNotFoundError:
pass
app.save()
with open("./contacts.json") as f:
assert json.load(f) == {"_contacts": [["NAME SURNAME", "3333"]]}
| true |
51d181d13b1b47fa72e570d24369ef32227994ab | Python | alexandraback/datacollection | /solutions_5708921029263360_0/Python/Aurel/code.py | UTF-8 | 1,171 | 2.6875 | 3 | [] | no_license | import sys
import itertools
import math
import collections
import functools
sys.setrecursionlimit(10000)
def inputInts():
return map(int, raw_input().split())
T = int(raw_input())
for testId in range(T):
J, P, S, K = inputInts()
res = []
pairsJP = {}
pairsJS = {}
pairsPS = {}
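    # A triple (j, p, s) is kept only if none of its pairs (j,p), (j,s), (p,s) has already been used K times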
for j in xrange(J):
for p in xrange(P):
for s in xrange(S):
jp = str(j)+str(p)
if jp not in pairsJP:
pairsJP[jp] = 0
if pairsJP[jp] >= K:
continue
js = str(j)+str(s)
if js not in pairsJS:
pairsJS[js] = 0
if pairsJS[js] >= K:
continue
ps = str(p)+str(s)
if ps not in pairsPS:
pairsPS[ps] = 0
if pairsPS[ps] >= K:
continue
pairsJP[jp] += 1
pairsJS[js] += 1
pairsPS[ps] += 1
res.append([j+1,p+1,s+1])
print "Case #{:d}: {:d}".format(testId+1, len(res))
for row in res:
print ' '.join(map(str, row))
| true |
a18625a5b2a27998030ba927915d01b98c086ee3 | Python | sharkbound/Python-Projects | /code_wars/Solutions/walk_up_the_stairs.py | UTF-8 | 790 | 3.59375 | 4 | [] | no_license | from unittest import TestCase
def stairs(n):
spaces, segments = ' ' * (n-1), []
for x in range(1, n+1):
left = ' '.join(str(y + 1)[-1] for y in range(x))
segments.append(spaces + left + ' ' + left[::-1])
spaces = spaces[0:-4]
return '\n'.join(segments)
class unittest(TestCase):
def test_n_is_3(self):
print("Testing for n = 3")
self.assertEqual(stairs(3), " 1 1\n 1 2 2 1\n1 2 3 3 2 1")
def test_n_is_7(self):
print("Testing for n = 7")
self.assertEqual(stairs(7),
" 1 1\n 1 2 2 1\n 1 2 3 3 2 1\n 1 2 3 4 4 3 2 1\n 1 2 3 4 5 5 4 3 2 1\n 1 2 3 4 5 6 6 5 4 3 2 1\n1 2 3 4 5 6 7 7 6 5 4 3 2 1")
| true |
477b321e18ec8e4bab56502fba64c59ca3f9a2f2 | Python | mccdaq/daqhats | /examples/python/mcc134/web_server/web_server.py | UTF-8 | 25,760 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This example demonstrates a simple web server providing visualization of data
from a MCC 134 DAQ HAT device for a single client. It makes use of the Dash
Python framework for web-based interfaces and a plotly graph. To install the
dependencies for this example, run:
$ pip install dash
Running this example:
1. Start the server by running the web_server.py module in a terminal.
$ ./web_server.py
2. Open a web browser on a device on the same network as the host device and
enter http://<host>:8080 in the address bar,
replacing <host> with the IP Address or hostname of the host device.
Stopping this example:
1. To stop the server, press Ctrl+C in the terminal window where the server
was started.
"""
import socket
import json
from collections import deque
from dash import Dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from daqhats import hat_list, mcc134, HatIDs, TcTypes
_app = Dash(__name__) # pylint: disable=invalid-name,no-member
_app.css.config.serve_locally = True
_app.scripts.config.serve_locally = True
_HAT = None # Store the hat object in a global for use in multiple callbacks.
MCC134_CHANNEL_COUNT = 4
def create_hat_selector():
"""
Gets a list of available MCC 134 devices and creates a corresponding
dash-core-components Dropdown element for the user interface.
Returns:
dcc.Dropdown: A dash-core-components Dropdown object.
"""
hats = hat_list(filter_by_id=HatIDs.MCC_134)
hat_selection_options = []
for hat in hats:
# Create the label from the address and product name
label = '{0}: {1}'.format(hat.address, hat.product_name)
# Create the value by converting the descriptor to a JSON object
option = {'label': label, 'value': json.dumps(hat._asdict())}
hat_selection_options.append(option)
selection = None
if hat_selection_options:
selection = hat_selection_options[0]['value']
return dcc.Dropdown( # pylint: disable=no-member
id='hatSelector', options=hat_selection_options,
value=selection, clearable=False)
def init_chart_data(number_of_channels, number_of_samples):
"""
Initializes the chart with the specified number of samples.
Args:
number_of_channels (int): The number of channels to be displayed.
number_of_samples (int): The number of samples to be displayed.
Returns:
str: A string representation of a JSON object containing the chart data.
"""
samples = []
for i in range(number_of_samples):
samples.append(i)
data = []
for _ in range(number_of_channels):
data.append([None]*number_of_samples)
chart_data = {'data': data, 'samples': samples, 'sample_count': 0}
return json.dumps(chart_data)
# Define the HTML layout for the user interface, consisting of
# dash-html-components and dash-core-components.
_TC_TYPE_OPTIONS = [{'label': 'J', 'value': TcTypes.TYPE_J},
{'label': 'K', 'value': TcTypes.TYPE_K},
{'label': 'T', 'value': TcTypes.TYPE_T},
{'label': 'E', 'value': TcTypes.TYPE_E},
{'label': 'R', 'value': TcTypes.TYPE_R},
{'label': 'S', 'value': TcTypes.TYPE_S},
{'label': 'B', 'value': TcTypes.TYPE_B},
{'label': 'N', 'value': TcTypes.TYPE_N}]
# pylint: disable=no-member
_app.layout = html.Div([
html.H1(
children='MCC 134 DAQ HAT Web Server Example',
id='exampleTitle'),
html.Div(
children=[
html.Div(
id='rightContent',
children=[
dcc.Graph(id='stripChart'),
html.Div(id='errorDisplay',
children='',
style={'font-weight': 'bold', 'color': 'red'})],
style={'width': '100%', 'box-sizing': 'border-box',
'float': 'left', 'padding-left': 320}),
html.Div(
id='leftContent',
children=[
html.Label('Select a HAT...',
style={'font-weight': 'bold'}),
create_hat_selector(),
html.Label('Seconds per sample',
style={'font-weight': 'bold', 'display': 'block',
'margin-top': 10}),
dcc.Input(id='secondsPerSample', type='number',
step=1, value=1.0, min=1.0,
style={'width': 100, 'display': 'block'}),
html.Label('Samples to display',
style={'font-weight': 'bold',
'display': 'block', 'margin-top': 10}),
dcc.Input(id='samplesToDisplay', type='number', min=2,
max=1000, step=1, value=100,
style={'width': 100, 'display': 'block'}),
html.Div(
children=[
html.Label('Active Channels',
style={'font-weight': 'bold',
'display': 'block',
'margin-top': 10,
'margin-bottom': 8}),
dcc.Checklist(
id='channelSelections',
options=[
{'label': 'Channel 0', 'value': 0},
{'label': 'Channel 1', 'value': 1},
{'label': 'Channel 2', 'value': 2},
{'label': 'Channel 3', 'value': 3}],
labelStyle={'display': 'block', 'height': 36},
value=[0])],
style={'float': 'left', 'width': 150}),
html.Div(
id='tcTypeSelectors',
children=[
html.Label('TC Type',
style={'font-weight': 'bold',
'display': 'block',
'margin-top': 10}),
dcc.Dropdown(id='tcTypeSelector0',
options=_TC_TYPE_OPTIONS,
value=TcTypes.TYPE_J,
clearable=False),
dcc.Dropdown(id='tcTypeSelector1',
options=_TC_TYPE_OPTIONS,
value=TcTypes.TYPE_J,
clearable=False),
dcc.Dropdown(id='tcTypeSelector2',
options=_TC_TYPE_OPTIONS,
value=TcTypes.TYPE_J,
clearable=False),
dcc.Dropdown(id='tcTypeSelector3',
options=_TC_TYPE_OPTIONS,
value=TcTypes.TYPE_J,
clearable=False)],
style={'float': 'left', 'width': 150,
'margin-bottom': 8}),
html.Button(
children='Configure',
id='startStopButton',
style={'width': 100, 'height': 35,
'text-align': 'center', 'margin-top': 30})],
style={'width': 320, 'box-sizing': 'border-box', 'padding': 10,
'position': 'absolute', 'top': 0, 'left': 0})],
style={'position': 'relative', 'display': 'block',
'overflow': 'hidden', 'padding-bottom': 100}),
dcc.Interval(
id='timer',
interval=1000*60*60*24, # in milliseconds
n_intervals=0),
html.Div(
id='chartData',
style={'display': 'none'},
children=init_chart_data(1, 1000)),
html.Div(
id='chartInfo',
style={'display': 'none'},
children=json.dumps({'sample_count': 0})),
html.Div(
id='status',
style={'display': 'none'}),
])
# pylint: enable=no-member
@_app.callback(
Output('status', 'children'),
[Input('startStopButton', 'n_clicks')],
[State('startStopButton', 'children'),
State('hatSelector', 'value'),
State('channelSelections', 'value'),
State('tcTypeSelector0', 'value'),
State('tcTypeSelector1', 'value'),
State('tcTypeSelector2', 'value'),
State('tcTypeSelector3', 'value')]
) # pylint: disable=too-many-arguments
def start_stop_click(n_clicks, button_label, hat_descriptor_json_str,
active_channels, tc_type0, tc_type1, tc_type2, tc_type3):
"""
A callback function to change the application status when the Configure,
Start or Stop button is clicked.
Args:
n_clicks (int): Number of button clicks - triggers the callback.
button_label (str): The current label on the button.
hat_descriptor_json_str (str): A string representation of a JSON object
containing the descriptor for the selected MCC 134 DAQ HAT.
active_channels ([int]): A list of integers corresponding to the user
selected Active channel checkboxes.
tc_type0 (TcTypes): The selected TC Type for channel 0.
        tc_type1 (TcTypes): The selected TC Type for channel 1.
        tc_type2 (TcTypes): The selected TC Type for channel 2.
        tc_type3 (TcTypes): The selected TC Type for channel 3.
Returns:
str: The new application status - "idle", "configured", "running"
or "error"
"""
output = 'idle'
if n_clicks is not None and n_clicks > 0:
output = 'error'
if button_label == 'Configure':
# If configuring, create the hat object.
if hat_descriptor_json_str:
hat_descriptor = json.loads(hat_descriptor_json_str)
# The hat object is retained as a global for use in
# other callbacks.
global _HAT # pylint: disable=global-statement
_HAT = mcc134(hat_descriptor['address'])
if active_channels:
# Set the TC type for all active channels to the selected TC
# type prior to acquiring data.
tc_types = [tc_type0, tc_type1, tc_type2, tc_type3]
for channel in active_channels:
_HAT.tc_type_write(channel, tc_types[channel])
output = 'configured'
elif button_label == 'Start':
output = 'running'
elif button_label == 'Stop':
output = 'idle'
return output
@_app.callback(
Output('timer', 'interval'),
[Input('status', 'children')],
[State('secondsPerSample', 'value')]
)
def update_timer_interval(acq_state, seconds_per_sample):
"""
A callback function to update the timer interval. The timer interval is
set to one day when idle.
Args:
acq_state (str): The application state of "idle", "configured",
"running" or "error" - triggers the callback.
seconds_per_sample (float): The user specified sample rate value in
seconds per sample.
Returns:
float: Timer interval value in ms.
"""
interval = 1000*60*60*24 # 1 day
if acq_state == 'running':
interval = seconds_per_sample * 1000 # Convert to ms
return interval
@_app.callback(
Output('hatSelector', 'disabled'),
[Input('status', 'children')]
)
def disable_hat_selector_dropdown(acq_state):
"""
A callback function to disable the HAT selector dropdown when the
application status changes to configured or running.
"""
disabled = False
if acq_state == 'configured' or acq_state == 'running':
disabled = True
return disabled
@_app.callback(
Output('secondsPerSample', 'disabled'),
[Input('status', 'children')]
)
def disable_sample_rate_input(acq_state):
"""
A callback function to disable the sample rate input when the
application status changes to configured or running.
"""
disabled = False
if acq_state == 'configured' or acq_state == 'running':
disabled = True
return disabled
@_app.callback(
Output('samplesToDisplay', 'disabled'),
[Input('status', 'children')]
)
def disable_samples_to_disp_input(acq_state):
"""
A callback function to disable the number of samples to display input
when the application status changes to configured or running.
"""
disabled = False
if acq_state == 'configured' or acq_state == 'running':
disabled = True
return disabled
@_app.callback(
Output('channelSelections', 'options'),
[Input('status', 'children')]
)
def disable_channel_checkboxes(acq_state):
"""
A callback function to disable the active channel checkboxes when the
application status changes to configured or running.
"""
options = []
for channel in range(MCC134_CHANNEL_COUNT):
label = 'Channel ' + str(channel)
disabled = False
if acq_state == 'configured' or acq_state == 'running':
disabled = True
options.append({'label': label, 'value': channel, 'disabled': disabled})
return options
@_app.callback(
Output('tcTypeSelectors', 'style'),
[Input('status', 'children')],
[State('tcTypeSelectors', 'style')]
)
def disable_tc_type_selector_dropdowns(acq_state, div_style):
# pylint: disable=invalid-name
"""
A callback function to disable all TC Type selector dropdowns when
the application status changes to configured or running.
"""
div_style['pointer-events'] = 'auto'
div_style['opacity'] = 1.0
if acq_state == 'configured' or acq_state == 'running':
div_style['pointer-events'] = 'none'
div_style['opacity'] = 0.8
return div_style
@_app.callback(
Output('startStopButton', 'children'),
[Input('status', 'children')]
)
def update_start_stop_button_name(acq_state):
"""
A callback function to update the label on the button when the application
status changes.
Args:
acq_state (str): The application state of "idle", "configured",
"running" or "error" - triggers the callback.
Returns:
str: The new button label of "Configure", "Start" or "Stop"
"""
output = 'Configure'
if acq_state == 'configured':
output = 'Start'
elif acq_state == 'running':
output = 'Stop'
return output
@_app.callback(
Output('chartData', 'children'),
[Input('timer', 'n_intervals'),
Input('status', 'children')],
[State('chartData', 'children'),
State('samplesToDisplay', 'value'),
State('channelSelections', 'value')]
)
def update_strip_chart_data(_n_intervals, acq_state, chart_data_json_str,
samples_to_display_val, active_channels):
"""
A callback function to update the chart data stored in the chartData HTML
div element. The chartData element is used to store the existing data
values, which allows sharing of data between callback functions. Global
variables cannot be used to share data between callbacks (see
https://dash.plot.ly/sharing-data-between-callbacks).
Args:
_n_intervals (int): Number of timer intervals - triggers the callback.
acq_state (str): The application state of "idle", "configured",
"running" or "error" - triggers the callback.
chart_data_json_str (str): A string representation of a JSON object
containing the current chart data.
samples_to_display_val (float): The number of samples to be displayed.
active_channels ([int]): A list of integers corresponding to the user
selected active channel checkboxes.
Returns:
str: A string representation of a JSON object containing the updated
chart data.
"""
updated_chart_data = chart_data_json_str
samples_to_display = int(samples_to_display_val)
num_channels = len(active_channels)
if acq_state == 'running':
hat = globals()['_HAT']
if hat is not None:
chart_data = json.loads(chart_data_json_str)
# Reset error flags
chart_data['open_tc_error'] = False
chart_data['over_range_error'] = False
chart_data['common_mode_range_error'] = False
data = []
for channel in active_channels:
temp_val = hat.t_in_read(channel)
if temp_val == mcc134.OPEN_TC_VALUE:
chart_data['open_tc_error'] = True
data.append(None)
elif temp_val == mcc134.OVERRANGE_TC_VALUE:
chart_data['over_range_error'] = True
data.append(None)
elif temp_val == mcc134.COMMON_MODE_TC_VALUE:
chart_data['common_mode_range_error'] = True
data.append(None)
else:
data.append(temp_val)
# Add the samples read to the chart_data object.
sample_count = add_samples_to_data(samples_to_display, num_channels,
chart_data, data)
# Update the total sample count.
chart_data['sample_count'] = sample_count
updated_chart_data = json.dumps(chart_data)
elif acq_state == 'configured':
# Clear the data in the strip chart when Configure is clicked.
updated_chart_data = init_chart_data(num_channels, samples_to_display)
return updated_chart_data
def add_samples_to_data(samples_to_display, num_chans, chart_data, data):
"""
Adds the samples read from the mcc134 hat device to the chart_data object
used to update the strip chart.
Args:
samples_to_display (int): The number of samples to be displayed.
num_chans (int): The number of selected channels.
chart_data (dict): A dictionary containing the data used to update the
strip chart display.
data (list): A list of values for each active channel.
Returns:
int: The updated total sample count after the data is added.
"""
num_samples_read = int(len(data) / num_chans)
current_sample_count = int(chart_data['sample_count'])
# Convert lists to deque objects with the maximum length set to the number
# of samples to be displayed. This will pop off the oldest data
# automatically when new data is appended.
chart_data['samples'] = deque(chart_data['samples'],
maxlen=samples_to_display)
for chan in range(num_chans):
chart_data['data'][chan] = deque(chart_data['data'][chan],
maxlen=samples_to_display)
start_sample = 0
if num_samples_read > samples_to_display:
start_sample = num_samples_read - samples_to_display
for sample in range(start_sample, num_samples_read):
chart_data['samples'].append(current_sample_count + sample)
for chan in range(num_chans):
data_index = sample * num_chans + chan
chart_data['data'][chan].append(data[data_index])
# Convert deque objects back to lists so they can be written to to div
# element.
chart_data['samples'] = list(chart_data['samples'])
for chan in range(num_chans):
chart_data['data'][chan] = list(chart_data['data'][chan])
return current_sample_count + num_samples_read
@_app.callback(
Output('stripChart', 'figure'),
[Input('chartData', 'children')],
[State('channelSelections', 'value')]
)
def update_strip_chart(chart_data_json_str, active_channels):
"""
A callback function to update the strip chart display when new data is read.
Args:
chart_data_json_str (str): A string representation of a JSON object
containing the current chart data - triggers the callback.
active_channels ([int]): A list of integers corresponding to the user
selected Active channel checkboxes.
Returns:
object: A figure object for a dash-core-components Graph, updated with
the most recently read data.
"""
data = []
xaxis_range = [0, 1000]
chart_data = json.loads(chart_data_json_str)
if 'samples' in chart_data and chart_data['samples']:
xaxis_range = [min(chart_data['samples']), max(chart_data['samples'])]
if 'data' in chart_data:
data = chart_data['data']
plot_data = []
colors = ['#DD3222', '#FFC000', '#3482CB', '#FF6A00']
# Update the serie data for each active channel.
for chan_idx, channel in enumerate(active_channels):
scatter_serie = go.Scatter(
x=list(chart_data['samples']),
y=list(data[chan_idx]),
name='Channel {0:d}'.format(channel),
marker={'color': colors[channel]}
)
plot_data.append(scatter_serie)
# Get min and max data values
y_min = None
y_max = None
for chan_data in data:
for y_val in chan_data:
if y_min is None or (y_val is not None and y_val < y_min):
y_min = y_val
if y_max is None or (y_val is not None and y_val > y_max):
y_max = y_val
# Set the Y scale
y_max = y_max + 5.0 if y_max is not None else 100.0
y_min = y_min - 5.0 if y_min is not None else 0.0
figure = {
'data': plot_data,
'layout': go.Layout(
xaxis=dict(title='Samples', range=xaxis_range),
yaxis=dict(title='Temperature (°C)', range=[y_min, y_max]),
margin={'l': 50, 'r': 40, 't': 50, 'b': 40, 'pad': 0},
showlegend=True,
title='Strip Chart'
)
}
return figure
@_app.callback(
Output('chartInfo', 'children'),
[Input('stripChart', 'figure')],
[State('chartData', 'children')]
)
def update_chart_info(_figure, chart_data_json_str):
"""
A callback function to set the sample count for the number of samples that
have been displayed on the chart.
Args:
_figure (object): A figure object for a dash-core-components Graph for
the strip chart - triggers the callback.
chart_data_json_str (str): A string representation of a JSON object
containing the current chart data - triggers the callback.
Returns:
str: A string representation of a JSON object containing the chart info
with the updated sample count.
"""
chart_data = json.loads(chart_data_json_str)
chart_info = {'sample_count': chart_data['sample_count']}
return json.dumps(chart_info)
@_app.callback(
Output('errorDisplay', 'children'),
[Input('chartData', 'children'),
Input('status', 'children')],
[State('hatSelector', 'value'),
State('channelSelections', 'value')]
) # pylint: disable=too-many-arguments
def update_error_message(chart_data_json_str, acq_state, hat_selection,
active_channels):
"""
A callback function to display error messages.
Args:
chart_data_json_str (str): A string representation of a JSON object
containing the current chart data - triggers the callback.
acq_state (str): The application state of "idle", "configured",
"running" or "error" - triggers the callback.
hat_selection (str): A string representation of a JSON object
containing the descriptor for the selected MCC 134 DAQ HAT.
active_channels ([int]): A list of integers corresponding to the user
selected Active channel checkboxes.
Returns:
str: The error message to display.
"""
error_message = ''
if acq_state == 'running':
chart_data = json.loads(chart_data_json_str)
if ('open_tc_error' in chart_data.keys()
and chart_data['open_tc_error']):
error_message += 'Open thermocouple; '
if ('over_range_error' in chart_data.keys()
and chart_data['over_range_error']):
error_message += 'Temp outside valid range; '
if ('common_mode_range_error' in chart_data.keys()
and chart_data['common_mode_range_error']):
error_message += 'Temp outside common-mode range; '
elif acq_state == 'error':
num_active_channels = len(active_channels)
if not hat_selection:
error_message += 'Invalid HAT selection; '
if num_active_channels <= 0:
error_message += 'Invalid channel selection (min 1); '
return error_message
def get_ip_address():
""" Utility function to get the IP address of the device. """
ip_address = '127.0.0.1' # Default to localhost
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
sock.connect(('1.1.1.1', 1)) # Does not have to be reachable
ip_address = sock.getsockname()[0]
finally:
sock.close()
return ip_address
if __name__ == '__main__':
# This will only be run when the module is called directly.
_app.run_server(host=get_ip_address(), port=8080)
| true |
742a937de2b1dda61387a01636852e81a29c9dbd | Python | chetanchandc/multimodal_dataset_creation | /src/lib/Windows/recording_Window.py | UTF-8 | 10,142 | 2.59375 | 3 | [
"MIT"
] | permissive | import PyQt5
from PyQt5 import QtCore, QtGui, QtWidgets
## class Ui_Recording_Window
# This class contains the recording window design
class Ui_Recording_Window(object):
def setupUi(self, Recording_Window):
Recording_Window.setObjectName("Recording_Window")
Recording_Window.resize(892, 600)
## Label containing the window's title
self.Recording_Title = QtWidgets.QLabel(Recording_Window)
self.Recording_Title.setGeometry(QtCore.QRect(325, 0, 250, 50))
font = QtGui.QFont()
font.setPointSize(24)
font.setBold(True)
font.setWeight(75)
self.Recording_Title.setFont(font)
self.Recording_Title.setFrameShape(QtWidgets.QFrame.Panel)
self.Recording_Title.setFrameShadow(QtWidgets.QFrame.Raised)
self.Recording_Title.setLineWidth(4)
self.Recording_Title.setAlignment(QtCore.Qt.AlignCenter)
self.Recording_Title.setObjectName("Recording_Title")
## Button used to come back to the configuration window
self.GoBack_Button = QtWidgets.QPushButton(Recording_Window)
self.GoBack_Button.setGeometry(QtCore.QRect(20, 20, 100, 50))
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.GoBack_Button.setFont(font)
self.GoBack_Button.setObjectName("GoBack_Button")
## Layout to contain the two countdown labels
self.verticalLayoutWidget = QtWidgets.QWidget(Recording_Window)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(370, 70, 160, 91))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.Countdown_Layout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.Countdown_Layout.setContentsMargins(0, 0, 0, 0)
self.Countdown_Layout.setObjectName("Countdown_Layout")
## Simple title label
self.Countdown_Title_Label = QtWidgets.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(18)
font.setItalic(True)
self.Countdown_Title_Label.setFont(font)
self.Countdown_Title_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Countdown_Title_Label.setObjectName("Countdown_Title_Label")
## Add the title to the layout
self.Countdown_Layout.addWidget(self.Countdown_Title_Label)
## Label that stores the countdown value
self.Countdown_Value_Label = QtWidgets.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(30)
font.setBold(True)
font.setWeight(75)
self.Countdown_Value_Label.setFont(font)
self.Countdown_Value_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Countdown_Value_Label.setObjectName("Countdown_Value_Label")
## Add the label to the layout
self.Countdown_Layout.addWidget(self.Countdown_Value_Label)
## Label that will display the images corresponding to the gesture to perform
self.Gesture_Image_Label = QtWidgets.QLabel(Recording_Window)
self.Gesture_Image_Label.setGeometry(QtCore.QRect(275, 180, 350, 300))
self.Gesture_Image_Label.setFrameShape(QtWidgets.QFrame.Panel)
self.Gesture_Image_Label.setFrameShadow(QtWidgets.QFrame.Plain)
self.Gesture_Image_Label.setLineWidth(3)
self.Gesture_Image_Label.setText("")
self.Gesture_Image_Label.setScaledContents(True)
self.Gesture_Image_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Gesture_Image_Label.setObjectName("Gesture_Image_Label")
## Layout to contain the play/stop button and the progress bar representing the time elapsed
self.horizontalLayoutWidget = QtWidgets.QWidget(Recording_Window)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(70, 500, 761, 34))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.Time_Progress_Layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.Time_Progress_Layout.setContentsMargins(0, 0, 0, 0)
self.Time_Progress_Layout.setObjectName("Time_Progress_Layout")
## Button that starts the countdown and then is used to stop the recording
self.PlayStop_Button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PlayStop_Button.sizePolicy().hasHeightForWidth())
self.PlayStop_Button.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.PlayStop_Button.setFont(font)
self.PlayStop_Button.setObjectName("PlayStop_Button")
## Add the button to the layout
self.Time_Progress_Layout.addWidget(self.PlayStop_Button)
## Progress bar used to show the time elapsed as a percentage bar
self.Time_Elapsed_ProgressBar = QtWidgets.QProgressBar(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Time_Elapsed_ProgressBar.sizePolicy().hasHeightForWidth())
self.Time_Elapsed_ProgressBar.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
self.Time_Elapsed_ProgressBar.setFont(font)
self.Time_Elapsed_ProgressBar.setProperty("value", 0)
self.Time_Elapsed_ProgressBar.setObjectName("Time_Elapsed_ProgressBar")
## Add the progress bar to the layout
self.Time_Progress_Layout.addWidget(self.Time_Elapsed_ProgressBar)
## Layout to contain the string that shows the time elapsed in a "minutes:seconds" format
self.horizontalLayoutWidget_2 = QtWidgets.QWidget(Recording_Window)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(300, 550, 301, 41))
self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
self.Time_Elapsed_Layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
self.Time_Elapsed_Layout.setContentsMargins(0, 0, 0, 0)
self.Time_Elapsed_Layout.setSpacing(10)
self.Time_Elapsed_Layout.setObjectName("Time_Elapsed_Layout")
## Label that contains the first part of the string
self.Time_Elapsed_Label = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Time_Elapsed_Label.sizePolicy().hasHeightForWidth())
self.Time_Elapsed_Label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(16)
font.setItalic(True)
self.Time_Elapsed_Label.setFont(font)
self.Time_Elapsed_Label.setObjectName("Time_Elapsed_Label")
## Add the label to the layout
self.Time_Elapsed_Layout.addWidget(self.Time_Elapsed_Label)
## Label that contains the current time, which will be updated every second
self.Current_Time_Label = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Current_Time_Label.sizePolicy().hasHeightForWidth())
self.Current_Time_Label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(24)
self.Current_Time_Label.setFont(font)
self.Current_Time_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Current_Time_Label.setObjectName("Current_Time_Label")
## Add the label to the layout
self.Time_Elapsed_Layout.addWidget(self.Current_Time_Label)
## Label that contains the total duration of the recording
self.Total_Time_Label = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Total_Time_Label.sizePolicy().hasHeightForWidth())
self.Total_Time_Label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(24)
self.Total_Time_Label.setFont(font)
self.Total_Time_Label.setAlignment(QtCore.Qt.AlignCenter)
self.Total_Time_Label.setObjectName("Total_Time_Label")
## Add the label to the layout
self.Time_Elapsed_Layout.addWidget(self.Total_Time_Label)
self.retranslateUi(Recording_Window)
QtCore.QMetaObject.connectSlotsByName(Recording_Window)
## function retranslateUi
# Adds text to the window's elements
# @param self The object pointer
# @param Recording_Window The window whose elements's text must be modified
def retranslateUi(self, Recording_Window):
_translate = QtCore.QCoreApplication.translate
Recording_Window.setWindowTitle(_translate("Recording_Window", "Form"))
self.Recording_Title.setText(_translate("Recording_Window", "Recording"))
self.GoBack_Button.setText(_translate("Recording_Window", "Go back"))
self.Countdown_Title_Label.setText(_translate("Recording_Window", "Countdown:"))
self.Countdown_Value_Label.setText(_translate("Recording_Window", ""))
self.PlayStop_Button.setText(_translate("Recording_Window", "Play"))
self.Time_Elapsed_Label.setText(_translate("Recording_Window", "Time elapsed is "))
self.Current_Time_Label.setText(_translate("Recording_Window", "0:00"))
self.Total_Time_Label.setText(_translate("Recording_Window", "/0:00"))
| true |
530dfb8efc7f39f7f108f134beff733d24fbee82 | Python | pitcons/amarak | /amarak/models/concept.py | UTF-8 | 1,445 | 2.609375 | 3 | [] | no_license | # encoding: utf8
from .labels_manager import LabelsManager
from .manager import Manager
from .link import Link
from .note import Note
from .notes_manager import NotesManager
from amarak.utils import smart_encode, smart_decode
class LinkManager(Manager):
def __init__(self):
super(LinkManager, self).__init__(Link)
class Concept(object):
name = None
scheme = None
labels = None
def __init__(self, name, scheme):
self.name = name
self.scheme = scheme
self.labels = LabelsManager()
self.links = LinkManager()
self.notes = NotesManager()
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = smart_decode(value)
def skos_name(self):
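        # Build a SKOS-friendly identifier: spaces become underscores and double quotes are dropped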
return self.name.replace(' ', '_').replace('"', '')
def __repr__(self):
return 'Concept("%s")' % (smart_encode(self.name))
@classmethod
def from_python(cls, scheme, data):
concept = cls(name=data['name'], scheme=scheme)
for label_d in data['labels']:
concept.labels._add_raw(
label_d['lang'],
label_d['type'],
label_d['literal'],
label_d.get('id'))
return concept
def to_python(self):
return {
'scheme': self.scheme.id,
'name': self.name,
'labels': self.labels.to_python()
}
| true |
02036516b14d48db11e3d3bc9feebaa6f632199d | Python | shawnmjones/VisHash | /calc_query_matches.py | UTF-8 | 3,382 | 2.546875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | # ©2020. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S. Department of Energy/National Nuclear Security Administration. All rights in the program are reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear Security Administration. The Government is granted for itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare derivative works, distribute copies to the public, perform publicly and display publicly, and to permit others to do so.
import argparse
import numpy as np
import time
import util
import filehandling
def calc_distances(ftr):
print(time.asctime(), ' Computing distances')
start = time.time()
dst = util.compute_pair_distances(ftr)
end = time.time()
print(time.asctime(), ' Done Computing distances in ', end-start, ' seconds', flush=True)
return dst
def get_min_distances(dst, n_matches):
print(time.asctime(), ' Sorting distances')
start = time.time()
midx = util.mindist(dst, n=n_matches)
n_matches = len(midx[0])
end = time.time()
print(time.asctime(), ' Done Sorting distances in ', end-start, ' seconds', flush=True)
return midx
def get_topk(dst, n_matches):
print(time.asctime(), ' Calculating matches')
start = time.time()
retrievals = util.topk(dst, k=n_matches)
end = time.time()
print(time.asctime(), ' Done Calculating matches ' , end-start, ' seconds', flush=True)
return retrievals
def save_matchlist(retrievals, dst, filenames, postfix):
# For all images in query list, save list of matches
n_query = len(retrievals)
file_groups = [(filenames[i], [filenames[x] for x in retrievals[i]]) for
i in range(n_query)]
dist_groups = [[dst[i, x] for x in retrievals[i]] for i in range(n_query)]
query_dict = {file_groups[i][0]:(file_groups[i][1:], dist_groups[i]) for i in range(len(file_groups))}
filehandling.write_topk(postfix, './', query_dict)
return query_dict
def get_args():
parser = argparse.ArgumentParser(description='Calculate distances between all-pairs of given signature list, saving output as a query with top-k matches.')
parser.add_argument('--postfix', type=str,
help='filenames_[postfix].csv and signatures_[postfix].npy will be read while topk_[postfix].csv will be written')
parser.add_argument('--path', type=str, default='./',
help='Path to input files [./]')
parser.add_argument('-k', type=int, default=10,
help='If specified limit number of matches k per query [10]')
args = parser.parse_args()
return(args)
def main():
args = get_args()
postfix = args.postfix
# Read signatures and filename list
filenames = filehandling.read_filenames(postfix, args.path)
sigs = np.load(args.path + '/signatures_' + postfix + '.npy')
# Compute all-pairs distances
dist_mat = calc_distances(sigs)
# Retrieve k matches per query
retrievals = get_topk(dist_mat, args.k)
save_matchlist(retrievals, dist_mat, filenames, postfix)
if __name__ == '__main__':
main()
| true |
45ba85dc6ca0fe3c8be648bb0cabf4490b19c47a | Python | tyfeeney/truss-structures-numpy | /client.py | UTF-8 | 368 | 2.734375 | 3 | [] | no_license | from project import Truss, Brace, Node
n1 = Node("a",0,1)
n2 = Node("b",1,1)
n3 = Node("c",0,0)
n4 = Node("d",2,0)
b1 = Brace("beam 1",n1,n2)
b2 = Brace("beam 2",n3,n2)
b3 = Brace("beam 3",n3,n4)
b4 = Brace("beam 4",n2,n4)
t = Truss([n1,n2,n3,n4])
answer = t.calculate(upward_force = 1000)
for entry in answer:
print(entry + " " + str(answer[entry]))
| true |
1575354ecf771b24836c581581161d85cee7be51 | Python | JoeA42/calculador-de-notas | /Calculo y Registro de Examenes.py | UTF-8 | 1,617 | 3.765625 | 4 | [] | no_license | # the os library is imported
import os
# the variable restart1 is set up for the outer loop
restart1=True
# the outer loop starts here
while restart1!="n":
    # the exam name is read and a text file with that name is created
prueba = input("Prueba: ")
documento = prueba+'.txt'
datos = os.open(documento, os.O_RDWR|os.O_CREAT)
    # that file is opened
registro = open(documento,'r')
    # the variable restart2 is set up for the inner loop
restart2=True
    # the inner loop starts here
while restart2!="n":
        # the student's data is read
nombre = input("Nombre de estudiante: ")
puntosTotales = int(input("Puntos totales de prueba: "))
puntosObtenidos = int(input("Puntos obtenidos: "))
        # the grade is calculated
nota = (puntosObtenidos/puntosTotales)*100
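        # worked example (illustrative): 30 points obtained out of a 40-point exam gives (30/40)*100 = 75.0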
        # the data is written to the file
registro = open(documento,'a')
registro.write("Estudiante: " + nombre +" - Nota obtenida: "+str(nota)+ '\n')
        # a message with the name and grade is printed, confirming the data was saved
print ("Estudiante: " + nombre +" - Nota obtenida: "+str(nota) + " -DATOS GUARDADOS-")
        # the file is closed
registro.close()
        # input is read to decide whether to leave the inner loop
restart2 = input("Desea ingresar otro ESTUDIANTE? ('s' para continuar 'n' para salir)")
    # input is read to decide whether to leave the outer loop
restart1 = input("Desea ingresar otra PRUEBA? ('s' para continuar 'n' para salir)")
# the file descriptor is closed
os.close( datos )
| true |
085e8d56f86c54f8ce175f657732863abc107c07 | Python | alfonsusenrico/proyekTOS | /server.py | UTF-8 | 3,542 | 2.578125 | 3 | [] | no_license | import eventlet
import socketio
socket = socketio.Server()
app = socketio.WSGIApp(socket)
#list object
users = []
allDevice = []
class Device:
user_id = ''
token = ''
def __init__(self, id, token):
self.user_id = id
self.token = token
class User:
user_id = ''
deviceCount = 0
devices = []
activeDevice = ''
def __init__(self, id):
self.user_id = id
self.deviceCount = 0
def addDevice(self, device):
self.devices.append(device)
self.deviceCount += 1
self.activeDevice = device.token
#BOT
@socket.event
def connect(sid, data):
print('Socket connected')
@socket.on('addUser')
def addUser(sid, data):
for user in users:
if user.user_id == data:
return
users.append(User(data))
socket.emit('afterData', data)
@socket.on('tokenGenerated')
def setToken(sid, data):
    for device in allDevice:
if device.token == data['token']:
return
for user in users:
if user.user_id == data['id']:
user.addDevice(Device(data['id'], data['token']))
allDevice.append(Device(data['id'], data['token']))
socket.enter_room(sid, str(data['token']))
break
#CLIENT
#inputData
@socket.on('afterInput')
def afterInput(sid, data):
rm = str(data)
socket.enter_room(sid, rm)
socket.emit('inputted', data, room=rm)
#CheckSystem
@socket.on('checkSystem')
def checkSystem(sid, data):
for user in users:
if user.user_id == data:
rm = str(user.activeDevice)
socket.emit('client_checkSystem', {'id': data, 'room': rm}, room=rm)
break
@socket.on('client_systemChecked')
def system(sid, data):
socket.emit('systemChecked', data, room=data['room'])
#RunApps(Firefox)
@socket.on('launchFirefox')
def launchFirefox(sid, data):
for user in users:
if user.user_id == data:
rm = str(user.activeDevice)
socket.emit('client_launchFirefox', {'id': data, 'room': rm}, room=rm)
break
@socket.on('client_firefoxLaunched')
def firefoxLaunched(sid, data):
socket.emit('firefoxLaunched', data, room=data['room'])
#RunApps(Chrome)
@socket.on('launchChrome')
def launchChrome(sid, data):
for user in users:
if user.user_id == data:
rm = str(user.activeDevice)
socket.emit('client_launchChrome', {'id': data, 'room': rm}, room=rm)
break
@socket.on('client_chromeLaunched')
def chromeLaunched(sid, data):
socket.emit('chromeLaunched', data, room=data['room'])
#shutdown
@socket.on('turnOFf')
def turnOff(sid, data):
for user in users:
if user.user_id == data:
rm = str(user.activeDevice)
socket.emit('client_turnOff', {'id': data, 'room': rm}, room=rm)
break
@socket.on('client_turnedOff')
def turnedOff(sid, data):
socket.emit('turnedOff', data, room=data['room'])
#restart
@socket.on('restart')
def restart(sid, data):
for user in users:
if user.user_id == data:
rm = str(user.activeDevice)
socket.emit('client_restart', {'id': data, 'room': rm}, room=rm)
break
@socket.on('client_restarted')
def restarted(sid, data):
socket.emit('restarted', data, room=data['room'])
#DEBUG / CHECKING
@socket.on('test')
def tes(sid):
print(users)
print(allDevice)
for item in users:
print(item.user_id)
print(item.deviceCount)
print(item.activeDevice)
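# Minimal client-side sketch (illustrative only, not part of this server):
# a python-socketio client could register a user and a device roughly like this:
#   import socketio
#   client = socketio.Client()
#   client.connect('http://localhost:5000')
#   client.emit('addUser', 'user-1')
#   client.emit('tokenGenerated', {'id': 'user-1', 'token': 'abc123'})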
eventlet.wsgi.server(eventlet.listen(('',5000)), app) | true |
9b10977c371518b365ae8f65bfe2731596e235a1 | Python | 981377660LMT/algorithm-study | /7_graph/bfs求无权图的最短路径/双向BFS两面包夹芝士/1210. 穿过迷宫的最少移动次数.py | UTF-8 | 2,885 | 3.3125 | 3 | [] | no_license | from typing import List
import collections
# 2 <= n <= 100
# note: the state is (x1, y1, x2, y2)
# when moving, handle the horizontal and the vertical snake (🐍) orientations separately
class Solution:
def minimumMoves(self, grid: List[List[int]]) -> int:
n = len(grid)
start = (0, 0, 0, 1)
end = (n - 1, n - 2, n - 1, n - 1)
if grid[0][0] == 1 or grid[0][1] == 1 or grid[n - 1][n - 2] == 1 or grid[n - 1][n - 1] == 1:
return -1
queue = collections.deque()
visited = set()
queue.append(start)
visited.add(start)
step = 0
while queue:
cur_len = len(queue)
for _ in range(cur_len):
r1, c1, r2, c2 = queue.popleft()
if (r1, c1, r2, c2) == end:
return step
                # ------------------------------------ snake body horizontal
if r1 == r2:
                    # ------------- the cell to the right is free: move one step right
if c2 + 1 < n and grid[r2][c2 + 1] != 1 and (r2, c2, r2, c2 + 1) not in visited:
queue.append((r2, c2, r2, c2 + 1))
visited.add((r2, c2, r2, c2 + 1))
                    # ------------- both cells below are free
if r1 + 1 < n and grid[r1 + 1][c1] != 1 and grid[r1 + 1][c2] != 1:
                        # ---- move down
if (r1 + 1, c1, r2 + 1, c2) not in visited:
queue.append((r1 + 1, c1, r2 + 1, c2))
visited.add((r1 + 1, c1, r2 + 1, c2))
                        # ---- rotate clockwise
if (r1, c1, r1 + 1, c1) not in visited:
queue.append((r1, c1, r1 + 1, c1))
visited.add((r1, c1, r1 + 1, c1))
                # ------------------------------------ snake body vertical
if c1 == c2:
                    # ------------- the cell below is free: move one step down
if r2 + 1 < n and grid[r2 + 1][c2] != 1 and (r2, c2, r2 + 1, c2) not in visited:
queue.append((r2, c2, r2 + 1, c2))
visited.add((r2, c2, r2 + 1, c2))
                    # ------------ both cells to the right are free
if c1 + 1 < n and grid[r1][c1 + 1] != 1 and grid[r2][c2 + 1] != 1:
                        # ---- move right
if (r1, c1 + 1, r2, c2 + 1) not in visited:
queue.append((r1, c1 + 1, r2, c2 + 1))
visited.add((r1, c1 + 1, r2, c2 + 1))
                        # ---- rotate counter-clockwise
if (r1, c1, r1, c1 + 1) not in visited:
queue.append((r1, c1, r1, c1 + 1))
visited.add((r1, c1, r1, c1 + 1))
step += 1
return -1
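# Quick sanity check (illustrative, not part of the original solution): for the
# smallest allowed grid of all zeros, a single downward move reaches the target,
# so Solution().minimumMoves([[0, 0], [0, 0]]) == 1.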
| true |
8d49c444bd42cd3901cf8bd33874c49124d7d096 | Python | goru47/INF1L-PRJ-2 | /INF1L-PRJ-2/blit cards.py | UTF-8 | 1,181 | 2.8125 | 3 | [
"MIT"
] | permissive | import pygame
pygame.init()
# create the window
window = pygame.display.set_mode((1000,600))
# set the window title
pygame.display.set_caption("blit card")
# add the window icon
gameIcon = pygame.image.load('images/BPicon.png')
pygame.display.set_icon(gameIcon)
# card properties
cardposX = 200
cardposY = 100
cardsizeX = 120
cardsizeY = 180
# colour
white = (255,255,255)
clock = pygame.time.Clock()
#================================================================
# load card sprite
#card offensive advanced rifling
CardOfAdRi = pygame.image.load("normalekaarten/kaartendef/kaartoffensief_advrifling.png")
CardOfAdRi = pygame.transform.scale(CardOfAdRi, (cardsizeX, cardsizeY))
#game
gameLoop=True
while gameLoop:
for event in pygame.event.get():
if (event.type==pygame.QUIT):
gameLoop=False
    # screen
window.fill(white)
#blit card
window.blit(CardOfAdRi, (cardposX, cardposY))
# ================================================================
pygame.display.flip()
clock.tick(10)
pygame.quit() | true |
6755c99b1f8198299bc560f8f37218862a2e054f | Python | woutdenolf/spectrocrunch | /spectrocrunch/visualization/colormap.py | UTF-8 | 1,739 | 3.09375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.colors as pltcolors
from ..utils import instance
def NormalizedToRGB(x):
"""
Args:
x(num|array): data values between 0 and 1
    Returns:
r(array):
g(array):
b(array):
"""
x, f = instance.asarrayf(x)
x = np.round(x * (2**24 - 1)).astype(int)
red = f(x & 255)
green = f((x >> 8) & 255)
blue = f((x >> 16) & 255)
return red, green, blue
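# Example of the mapping (illustrative): NormalizedToRGB(0.0) yields (0, 0, 0) and
# NormalizedToRGB(1.0) yields (255, 255, 255), since 1.0 maps to the integer
# 2**24 - 1 = 0xFFFFFF before the byte planes are split out.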
class LambdaColormap(pltcolors.Colormap):
def __init__(self, name, func, N=256):
self._func = func
super(LambdaColormap, self).__init__(name, N=N)
def __call__(self, x, alpha=None, bytes=False):
"""
Args:
x(num|array): data values between 0 and
alpha(Optional(num)): scalar between 0 and 1
bytes(Optional(bool)): as byte (0-255) or float (0-1)
Returns:
tuple: RGBA
"""
r, g, b = self._func(x)
if not bytes:
r = r / 255.0
g = g / 255.0
b = b / 255.0
if instance.isarray(r):
return np.stack([r, g, b, np.full(r.shape, alpha)], axis=-1)
else:
return r, g, b, alpha
def RGBcolormap():
return LambdaColormap("RGB", NormalizedToRGB, N=2**24 - 1)
def Linearcolormap(name, a, b, alpha=None):
a = pltcolors.to_rgba(a, alpha=alpha)
b = pltcolors.to_rgba(b, alpha=alpha)
cdict = {
"red": [(0.0, a[0], a[0]), (1.0, b[0], b[0])],
"green": [(0.0, a[1], a[1]), (1.0, b[1], b[1])],
"blue": [(0.0, a[2], a[2]), (1.0, b[2], b[2])],
"alpha": [(0.0, a[3], a[3]), (1.0, b[3], b[3])],
}
return pltcolors.ListedColormap(name, cdict)
| true |
b24b486a32aef9353879ea6058d21750507a807c | Python | eddie221/TrainingCode_Detection | /function.py | UTF-8 | 2,589 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 11 13:51:59 2021
@author: mmplab603
"""
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import torch
import math
def draw_bbox(img, bboxes):
plt.figure()
plt.imshow(img)
for bbox in bboxes:
x = np.array([bbox[0], bbox[0] + bbox[2], bbox[0] + bbox[2], bbox[0], bbox[0]])
y = np.array([bbox[1], bbox[1], bbox[1] + bbox[3], bbox[1] + bbox[3], bbox[1]])
plt.plot(x * img.shape[1], y * img.shape[0], 'r')
def bboxes_iou(bboxes_a, bboxes_b, GIoU=False, DIoU=False, CIoU=False):
if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4:
raise IndexError
tl = torch.max(bboxes_a[:, None, :2], bboxes_b[:, :2])
br = torch.min(bboxes_a[:, None, :2] + bboxes_a[:, None, 2:], bboxes_b[:, :2] + bboxes_b[:, 2:])
con_tl = torch.min(bboxes_a[:, None, :2], bboxes_b[:, :2])
con_br = torch.max(bboxes_a[:, None, :2] + bboxes_a[:, None, 2:], bboxes_b[:, :2] + bboxes_b[:, 2:])
center_dist = ((bboxes_a[:, None, 0] + bboxes_a[:, None, 2] / 2) - (bboxes_b[:, 0] + bboxes_b[:, 2] / 2)) ** 2 / 4 + \
((bboxes_a[:, None, 1] + bboxes_a[:, None, 3] / 2) - (bboxes_b[:, 1] + bboxes_b[:, 3] / 2)) ** 2 / 4
area_a = torch.prod(bboxes_a[:, 2:] - bboxes_a[:, :2], 1)
area_b = torch.prod(bboxes_b[:, 2:] - bboxes_b[:, :2], 1)
w1 = bboxes_a[:, 2]
h1 = bboxes_a[:, 3]
w2 = bboxes_b[:, 2]
h2 = bboxes_b[:, 3]
inter_mask = (tl < br).type(tl.type()).prod(dim = 2)
area_i = torch.prod(br - tl, 2) * inter_mask
area_u = area_a[:, None] + area_b - area_i
iou = area_i / area_u
if GIoU or DIoU or CIoU:
if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
area_c = torch.prod(con_br - con_tl, 2) # convex area
return iou - (area_c - area_u) / area_c # GIoU
if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
# convex diagonal squared
c2 = torch.pow(con_br - con_tl, 2).sum(dim=2) + 1e-16
if DIoU:
return iou - center_dist / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w1 / h1).unsqueeze(1) - torch.atan(w2 / h2), 2)
with torch.no_grad():
alpha = v / (1 - iou + v)
return iou - (center_dist / c2 + v * alpha) # CIoU
return iou | true |
33a75acf95f2deda7c3859e201d294e0cc555636 | Python | darthlukan/pysys | /courses/edX/MITx/600x/sqroot_recipe.py | UTF-8 | 1,260 | 3.390625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
def test_find_over_zero(find):
if find > 0:
return True
else:
print "We can't find sqroots for integers zero or less."
return False
def get_find(find):
find = input("Which number do we want the sqroot for?: ")
return find
def start_guessing(find, guess, c_guess, counter):
if counter > 0:
counter = counter
else:
counter = 0
if c_guess > 1:
start = c_guess
else:
start = 1
for i in range(start, find):
c_guess += i
if (i * i) == find:
guess = i
return guess
else:
counter += 1
if counter > 500:
print "We've run too many times, breaking"
guess = "FAILED!"
return guess
else:
                guess = start_guessing(find, guess, c_guess, counter)
return guess
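# Illustrative call (not in the original script): start_guessing(16, 0, 0, 0)
# walks i = 1, 2, 3, ... and returns 4 because 4 * 4 == 16.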
def main():
guess = 0
c_guess = 0
find = 0
find = get_find(find)
if test_find_over_zero(find):
final_guess = start_guessing(find, guess, c_guess, counter=0)
        response = "Our best guess is %s" % final_guess
        print response
        return response
else:
find = get_find(find)
if __name__ == '__main__':
main()
| true |
806cfbcd20cb7536c4c2ac4337421d126ae5a3a0 | Python | vasjuta/HS_HA | /utilities.py | UTF-8 | 600 | 2.671875 | 3 | [] | no_license | import numpy as np
def hamming_score(y_true, y_pred, normalize=True, sample_weight=None):
"""
Compute the Hamming score (aka label-based accuracy) for multi-label case
"""
acc_list = []
for i in range(y_true.shape[0]):
set_true = set(np.where(y_true[i])[0])
set_pred = set(np.where(y_pred[i])[0])
tmp_a = None
if len(set_true) == 0 and len(set_pred) == 0:
tmp_a = 1
else:
tmp_a = len(set_true.intersection(set_pred))/float(len(set_true.union(set_pred)) )
acc_list.append(tmp_a)
return np.mean(acc_list) | true |
934f685a515269d27eda1f3ca98a71767410d4cd | Python | ankycheng/damages-calculator | /utils.py | UTF-8 | 6,054 | 2.9375 | 3 | [
"MIT"
] | permissive | import pandas as pd
import os, sys, requests
def downloadFile(url, fileName, targetPath):
with open(targetPath+fileName, 'wb') as f:
print("Downloading {}".format(fileName))
response = requests.get(url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None: # no content length header
f.write(response.content)
else:
dl = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(50 * dl / total_length)
sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50-done)) )
sys.stdout.flush()
print('download finished')
# car_type = vehicle type, new = value when new, old = final residual value, use_year = years already in use
def Present_value(car_type, new, old, use_year):
    # motorcycle
if car_type == '1':
date = 2
    # non-commercial passenger car
elif car_type == '2':
date = 5
    # other vehicle types
else:
date = 4
d = []
Depreciation_rate = 1-((old/new)**(1/date))
    for i in range(date):
new = new*(1-Depreciation_rate)
d.append(new)
if use_year >= date:
result = d[-1]
else:
result = d[use_year-1]
return round(result)
# (monthly net income / number of days in that month) * number of days off work
# month_revnue = monthly income, month_date = days in that month, date = days unable to work
def revnue_loss(month_revnue, month_date, date):
return round(month_revnue/month_date*date)
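# worked example (illustrative): revnue_loss(30000, 30, 10) = round(30000/30*10) = 10000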
# Disability is divided into fifteen grades; each grade's benefit is calculated from the average daily insured salary times the number of days specified below:
def ability_loss_rate(loss_level):
    # loss_level = disability grade, insurance_revnue_daily = average daily insured salary, now_age = current age, retire_age = retirement age
if loss_level == 1:
date = 1200
elif loss_level == 2:
date = 1000
elif loss_level == 3:
date = 840
elif loss_level == 4:
date = 740
elif loss_level == 5:
date = 640
elif loss_level == 6:
date = 540
elif loss_level == 7:
date = 440
elif loss_level == 8:
date = 360
elif loss_level == 9:
date = 280
elif loss_level == 10:
date = 220
elif loss_level == 11:
date = 160
elif loss_level == 12:
date = 100
elif loss_level == 13:
date = 60
elif loss_level == 14:
date = 40
else:
date = 30
loss_rate = date/1200
return round(loss_rate*100, 2)
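# worked example (illustrative): grade 7 corresponds to 440 days, so
# ability_loss_rate(7) = round(440/1200*100, 2) = 36.67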
def ability_loss_num(loss_level, month_revnue, now_age, retire_age=65):
    # loss_level = disability grade, insurance_revnue_daily = average daily insured salary, now_age = current age, retire_age = retirement age
return round(ability_loss_rate(loss_level)*month_revnue*(retire_age-now_age))
def solatium_cal(solatium_pars):
diff_edu = solatium_pars['accuser_edu'] - solatium_pars['defendant_edu']
diff_age = solatium_pars['accuser_age'] - solatium_pars['defendant_age']
diff_occupation = solatium_pars['accuser_occupation'] - \
solatium_pars['defendant_occupation']
diff_annual_rev = solatium_pars['accuser_annual_rev'] - \
solatium_pars['defendant_annual_rev']
diff_investment = solatium_pars['accuser_investment'] - \
solatium_pars['defendant_investment']
edu_PN = 1 if diff_edu >= 0 else -1
edu_diff_rate = diff_edu*0.5 if abs(diff_edu) <= 3 else 3*0.5*edu_PN
age_PN = 1 if diff_age >= 0 else -1
age_diff_rate = diff_age*0.5 if abs(diff_age) <= 2 else 2*0.5*age_PN
occupation_diff_rate = diff_occupation*1.25
annual_rev_diff_rate, inv_diff_rate = get_eco_diff_rate(
diff_annual_rev, diff_investment)
rate_total = edu_diff_rate + age_diff_rate + \
occupation_diff_rate + annual_rev_diff_rate
print(rate_total)
solatium_rate = 1 + rate_total*0.05 if rate_total >= 0 else 1 + rate_total*0.08
return solatium_rate
def get_eco_diff_rate(diff_annual_rev, diff_investment):
annual_rev_diff_rate = 0
inv_diff_rate = 0
if diff_annual_rev > 2:
annual_rev_diff_rate = -2
elif diff_annual_rev >= 2:
annual_rev_diff_rate = -1
elif diff_annual_rev >= 1:
annual_rev_diff_rate = -0.5
elif diff_annual_rev >= 0:
annual_rev_diff_rate = 0
elif diff_annual_rev >= -1:
annual_rev_diff_rate = 0.5
elif diff_annual_rev >= -2:
annual_rev_diff_rate = 1
elif diff_annual_rev < -2:
annual_rev_diff_rate = 2
if diff_investment > 2:
inv_diff_rate = -1.25
elif diff_investment >= 2:
inv_diff_rate = -1
elif diff_investment >= 1:
inv_diff_rate = -0.5
elif diff_investment >= 0:
inv_diff_rate = 0
elif diff_investment >= -1:
inv_diff_rate = 0.5
elif diff_investment >= -2:
inv_diff_rate = 1
elif diff_investment < -2:
inv_diff_rate = 1.25
return annual_rev_diff_rate, inv_diff_rate
def jdReport(dataframe, col_name, *args):
for key in args:
select = dataframe[col_name].values.tolist()
a = [key in i for i in select]
dataframe = dataframe[a]
p1 = dataframe[dataframe['winOrloss'] == '被告敗訴']
p2 = p1[p1['jd_money']-p1['solatium_request'] >= 0]
win_probability = round(len(p1)/len(dataframe)*100, 2)
solatium_satisfy_probability = round(len(p2)/len(dataframe)*100, 2)
JD_Request = round(
(dataframe['jd_money'].mean())/(dataframe['solatium_request'].mean()), 2)
data = {
'jd_num': len(dataframe),
'win_probability': win_probability,
'solatium_satisfy_probability': solatium_satisfy_probability,
'JD/request': JD_Request,
'jd_money_mean': round(dataframe['jd_money'].mean(), 2),
'solatium_request_mean': round(dataframe['solatium_request'].mean(), 2)
}
name = '&'.join([key for key in args])
result = pd.DataFrame(data, index=[name])
return result.iloc[0].jd_money_mean
| true |
9d961719336f99c91b9d26be253a6a5d2d375f0b | Python | CSBG-LSU/GEXF- | /Absolute_gene_expression/gexf_file_absolute_gene_exp.py | UTF-8 | 1,484 | 2.734375 | 3 | [] | no_license | import pandas as pd
import networkx as nx
import numpy as np
import argparse
import time
def adding_geneexp_absolute_value(inputgexf, geneexp, output):
Graph = nx.read_gexf(inputgexf)
protein_labels = nx.get_node_attributes(Graph, 'ENSP-ID')
node_labels = nx.get_node_attributes(Graph, 'label')
protein_dict = pd.DataFrame.from_dict(protein_labels, orient='index', columns=['ENSP-ID'])
node_dict = pd.DataFrame.from_dict(node_labels, orient='index', columns=['Nodes'])
dictionary = node_dict.join(protein_dict)
gene_exp = pd.read_csv(geneexp, sep=",", usecols=['gene', 'expression_value'])
merged_geneexp_Nodes = pd.merge(gene_exp, dictionary, left_on=['gene'], right_on=['ENSP-ID'], how='right')
new_df = merged_geneexp_Nodes[['Nodes', 'expression_value']]
new_df.columns = ['Nodes', 'gene-exp']
new_df_pivot= pd.pivot_table(new_df, columns=['Nodes'], values=['gene-exp'])
nx.set_node_attributes(Graph, new_df_pivot)
nx.write_gexf(Graph, output)
def getArgs():
parser = argparse.ArgumentParser('python')
parser.add_argument('-inputgexf',required=True)
parser.add_argument('-geneexp',required=True)
parser.add_argument('-output',required=True)
return parser.parse_args()
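# Illustrative command line (hypothetical file names, not part of the original script):
#   python gexf_file_absolute_gene_exp.py -inputgexf network.gexf -geneexp expression.csv -output network_with_expression.gexf
# where expression.csv is expected to contain at least the 'gene' and 'expression_value' columns.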
if __name__ == "__main__":
args = getArgs()
network = adding_geneexp_absolute_value(args.inputgexf, args.geneexp, args.output)
start = time.time()
end = time.time()
print ('time elapsed:' + str(end - start))
| true |
428d38ede136ea97941a5b39dc73c18ff762d7a7 | Python | dbarbella/analogy | /nn/keras/bert.py | UTF-8 | 6,327 | 3.015625 | 3 | [] | no_license | import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
#logging.basicConfig(level=logging.INFO)
import matplotlib.pyplot as plt
def demo_example():
##########################
# From https://mccormickml.com/2019/05/14/BERT-word-embeddings-tutorial/
##########################
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
text = "Here is the sentence I want embeddings for."
text = "After stealing money from the bank vault, the bank robber was seen fishing on the Mississippi river bank."
marked_text = "[CLS] " + text + " [SEP]"
print(marked_text)
tokenized_text = tokenizer.tokenize(marked_text)
print(tokenized_text)
print(list(tokenizer.vocab.keys())[5000:5020])
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
for tup in zip(tokenized_text, indexed_tokens):
print(tup)
segments_ids = [1] * len(tokenized_text)
print(segments_ids)
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
# Load pre-trained model (weights)
model = BertModel.from_pretrained('bert-base-uncased')
# Put the model in "evaluation" mode, meaning feed-forward operation.
model.eval()
# Predict hidden states features for each layer
with torch.no_grad():
encoded_layers, _ = model(tokens_tensor, segments_tensors)
print("Number of layers:", len(encoded_layers))
layer_i = 0
print("Number of batches:", len(encoded_layers[layer_i]))
batch_i = 0
print("Number of tokens:", len(encoded_layers[layer_i][batch_i]))
token_i = 0
print("Number of hidden units:", len(encoded_layers[layer_i][batch_i][token_i]))
# Convert the hidden state embeddings into single token vectors
# Holds the list of 12 layer embeddings for each token
# Will have the shape: [# tokens, # layers, # features]
token_embeddings = []
# For each token in the sentence...
for token_i in range(len(tokenized_text)):
# Holds 12 layers of hidden states for each token
hidden_layers = []
# For each of the 12 layers...
for layer_i in range(len(encoded_layers)):
# Lookup the vector for `token_i` in `layer_i`
vec = encoded_layers[layer_i][batch_i][token_i]
hidden_layers.append(vec)
token_embeddings.append(hidden_layers)
# Sanity check the dimensions:
print("Number of tokens in sequence:", len(token_embeddings))
print("Number of layers per token:", len(token_embeddings[0]))
concatenated_last_4_layers = [torch.cat((layer[-1], layer[-2], layer[-3], layer[-4]), 0) for layer in
token_embeddings] # [number_of_tokens, 3072]
summed_last_4_layers = [torch.sum(torch.stack(layer)[-4:], 0) for layer in
token_embeddings] # [number_of_tokens, 768]
print(len(concatenated_last_4_layers))
print(len(summed_last_4_layers))
def sen_to_bert_embedding(sentence, sum_method):
"""
:param sentence: A sentence to be processed.
    :param sum_method: how to pool the last four hidden layers; either 'concat4'
        (concatenate them) or 'sum4' (sum them).
    :return: A list of torch tensors, one per token; each tensor holds that
        token's embedding values as floats.
"""
# Probably don't want to set up a new one of these every time.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
marked_text = "[CLS] " + sentence + " [SEP]"
tokenized_text = tokenizer.tokenize(marked_text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
segments_ids = [1] * len(tokenized_text)
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
# Load pre-trained model (weights)
model = BertModel.from_pretrained('bert-base-uncased')
# Put the model in "evaluation" mode, meaning feed-forward operation.
model.eval()
# Predict hidden states features for each layer
with torch.no_grad():
encoded_layers, _ = model(tokens_tensor, segments_tensors)
print("Number of layers:", len(encoded_layers))
layer_i = 0
print("Number of batches:", len(encoded_layers[layer_i]))
batch_i = 0
print("Number of tokens:", len(encoded_layers[layer_i][batch_i]))
token_i = 0
print("Number of hidden units:", len(encoded_layers[layer_i][batch_i][token_i]))
token_embeddings = []
# For each token in the sentence...
for token_i in range(len(tokenized_text)):
# Holds 12 layers of hidden states for each token
hidden_layers = []
# For each of the 12 layers...
for layer_i in range(len(encoded_layers)):
# Lookup the vector for `token_i` in `layer_i`
vec = encoded_layers[layer_i][batch_i][token_i]
hidden_layers.append(vec)
token_embeddings.append(hidden_layers)
# Sanity check the dimensions:
print("Number of tokens in sequence:", len(token_embeddings))
print("Number of layers per token:", len(token_embeddings[0]))
if sum_method == 'concat4':
return_vector = [torch.cat((layer[-1], layer[-2], layer[-3], layer[-4]), 0) for layer in
token_embeddings] # [number_of_tokens, 3072]
elif sum_method == 'sum4':
return_vector = [torch.sum(torch.stack(layer)[-4:], 0) for layer in
token_embeddings] # [number_of_tokens, 768]
else:
print("Invalid sum method passed to sen_to_bert_embedding: ", sum_method)
print(len(return_vector))
return return_vector
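# Rough usage sketch (illustrative): with bert-base (hidden size 768),
#   vec = sen_to_bert_embedding("The river bank was muddy.", 'concat4')
# returns one 3072-dimensional tensor per token (four concatenated layers),
# while 'sum4' would return 768-dimensional tensors instead.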
if __name__ == '__main__':
text = "After stealing money from the bank vault, the bank robber was seen fishing on the Mississippi river bank."
vector = sen_to_bert_embedding(text, 'concat4')
print(type(vector))
print(len(vector))
print(type(vector[0]))
print(len(vector[0]))
print(type(vector[0][0]))
print(vector[0][0])
print(type(vector[0][0].item()))
| true |
61a49ba8b9889e171a3da90732f138a81616ba87 | Python | Aasthaengg/IBMdataset | /Python_codes/p02267/s839149158.py | UTF-8 | 151 | 3.078125 | 3 | [] | no_license | n = int(input())
s = input().split()
n = int(input())
t = input().split()
count = 0
for a in t:
if a in s:
count += 1
print(count) | true |
04a2a8fcfa5f182704403f84210cd1aa5f06b8fb | Python | xxiaocheng/ZhengFangCaptcha | /run.py | UTF-8 | 907 | 2.765625 | 3 | [] | no_license | import pickle
from network import TwoLayerNet
from data_process import processImg
import numpy as np
def predict(img_path):
'''
    img_path: path to the captcha image file
    ----------------
    s: the recognized captcha, returned as a string
'''
(im1,im2,im3,im4),(a,b,c,d)=processImg(img_path)
with open('params.pickle', 'rb') as f:
params = pickle.load(f)
network = TwoLayerNet(input_size=260, hidden_size=60, output_size=36,params=params)
ims=np.array([im1,im2,im3,im4])
x=network.predict(ims)
index=np.argmax(x,axis=1)
word = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
s=''
for w in index:
s+=word[w]
return s
def main():
print(predict('0ak7.gif'))
if __name__ == '__main__':
main()
| true |
5c1bca3e272f8efaec4ff2891038c32a5f942cee | Python | v1ktos/Python_RTU_08_20 | /u2_g1.py | UTF-8 | 814 | 3.59375 | 4 | [
"MIT"
] | permissive | # def replace_dict_value(d, bad_val, good_val):
# for key, value in d.items() :
# if value == bad_val:
# d[key] = good_val
# return d
# my_dict = {'h': 1, 'u': 2, 'b': 5, 'a': 2}
# print(replace_dict_value(my_dict, 2, 7))
# print(my_dict)
# def clean_dict_valuesOR(d: dict, v_list: list) -> dict:
# temp_dict = d.copy()
# for k in temp_dict:
# if d[k] in v_list:
# deleted_value = d.pop(k) # deletes the key but returns value, here works the same as del
# print(f"We deleted {deleted_value=}")
# return d
def clean_dict_valuesOR(d: dict, v_list: list) -> dict:
return {k:v for k,v in d.items() if d.get(k) not in v_list}
# print(clean_dict_valuesOR("Valdis", [3,4,5]))
print(clean_dict_valuesOR({'a':5,'b':6,'c':5, 'd':4}, [3,4,5])) | true |
0b2a5b668d4800c91df201a67253b20de74e3f2e | Python | Fotoon1992/Programming-for-Digital-Media | /inClass/madlibs.py | UTF-8 | 270 | 3.625 | 4 | [] | no_license | import random
#%%
verbs = ["A", "B", "C"]
print(verbs)
#%%
verbs = ["مرحبا"]
print(verbs)
#%%
verbs = ["Hello"]
print(verbs)
#%%
verbs = ["2019"]
print(5+5)
#%%
verbs = ["runs", "jumps", "plays"]
print("The boy " + random.choice(verbs) + " all day long")
#%%
| true |
2a7352a6709249112ca6b29f7e5f758830839700 | Python | shubham14/Deep-Learning-Pytorch | /Reinforcement Learning/actor_critic.py | UTF-8 | 4,766 | 3.0625 | 3 | [] | no_license | import argparse
import gym
import numpy as np
from itertools import count
from collections import namedtuple
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
help='random seed (default: 543)')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='interval between training status logs (default: 10)')
args = parser.parse_args()
env = gym.make('CartPole-v0')
env.seed(args.seed)
torch.manual_seed(args.seed)
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
##### TODO ######
### Complete definition
self.lin1 = nn.Linear(4, 128)
self.lin2 = nn.Linear(128, 2)
self.lin3 = nn.Linear(128, 1)
def forward(self, x):
##### TODO ######
### Complete definition
out1 = F.relu(self.lin1(x))
out1 = F.softmax(self.lin2(out1), -1)
#critic
out2 = F.relu(self.lin1(x))
out2 = self.lin3(out2)
return out1, out2
model = Policy()
optimizer = optim.Adam(model.parameters(), lr=3e-2)
eps = np.finfo(np.float32).eps.item()
def select_action(state):
state = torch.from_numpy(state).float()
probs, state_value = model(state)
m = Categorical(probs)
action = m.sample()
return action
def sample_episode():
state, ep_reward = env.reset(), 0
episode = []
for t in range(1, 10000): # Run for a max of 10k steps
action = select_action(state)
# Perform action
next_state, reward, done, _ = env.step(action.item())
episode.append((state, action, reward))
state = next_state
ep_reward += reward
if args.render:
env.render()
if done:
break
return episode, ep_reward
def plot_durations(episode_durations):
plt.figure(2)
plt.clf()
durations_t = torch.FloatTensor(episode_durations)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy())
# Take 100 episode averages and plot them too
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy())
plt.pause(0.001) # pause a bit so that plots are updated
def compute_losses(episode):
####### TODO #######
#### Compute the actor and critic losses
actor_loss, critic_loss = None, None
A = [] # list to store advantage values for every time step
pol_list = []
for i, eps in enumerate(episode):
A_sum = 0
state, action, reward = eps
state = torch.Tensor(state)
act, val = model(state)
pol_list.append(act.max())
for t_prime in range(i + 1, len(episode) + 1):
A_sum += (args.gamma ** (t_prime - i + 1)) * episode[i][2]
A_sum -= val # model(state) is the value function for each
A.append(A_sum)
A = torch.stack(A)
actor_loss = -torch.sum(torch.log(torch.stack(pol_list)) * A)
critic_loss = torch.sum(A ** 2)
return actor_loss, critic_loss
def main():
running_reward = 10
loss_history = []
for i_episode in count(1):
episode, episode_reward = sample_episode()
optimizer.zero_grad()
actor_loss, critic_loss = compute_losses(episode)
loss = actor_loss + critic_loss
loss.backward()
optimizer.step()
running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward
loss_history.append(running_reward)
if i_episode % args.log_interval == 0:
print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format(
i_episode, episode_reward, running_reward))
if running_reward > env.spec.reward_threshold:
print("Solved! Running reward is now {} and "
"the last episode runs to {} time steps!".format(running_reward, len(episode)))
# episodes = [i + 1 for i in range(len(loss_history))]
# plt.plot(episodes, loss_history)
# plt.show()
plot_durations(loss_history)
plt.savefig('reward_history.png')
break
if __name__ == '__main__':
main()
| true |
a8cd6d73d06015a91fbc1773600aaa55be1ec881 | Python | jess-monter/Backend-Test-Monter | /backend_test/orders/tests/test_models.py | UTF-8 | 1,166 | 2.75 | 3 | [] | no_license | from django.test import TestCase
from django.contrib.auth.models import User
from backend_test.users.models import Employee
from backend_test.meals.models import Meal
from backend_test.orders.models import Order
class ModelTestCase(TestCase):
"""Test Orders models."""
@classmethod
def setUpTestData(cls):
user = User.objects.create(username="john.doe", email="john.doe@cornershop.com")
Employee.objects.create(user=user, country="CHL")
def test_order_belongs_to_employee(self):
"""Created order belongs to employee."""
meal = Meal.objects.create(dishes="Hot Wings with Fries and Salad")
employee = Employee.objects.get(pk=1)
order = Order.objects.create(meal=meal, employee=employee)
self.assertEqual(order.employee.user.username, employee.user.username)
def test_model_str(self):
"""Model str representation."""
meal = Meal.objects.create(dishes="Hot Wings with Fries and Salad")
employee = Employee.objects.get(pk=1)
order = Order.objects.create(meal=meal, employee=employee)
self.assertEqual(str(order), "john.doe ordered Hot Wings with Fries and Salad")
| true |
c6e63dd46e2990cd1f97bc8bef2bd9d259dbc135 | Python | sonialfajardo/algos | /binary_tree_zigzag.py | UTF-8 | 1,383 | 3.875 | 4 | [] | no_license | class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class TreePrinter:
def zig_zag(self, root):
# Reverse every other result list
if root is None:
return []
stack = []
stack.append(root)
stack.append(None)
results = []
current_level = []
left_to_right = True
while len(stack) > 0:
current = stack.pop(0) # O(1)
if current != None:
# Add it to the current level
current_level.append(current.val)
# Add children to stack
if current.left:
stack.append(current.left) # O(1)
if current.right:
stack.append(current.right)
else:
# Add current level to response, reverse every other level
if left_to_right:
results.append(current_level)
left_to_right = False
else:
rev = list(reversed(current_level)) # O(n)
results.append(rev)
left_to_right = True
# Reset level list
current_level = []
if len(stack) > 0:
# Append delimiter
stack.append(None)
return results
def test():
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)
printer = TreePrinter()
print printer.zig_zag(root)
test()
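# For the tree built in test() (root 3 with children 9 and 20, and 20 having
# children 15 and 7), the expected zig-zag output is [[3], [20, 9], [15, 7]].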
| true |
6f957b47ef596e21559d711599bd82b879f28d55 | Python | WojciechKoz/MyFirstNeuralNetwork | /ML_introduction/perceptron/test_single_perceptron.py | UTF-8 | 1,166 | 2.90625 | 3 | [] | no_license | from perceptron import Perceptron
from initialization_objects import create_data, prepare_data, create_obj
import numpy as np
import random as rand
import matplotlib.pyplot as plt
def main():
model = Perceptron(0.1, 5)
groups = create_data([(10, 0), (0, 5)], 1)
X_set, Y_set = prepare_data(groups)
model.fit(X_set, np.where(Y_set == 'A', 1, -1))
print(model.w_)
print(model.predict(np.array([1, 7])))
print(model.predict(np.array([5, 2])))
A = []
B = []
for _ in range(50):
obj = np.array([rand.uniform(0, 10), rand.uniform(0, 10)])
if model.predict(obj) == 1:
A.append(obj)
else:
B.append(obj)
plt.scatter(np.array(A).T[0], np.array(A).T[1], color='red', marker='o', label="new A")
plt.scatter(np.array(B).T[0], np.array(B).T[1], color='blue', marker='x', label="new B")
plt.scatter(1 , 7, color='yellow', marker='s', label='A center')
plt.scatter(5 , 2, color='green', marker='p', label='B center')
plt.xlabel("X-attribute")
plt.ylabel("Y-attribute")
plt.legend(loc='upper right')
plt.show()
if __name__ == "__main__":
main()
| true |
969609ade1a0cffdce59f718fcba3ee4106f69db | Python | wangmingjun666/OpenCV-CameraCalibration-Example | /01-02_undistort.py | UTF-8 | 2,052 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import cv2 as cv
import numpy as np
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--file", type=str, default=None)
parser.add_argument("--width", type=int, default=640)
parser.add_argument("--height", type=int, default=360)
parser.add_argument("--k_new_param", type=float, default=1.0)
parser.add_argument("--k_filename", type=str, default="K.csv")
parser.add_argument("--d_filename", type=str, default="d.csv")
args = parser.parse_args()
return args
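# Illustrative invocation (assumes K.csv and d.csv were produced by a prior
# calibration step; not part of the original script):
#   python 01-02_undistort.py --device 0 --width 1280 --height 720 --k_new_param 0.9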
def main():
    # command-line arguments
args = get_args()
cap_device = args.device
filepath = args.file
cap_width = args.width
cap_height = args.height
k_new_param = args.k_new_param
k_filename = args.k_filename
d_filename = args.d_filename
    # set up the camera
cap = None
if filepath is None:
cap = cv.VideoCapture(cap_device)
cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
else:
cap = cv.VideoCapture(filepath)
    # load the calibration data
camera_mat = np.loadtxt(k_filename, delimiter=',')
dist_coef = np.loadtxt(d_filename, delimiter=',')
new_camera_mat = camera_mat.copy()
new_camera_mat[(0, 1), (0, 1)] = k_new_param * new_camera_mat[(0, 1),
(0, 1)]
while (True):
ret, frame = cap.read()
undistort_image = cv.undistort(
frame,
camera_mat,
dist_coef,
new_camera_mat,
)
cv.imshow('original', frame)
cv.imshow('undistort', undistort_image)
key = cv.waitKey(1) & 0xFF
if key == 27: # ESC
cap.release()
cv.destroyAllWindows()
break
cap.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
| true |
95ffeecf39bdf09a7a374272a5a0c34246690147 | Python | fjf3997/python_plane_war | /f_3_创建游戏窗口.py | UTF-8 | 1,096 | 2.8125 | 3 | [] | no_license | import pygame
from plane_sprite import *
pygame.init()
screen = pygame.display.set_mode((480, 700))
# load the images
bg = pygame.image.load("./images/background.png")
screen.blit(bg, (0, 0))
hero = pygame.image.load("./images/me1.png")
screen.blit(hero, (180, 500))
# after all images are drawn, call update once to show the final screen
pygame.display.update()
clock = pygame.time.Clock()
hero_rect = pygame.Rect(180, 500, 102, 126)
enemy = GameSprite("./images/enemy1.png")
enemy2 = GameSprite("./images/enemy1.png")
enemy_group = pygame.sprite.Group(enemy, enemy2)
while True:
clock.tick(60)
if hero_rect.y <= -hero_rect.height:
hero_rect.y = 700
screen.blit(bg, (0, 0))
screen.blit(hero, hero_rect)
hero_rect.y -= 1
enemy_group.update()
enemy_group.draw(screen)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
print("退出游戏")
pygame.quit()
exit()
# event_list = pygame.event.get()
# if len(event_list) > 0:
# print(event_list)
pygame.quit()
| true |
55e3d6c5c8dd8790e057273147d31972db4d6edf | Python | ayakamal/S_Python_2 | /Change_Date_Format.py | UTF-8 | 555 | 3.296875 | 3 | [] | no_license | import datetime
import dateutil.parser
# str=dateutil.parser.parse("15/12/2016")
# str = datetime.datetime.strptime('15/12/2016', '%d/%m/%Y').strftime('%Y%m%d')
# str = parse("11-15-2012")
# print(str)
# print(parse(str).strftime('%Y%m%d'))
from dateutil.parser import parse
def change_date_format(*args):
list_dates = []
for s in args:
str = parse(s).strftime('%Y%m%d')
list_dates.append(str)
print("The Date List in YYYYMMDD: ")
print(list_dates)
change_date_format("2010/03/30","15/12/2016", "11-20-2018", "20130720")
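# The call above should print (illustrative trace):
#   The Date List in YYYYMMDD:
#   ['20100330', '20161215', '20181120', '20130720']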
| true |
d4088cfe3c579dd3f1627b06c511ca7962a1c9a8 | Python | ahmad27/socialMining | /getTweets.py | UTF-8 | 1,503 | 2.84375 | 3 | [] | no_license | import selenium
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import time
import json
import sys
# first param is what to search for and the second is how many pages to scroll
search = sys.argv[1]
numberofpage = sys.argv[2]
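# illustrative usage (hypothetical query): python getTweets.py "openai" 3
# scrolls three pages of results for "openai" and writes them to data.txt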
# Open twitter with requested search
browser = webdriver.Firefox()
numpage=int(numberofpage)
browser.get('https://twitter.com/search?q="'+ search +'"')
# Scroll requested number of pages
while numpage != 0:
browser.execute_script("window.scrollTo(0, document.body.scrollHeight)")
time.sleep(1)
numpage= numpage - 1
data={}
data['tr'] = []
tweets = browser.find_elements_by_class_name("content")
# Get all tweets
for tweet in tweets:
t= tweet.find_element_by_class_name("tweet-text")
name= tweet.find_element_by_class_name("fullname")
username=tweet.find_element_by_class_name("username")
comment = tweet.find_element_by_xpath("(//span[@class='ProfileTweet-actionCount'])[1]").get_attribute("data-tweet-stat-count")
retweet = tweet.find_element_by_xpath("(//span[@class='ProfileTweet-actionCount'])[2]").get_attribute("data-tweet-stat-count")
like = tweet.find_element_by_xpath("(//span[@class='ProfileTweet-actionCount'])[3]").get_attribute("data-tweet-stat-count")
data['tr'].append({
'tweet': t.text,
'name': name.text,
'username': username.text,
'comment': comment,
'retweet': retweet,
'like': like
})
# Store json in data.txt
with open('data.txt', 'w') as outfile:
json.dump(data, outfile)
| true |
4a5d0baef3af639e7a77d45e84d95822a0188eb4 | Python | szonglong/diode-analysis | /dytest_semilog.py | UTF-8 | 3,099 | 2.8125 | 3 | [] | no_license | import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
import Tkinter
import tkFileDialog
############ Settings ##############
jcol=2 #Column to take current. First column is 0
vcol=1 #column to take voltage. Usually @ column
plotxrange=[-8,8]
plotyrange=[10**-5,10**4]
m = 1 #mismatch factor; def = 1
d = -1 #direction of connectors; def = 1, acceptable args = +-1
area=4.29e-6
############ File Search ############
root = Tkinter.Tk()
root.withdraw() #use to hide tkinter window
#currdir = os.getcwd()
#tempdir = tkFileDialog.askdirectory(parent=root, initialdir='C:\\Users\\E0004621\\Desktop\\Zong Long\\Papers\\Data', title='Please select the data folder') #select directory for data
#tempdir = 'C:\\Users\\E0004621\\Desktop\\Pythontest' #Debugging use
tempdir = 'C:\\Users\\E0004621\\Desktop\\Zong Long\\Papers\\Data\\180719' #Debugging use
os.chdir(tempdir)
filelist = os.listdir(os.getcwd()) # working dir
if os.path.exists('processed_semilog')!=True: # make file
os.mkdir('processed_semilog')
wlist=[] # get working list, only xls files
for i in xrange(len(filelist)):
ext = os.path.splitext(filelist[i])[1]
filename = os.path.splitext(filelist[i])[0]
if ext in ('.xls','.XLS'):
wlist.append(os.path.splitext(filelist[i])[0])
print wlist #is shown on command prompt dialogue
############ Processing data ############
for file in wlist:
wb=pd.read_excel(r'%s.xls' %file, None)
ana_array = pd.DataFrame(index=['Jsc (mA/cm2)','Voc (V)','I_inj (mA)','Pmax (W)','Vmpp (V)','Jmpp (mA/cm2)','Rs','PCE (%)','FF (%)'])
for sheet_index in range(0,1) + range(3,len(wb.keys())):
sh=wb.values()[sheet_index]
if sheet_index == 0:
sheet_name = ['p1','j1']
dat1=pd.DataFrame({'V1': sh.V1})
dat2=pd.DataFrame({'I1': sh.I1/(10*area)*d}) #current density in mA/cm2
final_array = dat1.join(np.abs(dat2))
power_array = dat1
quarter_length=int(len(power_array)/4)
else:
sheet_name = ['p%i' % (sheet_index-1),'j%i' % (sheet_index-1)]
dat2=pd.DataFrame({'j%i' % (sheet_index-1) : sh.I1/(10*area)*d})
final_array=final_array.join(np.abs(dat2))
#Final array has columns Vs and J1~J8
final_array.plot(x="V1", figsize=(9,9), logy=True) #Semi-log plot
############ Plotting settings ############
plt.title('%s'%(file))
plt.grid(True)
plt.xlabel('Applied voltage(V)')
plt.ylabel('Log [Current density](mAcm$^-$$^2$)')
plt.xlim(plotxrange)
plt.ylim(plotyrange)
font={'size':18}
plt.rc('font',**font)
plt.legend(loc='lower left', prop={"size":12})
############ Export ############
writer = pd.ExcelWriter('processed_semilog/%s _p.xlsx' %file, engine='xlsxwriter')
final_array.to_excel(writer, sheet_name = 'Final array')
writer.save()
plt.savefig('processed_semilog/%s _p.png' %file)
plt.close()
| true |
89fa09f297ba42121d95af442c23c685380efbef | Python | cbohara/python_exercises | /test_add.py | UTF-8 | 703 | 3.328125 | 3 | [] | no_license | import pytest
from add import add
def test_add_two_matrix_size_two():
matrix1 = [[1, -2], [-3, 4]]
matrix2 = [[2, -1], [0, -1]]
assert add(matrix1, matrix2) == [[3, -3], [-3, 3]]
def test_add_two_matrix_size_three():
matrix1 = [[1, -2, 3], [-4, 5, -6], [7, -8, 9]]
matrix2 = [[1, 1, 0], [1, -2, 3], [-2, 2, -2]]
assert add(matrix1, matrix2) == [[2, -1, 3], [-3, 3, -3], [5, -6, 7]]
def test_add_three_matrix_size_two():
matrix1 = [[1, 9], [7, 3]]
matrix2 = [[5, -4], [3, 3]]
matrix3 = [[2, 3], [-3, 1]]
assert add(matrix1, matrix2, matrix3) == [[8, 8], [7, 7]]
def test_add_uneven_matrices_raise_exception():
    with pytest.raises(ValueError):
add([[1, 9], [7, 3]], [[1, 2], [3]])
| true |
cac483380bb7762c473dc58bf94a45235163a0ab | Python | EugeneMondkar/web-crawler | /writer.py | UTF-8 | 905 | 2.796875 | 3 | [] | no_license | # Author: Emily Villalba
# Group 10
# Tracking of Modifications, Refactoring, and Corrections to code:
# DONE (Eugene Mondkar): Removed httplib2 import
# DONE (Eugene Mondkar): Added language parameter to append to csv filename
# DONE (Eugene Mondkar): Removed other unnecessary libraries
# DONE (Eugene Mondkar): Had to accomadate for links with commas (google maps links)
# DONE (Eugene Mondkar): Add Reports Directory
def write_csv(sites_and_outlinks, reports_directory, language):
report_name = reports_directory + 'report_{}.csv'.format(language)
# clear current report
open(report_name, 'w').close()
# opening a file in append mode
report = open(report_name, 'a')
for site, outlinks in sites_and_outlinks:
site = site.replace(',','.')
report.write("URL: {}, Number of Outlinks: {}".format(site, outlinks))
report.write("\n")
report.close()
| true |
1848297f66832aacba1c5d6488f1bb83b1c37705 | Python | lbingbing/leetcode | /Algorithms/p115_Distinct_Subsequences/p115_Distinct_Subsequences_testcase_gen.py | UTF-8 | 372 | 2.875 | 3 | [] | no_license | import random
k = 500
n = 20
set1 = [chr(ord('a')+i) for i in range(26)]
for k1 in range(k):
n1 = random.randint(0,n)
l = [random.choice(set1) for i in range(n1)]
t = ''.join(l)
n2 = random.randint(0,n*4)
for i in range(n2):
l.insert(random.randint(0,len(l)),random.choice(set1))
s = ''.join(l)
print('"'+s+'"')
print('"'+t+'"')
| true |
b9ec3d3ff7ecbc4b0404f5d78d277c54570e0b32 | Python | irohit7/python | /function and docstring.py | UTF-8 | 828 | 4.28125 | 4 | [] | no_license | """def function():
print("This is function ")
print(function()) """
"""
def sum(a,b):
c = a+b
print(c)
return c
a = int(input())
b = int(input())
sum(a,b) """
"""
def average(a,b):
#doc astring
c = (a+b)/2
print(c)
print(average.__doc__)
average(5,7) """
#calculator using function
def sum(a,b):
c = a+b
print(c)
def minus(a,b):
c = a-b
print(c)
def mul(a,b):
c = a*b
print(c)
def div(a,b):
c = a/b
print(c)
a = int(input("Enter the 1st Number -> "))
b = int(input("Enter the 2nd Number -> "))
operand = input("Which operand you want to perform +,-,*,/ -> ")
if operand == "+":
sum(a,b);
if operand == "-":
    minus(a,b);
if operand == "*":
mul(a,b);
if operand == "/":
div(a,b);
| true |
87c055b87523a7ab55c05458983027bb4b21eadf | Python | victorlorena/DESlib | /deslib/des/des_clustering.py | UTF-8 | 15,255 | 2.875 | 3 | [
"BSD-3-Clause"
] | permissive | # coding=utf-8
# Author: Rafael Menelau Oliveira e Cruz <rafaelmenelau@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from sklearn.base import ClusterMixin
from sklearn.cluster import KMeans
from deslib.base import BaseDS
from deslib.util.aggregation import majority_voting_rule
from deslib.util.diversity import Q_statistic, ratio_errors, negative_double_fault, compute_pairwise_diversity
class DESClustering(BaseDS):
"""Dynamic ensemble selection-Clustering (DES-Clustering).
This method selects an ensemble of classifiers taking into account the
accuracy and diversity of the base classifiers. The K-means algorithm is used to define the region of competence.
For each cluster, the N most accurate classifiers are first selected. Then, the J more diverse classifiers from the
N most accurate classifiers are selected to compose the ensemble.
Parameters
----------
pool_classifiers : list of classifiers (Default = None)
The generated_pool of classifiers trained for the corresponding classification problem.
Each base classifiers should support the method "predict".
If None, then the pool of classifiers is a bagging classifier.
clustering : sklearn.cluster (Default = None)
The clustering model used to estimate the region of competence. If None, a KMeans with
K = 5 is used.
pct_accuracy : float (Default = 0.5)
Percentage of base classifiers selected based on accuracy
pct_diversity : float (Default = 0.33)
        Percentage of base classifiers selected based on diversity
more_diverse : Boolean (Default = True)
Whether we select the most or the least diverse classifiers to add to the pre-selected ensemble
metric : String (Default = 'df')
Metric used to estimate the diversity of the base classifiers. Can
be either the double fault (df), Q-statistics (Q), or error correlation (corr)
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
DSEL_perc : float (Default = 0.5)
Percentage of the input data used to fit DSEL.
        Note: This parameter is only used if the pool of classifiers is None or unfitted.
References
----------
    Soares, R. G., Santana, A., Canuto, A. M., & de Souto, M. C. P. "Using accuracy and diversity to select
    classifiers to build ensembles." International Joint Conference on Neural Networks (IJCNN), 2006.
Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira. "Dynamic selection of classifiers—a comprehensive review."
Pattern Recognition 47.11 (2014): 3665-3680.
R. M. O. Cruz, R. Sabourin, and G. D. Cavalcanti, “Dynamic classifier selection: Recent advances and perspectives,”
Information Fusion, vol. 41, pp. 195 – 216, 2018.
"""
def __init__(self, pool_classifiers=None,
clustering=None,
with_IH=False,
safe_k=None,
IH_rate=0.30,
pct_accuracy=0.5,
pct_diversity=0.33,
more_diverse=True,
metric='DF',
random_state=None,
DSEL_perc=0.5):
super(DESClustering, self).__init__(pool_classifiers=pool_classifiers,
with_IH=with_IH,
safe_k=safe_k,
IH_rate=IH_rate,
random_state=random_state,
DSEL_perc=DSEL_perc)
self.metric = metric
self.clustering = clustering
self.pct_accuracy = pct_accuracy
self.pct_diversity = pct_diversity
self.more_diverse = more_diverse
def fit(self, X, y):
""" Train the DS model by setting the Clustering algorithm and
pre-processing the information required to apply the DS
methods.
First the data is divided into K clusters. Then, for each cluster, the N most accurate classifiers
are first selected. Then, the J more diverse classifiers from the N most accurate classifiers are
selected to compose the ensemble of the corresponding cluster. An ensemble of classifiers is assigned
to each of the K clusters.
Parameters
----------
X : array of shape = [n_samples, n_features]
Data used to fit the model.
y : array of shape = [n_samples]
class labels of each example in X.
Returns
-------
self
"""
super(DESClustering, self).fit(X, y)
self.N_ = int(self.n_classifiers_ * self.pct_accuracy)
self.J_ = int(np.ceil(self.n_classifiers_ * self.pct_diversity))
# Check if the specific parameters are correct (N_, J_, metric and clustering)
self._check_parameters()
# Set-up the clustering method used to estimate the region of competence
if self.clustering is None:
self.clustering_ = KMeans(n_clusters=5, random_state=self.random_state)
self.clustering_.fit(self.DSEL_data_)
else:
self.clustering_ = self.clustering.fit(self.DSEL_data_)
# set the diversity metric used
self._set_diversity_func()
# Since the clusters are fixed, we can pre-compute the accuracy and diversity of each cluster as well as the
# selected classifiers (indices) for each one. These pre-computed information will be kept on
# those three variables:
self.accuracy_cluster_ = np.zeros((self.clustering_.n_clusters, self.n_classifiers_))
self.diversity_cluster_ = np.zeros((self.clustering_.n_clusters, self.n_classifiers_))
self.indices_ = np.zeros((self.clustering_.n_clusters, self.J_), dtype=int)
self._preprocess_clusters()
return self
def _preprocess_clusters(self):
"""Preprocess the competence as well as the average diversity of each base classifier for each specific cluster.
This process makes the test routines faster, since the ensemble of classifiers of each cluster
is already predefined.
        The class attributes accuracy_cluster_ and diversity_cluster_ store the accuracy and diversity information
respectively of each base classifier for each cluster. The attribute indices_ stores the pre-selected
base classifiers for each cluster.
"""
labels = self.clustering_.predict(self.DSEL_data_)
# For each cluster estimate the most accurate and most competent classifiers for it.
for cluster_index in range(self.clustering_.n_clusters):
# Get the indices_ of the samples in the corresponding cluster.
sample_indices = np.where(labels == cluster_index)[0]
# Compute accuracy of each classifier in this cluster
accuracy = np.mean(self.DSEL_processed_[sample_indices, :], axis=0)
self.accuracy_cluster_[cluster_index, :] = accuracy
# Get the N_ most accurate classifiers for the corresponding cluster
accuracy_indices = np.argsort(accuracy)[::-1][0:self.N_]
# Get the target labels for the samples in the corresponding cluster for the diversity calculation.
targets = self.DSEL_target_[sample_indices]
self.diversity_cluster_[cluster_index, :] = \
compute_pairwise_diversity(targets, self.BKS_DSEL_[sample_indices, :], self.diversity_func_)
diversity_of_selected = self.diversity_cluster_[cluster_index, accuracy_indices]
if self.more_diverse:
diversity_indices = np.argsort(diversity_of_selected)[::-1][0:self.J_]
else:
diversity_indices = np.argsort(diversity_of_selected)[0:self.J_]
self.indices_[cluster_index, :] = accuracy_indices[diversity_indices]
def estimate_competence(self, query, predictions=None):
"""Get the competence estimates of each base classifier :math:`c_{i}`
for the classification of the query sample.
In this case, the competences were already pre-calculated for each cluster. So this method computes the
nearest cluster and get the pre-calculated competences of the base classifiers
for the corresponding cluster.
Parameters
----------
query : array of shape = [n_samples, n_features]
The query sample.
predictions : array of shape = [n_samples, n_classifiers]
Predictions of the base classifiers for all test examples.
Returns
-------
competences : array = [n_samples, n_classifiers]
The competence level estimated for each base classifier.
"""
cluster_index = self.clustering_.predict(query)
competences = self.accuracy_cluster_[cluster_index][:]
return competences
def select(self, query):
"""Select an ensemble with the most accurate and most diverse classifier for the classification of the query.
The ensemble for each cluster was already pre-calculated in the fit method. So, this method calculates the
closest cluster, and returns the ensemble associated to this cluster.
Parameters
----------
query : array of shape = [n_samples, n_features]
The test examples.
Returns
-------
selected_classifiers : array of shape = [n_samples, self.k]
Indices of the selected base classifier for each test example.
"""
cluster_index = self.clustering_.predict(query)
selected_classifiers = self.indices_[cluster_index, :]
return selected_classifiers
def classify_with_ds(self, query, predictions, probabilities=None, neighbors=None, distances=None, DFP_mask=None):
"""Predicts the label of the corresponding query sample.
Parameters
----------
        query : array of shape = [n_samples, n_features]
            The test samples.
predictions : array of shape = [n_samples, n_classifiers]
Predictions of the base classifiers for all test examples.
probabilities : array of shape = [n_samples, n_classifiers, n_classes]
Probabilities estimates of each base classifier for all test examples.
        neighbors : array of shape = [n_samples, n_neighbors]
            Indices of the k nearest neighbors for each test sample
        distances : array of shape = [n_samples, n_neighbors]
            Distances to the k nearest neighbors for each test sample
DFP_mask : array of shape = [n_samples, n_classifiers]
Mask containing 1 for the selected base classifier and 0 otherwise.
Returns
-------
predicted_label : array of shape = [n_samples]
Predicted class label for each test example.
"""
if query.ndim < 2:
query = query.reshape(1, -1)
if predictions.ndim < 2:
predictions = predictions.reshape(1, -1)
if query.shape[0] != predictions.shape[0]:
            raise ValueError('The arrays query and predictions must have the same number of samples. query.shape is {} '
                             'and predictions.shape is {}'.format(query.shape, predictions.shape))
selected_classifiers = self.select(query)
votes = predictions[np.arange(predictions.shape[0])[:, None], selected_classifiers]
predicted_label = majority_voting_rule(votes)
return predicted_label
def predict_proba_with_ds(self, query, predictions, probabilities, neighbors=None, distances=None, DFP_mask=None):
"""Predicts the label of the corresponding query sample.
Parameters
----------
query : array of shape = [n_samples, n_features]
The test examples.
predictions : array of shape = [n_samples, n_classifiers]
Predictions of the base classifiers for all test examples.
probabilities : array of shape = [n_samples, n_classifiers, n_classes]
Probabilities estimates of each base classifier for all test examples.
        neighbors : array of shape = [n_samples, n_neighbors]
            Indices of the k nearest neighbors for each test sample
        distances : array of shape = [n_samples, n_neighbors]
            Distances to the k nearest neighbors for each test sample
DFP_mask : array of shape = [n_samples, n_classifiers]
Mask containing 1 for the selected base classifier and 0 otherwise.
Returns
-------
predicted_proba : array of shape = [n_samples, n_classes]
Posterior probabilities estimates for each test example
"""
if query.shape[0] != probabilities.shape[0]:
            raise ValueError('The arrays query and probabilities must have the same number of samples. query.shape is {} '
                             'and probabilities.shape is {}'.format(query.shape, probabilities.shape))
selected_classifiers = self.select(query)
ensemble_proba = probabilities[np.arange(probabilities.shape[0])[:, None], selected_classifiers, :]
predicted_proba = np.mean(ensemble_proba, axis=1)
return predicted_proba
def _check_parameters(self):
"""Check if the parameters passed as argument are correct.
Raises
------
ValueError
If the hyper-parameters are incorrect.
"""
if self.metric not in ['DF', 'Q', 'ratio']:
            raise ValueError('Diversity metric must be one of the following values: "DF", "Q" or "ratio"')
if self.N_ <= 0 or self.J_ <= 0:
raise ValueError("The values of N_ and J_ should be higher than 0"
"N_ = {}, J_= {} " .format(self.N_, self.J_))
if self.N_ < self.J_:
raise ValueError("The value of N_ should be greater or equals than J_"
"N_ = {}, J_= {} " .format(self.N_, self.J_))
if self.clustering is not None:
if not isinstance(self.clustering, ClusterMixin):
raise ValueError("Parameter clustering must be a sklearn cluster estimator.")
def _set_diversity_func(self):
"""Set the diversity function to be used according to the hyper-parameter metric
The diversity_func_ can be either the Double Fault, Q-Statistics or Ratio of errors.
"""
if self.metric == 'DF':
self.diversity_func_ = negative_double_fault
elif self.metric == 'Q':
self.diversity_func_ = Q_statistic
else:
self.diversity_func_ = ratio_errors
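# Illustrative sketch (standalone, hypothetical values): per cluster, keep the J_ most
# diverse of the N_ most accurate base classifiers — the same ranking performed in
# _preprocess_clusters above.
if __name__ == '__main__':
    import numpy as np
    accuracy = np.array([0.55, 0.80, 0.65, 0.90, 0.70])   # accuracy of 5 classifiers in one cluster
    diversity = np.array([0.10, 0.30, 0.20, 0.05, 0.40])  # diversity of the same classifiers
    N_, J_ = 3, 2
    accuracy_indices = np.argsort(accuracy)[::-1][0:N_]                # most accurate first -> [3, 1, 4]
    diversity_of_selected = diversity[accuracy_indices]
    diversity_indices = np.argsort(diversity_of_selected)[::-1][0:J_]  # most diverse among them
    print(accuracy_indices[diversity_indices])                         # -> [4 1]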
| true |
82d8bbadb3ce098f9fd96aabd2063dc6373d593c | Python | DarkJoney/python_examples | /hillel8/1.py | UTF-8 | 959 | 4.34375 | 4 | [] | no_license | """1. Write a function `arithmetic` that takes 3 arguments: the first 2 are numbers, the third is the operation to
perform on them. If the third argument is +, add them; if -, subtract; * - multiply; / - divide (the first by
the second). In all other cases return the string "Unknown operation".
"""
def arithmetic(a, b, action):
if action == '/':
return a/b
elif action == '*':
return a*b
elif action == '+':
return a+b
elif action == '-':
return a-b
else:
return "unknown OPERATION"
v1 = int(input("Enter the first value: "))
v2 = int(input("Enter the second value: "))
action = input("Enter + or - or / or *")
result = arithmetic(v1, v2, action)
print(result)
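# Alternative sketch (not used above): the same dispatch done with a dict of operator
# functions; division by zero is left unhandled here as well, mirroring the original.
import operator
def arithmetic_v2(a, b, action):
    operations = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}
    if action in operations:
        return operations[action](a, b)
    return "unknown OPERATION"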
| true |
78d6746a6624ff885c80c92f39a432f2a380de88 | Python | jcgwt/manim | /triple-clebsch-graph/triple-clebsch-graph.py | UTF-8 | 5,122 | 3.109375 | 3 | [] | no_license | from manim import *
import numpy as np
# this produces an attractive 3-colouring of 3 copies of the Clebsch graph, producing a 3-coloring of the complete graph on 16 vertices
# in particular, coupled with the standard argument that R(3,3,3) ≤ 17, this shows R(3,3,3) = 17
class TripleClebschGraph(Scene):
def construct(self):
# create vertices
centre = Dot([0,0,0])
inner = [Dot([np.cos(2.05*PI/10+k*2*PI/5),np.sin(2.05*PI/10+k*2*PI/5),0]) for k in range(5)]
middle = [Dot([1.8*np.cos(8.1*PI/20+k*2*PI/5),1.8*np.sin(8.1*PI/20+k*2*PI/5),0]) for k in range(5)]
outer = [Dot([2.8*np.cos(PI/10+k*2*PI/5),2.8*np.sin(PI/10+k*2*PI/5),0]) for k in range(5)]
vtx = [centre]
for ls in [outer,middle,inner]:
vtx.extend(ls)
self.add_foreground_mobjects(*vtx)
        # the following index offsets define the Clebsch graph's edge pattern; the whole construction exploits its 5-fold rotational symmetry
# for outer pentagon
pts_out_out = [1]
pts_out_mid = [0]
pts_out_in = [11,14]
# for middle pentagon
pts_mid_mid = [2,3]
pts_mid_in = [7,8]
# no transformation for edges within the inner hexagon
wdth = 1.8 # width for edges
# edges between outer pentagon and relevant vertices
edges = []
for n in range(1,6):
for crd in pts_out_out:
end = n%5+crd
e = Line(vtx[n].get_center(),vtx[end].get_center(),stroke_width = wdth)
edges.append(e)
for crd in pts_out_mid:
end = (n+crd-1)%5+6
e = Line(vtx[n].get_center(),vtx[end].get_center(),stroke_width = wdth)
edges.append(e)
for crd in pts_out_in:
end = (n+crd-1)%5+11
e = Line(vtx[n].get_center(),vtx[end].get_center(),stroke_width = wdth)
edges.append(e)
# edges between middle pentagon and vertices excluding outer pentagon
for n in range(6,11):
for crd in pts_mid_mid:
end = (n+crd-1)%5+6
e = Line(vtx[n].get_center(),vtx[end].get_center(),stroke_width = wdth)
edges.append(e)
for crd in pts_mid_in:
end = (n+crd-1)%5+11
e = Line(vtx[n].get_center(),vtx[end].get_center(),stroke_width = wdth)
edges.append(e)
        # edges between inner hexagon and relevant vertices excluding middle and outer pentagons
for n in range(11,16):
e = Line(vtx[n].get_center(),vtx[0].get_center(),stroke_width = wdth)
edges.append(e)
edges = [e.set_color(BLUE_D) for e in edges]
# PLAY: fade in vertices, then edges
self.play(*[FadeIn(v) for v in vtx])
self.add_foreground_mobjects(*vtx)
self.play(*[FadeIn(e,run_time=1.5) for e in edges])
self.wait(1/2)
def transformation(permutation,colour):
# copy vertices
vtx_cp = [v.copy() for v in vtx]
self.add_foreground_mobjects(*vtx_cp)
# vertex targets
vtx_targ = [centre.copy()] + [vtx_cp[p] for p in permutation]
for v in vtx_cp:
v.generate_target()
v.target.move_to(vtx_targ[vtx_cp.index(v)])
# edges for copy
edges_cp = [e.copy().set_opacity(0) for e in edges]
            # create targets for the copied edges using the vertex targets
edge_targ = []
for e in edges_cp:
start = e.get_start()
end = e.get_end()
                # this is a little messy because manim seems to struggle with keeping floats consistent past a certain number of decimal places
# hence rounding to 4 dp to allow an accurate check for whether vertices are in the same spot (there is probably a nicer way of dealing with this)
s_target = [v.target for v in vtx_cp if [round(x,4) for x in list(start)] == [round(y,4) for y in list(v.get_center())]]
e_target = [v.target for v in vtx_cp if [round(x,4) for x in list(end)] == [round(y,4) for y in list(v.get_center())]]
line_target = Line(s_target[0],e_target[0],stroke_width = wdth).set_color(colour)
edge_targ.append(line_target)
# package animations
move_edges = AnimationGroup(*[ReplacementTransform(edges_cp[k],edge_targ[k]) for k in range(len(edges))])
move_pts = AnimationGroup(*[MoveToTarget(v) for v in vtx_cp])
# PLAY: transformation
self.play(move_edges,move_pts,run_time=1.5)
# first transformation
first_perm = [x for x in range(11,16)]+[x for x in range(1,6)]+[x for x in range(6,11)]
transformation(first_perm,RED_D)
# second transformation
scnd_perm = [x for x in range(6,11)]+[x for x in range(11,16)]+[x for x in range(1,6)]
transformation(scnd_perm,'#E1E332')
self.play(*[FadeOut(o) for o in self.mobjects])
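# Rendering note (may vary by manim version): with the community edition this scene can
# typically be rendered and previewed with
#   manim -pql triple-clebsch-graph.py TripleClebschGraph
# (-p previews the output, -ql renders at low quality for a quick check).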
| true |
ddddb763ed101f15ba9b809b772e90ca460d93a9 | Python | Yoctol/uttut | /uttut/pipeline/ops/tests/test_add_sos_eos.py | UTF-8 | 1,456 | 2.515625 | 3 | [
"MIT"
] | permissive | import pytest
from ..add_sos_eos import AddSosEos
from ..tokens import START_TOKEN, END_TOKEN
from .common_tests import OperatorTestTemplate, ParamTuple
class TestAddSosEos(OperatorTestTemplate):
params = [
ParamTuple(
['alvin', '喜歡', '吃', '榴槤'],
[1, 2, 3, 4],
['<sos>', 'alvin', '喜歡', '吃', '榴槤', '<eos>'],
[0, 1, 2, 3, 4, 0],
'zh',
),
]
@pytest.fixture(scope='class')
def op(self):
return AddSosEos()
def test_not_equal(self, op):
custom_op = AddSosEos(start_token='custom_SOS', end_token='custom_EOS')
assert custom_op != op
@pytest.mark.parametrize(
"op, expected_configs",
[
pytest.param(
AddSosEos(),
{'start_token': START_TOKEN, 'end_token': END_TOKEN},
id="default",
),
pytest.param(
AddSosEos('1', '2'),
{'start_token': '1', 'end_token': '2'},
id="no keywords",
),
pytest.param(
AddSosEos('1'),
{'start_token': '1', 'end_token': END_TOKEN},
id="partial input",
),
pytest.param(
AddSosEos(end_token='2', start_token='1'),
{'start_token': '1', 'end_token': '2'},
id="input with key",
),
],
)
def test_correct_configs(op, expected_configs):
assert op.configs == expected_configs
| true |
641b6609d493e84ee55f73b908be94ea5d1dd325 | Python | potatoes-never-lie/Algorithm | /2875.py | UTF-8 | 145 | 2.703125 | 3 | [] | no_license | n,m,k=map(int, input().split())
maxVal=0
for i in range(0,k+1):
team=min((n-i)//2, (m-(k-i))//1)
maxVal=max(maxVal, team)
print(maxVal) | true |
a380332a0d51a83b7192513c9db2065d452885fa | Python | blha303/plus7-tools | /get-last-section.py | UTF-8 | 315 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env python2.7
import sys
if len(sys.argv) > 1:
split = sys.argv[1].split("/")
if len(split) >= 1 and split[-1]:
print split[-1]
elif len(split) >= 2 and split[-2]:
print split[-2]
else:
sys.stderr.write("Can't split that on slashes")
else:
sys.stderr.write("Can't split nothing")
| true |
e76ae8212da97a2aa7892e206b6ee7f9ea99b195 | Python | 13555785106/PythonPPT-01 | /twisted-intro-master/basic-twisted/log.py | UTF-8 | 708 | 2.796875 | 3 | [
"MIT"
] | permissive | import sys
from twisted.python import log
from twisted.internet import defer
"""This example illustrates some Twisted logging basics."""
log.msg('This will not be logged, we have not installed a logger.')
log.startLogging(sys.stdout)
log.msg('This will be logged.')
log.err('This will be logged as an error.')
# bad_callback deliberately references an undefined name so that calling it raises
# an exception, giving us a traceback to log below.
def bad_callback(result):
    xxx
try:
bad_callback()
except:
log.err('The next function call will log the traceback as an error.')
log.err()
d = defer.Deferred()
def on_error(failure):
log.err('The next function call will log the failure as an error.')
log.err(failure)
d.addCallback(bad_callback)
d.addErrback(on_error)
d.callback(True)
log.msg('End of example.')
| true |
7937050364a8d66531a713307b133f2055f77d5c | Python | kiayria/epam-python-hw | /homework01/task02/tests/test_fibonacci.py | UTF-8 | 751 | 3.078125 | 3 | [] | no_license | from typing import Sequence
import pytest
from fibonacci.fib import check_fibonacci
@pytest.mark.parametrize(
["data", "expected_result"],
[
([], False),
([0], True),
([5], False),
([10], False),
([0, 0], False),
([0, 1], True),
([89, 144], False),
([0, 0, 0], False),
([0, 1, 1], True),
([377, 610, 987], False),
([5, 2, 7, 9, 16, 25], False),
([0, 0, 0, 0, 0, 0, 0, 0, 0], False),
([0, 1, 1, 2, 3, 5, 8, 13, 21], True),
([987, 1597, 2584, 4181, 6765], False),
],
)
def test_check_fibonacci(data: Sequence[int], expected_result: bool):
actual_result = check_fibonacci(data)
assert actual_result == expected_result
| true |
ee9a3fa312edc1e415f571f4c36ba58e32ed9097 | Python | LucasArthur94/tccapp | /rooms/tests/test_models.py | UTF-8 | 325 | 2.78125 | 3 | [] | no_license | from django.test import TestCase
from rooms.models import Room
# models test
class RoomTestCase(TestCase):
def test_full_identifier(self):
room = Room.objects.create(block='A', floor='T', identifier='ST')
self.assertTrue(isinstance(room, Room))
self.assertTrue(room.full_identifier() == 'AT-ST')
| true |
46f1b736e05b98a351503c21718c872d97c09af0 | Python | oxhead/CodingYourWay | /src/lt_226.py | UTF-8 | 1,739 | 3.96875 | 4 | [] | no_license | """
https://leetcode.com/problems/invert-binary-tree
Related:
"""
"""
Invert a binary tree.
4
/ \
2 7
/ \ / \
1 3 6 9
to
4
/ \
7 2
/ \ / \
9 6 3 1
Trivia:
This problem was inspired by this original tweet by Max Howell:
Google: 90% of our engineers use the software you wrote (Homebrew), but you can’t invert a binary tree on a whiteboard so fuck off.
"""
from utils import parse_tree, tree_traversal_inorder
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def invertTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
# Time: O(n)
# Space: O(h)
        if not root: return root
        root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)
        return root
def invertTree_v2(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
# Time: O(n)
# Space: O(h)
def swap_tree(node):
if not node: return
node.left, node.right = node.right, node.left
swap_tree(node.left)
swap_tree(node.right)
swap_tree(root)
return root
if __name__ == '__main__':
test_cases = [
([4, 2, 7, 1, 3, 6, 9], None),
]
for test_case in test_cases:
print('case:', test_case)
root = parse_tree(test_case[0])
output = Solution().invertTree(root)
output_list = tree_traversal_inorder(output)
print('output:', output_list)
assert output_list == list(reversed(tree_traversal_inorder(parse_tree(test_case[0]))))
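        # mirroring a binary tree reverses its inorder traversal, hence the comparison
        # against the reversed inorder of the original tree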
| true |
d57ae9aa7351efd3bd8387dba3a41c87af46aee6 | Python | allenwind/python-concurrency-demo | /celery_task.py | UTF-8 | 512 | 2.640625 | 3 | [] | no_license | # Celery: a distributed task queue
# Architecture components:
# Message broker (Broker): the task scheduling queue
# This is a producer-consumer pattern: the main program puts tasks onto the queue, and background workers take tasks off the queue and execute them
# e.g. Redis, RabbitMQ
# Task execution unit: Worker
# Result store: Backend
from celery import Celery
app = Celery('tasks',
             broker='amqp://guest@localhost//', # Broker: the task scheduling queue
backend='redis://localhost:6379/0')
@app.task
def add(x, y):
return x + y
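# Usage sketch (assumes the RabbitMQ broker and Redis backend configured above are running):
#   start a worker:   celery -A celery_task worker --loglevel=info
#   then, from another Python shell:
#       from celery_task import add
#       result = add.delay(2, 3)        # send the task to the queue
#       print(result.get(timeout=10))   # -> 5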
| true |
0a35514167c32445072a9048ecb5be24b08ff3d4 | Python | Kar5799/PyAss3 | /PyAss3Q12.py | UTF-8 | 160 | 3.234375 | 3 | [] | no_license | def myfilter(func, lis):
new_list = []
for element in lis:
if func(element):
new_list.append(element)
return iter(new_list) | true |
ea4f1c1a8ae7baafc22da8e2ed7d62e0ac7e4a82 | Python | hidole/google_python_codes | /reports.py | UTF-8 | 864 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python3
from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus import Paragraph, Spacer, Table, Image
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib import colors
import os
def get_filedata(detail): #returning the path and the directory listing
path = os.getcwd()
path = path+detail
print(path)
listdata = os.listdir(path)
print(listdata)
return path , listdata
def generate_report(filename,title,data):
styles = getSampleStyleSheet()
report = SimpleDocTemplate(filename)
pdf_body =[]
#report.build(pdf_body)
report_title = Paragraph(title, styles["h1"])
#paragraph_1 = Paragraph("Some normal body text", styles["BodyText"])
paragraph_1 = Paragraph(data, styles["BodyText"])
pdf_body.append(report_title)
pdf_body.append(paragraph_1)
report.build(pdf_body)
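# Minimal usage sketch (hypothetical output path and strings):
if __name__ == "__main__":
    generate_report("/tmp/example_report.pdf", "Example Title", "Some example body text.")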
| true |
841c2a379a5a1d83b9d4a3d69681acad8cf6d67f | Python | JaredKorthuis/SeniorDesign | /test_script_algorithm.py | UTF-8 | 787 | 2.59375 | 3 | [] | no_license | from jared_algorithm import TableData
if(TableData(20,6,23,90,60,0,0,0,0,0,0,0,0,0,0,0,97330,0)=='very good'):
print "TEST VERY GOOD PASSED"
else:
print "TEST VERY GOOD FAILED!"
if(TableData(30,5,25,110,70,0,0,0,0,0,0,0,0,0,0,0,97231,0)=='good'):
print "TEST GOOD PASSED"
else:
print "TEST GOOD FAILED!"
if(TableData(40,3,26,120,80,1,1,1,1,1,1,1,1,1,1,1,97456,1)=='average'):
print "TEST AVERAGE PASSED"
else:
print "TEST AVERAGE FAILED"
if(TableData(50,2,28,130,100,1,1,1,1,1,1,1,1,1,1,1,97312,1)=='bad'):
print "TEST BAD PASSED"
else:
print "TEST BAD FAILED"
if(TableData(60,1,30,140,120,1,1,1,1,1,1,1,1,1,1,1,97265,1)=='very bad'):
print "TEST VERY BAD PASSED"
else:
print "TEST VERY BAD FAILED"
| true |
9ae1ade199f056b84fadc169bbc8d600e6b2c59a | Python | amirtha4501/Guvi | /Companies/company8.py | UTF-8 | 186 | 2.90625 | 3 | [] | no_license | n = int(input())
arr = list(map(int, input().split()))
if n<=1000000:
mini = min(arr)
maxi = max(arr)
i1 = arr.index(mini)
i2 = arr.index(maxi)
d = abs(i1-i2)
print(d) | true |
e2ddb52dc4222f7a16d97427efdcce7031a3d994 | Python | pabha9/FuzzBizzFibbonacciGame | /swift_nav.py | UTF-8 | 1,406 | 3.84375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 13:23:28 2017
@author: A
"""
def fib(n): #generate Fibonacci numbers through recursion
if n < 2:
return n
else:
return fib(n-1) + fib(n-2)
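# Optional memoised variant (not used by the game loop below): the naive recursion above
# is exponential, while caching makes it linear in n.
from functools import lru_cache
@lru_cache(maxsize=None)
def fib_memo(n):
    if n < 2:
        return n
    return fib_memo(n-1) + fib_memo(n-2)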
def primeChecker(n): #checking for prime numbers
if n < 2:
return False
if n == 2:
return True
if not n & 1:
return False
if n > 2:
for i in range(3,int(n**0.5)+1,2):#need to go up to the square root of n + 1, the range accounts for it by starting from 3, this part was taken from a forum on stack overflow
if n%i == 0:
return False
return True
def divisibleChecker(n):
a = primeChecker(n)
if a == False:
if n % 15 ==0:
print("Buzz, Fizz, FizzBuzz")
elif n % 3 == 0:
print("Buzz")
elif n % 5 == 0:
print("Fizz")
else:
print("corresponding fibbonacci number:", n)
elif a == True:
if n == 3:
print("Buzz, BuzzFizz")
elif n == 5:
print("Fizz, BuzzFizz")
else:
print("BuzzFizz")
results = []
n = int(input("Please enter a number:"))
for i in range(1,n):
#print(fib(i))
results.append(divisibleChecker(fib(i)))
# print(fib(i))
| true |
f3661c73b8eee42a8c65d5be335f77df9551dafc | Python | llwsykll/leetCode | /Excel Sheet Column Title/ESCT.py | UTF-8 | 683 | 3.578125 | 4 | [] | no_license | class Solution:
def convertToTitle(self, n: int) -> str:
arr=["A","B","C","D","E","F","G","H","I","J","K","L","M",
"N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
res = ""
while(n>26):
res += arr[n%26-1]
n = int(n/26) if n%26!=0 else int(n/26)-1
res +=arr[n-1]
return res[::-1]
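# A second take on the same problem follows; because it reuses the name Solution, the
# later definition overrides this one if the module is imported as-is.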
class Solution:
def convertToTitle(self, n: int) -> str:
arr=[chr(x) for x in range(ord('A'), ord('Z')+1)]
res = ""
while(n>26):
nn = n%26
nm =int(n/26)
res += arr[nn-1]
n = nm if nn!=0 else nm-1
res +=arr[n-1]
return res[::-1] | true |
a30f0134c0b61a8a4d17d8a3330b2985570c21ed | Python | Jartreg/prometheus-https-demo | /scripts/arpspoof/arpspoof_main.py | UTF-8 | 1,982 | 2.59375 | 3 | [] | no_license | import subprocess
import os
command = None
ipAddress = None
ipAddressTwo = None
ipAdressConfirmed = False
ipAdressTwoConfirmed = False
wrongParameterVar = False
def banner():
print(" ___ ____________ _____ __ ")
print(" / _ \ | ___ \ ___ \ / ___| / _|")
print(" / /_\ \| |_/ / |_/ / \ `--. _ __ ___ ___ | |_ ")
print(" | _ || /| __/ `--. \ '_ \ / _ \ / _ \| _|")
print(" | | | || |\ \| | /\__/ / |_) | (_) | (_) | | ")
print(" \_| |_/\_| \_\_| \____/| .__/ \___/ \___/|_| ")
print(" | | ")
print(" |_| ")
def wrongParameter():
if wrongParameterVar==True:
print("Wrong Parameter")
def writeFile():
    target = open("ip.txt", "w")
    target.write("%s\n%s\n" % (ipAddress, ipAddressTwo))
    target.close()
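# Assumptions: this script shells out to the external `arpspoof` tool from the dsniff
# package, so it must be installed and the script typically needs root privileges; for a
# full man-in-the-middle setup, IP forwarding usually has to be enabled separately
# (e.g. echo 1 > /proc/sys/net/ipv4/ip_forward).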
while ipAdressConfirmed == False:
os.system("clear")
banner()
print("Enter IP-Address to be forwarded")
wrongParameter()
ipAddress = input()
print("The IP-Adress to be forwarded ist", ipAddress)
print("1) Approve 2)Re-enter")
option = str(input())
if option == '1':
ipAdressConfirmed = True
wrongParameterVar = False
elif option == '2':
pass
else:
wrongParameterVar = True
while ipAdressTwoConfirmed==False:
os.system("clear")
banner()
print("Enter Host's IP-Address")
wrongParameter()
ipAddressTwo = input()
print("The Host's IP-Adress is: ", ipAddressTwo)
print("1) Approve 2)Re-enter")
option = str(input())
if option == '1':
ipAdressTwoConfirmed = True
writeFile()
wrongParameterVar = False
command = ("arpspoof -t %s %s") % (ipAddress,ipAddressTwo)
os.system("clear")
subprocess.check_output(['bash', '-c', command])
elif option == '2':
pass
else:
wrongParameterVar = True | true |
3dde1d8ad8049ca09a4e8c31afe7cfe796812e51 | Python | parnurzeal/boolean_retrieval_search | /src/main/py/boolean_retrieval.py | UTF-8 | 14,570 | 2.578125 | 3 | [] | no_license | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: teerapol.watanavekin
#
# Created: 20/07/2012
# Copyright: (c) teerapol.watanavekin 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
from bi_inverted_index import Inverted_Index, CNode
import glob
import os
import string
import pickle
import shunting_yard
class Search_System:
"a search system"
inverted_index = None
def __init__(self): #constructor
self.inverted_index = Inverted_Index()
    @staticmethod
    def rank_answer(doc_list, score_list):
        "sort the documents in doc_list by their scores, highest score first"
        mix_list = zip(doc_list, score_list)
        sorted_list = sorted(mix_list, key=lambda value: value[1], reverse=True)
        answer_list = [i for i, j in sorted_list]
        return answer_list
def search3(self,query):
"search for many query with boolean expression"
root = self.inverted_index.root
query_list = query.split()
expression = shunting_yard.infixToRPN(query_list)
expression.reverse()
stack = []
plist_num=0
plist_hashlist={}
#[2012/08/22 added] to initial variables to find score
slist_num=0
slist_hashlist={}
# just find that term if doing only single word query.
if len(expression) ==0:
return []
elif len(expression) <= 1:
word = expression.pop()
answer_last= self.inverted_index.lookup(root,word)
# get node to find tf-idf
node_answer_last = self.inverted_index.lookupNode(root,word)
if(node_answer_last==None):
return []
tf_idf_list = node_answer_last.get_tf_idf_list(self.inverted_index.docCount,self.inverted_index.highest_tc)
ranked_answer = Search_System.rank_answer(answer_last,tf_idf_list)
return ranked_answer
# in case of > 1 word query
while len(expression)!=0:
token = expression.pop()
if shunting_yard.isOperator(token):
word1 = stack.pop()
if( not word1 in plist_hashlist ):
answer_list1 = self.inverted_index.lookup(root,word1)
# [2012/08/22 added] to get score of word
node1 = self.inverted_index.lookupNode(root,word1)
if(node1!=None):
score_list1 = node1.get_tf_idf_list(self.inverted_index.docCount,self.inverted_index.highest_tc)
else: score_list1 = []
else:
answer_list1 = plist_hashlist[word1]
# [2012/08/22 added] to get score of word
score_list1 = slist_hashlist[word1]
word2 = stack.pop()
if( not word2 in plist_hashlist ):
answer_list2 = self.inverted_index.lookup(root,word2)
# [2012/08/22 added] to get score of word
node2 = self.inverted_index.lookupNode(root,word2)
if(node2!=None):
score_list2 = node2.get_tf_idf_list(self.inverted_index.docCount,self.inverted_index.highest_tc)
else: score_list2 = []
else:
answer_list2 = plist_hashlist[word2]
# [2012/08/22 added] to get score of word
score_list2 = slist_hashlist[word2]
if(token=="&&"):
answer_last = self.intersect(answer_list1,answer_list2)
# [2012/08/22 added] to get score of word
score_list_last = self.intersect_score(answer_list1,answer_list2,score_list1,score_list2)
elif(token=="||"):
answer_last = self.union(answer_list1,answer_list2)
# [2012/08/22 added] to get score of word
score_list_last = self.union_score(answer_list1,answer_list2,score_list1,score_list2)
else:
print("Wrong Operator: program will exit")
exit()
stack.append("plist"+str(plist_num))
plist_hashlist["plist"+str(plist_num)] = answer_last
plist_num+=1
#[2012/08/22 added] to store new score list in hash table
slist_hashlist["plist"+str(slist_num)] = score_list_last
slist_num+=1
else:
stack.append(token)
# get all the last answers
#[2012/08/22 added] to store new score list in hash table
last_word = stack.pop()
answer = plist_hashlist[last_word];
#[2012/08/22 added] to get answer score list in hash table
s_answer = slist_hashlist[last_word]
print(answer)
print(s_answer)
ranked_answer = Search_System.rank_answer(answer,s_answer)
return ranked_answer
def union(self, sorted_plist1, sorted_plist2):
"union of two postings lists plist1 and plist2, in other word, it is operand OR"
answer = []
p1,p2=0,0
while(p1<len(sorted_plist1) and p2<len(sorted_plist2)):
if(sorted_plist1[p1]==sorted_plist2[p2]):
answer.append(sorted_plist1[p1])
p1+=1;p2+=1
elif(sorted_plist1[p1][0]<sorted_plist2[p2][0]):
answer.append(sorted_plist1[p1])
p1+=1
else:
answer.append(sorted_plist2[p2])
p2+=1
while(p1<len(sorted_plist1)):
answer.append(sorted_plist1[p1])
p1+=1
while(p2<len(sorted_plist2)):
answer.append(sorted_plist2[p2])
p2+=1
return answer
# [2012/08/22 added] find union of 2 score lists
def union_score(self, sorted_plist1, sorted_plist2,slist1,slist2):
"union of two postings lists plist1 and plist2, in other word, it is operand OR"
answer = []
s_answer=[]
p1,p2=0,0
while(p1<len(sorted_plist1) and p2<len(sorted_plist2)):
if(sorted_plist1[p1]==sorted_plist2[p2]):
answer.append(sorted_plist1[p1])
s_answer.append(slist1[p1]+slist2[p2])
p1+=1;p2+=1
elif(sorted_plist1[p1][0]<sorted_plist2[p2][0]):
answer.append(sorted_plist1[p1])
s_answer.append(slist1[p1])
p1+=1
else:
answer.append(sorted_plist2[p2])
s_answer.append(slist2[p2])
p2+=1
while(p1<len(sorted_plist1)):
answer.append(sorted_plist1[p1])
s_answer.append(slist1[p1])
p1+=1
while(p2<len(sorted_plist2)):
answer.append(sorted_plist2[p2])
s_answer.append(slist2[p2])
p2+=1
return s_answer
def intersect(self,sorted_plist1,sorted_plist2):
"intersection of two postings lists plist1 and plist2, in other word, it is operand AND"
answer = []
p1,p2=0,0
while(p1<len(sorted_plist1) and p2<len(sorted_plist2)):
if(sorted_plist1[p1]==sorted_plist2[p2]):
answer.append(sorted_plist1[p1])
p1+=1; p2+=1
elif(sorted_plist1[p1][0]<sorted_plist2[p2][0]):
p1+=1
else: p2+=1
return answer
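        # worked example: with postings sorted by docID,
        #   plist1 = [(0, 'a.txt'), (2, 'c.txt'), (5, 'f.txt')]
        #   plist2 = [(2, 'c.txt'), (3, 'd.txt'), (5, 'f.txt')]
        # the merge above yields [(2, 'c.txt'), (5, 'f.txt')] in O(len(plist1) + len(plist2)) time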
# [2012/08/22 added] find intersection of 2 score lists
def intersect_score(self,sorted_plist1,sorted_plist2,slist1,slist2):
"intersection of two postings lists plist1 and plist2, in other word, it is operand AND"
answer = []
s_answer= []
p1,p2=0,0
while(p1<len(sorted_plist1) and p2<len(sorted_plist2)):
if(sorted_plist1[p1]==sorted_plist2[p2]):
answer.append(sorted_plist1[p1])
s_answer.append(slist1[p1]+slist2[p2])
p1+=1; p2+=1
elif(sorted_plist1[p1][0]<sorted_plist2[p2][0]):
p1+=1
else: p2+=1
return s_answer
def load_index(self,path):
"load inverted index to search system"
self.inverted_index.clean(self.inverted_index.root)
if(not os.path.isfile(path)):
print("no such file - "+path)
return False
with open(path,'rb') as index_file:
self.inverted_index = pickle.load(index_file)
#self.inverted_index.printTree(self.inverted_index.root)
return True
class Index_System:
"an inverted index structure with hash table as dictionary and simple list as docID list"
root, inverted_index, docList = None, None, []
stop_words=["a","an","and","are","as","at","be","by","for","from","has","he","in","is","it","its","of","on","that","the","to","was","were","will","with"]
def __init__(self): #constructor
self.inverted_index = Inverted_Index()
self.docList = []
# TODO:
def normalize(word):pass
def check_stopword(self,word):
for stop_word in self.stop_words:
if(stop_word ==word):
return True
return False
def create(self,folder_path,index_path):
"create inverted index from all text files in path"
# clean old inverted index and docList in memory
if(not self.root == None):
self.inverted_index.clean(self.root)
self.inverted_index = Inverted_Index()
self.root = None
self.docList[:] =[]
# check whether folder does exist?
if(not os.path.isdir(folder_path)):
return False
cwd = os.getcwd()
os.chdir(folder_path)
docCount=0
for file in glob.glob("*"):
fp = open(file)
self.docList.append((docCount,file))
#print(self.docList[docCount])
# find max term count
max_tc = 0
for i,line in enumerate(fp):
for word in line.split():
if(self.check_stopword(word)):continue
if(self.root==None):
self.root = self.inverted_index.createNode(word,self.docList[docCount])
self.inverted_index.setRoot(self.root)
else:
self.inverted_index.insert(self.root,word,self.docList[docCount])
fp.close()
docCount=docCount+1
self.inverted_index.setDocCount(docCount)
os.chdir(cwd)
with open(index_path,'wb') as index_file:
pickle.dump(self.inverted_index,index_file)
return True
def show(self):
"show the inverted index data"
print("All inverted index data is as below:")
print("--------------- Inverted Index ----------------")
self.inverted_index.printTree(self.root)
print("-----------------------------------------------")
print()
def process_search_command(search_system,command):
"process search command"
cmdlist = command.split()
cmdlist_count = len(cmdlist)
if(cmdlist_count==3 and cmdlist[1]=="-l"):
search_system.load_index(cmdlist[2])
return
if(cmdlist_count>=2):
query = command[7:]
print("--------------- Search Results ----------------")
print("Query : " + query)
answer = search_system.search3(query)
print("Doc list :", end= '')
print(answer)
print("-----------------------------------------------")
print()
else:
print("Option or parameter is yet assigned")
print()
def process_index_command(index_system,command):
"process index command"
cmdlist = command.split()
cmdlist_count = len(cmdlist)
if(cmdlist_count==2 and cmdlist[1]=="-s"):
index_system.show()
elif(cmdlist_count==4):
if(cmdlist[1]=="-r" and index_system.create(cmdlist[2],cmdlist[3])):
print("Successful created")
else:
print("no directory - "+cmdlist[2])
else: print("wrong usage")
print()
return
def process_command(index_system,search_system, command):
"categorize command and send to function process_index_command or process_search_command"
cmdlist = command.split()
if(cmdlist==[]):
return True
if(cmdlist[0]=='search'):
process_search_command(search_system,command)
elif(cmdlist[0]=='index'):
process_index_command(index_system,command)
elif(cmdlist[0]=='quit' or cmdlist[0]=='exit'):
return False
else:
print(cmdlist[0] + " is not recognized as a command.")
return True
def print_welcome_screen():
print("\nWelcome to Basic Search System!\n")
print("-------------------------------------USAGE------------------------------------")
print("There are 2 commands in the system as below:")
print()
print("1) index - all about index in search system.")
print(" - Description : This command is used for creating the inverted index from the \n\t\ttext files provided.")
print(" - Synopsis : index [ -s ], index [ -r path ]")
print(" - Option \n \
1.1) index [ -s ] --> show all data in inverted index.\n \
1.2) index [ -r path index_file_name] --> create inverted index from all text files in\n\t\tprovided path and return index file named as index_file_name parameter.\n \
")
print("2) search - search for provided query from inverted index.")
print(" - Description : The command will start to search using single query provided.")
print(" - Synopsis : search [ single_query ]")
print(" - Option")
print(" 2.1) search [-l index_file] --> load inverted index into search system")
print(" 2.2) search [ single_query ]--> search using single query")
print("------------------------------------------------------------------------------")
print()
def main():
# declare system class
index_system = Index_System()
search_system = Search_System()
print_welcome_screen()
input_text = input('$')
while(process_command(index_system,search_system,input_text)):
input_text= input('$')
if __name__ == '__main__':
main()
| true |
db7a61d356eec3906537c0c35b5f8e14ba945af1 | Python | danheeks/PySim | /SimControls.py | UTF-8 | 4,064 | 2.515625 | 3 | [] | no_license | import wx
import math
class SimControls(wx.Window):
def __init__(self, parent):
wx.Window.__init__(self, parent )
self.toolpath = None
sizer = wx.BoxSizer()
self.play_button = wx.BitmapButton(self, bitmap = wx.Bitmap('bitmaps/play.png'))
self.pause_button = wx.BitmapButton(self, bitmap = wx.Bitmap('bitmaps/pause.png'))
self.reset_button = wx.BitmapButton(self, bitmap = wx.Bitmap('bitmaps/reset.png'))
self.view_reset_button = wx.BitmapButton(self, bitmap = wx.Bitmap('bitmaps/view_reset.png'))
self.play_button.SetBitmapDisabled(wx.Bitmap('bitmaps/play_d.png'))
self.pause_button.SetBitmapDisabled(wx.Bitmap('bitmaps/pause_d.png'))
self.reset_button.SetBitmapDisabled(wx.Bitmap('bitmaps/reset_d.png'))
button_sizer = wx.BoxSizer()
button_sizer.Add(self.play_button)
button_sizer.Add(self.pause_button)
button_sizer.Add(self.reset_button)
button_sizer.Add(self.view_reset_button)
self.SpeedLabel = wx.StaticText(self, label = 'mm per second')
self.SpeedEdit = wx.TextCtrl(self)
speed_text_sizer = wx.BoxSizer()
speed_text_sizer.Add(self.SpeedLabel)
speed_text_sizer.Add(self.SpeedEdit)
self.SpeedSlider = wx.Slider(self, value = 0, minValue = 0.0, maxValue = 100.0)
speed_sizer = wx.BoxSizer(wx.VERTICAL)
speed_sizer.Add(speed_text_sizer)
speed_sizer.Add(self.SpeedSlider)
sizer.Add(speed_sizer)
sizer.Add(button_sizer)
#mark all controls as disabled until a toolpath object has been set
self.play_button.Enable(False)
self.pause_button.Enable(False)
self.reset_button.Enable(False)
self.view_reset_button.Enable(False)
self.SpeedLabel.Enable(False)
self.SpeedEdit.Enable(False)
self.SpeedSlider.Enable(False)
self.Bind(wx.EVT_TEXT, self.OnSpeedEdit, self.SpeedEdit)
self.Bind(wx.EVT_SLIDER, self.OnSpeedSlider, self.SpeedSlider)
self.Bind(wx.EVT_BUTTON, self.OnPlayButton, self.play_button)
self.Bind(wx.EVT_BUTTON, self.OnPauseButton, self.pause_button)
self.Bind(wx.EVT_BUTTON, self.OnResetButton, self.reset_button)
self.Bind(wx.EVT_BUTTON, self.OnViewResetButton, self.view_reset_button)
self.SetAutoLayout(True)
self.SetSizer(sizer)
sizer.SetSizeHints(self)
sizer.Layout()
sizer.SetSizeHints(self)
def SetFromSimulation(self, toolpath):
self.toolpath = toolpath
self.SpeedEdit.SetValue(str(self.toolpath.mm_per_sec))
self.SpeedSlider.SetValue(math.sqrt(self.toolpath.mm_per_sec))
self.play_button.Enable(True)
self.pause_button.Enable(False)
self.reset_button.Enable(False)
self.view_reset_button.Enable(True)
self.SpeedLabel.Enable(True)
self.SpeedEdit.Enable(True)
self.SpeedSlider.Enable(True)
def OnSpeedSlider(self, event):
self.toolpath.mm_per_sec = math.pow(self.SpeedSlider.GetValue(), 2)
self.SpeedEdit.SetValue(str(self.toolpath.mm_per_sec))
def OnSpeedEdit(self, event):
self.toolpath.mm_per_sec = float(self.SpeedEdit.GetValue())
self.SpeedSlider.SetValue(math.sqrt(self.toolpath.mm_per_sec))
def OnPlayButton(self, event):
self.toolpath.running = True
self.pause_button.Enable(True)
self.reset_button.Enable(True)
self.play_button.Enable(False)
def OnPauseButton(self, event):
self.toolpath.running = False
self.pause_button.Enable(False)
self.play_button.Enable(True)
def OnResetButton(self, event):
self.pause_button.Enable(False)
self.play_button.Enable(True)
self.reset_button.Enable(False)
self.toolpath.OnReset()
def OnViewResetButton(self, event):
self.GetParent().OnViewReset()
| true |
9acd6702f5b358411d5ab902c1f5740b27db647d | Python | alifar76/MAWQ | /src/mawq_miseq_localhost.py | UTF-8 | 19,242 | 2.671875 | 3 | [
"MIT"
] | permissive | """ Added --prefilter_percent_id to pick_open_reference_otus.py on August 28, 2014"""
import os
import commands
import sys
import re
import subprocess
import logging
from datetime import datetime
def input_file_help():
print """
Help me please!!
The input file should be tab-delimited file with .txt extension. The first column should be
folder name of the MiSeq run. The second column should be the name of the mapping file of the run along
with its .txt extension. There should be no trailing white spaces or empty last lines.
Following is how a correct file should be:
140401_M01869_0071_000000000-A7YEF mapping_file_run1.txt
140407_M01869_0073_000000000-A7WVG mapping_file_run2.txt
"""
def input_check(infile):
""" Checks if input file name is entered correctly """
if infile == "":
print "Error: File name not provided!"
mapfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
return input_check(mapfile)
elif infile.lower() == "help":
input_file_help()
mapfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
return input_check(mapfile)
else:
working_folder = commands.getstatusoutput('pwd')[1]
filelist = os.listdir(working_folder)
if infile not in filelist:
print "Error: File doesn't exist!"
mapfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
return input_check(mapfile)
else:
maplist = []
infl = open(infile, 'rU')
for line in infl:
spline = line.strip().split("\t")
if len(spline) != 2:
print "Error: File is not in proper format. There's missing data, no tab-seperation and/or extra empty line(s)."
mapfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
return input_check(mapfile)
else:
maplist.append(spline[1])
return maplist, infile # Returns list of mapping files along with name of input file
def mapping_check(maplist):
"""Checks if mapping file name is correct and runs validate_mapping_file.py script """
for mapfile in maplist:
with open(os.devnull, "w") as fnull:
result = subprocess.call(["ls", mapfile], stdout = fnull, stderr = fnull)
if result != 0: # Return code is 0 is ls command is successful
print "Error: One or more of your mapfiles is not present in your current working directory"
mapfile2 = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
inp_check = input_check(mapfile2)
return mapping_check(inp_check[0])
for mapfile in maplist:
filename = mapfile.strip().split(".txt")[0]
os.system("validate_mapping_file.py -m %s -o corrected_%s" % (mapfile.strip(),filename))
os.system("mv $PWD/corrected_%s/%s_corrected.txt ." % (filename,filename))
corrected_files = [mapfile.strip().split(".txt")[0]+"_corrected.txt" for mapfile in maplist]
return corrected_files
def check_value(expression,question,arg):
""" Function to check if input parameters are correct """
try:
if arg == "integer":
return str(int(expression))
if arg == "float":
return str(float(expression))
except ValueError:
if expression == "":
return expression
else:
print "Invalid value. Please enter a number or just hit enter for default value."
checker = raw_input(question)
return check_value(checker,question,arg)
def log_output(statement):
""" Logs and prints output messages """
logging.basicConfig(filename='logging_module_output.txt',level=logging.INFO)
logging.info(statement)
print statement
def log_parse(outfile,inputfile):
output = open(outfile, "w")
infile = open(inputfile, 'rU')
for line in infile:
if line.startswith("INFO:root:"):
linename = line.strip().split("INFO:root:")
if linename[1] != '':
output.write(linename[1]+"\n")
else:
output.write(line.strip()+"\n")
output.close()
return output
def order_index(flashread,indexfile):
""" Shoko's script as a function """
headerData = open(flashread,"rU").read().strip()
headers = headerData.split("\n")
IndexData = open(indexfile,"rU")
IndexSeqs = {}
while IndexData:
headerline=IndexData.readline().split("\n")[0]
Contents = ''
if headerline == '':
break
for i in range(0,3):
Contents += IndexData.readline().split("\n")[0] + "\n"
IndexSeqs[headerline]=Contents
outdata=''
for j in headers:
outdata += j + "\n" + IndexSeqs[j]
of = open("Index_filtered_ordered.fastq","w")
of.write(outdata)
IndexData.close()
of.close()
return of
def preprocess_steps(seq_data,m_min,read_len,runmap_file):
""" Unzipping, flashing, pre-processing """
if m_min == "":
m_min += "225"
if read_len == "":
read_len += "251"
if seq_data == "":
seq_data += "/data/MiSeq_16S_data/MiSeqAnalysis"
log_output("\nRead length for flash: %s" % read_len)
log_output("Min. overlap for flash: %s" % m_min)
folders = []
infile = open(runmap_file, 'rU')
for line in infile:
spline = line.strip().split("\t")
folders.append(spline[0])
for seqs_id in folders:
working_folder = commands.getstatusoutput('pwd')[1]
seq_path = "%s/%s/Data/Intensities/BaseCalls/" % (seq_data,seqs_id)
os.chdir(seq_path)
log_output("\n#Step 1: Gunzipping sequence reads files in MiSeqAnalysis folder...")
os.system("gunzip Undetermined_*")
log_output("Gunzipping complete!")
log_output("\n#Step 2: Assembling R1 and R2 using flash...")
os.system("flash -r %s -f 300 -s 30 -m %s -d $PWD/Output_folder_%s/ -q Undetermined_S0_L001_R1_001.fastq Undetermined_S0_L001_R2_001.fastq" % (read_len, m_min,seqs_id))
log_output("flash complete!")
os.system("mv -f Output_folder_%s/ %s" % (seqs_id,working_folder))
os.chdir(working_folder)
log_output("\n#Step 3: Removing barcode reads from index file that are not in assembled file...")
os.system("sed -n '1~4'p $PWD/Output_folder_%s/out.extendedFrags.fastq >FLAShReads.txt" % seqs_id) # Select the headers of all sequences generated. -n flag is for quiet output. '1~4'p means starting from 1, select every 4 lines after it.
log_output("Barcode removal complete!")
log_output("\n#Step 4: Extracting those reads from index file and order them the same as flash reads")
order_index("FLAShReads.txt","%s/Undetermined_S0_L001_I1_001.fastq" % seq_path)
log_output("Extraction complete!")
os.chdir(seq_path)
log_output("\n#Step 5: Gzipping back the sequence files in MiSeqAnalysis folder...")
os.system("gzip Undetermined_S0_L001_*")
os.chdir(working_folder)
os.system("mv Index_filtered_ordered.fastq Index_filtered_ordered_run_%s.fastq" % seqs_id)
log_output("Gzip complete!")
return
def split_library(runmap_file,phred,max_bad_run,min_rl_frac,n_chars,barcode,start_seq):
""" Function to split libraries """
if phred == "":
phred += "30"
if max_bad_run == "":
max_bad_run += "3"
if min_rl_frac == "":
min_rl_frac += "0.75"
if n_chars == "":
n_chars += "0"
if barcode == "":
barcode += "12"
if start_seq == "":
start_seq += "0"
log_output("Phred score: %s" % phred)
log_output("Max number of consecutive low quality base calls allowed before truncating a read: %s" % max_bad_run)
log_output("Min number of consecutive high quality base calls to include a \
read (per single end read) as a fraction of the input read length: %s" % min_rl_frac)
log_output("Max number of N characters allowed in a sequence to retain it: %s" % n_chars)
log_output("The type of barcode used: %s" % barcode)
log_output("The start seq_ids as ascending integers beginning with start_seq_id: %s" % start_seq)
os.system("mkdir fna_files/")
run_map_dict = {}
infile = open(runmap_file, 'rU')
for line in infile:
spline = line.strip().split("\t")
run_map_dict[spline[0]] = spline[1].strip().split(".txt")[0]+"_corrected"+".txt" #Run IDs as keys and mapping filenames as values
for fold_id in run_map_dict:
folder = "Output_folder_"+fold_id
mapfile = run_map_dict[fold_id]
log_output("\n#Step 6: Splitting libraries using 'split_libraries_fastq.py'...")
os.system('split_libraries_fastq.py -i %s/out.extendedFrags.fastq -m %s \
-o split_lib_output_%s/ -q %s -r %s -p %s -n %s\
--rev_comp_barcode -b Index_filtered_ordered_run_%s.fastq \
--barcode_type %s -s %s' % (folder,mapfile,fold_id,phred,max_bad_run,min_rl_frac,n_chars,fold_id,barcode,start_seq))
log_output("split_libraries_fastq.py complete!")
os.system("mv $PWD/split_lib_output_%s/seqs.fna seqs_%s.fna" % (fold_id,fold_id))
os.system("mv seqs_%s.fna fna_files/" % fold_id)
return
def open_otus_till_biom(parallel,ref_db,prefilt_id):
""" Open OTU picking and other steps """
if parallel == "":
parallel += "4"
if ref_db == "":
ref_db += "/data/Greengenes_Database_May_2013/gg_13_5_otus/rep_set/97_otus.fasta"
if prefilt_id == "":
prefilt_id = "0.6"
os.system("cat fna_files/*.fna > fna_files/seqs_cat.fna")
log_output("\n#Step 7: Picking open-references OTUs using 'pick_open_reference_otus.py'...")
os.system("pick_open_reference_otus.py -i fna_files/seqs_cat.fna -o open_otus_picked/ -aO %s -r %s --prefilter_percent_id %s" % (parallel,ref_db,prefilt_id)) #4:57:19.015901
log_output("OTU picking caused errors, but we'll be able to proceed!")
os.system('cp /opt/qiime_software/core_set_aligned.fasta.imputed $PWD')
os.system('mv core_set_aligned.fasta.imputed core_set_aligned_imputed.fasta')
log_output("\n#Step 8.0: Aligning sequences to template using 'parallel_align_seqs_pynast.py'...")
os.system('parallel_align_seqs_pynast.py -i open_otus_picked/rep_set.fna -o open_otus_picked/pynast_aligned_seqs \
-t $PWD/core_set_aligned_imputed.fasta --jobs_to_start %s' % parallel) #0:10:03.814598
log_output("parallel_align_seqs_pynast.py complete!")
log_output("\n#Step 8.1: Making OTU table by filtering alignment to remove sequences that did not align using 'make_otu_table.py'...")
os.system('make_otu_table.py -i $PWD/open_otus_picked/final_otu_map_mc2.txt -o $PWD/open_otus_picked/otu_table_mc2_no_pynast_failures_w_tax.biom \
-e $PWD/open_otus_picked/pynast_aligned_seqs/rep_set_failures.fasta -t $PWD/open_otus_picked/uclust_assigned_taxonomy/rep_set_tax_assignments.txt') #0:00:17.171876
log_output("make_otu_table.py complete!")
log_output("\n#Step 8.2: Identifying chimeric sequences using 'parallel_identify_chimeric_seqs.py'...")
os.system('parallel_identify_chimeric_seqs.py -i $PWD/open_otus_picked/pynast_aligned_seqs/rep_set_aligned.fasta -a $PWD/core_set_aligned_imputed.fasta \
-m ChimeraSlayer -o $PWD/chimeraslayer_chimeric_seqs.txt -O %s' % parallel) # 5:23:27.544627 # -m flag does not have uchiime
log_output("parallel_identify_chimeric_seqs.py complete!")
log_output("\n#Step 9: Filtering chimeric sequences out of the alignment file using 'filter_fasta.py'...")
os.system('filter_fasta.py -f $PWD/open_otus_picked/pynast_aligned_seqs/rep_set_aligned.fasta -o $PWD/non_chimeric_rep_set_aligned.fasta \
-s $PWD/chimeraslayer_chimeric_seqs.txt -n') #0:00:01.763882
log_output("filter_fasta.py complete!")
log_output("\n#Step 10: Filtering non_chimeric_rep_set_aligned.fasta to remove gaps using 'filter_alignment.py'...")
os.system('filter_alignment.py -i $PWD/non_chimeric_rep_set_aligned.fasta -m /opt/qiime_software/lanemask_in_1s_and_0s \
-o $PWD/non_chimeric_pynast_filtered/') #0:00:04.075475
log_output("filter_alignment.py complete!")
log_output("\n#Step 11: Building new phylogenetic tree using 'make_phylogeny.py'...")
os.system('make_phylogeny.py -i $PWD/non_chimeric_pynast_filtered/non_chimeric_rep_set_aligned_pfiltered.fasta \
-o non_chimeric_rep_set_aligned_pfiltered.tre') #0:07:56.575952
log_output("make_phylogeny.py complete!")
log_output("\n#Step 12: Filtering chimeric OTUs from the OTU table using 'filter_otus_from_otu_table.py'...")
os.system('filter_otus_from_otu_table.py -i $PWD/open_otus_picked/otu_table_mc2_no_pynast_failures_w_tax.biom \
-o otu_table_mc2_no_pynast_failures_no_chimeras_w_tax.biom -e chimeraslayer_chimeric_seqs.txt') #0:00:15.945285
log_output("filter_otus_from_otu_table.py complete!")
log_output("\n#Step 13: Writing biom table summary using 'biom summarize-table'...")
os.system('biom summarize-table -i otu_table_mc2_no_pynast_failures_no_chimeras_w_tax.biom \
-o otu_table_mc2_no_pynast_failures_no_chimeras_lowfilter_w_tax_biom_summary_mc2.txt') #0:00:01.602008
log_output("biom summarize-table complete!")
return
def summary_view(viewtable):
""" Function to show biom summary table """
if viewtable.lower() == 'yes':
os.system('less otu_table_mc2_no_pynast_failures_no_chimeras_lowfilter_w_tax_biom_summary_mc2.txt')
elif viewtable.lower() == 'no':
print "No is not an option!"
table = raw_input("The summary table of the final OTU table is ready. Type 'yes' to view it. \
Once viewed, you can quit by simply typing q. Are you ready? ")
return summary_view(table)
else:
print "I don't understand."
table = raw_input("The summary table of the final OTU table is ready. Type 'yes' to view it. \
Once viewed, you can quit by simply typing q. Are you ready? ")
return summary_view(table)
def rarefaction_check(depth):
""" Check value of rarefaction depth """
try:
return str(int(float(depth)))
except ValueError:
if depth == "":
print "No number of sequences provided to subsample for rarefaction."
dep = raw_input("1) What is the number of sequences to subsample per sample [-d flag]? (No default): ")
return rarefaction_check(dep)
else:
print "Non-integer value given for number of sequences to subsample for rarefaction."
dep = raw_input("1) What is the number of sequences to subsample per sample [-d flag]? (No default): ")
return rarefaction_check(dep)
def summary_plots(depth,merge_metadata):
""" Create alpha, beta and taxa summary plots """
log_output("\n#Step 14: Performing single rarefaction on OTU table using 'single_rarefaction.py'...")
os.system('single_rarefaction.py -i otu_table_mc2_no_pynast_failures_no_chimeras_w_tax.biom -o single_rarefied_otu_table.biom -d %s' % depth)
log_output("single_rarefaction.py complete!")
log_output("\n#Step 15: Summarizing and plotting taxa using 'summarize_taxa_through_plots.py'...")
os.system('summarize_taxa_through_plots.py -o taxa_summary -i single_rarefied_otu_table.biom -m %s' % merge_metadata)
log_output("summarize_taxa_through_plots.py complete!")
log_output("\n#Step 16: Calculating alpha-diversity using 'alpha_rarefaction.py'...")
os.system('alpha_rarefaction.py -i single_rarefied_otu_table.biom -o alpha_rarefaction/ -t non_chimeric_rep_set_aligned_pfiltered.tre \
-m %s --retain_intermediate_files' % merge_metadata)
log_output("alpha_rarefaction.py complete!")
log_output("\n#Step 17: Calculating beta-diversity using 'beta_diversity_through_plots.py'...")
os.system('beta_diversity_through_plots.py -i single_rarefied_otu_table.biom -o beta_diversity/ -t non_chimeric_rep_set_aligned_pfiltered.tre \
-m %s' % merge_metadata)
log_output("beta_diversity_through_plots.py complete!")
return
if __name__ == "__main__":
print "\n\t\t\t\033[1mWelcome to the Microbiome Analysis through Workflow of QIIME, MAWQ program (pronounced 'mock') brought to you by the Lynch Lab!\033[0m"
print "\tTo run the script with default parameters, just press enter to each question without entering a value. To \
exit the pipeline at any point in time, press Ctrl+C\n\n"
try:
inputfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
checked = input_check(inputfile)
inputfile = checked[1]
mapping_check(checked[0])
seq_data = raw_input("1) What's the path to the MiSeq run folder? (Default: /data/MiSeq_16S_data/MiSeqAnalysis) ")
print "\nThe following questions are for flash program: \n"
flash_q1 = "1) What's the minimum overlap length between reads [-m flag]? (Default: 225, if length of Read 2 > 250) "
m_min = check_value(raw_input(flash_q1),flash_q1,"integer")
flash_q2 = "2) What's the read length [-r flag]? (Default: 251) "
read_len = check_value(raw_input(flash_q2),flash_q2,"integer")
print "\nThe following questions are for split_libraries_fastq.py script: \n"
split_q1 = "1) What's the maximum unacceptable Phred quality score [-q flag]? (Default: 30) "
phred = check_value(raw_input(split_q1),split_q1,"integer")
split_q2 = "2) What's the max number of consecutive low quality base calls allowed before truncating a read [-r flag]? (Default: 3) "
max_bad_run = check_value(raw_input(split_q2),split_q2,"integer")
split_q3 = "3) What's the min number of consecutive high quality base calls to include a read (per single end read) as a fraction \
of the input read length [-p flag]? (Default: 0.75) "
min_rl_frac = check_value(raw_input(split_q3),split_q3,"float")
split_q4 = "4) What's the max number of N characters allowed in a sequence to retain it [-n flag]? (Default: 0) "
n_chars = check_value(raw_input(split_q4),split_q4,"integer")
split_q5 = "5) What's the type of barcode used [--barcode_type flag]? (Default: 12) "
barcode = check_value(raw_input(split_q5),split_q5,"integer")
split_q6 = "6) What's the start seq_ids as ascending integers beginning with start_seq_id [-s flag]? (Default: 0) "
start_seq = check_value(raw_input(split_q6),split_q6,"integer")
print "\nThe following questions are for pick_open_reference_otus.py script: \n"
otupick_q1 = "1) How many jobs do you wish to run in parallel? (Default: 4) "
parallel = check_value(raw_input(otupick_q1),otupick_q1,"integer")
ref_db = raw_input("2) What's the full path to the reference database? \
(Default: /data/Greengenes_Database_May_2013/gg_13_5_otus/rep_set/97_otus.fasta) ")
prefilt_id = raw_input("2) What's the prefilter_percent_id for sequences to cluster [pass 0.0 to disable]? \
(Default: 0.6) ")
startTime = datetime.now()
preprocess_steps(seq_data,m_min,read_len,inputfile)
split_library(inputfile,phred,max_bad_run,min_rl_frac,n_chars,barcode,start_seq) #0:29:38.925890
open_otus_till_biom(parallel,ref_db,prefilt_id)
viewtable = raw_input("The summary table of the final OTU table is ready. Type 'yes' to view it. \
Once viewed, you can quit by simply typing 'q'. Are you ready? ")
summary_view(viewtable)
print "\nThe following question is for 'single_rarefaction.py' script: \n"
depth = raw_input("1) What is the number of sequences to subsample per sample [-d flag]? (No default): ")
depth = rarefaction_check(depth)
print "\nThe following question is for 'summarize_taxa_through_plots.py', 'alpha_rarefaction.py', \
and 'beta_diversity_through_plots.py' script: \n"
merge_metadata = raw_input("1) What is the name of the final mapping data file for all runs [-m flag]? (No default): ")
merge_metadata_checked = mapping_check([merge_metadata])
summary_plots(depth,merge_metadata_checked[0])
log_parse("wrapper_log_file.txt","logging_module_output.txt")
os.system('rm logging_module_output.txt')
print "\n"+"Task Completed! Time it took to complete the task: "+ str(datetime.now()-startTime) #11:42:32.735675
except KeyboardInterrupt:
print "\n\nThanks for using (or attempting to use) the pipeline. Good-bye!\n" | true |
0aa388e91a8574d72e57f7fbeda8493834f093e9 | Python | rickharris-dev/hacker-rank | /algorithms/warmup/a_very_big_sum.py | UTF-8 | 153 | 3.25 | 3 | [] | no_license | #!/usr/bin/python
n = int(raw_input().strip())
arr = map(int,raw_input().strip().split(' '))
total = 0
for item in arr:
total += item
print total
| true |
e86a409cfbc250e31b3a5755ee7b40d07e2702c6 | Python | LynRodWS/reckoner | /reckoner/helm/provider.py | UTF-8 | 2,334 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2019 FairwindsOps Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from reckoner.command_line_caller import call
from .cmd_response import HelmCmdResponse
from reckoner.config import Config
class HelmProvider(object):
"""
HelmProvider is intended to be the simple way to run commands against helm and have standard responses.
Interface for a provider:
- class method of execute: returns a HelmCmdResponse
"""
def __init__(
self,
helm_command,
helm_binary="helm"):
"""Requires protocol of HelmCommand to respond to command and arguments."""
self._helm_command = helm_command
self._helm_binary = helm_binary
# TODO: Investigate if this is really the implementation i need for a provider (class methods with no access to the instance)
@classmethod
def execute(cls, helm_command):
"""Executed the command provided in the init. Only allowed to be executed once!"""
# initialize the instance of the provider
instance = cls(helm_command)
# start by creating a command line arguments list with the command being first
args = list([instance._helm_binary])
# if command has a space in it (like get manifests), split on space
# and append each segment as it's own list item to make `call` happy
for command_segment in instance._helm_command.command.split(' '):
args.append(command_segment)
for arg in instance._helm_command.arguments:
args.append(arg)
call_response = call(args, path=Config().course_base_directory)
return HelmCmdResponse(
exit_code=call_response.exitcode,
stdout=call_response.stdout,
stderr=call_response.stderr,
command=helm_command,
)
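# Illustrative usage sketch (not part of the original module): any object that
# exposes `.command` and `.arguments` satisfies the HelmCommand protocol that
# execute() expects. The import path and constructor below are assumptions, and
# the HelmCmdResponse fields are assumed to mirror the kwargs passed above.
#     from reckoner.helm.command import HelmCommand  # assumed location
#     response = HelmProvider.execute(HelmCommand('get manifest', ['my-release']))
#     print(response.stdout)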
| true |
19f8cf261d0d9243ff9778d9b1203650997f9e53 | Python | hedlesschkn/easy_filter | /scripts/lidarfilter.py | UTF-8 | 958 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python
#hello world
#Well done
import numpy as np
class LidarFilter():
def __init__(self, slices):
assert slices > 0
assert slices <= 36
self.slices = slices
self.deg_per_slice = 360 / slices
def data(self, data_array, min, max):
assert(len(data_array)==360)
darray = np.array(data_array)
self.raw = darray
darray[darray <= min] = np.nan
darray[darray > max] = np.nan
darray = np.roll(darray, self.deg_per_slice/2)
self.mean_per_slice = np.nanmean(darray.reshape((self.slices, -1)),axis=1)
self.min = np.nanmin(self.mean_per_slice)
self.minpos = np.nanargmin(self.mean_per_slice)
def printraw(self):
raw1 = self.raw.reshape((self.slices, -1))
#raw2 = raw1 * 100
#raw3 = raw2.astype(int)
np.set_printoptions(precision=1, linewidth=500, nanstr="n", infstr="i")
print(raw1)
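# Minimal usage sketch (added for illustration; a real scan would come from the
# robot's lidar driver rather than the dummy readings below):
if __name__ == '__main__':
    lf = LidarFilter(4)       # four 90-degree sectors
    scan = [1.0] * 360        # dummy range readings, one per degree
    lf.data(scan, 0.1, 5.0)   # keep readings between 0.1 and 5.0
    print(lf.min, lf.minpos)  # mean of the closest sector and its index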
| true |
b88bc3c9c8046ca5cbdd896f0ea75869c96621d6 | Python | stoimeniliev/LearnPythonTheHardWay | /game.py | UTF-8 | 1,182 | 3.265625 | 3 | [] | no_license | from random import randint
hero = {'name': 'Hero',  # display name read by damaged()
        'lvl': 1,
        'exp': 0,
        'nextlvl': 50,
        'dmg': [5, 12],
        'hp': 100,
        }
enemy = {'name': 'Enemy',
         'lvl': 1,
         'exp': 0,
         'nextlvl': 50,
         'dmg': [2, 6],
         'hp': 30,
         }
def lvlup(hero):
while hero['exp'] >= hero['nextlvl']:
hero['lvl'] += 1
        hero['exp'] = hero['exp'] - hero['nextlvl']  # carry surplus experience into the new level
hero['nextlvl'] = round(hero['nextlvl'] * 1.5)
hero['hp'] = hero['hp'] + round(hero['hp'] * 0.2)
        hero['dmg'] = [d + round(d * 0.3) for d in hero['dmg']]
    print("The level of the hero is %d, his damage is %d-%d, his hp is %d, exp to next level is %d" % (hero['lvl'], hero['dmg'][0], hero['dmg'][1], hero['hp'], hero['nextlvl']))
def damaged(player, monster):
atk = randint(player['dmg'][0], player['dmg'][1])
monster['hp'] = monster['hp'] - atk
if monster['hp'] <= 0:
print('{} has been slain'.format(monster['name']))
else:
print('{} takes {} damage!'.format(monster['name'], atk))
def commands(char, foe):
while True:
print('-------------------')
cmd = input('Do you want to attack? yes/no: ').lower()
if 'yes' in cmd:
damaged(char, foe)
elif 'no' in cmd:
            print('{} takes the opportunity to attack!'.format(foe['name'])) | true
840e66eff371450efc370dfe469a6e756a2afc3a | Python | USF-IMARS/imars-etl | /imars_etl/find.py | UTF-8 | 2,500 | 2.609375 | 3 | [] | no_license | import os
import copy
import logging
import sys
from imars_etl.Load.validate_args import validate_args
from imars_etl.util.config_logger import config_logger
def find(
directory,
verbose=0,
**kwargs
):
"""
    Lists all files under a directory whose inferred metadata matches the given criteria.
returns:
-------
filepath_list : str[]
list of all matching file paths found
Example usage:
imars_etl.find( TODO )
"""
config_logger(verbose)
logger = logging.getLogger("{}.{}".format(
__name__,
sys._getframe().f_code.co_name)
)
# logger.debug("searching w/ '{}'...".format(fmt))
kwargs["nohash"] = True # TODO rm?
# rm None values from kwargs (for set comparison later)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
filepath_list = []
matches = 0
unmatches = 0
for root, dirs, files in os.walk(directory):
for filename in files:
kwargs_copy = copy.deepcopy(kwargs)
try:
fpath = os.path.join(root, filename)
kwargs_copy['filepath'] = fpath
kwargs_copy = validate_args(kwargs_copy)
logger.debug("="*40)
logger.debug(fpath)
logger.debug("="*40)
# === throw exception if does not match
# filepath required
assert kwargs_copy['filepath'] is not None
# inferred metadata (now in kwargs_copy) must at least contain
# requirements passed to find (kwargs).
logger.debug(" === "*3)
logger.debug(kwargs)
logger.debug(" === "*3)
logger.debug(kwargs_copy)
assert(set(kwargs.items()).issubset(set(kwargs_copy.items())))
assert kwargs_copy['product_id'] is not None
assert kwargs_copy['area_id'] is not None
assert kwargs_copy['time'] is not None
print(fpath)
filepath_list.append(fpath)
matches += 1
except(SyntaxError, KeyError, AssertionError):
# SyntaxError from parser,
# KeyError from missing kwargs,
# AssertionError from requirements above
logger.debug("skipping {}...".format(fpath))
unmatches += 1
logger.info("{} matching files found out of {} searched.".format(
matches, unmatches + matches
))
return filepath_list
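# Illustrative call (added as an example; the directory and metadata values are
# hypothetical and depend on the parsers configured for imars_etl):
#     find("/srv/imars-objects/", verbose=1, product_id=35, area_id=1)
# prints and returns every file under the directory whose inferred metadata
# includes product_id=35 and area_id=1.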
| true |
0d81ccdcc75006207701db830c3342fba2146085 | Python | mongesan/Atcoder-m0_ngesan-py | /Beginner-Contest/ABC173/ABC173_C.py | UTF-8 | 357 | 2.75 | 3 | [] | no_license | def check(S):
n=0
for ch in S:
if ch=='#':
n+=1
return n
h,w,k=map(int, input().split())
c=[str(input()) for _ in range(h)]
hl=[]
wl=[]
cnt=0
for s in c:
tmp=check(s)
hl.append(tmp)
cnt+=tmp
for i in range(w):
s=str()
for j in range(h):
s+=c[j][i]
wl.append(check(s))
n=cnt-k
print(hl, wl, n)
| true |
cb4e7b2ae2c5ab0eaac985a14e44b7ddb5783857 | Python | AndreaMartinez0726/Tarea-3 | /Fourier2D.py | UTF-8 | 1,476 | 3.15625 | 3 | [] | no_license | import numpy as np
import matplotlib.pylab as plt
from scipy import ndimage
from scipy import fftpack
#________ Part 1: load the image __________
imagen=ndimage.imread("arbol.png")
#________ Part 2: 2D Fourier transform and its log-magnitude spectrum __________
Fourier=fftpack.fft2(imagen)
fm= Fourier.real**2+Fourier.imag**2
fm= (fm)**(1./2.)
fm=np.log(fm)
plt.figure()
plt.imshow(fm)
plt.axis('off')
plt.title('Transformada de Fourier')
plt.savefig('MartinezAndrea_FT2D.pdf')
#_________ Part 3: filter the transform _________
# Scale down the Fourier coefficients inside four small elliptical regions,
# proportionally to the squared distance from each region's centre, so that
# four isolated peaks in the spectrum are suppressed.
tam=np.size(Fourier,0)
tam1=np.size(Fourier,1)
for i in range(tam):
for j in range(tam1):
if (((i-10)/0.5)**2 + ((j-30)/2)**2 < 100):
Fourier[i,j] = Fourier[i,j]*((i-10)**2+(j-30)**2)/400
if (((i-60)/0.5)**2 + ((j-60)/2)**2 < 100):
Fourier[i,j] = Fourier[i,j]*((i-60)**2+(j-60)**2)/400
if (((i-190)/0.5)**2 + ((j-200)/2)**2 < 100):
Fourier[i,j] = Fourier[i,j]*((i-190)**2+(j-200)**2)/400
if (((i-245)/0.5)**2 + ((j-230)/2)**2 < 100):
Fourier[i,j] = Fourier[i,j]*((i-245)**2+(j-230)**2)/400
fm= Fourier.real**2+Fourier.imag**2
fm= (fm)**(1./2.)
fm=np.log(fm)
#_______ Part 4: plot the filtered transform _________
plt.figure()
plt.imshow(fm)
plt.title('Transformada de Fourier filtrada')
plt.axis('off')
plt.savefig('MartinezAndrea_FT2D_filtrada.pdf')
#_______ Part 5: inverse transform back to the filtered image ________
inversa=fftpack.ifft2(Fourier)
plt.figure()
plt.imshow(inversa.real,cmap=plt.cm.gray)
plt.title('Imagen filtrada')
plt.axis('off')
plt.savefig('MartinezAndrea_Imagen_filtrada.pdf')
| true |
953cef5ee5ebde4b77b0a0520d75073d868a76fd | Python | araghava92/comp-805 | /labs/week3/lab3-py-practive.py | UTF-8 | 4,012 | 4.3125 | 4 | [] | no_license | """
lab3 Python Practice
RAGHAVA ADUSUMILLI
2/13/2018
"""
from functools import reduce
def switch_case(str_list):
"""
    Maps each string in str_list to a new string with the same characters but
    with the case of its first letter swapped
str_list: list of strings
Returns: list of original strings with opposite casing for first letter
"""
return list(map(lambda x: x[0].swapcase() + x[1:], str_list))
def make_title(words):
"""
Maps words in a list to words in the same list, but as titled strings.
words: list of words
Returns: new list of titled words
"""
return list(map(lambda x: x.title(), words))
def three_times_nums(num_list):
"""
Maps numbers in the num_list to numbers that are 3 times the original value
num_list: list of numbers
    Returns: list of numbers that are three times the values in num_list
"""
return list(map(lambda x: x*3, num_list))
def square_nums(num_list):
"""
    Maps each number in num_list to its square
    num_list: list of numbers
    Returns: list of the squared values
"""
return list(map(lambda x: pow(x, 2), num_list))
def double_nums(num_list):
"""
Maps numbers in the num_list to their doubles
num_list: list of numbers
Returns: list of doubled numbers
"""
return list(map(lambda x: x*2, num_list))
def only_even(mixed_list):
"""
Filters out odd integers and strings that contain an odd number of
characters.
mixed_list: list of integers and/or strings
Returns: list of only integers and strings that are even or have an even
number of characters.
"""
return list(filter(lambda x: not bool(x % 2) if isinstance(x, int)
else (not bool(len(x) % 2) if isinstance(x, str) else False), mixed_list))
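# For instance (illustrative): only_even([1, 2, 'ab', 'abc']) returns [2, 'ab'],
# since 1 is odd and 'abc' has an odd number of characters.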
def test_title(names):
"""
Filters out capitalized and non-cap words into their respective lists.
names: list of names
Returns: both lists for review
"""
return (list(filter(lambda x: x.istitle(), names)),
list(filter(lambda x: not x.istitle(), names)))
def keep_lowercase(strs):
"""
Filters out strings that have uppercase values
strs: list of strings
Returns: list of strings that do not contain
uppercase values
"""
return list(filter(lambda x: not any(map(lambda y: y.isupper(), x)), strs))
def lessthan_5(num_list):
"""
Filters out numbers less than five
num_list: list of numbers
    Returns: the numbers from the original list that are greater than four
"""
return list(filter(lambda x: x > 4, num_list))
def remove_special_characters(string_list):
"""
Filters out strings that have non-alphanumeric elements
    string_list: list of strings
Returns: list of strings that have only letters or numbers in them
"""
return list(filter(lambda x: x.isalpha() or x.isdigit(), string_list))
def greatest_difference(num_list):
"""
    Finds the maximum and minimum numbers in num_list and computes the difference.
num_list: list of numbers
Returns: the difference between the maximum and minimum numbers in num_list
"""
num_list.sort()
return num_list[-1:][0] - num_list[0]
def create_word(letters):
"""
    Takes a list of characters and joins them into a single word.
    letters: list of letters
    Returns: string formed by concatenating the letters in order
"""
return reduce(lambda x, y: x + y, letters)
def multiplication_total_of(num_list):
"""
Multiplies all the numbers in num_list together and gives the total
num_list: list of numbers
Returns: the multiplied total of the numbers in the num_list
"""
return reduce(lambda x, y: x * y, num_list)
def subtraction_of(number_list):
"""
Subtracts the numbers in number_list
number_list: list of numbers
Returns: the difference of the numbers in the number_list
"""
return reduce(lambda x, y: x - y, number_list)
| true |
763b9764f674acc077a8858147f787ef8cd29988 | Python | OlivierGaillard/prestige-djangeurope | /inventory/management/commands/update_inventory.py | UTF-8 | 794 | 2.515625 | 3 | [] | no_license | from django.core.management.base import BaseCommand
from django.conf import settings
from inventory.models import Article
class Command(BaseCommand):
"""
    The command copies the French fields 'prix_total', 'quantite' and 'nom' into
    the English fields 'purchasing_price', 'quantity' and 'name'.
"""
    help = 'update English fields of inventory Article'
def handle(self, *args, **options):
count = 0
articles_count = Article.objects.count()
print("Updating purchasing prices, names and quantity of %s articles..." % articles_count)
for a in Article.objects.all():
a.purchasing_price = a.prix_total
a.quantity = a.quantite
a.name = a.nom
a.save()
count += 1
print("Updated purchasing prices, quantity and name of %s articles..." % count)
| true |
6cfdba8ce1302ecf69127eb8fb5bc4d8269235fc | Python | ArtemZaZ/OldAllPython-projects | /Projects/BadProjects/Johny/Jonny_JoyV01.py | UTF-8 | 1,963 | 2.6875 | 3 | [] | no_license | import RTCjoystic
import time
import threading
class Jonny_Joystic(threading.Thread):
def __init__(self, M):
threading.Thread.__init__(self)
self.Joy=RTCjoystic.Joystick_master()
self.Joy.start()
self.EXIT=False
self.L=0
self.R=0
self.M=M
time.sleep(3)
def convert_speed(self):
temp_axis=self.Joy.get_axis()
if(temp_axis!=None):
x_temp=-temp_axis.get('x')
y_temp=-temp_axis.get('y')
trottle_temp=temp_axis.get('trottle')
x=int(100*x_temp)
y=int(100*y_temp)
trottle=((trottle_temp+1.0)/2)
x = ((x+2)//20)*20
y = ((y+2)//20)*20
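            # Differential ("tank") mixing: the forward component y plus/minus the
            # turn component x gives the right/left wheel speeds; note that all
            # four branches below apply the same formula.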
if x<=0:
if y<=0:
speedR = y + x
speedL = y - x
if y>0:
speedR = y + x
speedL = y - x
if x>0:
if y<=0:
speedR = y + x
speedL = y - x
if y>0:
speedR = y + x
speedL = y - x
speedR=int(trottle*speedR/2)
speedL=int(trottle*speedL/2)
self.R = -speedR
self.L = -speedL
else:
self.R = 0
self.L = 0
def run(self):
while(not self.EXIT):
tempR=self.R
tempL=self.L
self.convert_speed()
if((tempR!=self.R) or (tempL!=self.L)):
self.M.MotorA(self.L)
self.M.MotorB(self.R)
#print("self.L", self.L)
#print("self.R", self.R)
time.sleep(0.5)
def Exit(self):
self.EXIT=True
self.Joy.Exit()
| true |
40b6a42649a3255d4683840a1b86b118c103cc88 | Python | WMRGL/IdentityCheck | /compare_vcfs.py | UTF-8 | 8,199 | 2.75 | 3 | [] | no_license | """
Script to compare MassArray and WGS intersected VCF results.
Sarah Burns & Chipo Mashayamombe-Wolfgarten 29 Jan 2019
"""
from ruffus import *
import vcf
import pandas as pd
import re
import glob
import os
from datetime import datetime
import argparse
arg_parser = argparse.ArgumentParser(description='Scripts to compare massarray and wgs vcf results.')
arg_parser.add_argument('-powers', default='/network/processed/100K_VCFs/hg19_total_af_power.txt')
arg_parser.add_argument('-dest', default='/network/processed/100K_VCFs/archive')
args = arg_parser.parse_args()
output_excel = 'IdentityCheck_%s.xlsx' % datetime.now().strftime('%Y%m%d_%H%M')
def get_alleles_from_genotype(ordered_alleles, genotype):
""" Determine alleles from genotype and list of possible alleles.
E.g. genotype 0/1 and alleles ['A', 'G', 'C'] where A is ref => AG
"""
alleles = ''
genotype_list = genotype.split('/')
for idx in genotype_list:
if idx != '.':
alleles += ordered_alleles[int(idx)]
return alleles
def get_risk(fraction):
"""
    Calculate the risk: the chance of two samples having the exact same genotype
    profile, returned as a '1 in N' string.
"""
my_frac = str(fraction)
# count the number of zeros after the decimal point
zeros = len(re.search("\.(0*)", my_frac).group(1))
# create a string with 1 and (the number of zeros+1)
new_str = '1' + ('0' * (zeros + 1))
# convert new_str and my_frac to numeric and multiply them to get the numerator
numerator = pd.to_numeric(my_frac) * pd.to_numeric(new_str)
# divide the new string by the numerator to get a 1 in ...
denominator = int(pd.to_numeric(new_str) / numerator)
denominator = '{0:,}'.format(denominator)
return '1 in {}'.format(denominator)
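# Worked example (illustrative): get_risk(0.0004) finds three zeros after the
# decimal point, so new_str is '10000', the numerator is 0.0004 * 10000 = 4.0,
# and the denominator is int(10000 / 4.0) = 2500, giving '1 in 2,500'.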
def calculate_power(snp_list):
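    """Multiply the per-SNP probabilities of identity for the SNPs in snp_list
    and return (power of exclusion, '1 in N' risk string)."""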
# read in the file with precalculated snp-specific probabilities
tot_power = pd.read_table(args.powers, sep='\t')
# take a subset of SNPs that have been called in the sample
snp_subset = tot_power.loc[tot_power['ID'].isin(snp_list)].reset_index(drop=True)
# calculate the probability of identity and the power of exclusion
id_prob = pd.to_numeric(snp_subset['Probability of Identity']).prod()
risk = get_risk(id_prob)
power = 1 - id_prob
return power, risk
def archive_files(sample):
for f in glob.glob('*%s*' % sample):
        # move the file into the archive directory (no compression is applied here)
os.rename(f, os.path.join(args.dest, f))
@collate(['*.vcf'], regex('(.+GRCh3[78]).intersected.vcf'), add_inputs(r'\1.massarray.vcf'), output_excel)
def collate_vcfs(infiles, outfile):
""" Pair massarray and wgs vcfs with same build and parse all results to excel.
"""
main_cols = ['SAMPLE', 'ARRAY CALLS', 'WGS CALLS', 'ALL MATCHES', 'PROBABILITY OF UNIQUENESS', 'ODDS RATIO',
'HIGH QUAL MATCHES', 'HIGH QUAL PROBABILITY OF UNIQUENESS', 'HIGH QUAL ODDS RATIO']
mismatch_cols = ['SAMPLE', 'SNP', 'WGS GENOTYPE', 'MASSARRAY GENOTYPE', 'QUALITY OF CALL', 'VCF FILTER']
main_df = pd.DataFrame(columns=main_cols)
mismatch_df = pd.DataFrame(columns=mismatch_cols)
all_samples = []
for (wgs_vcf, array_vcf) in infiles:
# Get lab number
try:
sample_name = re.search(r'D\d{2}.\d{5}', wgs_vcf).group(0)
except AttributeError:
sample_name = wgs_vcf.split('.')[0].split('_')[0]
all_samples.append(sample_name)
array_results = {}
wgs_results = {}
coords_to_snp = {}
# Parse required array results into dict e.g. { 'rs123': { 'alleles': 'AG', 'quality': 'A', 'filter': '.' } }
array_reader = vcf.Reader(open(array_vcf, 'r'))
for record in array_reader:
snp_id = record.ID
vcf_filter = ','.join(record.FILTER)
alleles = [str(x) for x in record.ALT]
alleles.insert(0, str(record.REF))
coords_to_snp[(record.CHROM, record.POS)] = snp_id
for sample in record.samples:
gt = sample['GT']
quality = sample['MTQ']
alleles_in_sample = get_alleles_from_genotype(alleles, gt)
array_results[snp_id] = {
'alleles': ''.join(sorted(alleles_in_sample)), 'quality': quality, 'filter': vcf_filter
}
# Parse required wgs results into dict e.g. { 'rs123': 'AG' }
wgs_reader = vcf.Reader(open(wgs_vcf, 'r'))
for record in wgs_reader:
key = ('chr' + record.CHROM, record.POS)
if key in coords_to_snp:
snp_id = coords_to_snp[key]
alleles = [str(x) for x in record.ALT]
alleles.insert(0, record.REF)
for sample in record.samples:
gt = sample['GT']
alleles_in_sample = get_alleles_from_genotype(alleles, gt)
wgs_results[snp_id] = ''.join(sorted(alleles_in_sample))
total_snps = 0
array_calls = 0
wgs_calls = []
all_matches = []
high_quality_matches = []
# Compare array results to wgs
for key, value in array_results.items():
total_snps += 1
if value['alleles']:
array_calls += 1 # count of snps genotyped by array
if key in wgs_results:
wgs_calls.append(key) # list of snps called by wgs
wgs_genotype = wgs_results[key]
if wgs_genotype == value['alleles']: # if match
all_matches.append(key)
if value['quality'] in ['A', 'B']: # A and B are high quality calls
high_quality_matches.append(key)
else:
mismatch_temp_df = pd.DataFrame(
[[sample_name, key, wgs_genotype, value['alleles'], value['quality'], value['filter']]],
columns=mismatch_cols
)
mismatch_df = mismatch_df.append(mismatch_temp_df)
# calculate probabilities
all_prob, all_risk = calculate_power(all_matches)
high_qual_prob, high_qual_risk = calculate_power(high_quality_matches)
temp_df = pd.DataFrame(
[[
sample_name,
'%s/%s' % (array_calls, total_snps),
'%s/%s' % (len(wgs_calls), total_snps),
'%s/%s' % (len(all_matches), len(wgs_calls)),
all_prob,
all_risk,
'%s/%s' % (len(high_quality_matches), len(wgs_calls)),
high_qual_prob,
high_qual_risk
]],
columns=main_cols
)
main_df = main_df.append(temp_df)
writer = pd.ExcelWriter(outfile)
workbook = writer.book
fail_format = workbook.add_format({'bg_color': '#FFC7CE', 'font_color': '#9C0006'})
main_df.to_excel(writer, index=False, sheet_name='IdentityCheck')
main_ws = writer.sheets['IdentityCheck']
main_ws.set_column('A:A', 18)
main_ws.set_column('B:B', 12)
main_ws.set_column('C:C', 11)
main_ws.set_column('D:D', 13)
main_ws.set_column('E:E', 28)
main_ws.set_column('F:F', 15)
main_ws.set_column('G:G', 20)
main_ws.set_column('H:H', 39)
main_ws.set_column('I:I', 24)
main_ws.conditional_format(
'D2:D%s' % (len(infiles) + 1),
{'type': 'formula', 'criteria': '=IF(LEFT(D2,SEARCH("/",D2)-1)/MID(D2,SEARCH("/",D2)+1,99)<1,TRUE,FALSE)',
'format': fail_format}
) # highlight cells in red where number of matches < number of shared snp calls
mismatch_df.to_excel(writer, index=False, sheet_name='Mismatches')
mismatch_ws = writer.sheets['Mismatches']
mismatch_ws.set_column('A:A', 18)
mismatch_ws.set_column('B:B', 10)
mismatch_ws.set_column('C:C', 15)
mismatch_ws.set_column('D:D', 22)
mismatch_ws.set_column('E:E', 16)
mismatch_ws.set_column('F:F', 15)
writer.save()
# move files to archive once processed
if os.path.exists(outfile) and os.path.getsize(outfile) > 0:
for s in all_samples:
archive_files(s)
pipeline_run()
| true |
b139806e7cdb5ec9d19dfa7b0a5b10f2238716d4 | Python | jackdewinter/pymarkdown | /test/nested_three/test_markdown_nested_three_block_block_ordered_max.py | UTF-8 | 144,684 | 2.828125 | 3 | [
"MIT"
] | permissive | """
Extra tests.
"""
from test.utils import act_and_assert
import pytest
# pylint: disable=too-many-lines
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> > item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_drop_ordered_x():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> > item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > ]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: :\n]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_with_li():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> > 1. item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[olist(1,14):.:1:16: ]",
"[para(1,17):]",
"[text(1,17):list:]",
"[end-para:::True]",
"[li(2,14):16: :1]",
"[para(2,17):]",
"[text(2,17):item:]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list</li>
<li>item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_with_li_and_nl():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> > 1. item
again"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):]",
"[text(1,17):list:]",
"[end-para:::True]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nagain::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list</li>
<li>item
again</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_with_li_and_nl_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> > 1. item
again"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):]",
"[text(1,17):list:]",
"[end-para:::True]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nagain::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list</li>
<li>item
again</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_with_li_and_nl_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> > 1. item
again"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):]",
"[text(1,17):list:]",
"[end-para:::True]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nagain::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list</li>
<li>item
again</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_with_li_and_nl_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> > 1. item
again"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):]",
"[text(1,17):list:]",
"[end-para:::True]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nagain::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list</li>
<li>item
again</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_with_li_and_nl_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> > 1. item
again"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: :\n]",
"[para(1,17):]",
"[text(1,17):list:]",
"[end-para:::True]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nagain::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list</li>
<li>item
again</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1.
> > item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[olist(1,14):.:1:16: : ]",
"[BLANK(1,16):]",
"[para(2,17):]",
"[text(2,17):item:]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_drop_ordered_x():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1.
> > item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[olist(1,14):.:1:16: : ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[para(2,14): ]",
"[text(2,14):item:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
<p>item</p>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_with_space_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1.
> > item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[olist(1,14):.:1:16: : ]",
"[BLANK(1,17):]",
"[end-olist:::True]",
"[para(2,14): ]",
"[text(2,14):item:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
<p>item</p>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_drop_ordered_no_item_indent():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1.
> > item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[olist(1,14):.:1:16: ]",
"[end-olist:::False]",
"[BLANK(1,16):]",
"[para(2,11):]",
"[text(2,11):item:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
<p>item</p>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1.
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[para(2,9): ]",
"[text(2,9):item:]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<p>item</p>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_with_li():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> > 1. item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[li(2,14):16: :1]",
"[para(2,17):]",
"[text(2,17):item:]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
<li>item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_with_li_and_nl():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> > 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: : ]",
"[BLANK(1,16):]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nlist::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
<li>item
list</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_with_li_and_nl_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> > 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: : ]",
"[BLANK(1,16):]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nlist::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
<li>item
list</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_with_li_and_nl_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> > 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: : ]",
"[BLANK(1,16):]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nlist::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
<li>item
list</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_with_li_and_nl_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> > 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: : ]",
"[BLANK(1,16):]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nlist::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
<li>item
list</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_with_li_and_nl_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, and
no text on the first line, with
the maximum number of spaces allowed works properly, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> > 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > \n]",
"[olist(1,14):.:1:16: :\n]",
"[BLANK(1,16):]",
"[li(2,14):16: :1]",
"[para(2,17):\n]",
"[text(2,17):item\nlist::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
<li>item
list</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\n\a>\a>\a item::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
> item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\n\a>\a>\a item::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
> item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: :\n]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_with_li():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\n\a>\a>\a 1. item::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
> 1. item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_with_li_and_nl():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
another"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n\a>\a>\a 1. item\nanother::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
> 1. item
another</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_with_li_and_nl_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
another"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n\a>\a>\a 1. item\nanother::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
> 1. item
another</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_with_li_and_nl_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
another"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n\a>\a>\a 1. item\nanother::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
> 1. item
another</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_with_li_and_nl_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
another"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n\a>\a>\a 1. item\nanother::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
> 1. item
another</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq1_with_li_and_nl_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
another"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n\a>\a>\a 1. item\nanother::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
> 1. item
another</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):\a>\a>\a item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> > item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):\a>\a>\a item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> > item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_with_li():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):\a>\a>\a 1. item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> > 1. item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_with_li_and_nl():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :\n ]",
"[text(2,5):\a>\a>\a 1. item\n list: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> > 1. item
list
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_with_li_and_nl_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :\n ]",
"[text(2,5):\a>\a>\a 1. item\n list: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> > 1. item
list
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_with_li_and_nl_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :\n ]",
"[text(2,5):\a>\a>\a 1. item\n list: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> > 1. item
list
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_with_li_and_nl_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):\a>\a>\a 1. item: ]",
"[end-icode-block:::False]",
"[para(3,4): ]",
"[text(3,4):list:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> > 1. item
</code></pre>
<p>list</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq1_with_li_and_nl_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):\a>\a>\a 1. item: ]",
"[end-icode-block:::False]",
"[para(3,1):]",
"[text(3,1):list:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> > 1. item
</code></pre>
<p>list</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
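    # The open paragraph inside the ordered list item absorbs line 2 as a continuation,
    # so "item" is expected to stay in the same <li> even though the block quote
    # markers are not repeated in full.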
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > ]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > ]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > ]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: :\n]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_with_li():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > ]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\n1. item::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_with_li_and_nl():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > \n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_with_li_and_nl_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > \n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_with_li_and_nl_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > \n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_with_li_and_nl_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > \n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq2_with_li_and_nl_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
> 1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > \n]",
"[olist(1,14):.:1:16: : \n\n]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
> item"""
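    # Only the outer block quote marker survives on line 2; after the empty list item,
    # the remaining indentation is expected to produce an indented code block that
    # still sits inside the outer block quote.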
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):item: ]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<pre><code> item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):item: ]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<pre><code> item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[para(2,9): ]",
"[text(2,9):item:]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<p>item</p>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_with_li():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):1. item: ]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<pre><code> 1. item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_with_li_and_nl():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):1. item: ]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[icode-block(3,5): :]",
"[text(3,5):list: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<pre><code> 1. item
</code></pre>
</blockquote>
<pre><code> list
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_with_li_and_nl_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):1. item: ]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[icode-block(3,5): :]",
"[text(3,5):list: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<pre><code> 1. item
</code></pre>
</blockquote>
<pre><code> list
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_with_li_and_nl_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):1. item: ]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[icode-block(3,5): :]",
"[text(3,5):list: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<pre><code> 1. item
</code></pre>
</blockquote>
<pre><code> list
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_with_li_and_nl_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):1. item: ]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(3,4): ]",
"[text(3,4):list:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<pre><code> 1. item
</code></pre>
</blockquote>
<p>list</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq2_with_li_and_nl_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
> 1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):1. item: ]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(3,1):]",
"[text(3,1):list:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
<pre><code> 1. item
</code></pre>
</blockquote>
<p>list</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: :\n]",
"[para(1,17):\n]",
"[text(1,17):list\nitem::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_with_li():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
1. item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n]",
"[olist(1,14):.:1:16: : ]",
"[para(1,17):\n]",
"[text(1,17):list\n1. item::\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_with_li_and_nl():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_with_li_and_nl_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_with_li_and_nl_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_with_li_and_nl_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n ]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_no_bq3_with_li_and_nl_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1. list
1. item
redux"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n\n]",
"[olist(1,14):.:1:16: : \n\n]",
"[para(1,17):\n\n]",
"[text(1,17):list\n1. item\nredux::\n\n]",
"[end-para:::True]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li>list
1. item
redux</li>
</ol>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1.
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_with_li():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
1. item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):1. item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> 1. item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_with_li_and_nl():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :\n ]",
"[text(2,5):1. item\n list: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> 1. item
list
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_with_li_and_nl_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :\n ]",
"[text(2,5):1. item\n list: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> 1. item
list
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_with_li_and_nl_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :\n ]",
"[text(2,5):1. item\n list: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> 1. item
list
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_with_li_and_nl_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):1. item: ]",
"[end-icode-block:::False]",
"[para(3,4): ]",
"[text(3,4):list:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> 1. item
</code></pre>
<p>list</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_empty_no_bq3_with_li_and_nl_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces allowed works properly, and no text on the first line,
with no block quote characters on the second line, with a list item.
"""
# Arrange
source_markdown = """ > > 1.
1. item
list"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[olist(1,14):.:1:16: ]",
"[BLANK(1,16):]",
"[end-olist:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):1. item: ]",
"[end-icode-block:::False]",
"[para(3,1):]",
"[text(3,1):list:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<ol>
<li></li>
</ol>
</blockquote>
</blockquote>
<pre><code> 1. item
</code></pre>
<p>list</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> > item"""
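    # Four spaces before the first ">" exceed the three-space maximum, so no block
    # quote is opened and both lines are expected to render as an indented code block.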
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n\a>\a>\a \a>\a>\a item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
> > item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> > item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n\a>\a>\a \a>\a>\a item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
> > item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n\a>\a>\a item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\nitem:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<pre><code>> > 1. list
</code></pre>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq1():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n \a>\a>\a item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq1_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n \a>\a>\a item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq1_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq1_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\nitem:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq1_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<pre><code>> > 1. list
</code></pre>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq2():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n\a>\a>\a item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq2_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n\a>\a>\a item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq2_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n\a>\a>\a item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq2_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\nitem:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq2_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<pre><code>> > 1. list
</code></pre>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq3():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq3_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq3_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\n item:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq3_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :\n ]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list\nitem:]",
"[end-icode-block:::True]",
]
expected_gfm = """<pre><code>> > 1. list
item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_plus_one_block_max_ordered_max_no_bq3_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the first) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[icode-block(1,5): :]",
"[text(1,5):\a>\a>\a \a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<pre><code>> > 1. list
</code></pre>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> > item"""
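    # The extra space before the second ">" pushes it past the indented-code threshold
    # inside the outer block quote, so everything after the first marker is expected to
    # render as an indented code block (literal ">" included).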
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[icode-block(1,10): :\n ]",
"[text(1,10):\a>\a>\a 1. list\n\a>\a>\a item:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
> item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> > item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[icode-block(1,10): :\n ]",
"[text(1,10):\a>\a>\a 1. list\n\a>\a>\a item:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
> item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[icode-block(1,10): :\n ]",
"[text(1,10):\a>\a>\a 1. list\nitem:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq1():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
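    # Line 2 is indented too far for its ">" to count as a block quote marker, so the
    # block quote closes and a new indented code block is expected to start at the
    # document level.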
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):\a>\a>\a item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<pre><code> > item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq1_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):\a>\a>\a item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<pre><code> > item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq1_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq1_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq1_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq2():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[icode-block(1,10): :\n ]",
"[text(1,10):\a>\a>\a 1. list\n item:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq2_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[icode-block(1,10): :\n ]",
"[text(1,10):\a>\a>\a 1. list\n item:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq2_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[icode-block(1,10): :\n ]",
"[text(1,10):\a>\a>\a 1. list\nitem:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq2_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq2_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq3():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq3_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq3_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq3_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_plus_one_ordered_max_no_bq3_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the second) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[icode-block(1,10): :]",
"[text(1,10):\a>\a>\a 1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<pre><code>> 1. list
</code></pre>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> > item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[icode-block(1,15): :\n ]",
"[text(1,15):1. list\n item:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
item
</code></pre>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> > item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > \n > > ]",
"[icode-block(1,15): :\n ]",
"[text(1,15):1. list\nitem:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
item
</code></pre>
</blockquote>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,9): ]",
"[text(2,9):item:]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
<p>item</p>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq1():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):\a>\a>\a item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<pre><code> > item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq1_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):\a>\a>\a item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<pre><code> > item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq1_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq1_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq1_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq2():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):item: ]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
<pre><code> item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq2_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[icode-block(2,10): :]",
"[text(2,10):item: ]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
<pre><code> item
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq2_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
> item"""
expected_tokens = [
"[block-quote(1,4): : > \n > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[para(2,9): ]",
"[text(2,9):item:]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
<p>item</p>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq2_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq2_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq3():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq3_drop_ordered():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq3_drop_ordered_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::False]",
"[end-block-quote:::False]",
"[end-block-quote:::False]",
"[icode-block(2,5): :]",
"[text(2,5):item: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<pre><code> item
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq3_drop_ordered_block_block():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,4): ]",
"[text(2,4):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_nested_three_block_max_block_max_ordered_max_plus_one_no_bq3_drop_ordered_block_block_all():
"""
Verify that a nesting of block quote, block quote, ordered list, with
the maximum number of spaces (plus one for the third) allowed works properly,
with no block quote characters on the second line.
"""
# Arrange
source_markdown = """ > > 1. list
item"""
expected_tokens = [
"[block-quote(1,4): : > ]",
"[block-quote(1,9):: > > ]",
"[icode-block(1,15): :]",
"[text(1,15):1. list:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[end-block-quote:::True]",
"[para(2,1):]",
"[text(2,1):item:]",
"[end-para:::True]",
]
expected_gfm = """<blockquote>
<blockquote>
<pre><code>1. list
</code></pre>
</blockquote>
</blockquote>
<p>item</p>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| true |
afc36460be0a5cea8306f4fea83b333cbc4407b6 | Python | conormccauley1999/CompetitiveProgramming | /Kattis/phonelist.py | UTF-8 | 264 | 3.203125 | 3 | [] | no_license | def c(ps):
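    """Return True if no number is a prefix of another; after sorting, any prefix pair ends up adjacent."""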
ps.sort()
for i in range(1, len(ps)):
if ps[i].startswith(ps[i - 1]): return False
return True
t = int(raw_input())
for x in range(0, t):
n = int(raw_input())
ps = [str(raw_input()) for y in range(0, n)]
print "YES" if c(ps) else "NO" | true |
afdb5fba12ab27ce8bd48f6e08c29d287b5a8be9 | Python | lucidworks/fusion-seed-app | /pipelines.py | UTF-8 | 151 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
import sys
# read the command and its source from the command-line arguments
command = sys.argv[1];
source = sys.argv[2];
print "Command: ", command;
print "Source: ", source; | true |
75ef322b6a60aad7b304b4436415ca7d6dd4b846 | Python | merantix-momentum/squirrel-core | /test/test_fsspec/test_custom_fs.py | UTF-8 | 1,038 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | import fsspec
import pytest
from squirrel.constants import FILESYSTEM, URL, SQUIRREL_BUCKET
from squirrel.fsspec.custom_gcsfs import CustomGCSFileSystem
from squirrel.fsspec.fs import get_fs_from_url
@pytest.fixture
def fs(test_gcs_url: URL) -> FILESYSTEM:
"""Return an instance of custom gcsfs."""
return get_fs_from_url(test_gcs_url)
def test_make_connection(fs: FILESYSTEM) -> None:
"""Test connection to custom gcsfs by gcs.ls command."""
fs.ls(SQUIRREL_BUCKET)
def test_simple_upload(fs: FILESYSTEM, test_gcs_url: URL) -> None:
"""Test a simple upload case in gcs."""
file = f"{test_gcs_url}/test_file"
with fs.open(file, "wb", content_type="text/plain") as f:
f.write(b"random word")
with fs.open(file, "wb") as f:
f.write(b"random word")
assert fs.cat(file) == b"random word"
def test_fsspec_returns_custom_gcsfs() -> None:
"""Tests that fsspec returns our custom filesystem for gs:// protocol."""
assert isinstance(fsspec.filesystem("gs"), CustomGCSFileSystem)
| true |
fcea35f50e5f6c50c476a3c2b59b7ec104ca5cd0 | Python | hongyong3/TIL | /Algorithm/Swea/D3_3282.py | UTF-8 | 796 | 2.96875 | 3 | [] | no_license | import sys
sys.stdin = open("D3_3282_input.txt", "r")
def knapsack(n, k, data):
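    # Classic 0/1 knapsack DP: ans[i][w] = best total value using the first i items within capacity w.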
    ans = [[0 for x in range(k + 1)] for x in range(n + 1)]
    for i in range(n + 1):
        for w in range(k + 1):
if i == 0 or w == 0:
ans[i][w] = 0
elif data[i - 1][0] <= w:
ans[i][w] = max(data[i - 1][1] + ans[i - 1][w - data[i - 1][0]], ans[i - 1][w])
else:
ans[i][w] = ans[i - 1][w]
return ans[n][k]
T = int(input())
for test_case in range(T):
    N, K = map(int, input().split())  # N: number of items, K: knapsack capacity
    data = [list(map(int, input().split())) for _ in range(N)]  # data[i][0]: item volume, data[i][1]: item value
print("#{} {}".format(test_case + 1, knapsack(N, K, data))) | true |
eaabf59020384ff56ba32ebb1e28392a4c000114 | Python | caimengyuan/MachineLearning | /HMM/hmm.py | UTF-8 | 3,629 | 3.265625 | 3 | [] | no_license | import numpy as np
from hmmlearn import hmm
states = ["box 1", "box 2", "box 3"] #状态
n_states = len(states)
observations = ["red", "white"]  # observation symbols
n_observation = len(observations)
start_probability = np.array([0.2, 0.4, 0.4])  # initial state probability vector
transition_probability = np.array([  # state transition probability matrix A
[0.5, 0.2, 0.3],
[0.3, 0.5, 0.2],
[0.2, 0.3, 0.5]
])
emission_probability = np.array([  # observation (emission) probability matrix B
[0.5, 0.5],
[0.4, 0.6],
[0.7, 0.3]
])
model = hmm.MultinomialHMM(n_components=n_states)  # n_components is the assumed number of hidden states
model.startprob_ = start_probability  # corresponds to the initial hidden-state distribution
model.transmat_ = transition_probability  # corresponds to the state transition matrix A
model.emissionprob_ = emission_probability  # corresponds to the observation probability matrix B
# if __name__ == '__main__':
# seen = np.array([[0,1,0]]).T
#     logprob, box = model.decode(seen, algorithm="viterbi")  # use the Viterbi algorithm
# print("The ball picked:", ", ".join(map(lambda x: str(np.array(observations)[x]), seen)))
# print("The hidden box", ", ".join(map(lambda x: str(np.array(states)[x]), box)))
# if __name__ == '__main__':
# seen = np.array([[0, 1, 0]]).T
# box2 = model.predict(seen)
# print("The ball picked:", ", ".join(map(lambda x: str(np.array(observations)[x]), seen)))
# print("The hidden box", ", ".join(map(lambda x: str(np.array(states)[x]), box2)))
# Problem 1: Evaluation (likelihood of an observation sequence)
# if __name__ == '__main__':
# seen = np.array([[0,1,0]]).T
#     print(model.score(seen))  # score returns the log probability (natural logarithm)
# Problem 2: Learning (parameter estimation)
# model2 = hmm.MultinomialHMM(n_components=n_states, n_iter=20, tol=0.01)
# X2 = np.array([[0, 1, 0, 1], [0, 0, 0, 1], [1, 0, 1, 1]])
# model2.fit(X2)
# print(model2.startprob_)
# print(model2.transmat_)
# print(model2.emissionprob_)
# print(model2.score(X2))
# model2.fit(X2)
# print(model2.startprob_)
# print(model2.transmat_)
# print(model2.emissionprob_)
# print(model2.score(X2))
# model2.fit(X2)
# print(model2.startprob_)
# print(model2.transmat_)
# print(model2.emissionprob_)
# print(model2.score(X2))
'''
Example 2
'''
startprob = np.array([0.6, 0.3, 0.1, 0.0])
#The transition matrix, note that there are no transitions possible
#between component 1 and 3
transmat = np.array([[0.7, 0.2, 0.0, 0.1],
[0.3, 0.5, 0.2, 0.0],
[0.0, 0.3, 0.5, 0.2],
[0.2, 0.0, 0.2, 0.6]])
#The means of each component
means = np.array([[0.0, 0.0],
[0.0, 11.0],
[9.0, 10.0],
[11.0, -1.0]])
#The covariance of each component
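# Here: four 2x2 matrices, each 0.5 * identity, stacked into shape (4, 2, 2) (one per hidden state).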
covars = .5 * np.tile(np.identity(2), (4, 1, 1))
#Build an HMM instance and set parameters
model3 = hmm.GaussianHMM(n_components=4, covariance_type="full")  # "full" means each state's mean and full covariance matrix are specified
# "spherical": off-diagonal covariance entries are 0 and the diagonal entries are equal; "diag": off-diagonal entries are 0 but diagonal entries may differ; "tied": all hidden states share the same covariance matrix
#Instead of fitting it from the data, we directly set the estimated
#parameters, the means and covariance of components
model3.startprob_ = startprob
model3.transmat_ = transmat
model3.means_ = means
model3.covars_ = covars
if __name__ == '__main__':
seen = np.array([[1.1, 2.0], [-1, 2.0], [3, 7]])
logprob, state = model3.decode(seen, algorithm="viterbi")
print(state) | true |
903ed11161736abdc67265a8faa5f9e3c5119e3d | Python | shadrqen/Load-Prediction-Model | /models/model.py | UTF-8 | 4,255 | 3.53125 | 4 | [] | no_license | from models.imports import *
from models.data import data
# Fill missing (empty or NaN) values in each column
data['Gender'].fillna(data['Gender'].mode()[0], inplace=True)
data['Married'].fillna(data['Married'].mode()[0], inplace=True)
data['Dependents'].fillna(data['Dependents'].mode()[0], inplace=True)
data['Loan_Amount_Term'].fillna(data['Loan_Amount_Term'].mode()[0], inplace=True)
data['Credit_History'].fillna(data['Credit_History'].mode()[0], inplace=True)
data['LoanAmount'].fillna(data['LoanAmount'].mean(), inplace=True)
data['Self_Employed'].fillna('No',inplace=True)
# Since sklearn requires all inputs to be numeric, convert the categorical variables by encoding their categories.
var_mod = ['Gender','Married','Dependents','Education','Self_Employed','Property_Area','Loan_Status']
le = LabelEncoder()
for i in var_mod:
data[i] = le.fit_transform(data[i])
# Generic function for making a classification model and accessing performance:
def classification_model(model, data, predictors, outcome):
# Fit the model:
model.fit(data[predictors], data[outcome])
datachoice = input("Which test data should we use? \n"
"1. The training set \n"
"2. Enter Manually")
if int(datachoice) == 1:
# Make predictions on training set:
predictions = model.predict(data[predictors])
        # Print accuracy on the training set
        accuracy = metrics.accuracy_score(predictions, data[outcome])
        print("Accuracy : %s" % "{0:.3%}".format(accuracy))
        # Perform repeated 5-fold cross-validation
        kf = RepeatedKFold(n_splits=5, n_repeats=10, random_state=None)
error = []
for train, test in kf.split(np.zeros(data.shape[0])):
# Filter training data
train_predictors = (data[predictors].iloc[train, :])
# The target we're using to train the algorithm.
train_target = data[outcome].iloc[train]
# Training the algorithm using the predictors and target.
model.fit(train_predictors, train_target)
# Record error from each cross-validation run
error.append(model.score(data[predictors].iloc[test, :], data[outcome].iloc[test]))
print("Cross-Validation Score : %s" % "{0:.3%}".format(np.mean(error)))
print("\n")
    elif int(datachoice) == 2:
try:
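            # Note: model.predict receives these values as entered, so they must match the numeric
            # encoding used in training (e.g. Credit_History as 0/1, the rest as label-encoded integers).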
credithistory = input("Credit History: ")
education = input("Education")
married = input("Married")
selfemployed = input("Self Employed")
propertyarea = input("Property_Area")
# data2predict = np.array([credithistory,education,married,selfemployed,propertyarea])
data2predict = {'Credit_History': [credithistory], 'Education': [education], 'Married': [married],
'Self_Employed': [selfemployed], 'Property_Area': [propertyarea]}
dataframe = pd.DataFrame(data=data2predict)
predictions = model.predict(dataframe)
print(dataframe)
print("The predicted loan status is --------- ", predictions, " --------")
except ValueError as e:
print("Kindly enter numbers only!", e)
else:
return 0
    # Fit the model again so that it can be referred to outside the function:
model.fit(data[predictors], data[outcome])
def predict(model):
outcome_var = 'Loan_Status'
predictor_var = ['Credit_History', 'Education', 'Married', 'Self_Employed', 'Property_Area']
classification_model(model, data, predictor_var, outcome_var)
while 1:
try:
choice = input("Please choose the algorithm: \n"
"1. LogisticRegression. "
"2. DecisionTreeClassifier. "
"3. RandomForestClassifier. ")
if int(choice) == 1:
model = LogisticRegression(solver='lbfgs')
predict(model)
elif int(choice) == 2:
model = DecisionTreeClassifier()
predict(model)
elif int(choice) == 3:
model = RandomForestClassifier(n_estimators=100)
predict(model)
else:
break
except ValueError as e:
print("Kindly enter numbers only when choosing the algorithm!", e) | true |
a2dc4c7fbc7ab1901d72fc551ae02e6283060dde | Python | Acinate/python | /leetcode/21_merge_two_sorted_arrays.py | UTF-8 | 1,045 | 3.625 | 4 | [] | no_license | import unittest
from datastructures.linked_list import ListNodeUtil
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
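        # Recursively attach the smaller head node to the merge of the remaining nodes.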
if not l1 or not l2:
return l1 or l2
if l1.val < l2.val:
l1.next = self.mergeTwoLists(l1.next, l2)
return l1
else:
l2.next = self.mergeTwoLists(l1, l2.next)
return l2
# TODO: Fix self parameter in convert_to_ll (Stack Overflow)
class TestSolution(unittest.TestCase):
def test_merge_two_lists(self):
convert_to_ll = ListNodeUtil().list_to_linked_list
list1: ListNode = convert_to_ll(self, [1, 2, 4])
list2: ListNode = convert_to_ll(self, [1, 3, 4])
list3: ListNode = Solution().mergeTwoLists(list1, list2)
convert_to_lst = ListNodeUtil().linked_list_to_list
self.assertEqual(convert_to_lst(self, list3), [1, 1, 2, 3, 4, 4])
| true |
06d90b3df91e31d2022f38ea2099890f5f727a77 | Python | Ashishkapil/python-runs | /kth-smallest-bst/smallest.py | UTF-8 | 1,820 | 3.90625 | 4 | [] | no_license | class Node:
def __init__(self, data):
self.val = data
self.left = None
self.right = None
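        # number of nodes that have been inserted into this node's left subtree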
self.count = 0
class BST:
def __init__(self):
self.root = None
self.stack = []
self.stacktip = None
def insert(self, val):
node = Node(val)
self._find_and_insert(node, self.root)
def _find_and_insert(self, node, head):
if not self.root:
self.root = node
return
if node.val < head.val:
head.count += 1
if not head.left:
head.left = node
else:
self._find_and_insert(node, head.left)
elif node.val == head.val:
return False
else:
if not head.right:
head.right = node
else:
self._find_and_insert(node, head.right)
def inorder(self, head):
if not head:
return
self.inorder(head.left)
print(str(head.val) + ' (' + str(head.count) + ') ')
self.inorder(head.right)
def smallest(self, head, kth):
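        # head.count nodes lie in head's left subtree, so head itself has 1-based rank head.count + 1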
if kth == head.count + 1:
return head.val
if kth > head.count + 1:
return self.smallest(head.right, kth - head.count - 1)
else:
return self.smallest(head.left, kth)
bst = BST()
bst.insert(25)
bst.insert(15)
bst.insert(50)
bst.insert(10)
bst.insert(22)
bst.insert(35)
bst.insert(70)
bst.insert(4)
bst.insert(12)
bst.insert(18)
bst.insert(24)
bst.insert(31)
bst.insert(44)
bst.insert(66)
bst.insert(90)
bst.inorder(bst.root)
print(bst.smallest(bst.root, 1))
print(bst.smallest(bst.root, 2))
print(bst.smallest(bst.root, 3))
print(bst.smallest(bst.root, 4))
print(bst.smallest(bst.root, 5))
print(bst.smallest(bst.root, 6))
print(bst.smallest(bst.root, 7))
print(bst.smallest(bst.root, 8))
print(bst.smallest(bst.root, 9))
print(bst.smallest(bst.root, 10))
print(bst.smallest(bst.root, 11))
print(bst.smallest(bst.root, 12))
print(bst.smallest(bst.root, 13))
print(bst.smallest(bst.root, 14))
print(bst.smallest(bst.root, 15))
| true |
24ceb59dd0854bc9ec1db66bfd4db6b7cccf3203 | Python | DanteLore/jira-utils | /tests/mock_slack.py | UTF-8 | 900 | 2.609375 | 3 | [] | no_license | class MockSlack:
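    """In-memory stand-in for the Slack client used in tests: replays canned
    incoming messages and records outgoing messages and file uploads."""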
def __init__(self, incoming_messages=None, name_lookup=None):
self.incoming_messages = incoming_messages or []
self.outgoing_messages = []
self.uploaded_files = []
self.name_lookup = name_lookup or {}
def read_next_messages_for_channel(self, channel_id):
return self.incoming_messages
def add_incoming(self, message):
self.incoming_messages.append(message)
def get_channel_id(self, channel):
return "CHANNEL_1"
def get_user_id(self, name):
return "BOTID"
def search_user_id(self, name):
return self.name_lookup.get(name)
def send(self, recipient, message, attachments):
msg = {"recipient": recipient, "message": message}
self.outgoing_messages.append(msg)
def upload_file(self, channel, filename, file_handle):
self.uploaded_files.append(filename)
| true |
0d23cca7ba20a5e6e9be87543c25c161e80f388c | Python | xujun10110/fulltext_engine | /searcher.py | UTF-8 | 5,472 | 2.953125 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import sys
from search import Search
from content import Content
from collections import Counter
from tokenizer import Tokenizer
import termcolor
NGRAM = 2
DAMPING_SCORE = 10
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) #term color
class Searcher:
def __init__(self):
self.engine = Search(NGRAM, "./")
self.tokenizer = Tokenizer("ma")
self.query_string = []
#just a wrapper for search engine search function
def _execute(self, statement, numOfResult):
return self.engine.zenhan_search(unicode(statement, "UTF-8"), numOfResult)
#single word is statement without operator (without space as AND or OR keyword)
def execute_with_singleword(self, statement, numOfResult):
search_result = self._execute(statement, numOfResult)
statement = self.tokenizer.split(unicode(statement, "UTF-8"))
self.print_result(search_result, statement)
    # input is a list of tuples: [(id1, score1), (id2, score2), ...]
#search content for each id
#print content
def print_result(self, search_result, query):
#[TODO] print matched words with color
for elem in search_result:
doc = self.engine.content.get(elem[0])
termcolor.printcolor(doc, query)
print ""
#list word is statement with operator (with space as AND and OR operator)
def execute_with_listword(self, statementList, numOfResult):
normalized_list = []
if ("OR" in statementList): #--> or routine
#because can not contain AND and OR in one query
#so we normalize all strings which have space
statementList = statementList.split()
prev_or = -1
statements_len = len(statementList)
for i in range(0, statements_len):
if (statementList[i] == "OR"):
if (prev_or + 1) >= i:
return None
else:
normalized_list.append("".join(statementList[(prev_or+1):(i)]))
prev_or = i
if (i == statements_len-1):
normalized_list.append("".join(statementList[(prev_or+1):(i+1)]))
return self._or_operator(normalized_list, numOfResult)
else: #--> and routine
normalized_list = statementList.split()
return self._and_operator(normalized_list, numOfResult)
#take input as statement list (for example "a OR B" will as ["a", "OR", "b"]
#preprocess to concat string with space (for ex: "a b OR c" will as ["ab", "c"]
#take result of each statement and return list of result
#execute OR operator for all results
def _or_operator(self, statementList, numOfResult):
result = []
for statement in statementList:
#append to query to print color
tokens = self.tokenizer.split(unicode(statement, "UTF-8"))
for t in tokens:
if t not in self.query_string: self.query_string.append(t)
for i in range(0, len(statementList)):
temp_ret = self._execute(statementList[i], "all")
result.append(temp_ret) #[TODO] move below process to here!!!
#or list of result
prev_list = []
cur_list = []
accumulate_result = Counter()
for j in range(0, len(result)):
            if not result[j]: continue  # skip sub-queries that matched nothing
if (j == 0):
prev_list = result[j]
continue
cur_list = result[j]
#OR operator bw previous list to current list
max_score = cur_list[0][1] #max score is first element because our list is sorted
            for m in range(0, len(cur_list)):
                content_id = cur_list[m][0]
                content_score = cur_list[m][1]
                exist = [i for i, v in enumerate(prev_list) if v[0] == content_id]
                if (len(exist) > 0):  # if a document appears in both lists, damp its score
                    accumulate_result[content_id] = content_score - max_score
                else:
                    accumulate_result[content_id] = content_score
prev_list = cur_list
self.print_result(accumulate_result.most_common(numOfResult), self.query_string)
#take input as statement list (for example "a b" will as ["a", "b"]
#take result of each statement and return list of result
#execute AND operator for all results (simply merge all result + increase score)
def _and_operator(self, statementList, numOfResult):
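        # Scores are summed per document across sub-queries, so documents matching every term rank highest.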
        # [TODO] give earlier tokens a higher score
accumulate_result = Counter()
for statement in statementList:
#append to query to print color
tokens = self.tokenizer.split(unicode(statement, "UTF-8"))
for t in tokens:
if t not in self.query_string: self.query_string.append(t)
result = self._execute(statement, "all")
for content in result:
id = content[0]
score = content[1]
accumulate_result[id] += score
self.print_result(accumulate_result.most_common(numOfResult), self.query_string)
if __name__ == "__main__":
#[TODO] load once, search multiple!
param_len = len(sys.argv)
if (param_len) < 3:
print "usage: ./searcher.py statement numOfResult"
sys.exit(1)
statement = None
statement_list = None
if (param_len == 3):
statement = sys.argv[1]
numOfResult = int(sys.argv[2])
else:
statement_list = " ".join(sys.argv[1:(param_len-1)])
numOfResult = int(sys.argv[param_len-1])
searcher = Searcher()
if statement != None:
searcher.execute_with_singleword(statement, numOfResult)
if statement_list != None:
searcher.execute_with_listword(statement_list, numOfResult)
| true |
4c34eb71c64c9e3d7863c05f3b697f50b330b23f | Python | kr80865n/Machine-Learning-Projects-Public | /Depth Estimation from 2D Images/Depth Map Prediction from a Single Image using a Multi-Scale Deep Network/train.py | UTF-8 | 2,833 | 2.578125 | 3 | [] | no_license | # For reproducability
import numpy as np
np.random.seed(3)
import keras
from keras.optimizers import SGD, Adam
from nets import get_models
from loss_functions import SIMSE
import matplotlib.pyplot as plt
import os
from PIL import Image
# based on NYUDepth dataset, modified
input_shape = (304,228,3)
output_shape = (63, 44, 1)
# Other params
visualize = True
save = True
batch_size = 16
epochs_coarse = 5
epochs_fine = 20
optimizer_coarse = Adam() # SGD(lr=0.001, momentum=0.9)
optimizer_fine = optimizer_coarse
# load images and depths, and squeeze in [0,1]
imgs = np.array([np.array(Image.open("depth_dataset/rgb_data/"+i))/255.
for i in os.listdir("depth_dataset/rgb_data")]).astype(np.float32)
depths = np.array([np.array(Image.open("depth_dataset/depth_target/"+i).convert("L"))/255.
for i in os.listdir("depth_dataset/depth_target")]).reshape((imgs.shape[0],
output_shape[0],
output_shape[1],
1)).astype(np.float32)
# Load models
model_coarse, model_fine = get_models(input_shape, output_shape)
# Compile; SIMSE seems to be working better than SILoss
model_coarse.compile(optimizer_coarse, loss=SIMSE)
model_fine.compile(optimizer_fine, loss=SIMSE)
# Train coarse model
h1 = model_coarse.fit(imgs, depths, batch_size=batch_size, epochs=epochs_coarse)
# Train fine model
h2 = model_fine.fit(imgs, depths, batch_size=batch_size, epochs=epochs_fine)
if visualize:
preds = model_coarse.predict(imgs[:1].reshape((1,
input_shape[0],
input_shape[1],
input_shape[2])))
preds2 = model_fine.predict(imgs[:1].reshape((1,
input_shape[0],
input_shape[1],
input_shape[2])))
    fig, (ax0, ax1, ax2, ax3, ax4) = plt.subplots(1, 5)  # figsize=(16, 12)
ax0.imshow(imgs[0])
ax0.set_title("Input image")
ax1.imshow(preds[0].reshape(output_shape[0:2]), cmap='gray')
ax1.set_title("Coarse prediction")
ax2.imshow(preds2[0].reshape(output_shape[0:2]), cmap='gray')
ax2.set_title("Fine prediction")
ax3.imshow(depths[0].reshape(output_shape[0:2]), cmap='gray')
ax3.set_title("Ground truth")
ax4.plot(h1.history["loss"], label='Coarse model loss', c='blue')
ax4.plot(h2.history["loss"], label='Fine model loss', c='red')
ax4.set_title("Loss")
ax4.legend()
plt.show()
if save:
model_fine.save("fine.h5")
model_coarse.save("coarse.h5")
| true |
e3a1a848846c5932447593a653d32835d60c7879 | Python | MuhamadAinurRofiq/TUGAS-UAS | /Uas/main.py.py | UTF-8 | 1,297 | 3.34375 | 3 | [] | no_license | from Perhitungan.Gaji import gaji
from Perhitungan.Nilai import nilai
from Perhitungan.Pembayaran import pembayaran
from Perhitungan.Kalkulator import kalkulator
import getpass
def login():
print('=+= Login =+=')
user=input('Username : ')
password=getpass.getpass('Password : ')
if user == 'aino' and password == 'gratisan':
mulai()
else:
        print('Sorry, the username/password you entered is incorrect. Please try again.')
login()
def mulai():
    print('\n\t\t\t +=+= PROGRAM MENU =+=+ \n\n1.PAYROLL \n2.GRADE INPUT \n3.PAYMENT \n4.CALCULATOR')
    pilih = input('\nEnter your choice : ')
if pilih == '1':
print('-'*75)
gaji()
elif pilih == '2':
print('-'*75)
nilai()
elif pilih == '3':
print('-'*75)
pembayaran()
elif pilih == '4':
print('-'*75)
kalkulator()
    else:
        print('\nSorry, the choice you entered is invalid. Please try again.')
        jawab()
def jawab():
    tanya = input('\nDo you want to run the program again (y/n): ')
if tanya == 'y':
mulai()
else :
print('='*75)
        print('\n\t\t\t+++ THANK YOU +++')
login()
| true |
9b5c71f082060b0639d949b54e73b0a1aaa81bcb | Python | Scalabull/get-tested-covid19 | /src/data_pipeline/csv_preprocessors/cmd_preprocess_csv.py | UTF-8 | 1,897 | 2.546875 | 3 | [
"MIT"
] | permissive | import csv
import importlib
import click
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import reference_constants
import csv_helpers
import maine_preprocessor
import idaho_preprocessor
TARGET_PREPROCESSED_CSV_HEADER = reference_constants.TARGET_PREPROCESSED_CSV_HEADER
OUT_FILE_PATH = '../tmp_data/pp_out_lts.csv'
def load_preprocessor(source):
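    # Map each supported source organization to its row-preprocessing function; add new sources here.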
options = {
'maine': maine_preprocessor.preprocess_csv,
'idaho': idaho_preprocessor.preprocess_csv
}
if source in options:
return options[source]
else:
raise Exception('Specified source is not configured. Check "options" dict for available sources.')
def main_tool_process(csv_file, source):
preprocessor = load_preprocessor(source)
with open(csv_file) as test_centers_file:
test_center_reader = csv.reader(test_centers_file)
header = next(test_center_reader)
with open(OUT_FILE_PATH, 'w') as out_file:
out_writer = csv.writer(out_file, delimiter=',', quoting=csv.QUOTE_MINIMAL)
out_writer.writerow(TARGET_PREPROCESSED_CSV_HEADER)
for test_center in test_center_reader:
test_center_dict = csv_helpers.convert_csv_row_to_dict(test_center, header)
arranged_test_center_row = preprocessor(test_center_dict)
out_writer.writerow(arranged_test_center_row)
print('Success! formatted output file is at: ', OUT_FILE_PATH)
# Command line interface
@click.command()
@click.option('--csv_file', default=None, help='CSV file containing test center rows, sourced from an external organization')
@click.option('--source', default=None, help='Source organization (format to use). Currently supported: maine, idaho')
def exec_tool(csv_file, source):
main_tool_process(csv_file, source)
if __name__ == '__main__':
exec_tool() | true |
db07db20e570c35d374239abccfb0276fa8e8611 | Python | nmichiels/cifDataset | /cifStreamer/cifDataset.py | UTF-8 | 6,347 | 3.015625 | 3 | [
"BSD-3-Clause"
] | permissive | """
A specialized dataset loader class for *.cif files.
Supports both a Python FlowSightParser and a faster FlowSightParserC implemented in C++.
"""
import builtins
from .dataset import Dataset
import numpy as np
# from .FlowSightParser import FlowSightParser
from .FlowSightParserC import FlowSightParser
from .dataPreparation import pad_or_crop
class CIFDataset(Dataset):
"""
A specialized dataset loader class for *.cif files with C++ support.
"""
def __init__(self, cifFile, overRuleChannelCount = None):
"""The constructor initializes the cif file and parses high level information."""
print('Initializing Dataset: ' + cifFile)
Dataset.__init__(self)
self._flowSightParser = FlowSightParser()
if not self._flowSightParser.loadFile(cifFile):
print("ERROR (CIFDataset): Could not open file \"", cifFile, "\"")
            self._num_examples = 0
            self._num_channels = 0
return
self._flowSightParser.loadMetaData(verbose=False, overRuleChannelCount = overRuleChannelCount)
self._num_examples = int(self._flowSightParser._numCells / 2)
self._num_channels = self._flowSightParser._channelCount
print("Image Count: " + repr(self._num_examples))
print("Channel Count: " + repr(self._num_channels))
self._index_in_epoch = 0
def eod(self):
"""Returns if the end of the dataset is reached, i.e. iterated over all the images of the dataset."""
if (self._epochs_done > 0):
return True
else:
return False
# Target resolution required! ==> not all images are of the same size
def nextBatch_withmask(self, batch_size, image_size):
"""Returns next `batch_size` of examples and masks from this cif dataset.
Not implemented because it requires the total number of images of the dataset. Getting this is too slow."""
        # Old implementation below: it requires the total number of images in the dataset,
        # which is too slow to determine up front.
        raise NotImplementedError()
# start = self._index_in_epoch
# self._index_in_epoch += batch_size
# end = self._index_in_epoch
# if end > self._num_examples:
# end = self._num_examples
# self._epochs_done += 1
# self._index_in_epoch = 0
# count = end-start
# # print("batch:", batch_size)
# # print("end: ", count)
# batch = np.ndarray(shape=(count, image_size,image_size, self.num_channels))
# batch_mask = np.ndarray(shape=(count, image_size,image_size, self.num_channels))
# for i in range(0,count):
# current_image_ID = (self._index_in_epoch-count+i) * 2 +1
# image = self._flowSightParser.openIFDData(current_image_ID , verbose=False)
# mask = self._flowSightParser.openIFDData(current_image_ID+1 , verbose=False)
# for channel in range(image.shape[-1]):
# img = image[:,:,channel]
# msk = mask[:,:,channel]
# batch[i][:,:,channel] = pad_or_crop(img, image_size, 'symmetric')# pad_or_crop(img, image_size, 'symmetric', constant_values=(0))
# batch_mask[i][:,:,channel] = pad_or_crop(msk, image_size, 'symmetric')# pad_or_crop(img, image_size, 'symmetric', constant_values=(0))
# # print (imgCropped)
# return batch, batch_mask
def nextImage_withmask(self):
"""Return the next example and mask from this data set."""
# Calculate the exact position in the cif file
# *2 because of interleaved image/mask
# +1 because first element contains only metadata
current_image_ID = self._index_in_epoch * 2 +1
self._index_in_epoch += 1
image = self._flowSightParser.openIFDData(current_image_ID , verbose=False)
mask = self._flowSightParser.openIFDData(current_image_ID+1 , verbose=False)
if self._flowSightParser.eof():
self._num_examples = self._index_in_epoch
self._epochs_done += 1
self._index_in_epoch = 0
self._flowSightParser.resetToFirstIFD()
return image, mask
def nextImage(self):
"""Returns next example of this dataset."""
current_image_ID = self._index_in_epoch * 2 +1
self._index_in_epoch += 1
if self._index_in_epoch > self._num_examples:
self._epochs_done += 1
self._index_in_epoch = 0
image = self._flowSightParser.openIFDData(current_image_ID , verbose=False)
mask = self._flowSightParser.openIFDData(current_image_ID+1 , verbose=False) #mask should be decoded, otherwise the file pointer of cif is incorrect
return image
def nextMask(self):
"""Returns next mask of this hdf5 dataset, skipping the image."""
current_image_ID = self._index_in_epoch * 2 +1
self._index_in_epoch += 1
if self._index_in_epoch > self._num_examples:
self._epochs_done += 1
self._index_in_epoch = 0
image = self._flowSightParser.openIFDData(current_image_ID+1 , verbose=False)
return image
# Target resolution required! ==> not all images are of the same size
def nextBatch(self, batch_size, image_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
end = self._index_in_epoch
if end > self._num_examples:
end = self._num_examples
self._epochs_done += 1
self._index_in_epoch = 0
count = end-start
batch = np.ndarray(shape=(count, image_size,image_size, self.num_channels))
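        # Raw cell images vary in size, so each channel is padded or cropped to image_size x image_size.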
for i in range(0,count):
current_image_ID = (self._index_in_epoch-count+i) * 2 +1
image = self._flowSightParser.openIFDData(current_image_ID , verbose=False)
for channel in range(image.shape[-1]):
img = image[:,:,channel]
batch[i][:,:,channel] = pad_or_crop(img, image_size, 'symmetric')# pad_or_crop(img, image_size, 'symmetric', constant_values=(0))
return batch
def __del__(self):
pass | true |
00656fb457499fde7c9e7bfc708843795df844cd | Python | metabismuth/esalg-week2-tarefa1 | /stack.py | UTF-8 | 485 | 3.59375 | 4 | [] | no_license | class Stack:
def __init__(self):
self.stack = []
def __str__(self):
return str(self.stack)
def receive(self, item):
return self.stack.append(item)
def receive_many(self, items):
for i in items:
self.receive(i)
    def give(self):
        # pop from the top of the stack (LIFO)
        return self.stack.pop()
    def peek(self):
        # look at the top of the stack without removing it
        return self.stack[-1]
def size(self):
return len(self.stack)
def empty(self):
self.stack = []
# a = Stack()
# a.receive_many([2,2,34,4])
# print(a)
| true |
7175b7268b3b81436be791d18502625dd5c4683f | Python | sanand0/orderedattrdict | /tests/test_orderedattrdict.py | UTF-8 | 10,120 | 3.09375 | 3 | [
"MIT"
] | permissive | import os
import json
import yaml
import random
import unittest
from collections import OrderedDict
from orderedattrdict import AttrDict, DefaultAttrDict, CounterAttrDict, Tree
from orderedattrdict.yamlutils import AttrDictYAMLLoader, from_yaml
# In Python 3, chr is unichr
try:
unichr
except NameError:
unichr = chr
class Generator(object):
'''
    Generates random object hierarchies using AttrDict.
https://github.com/maxtaco/python-random-json
'''
def __init__(self):
random.seed()
def byte(self):
return random.randint(0, 0xff)
def integer(self, signed):
i = random.randint(0, 0xfffffff)
if signed:
i = 0x7ffffff - i
return i
def _small_float(self, signed=True):
numerator = self.integer(signed=signed)
denominator = self.integer(signed=False)
return float(numerator) / float(1 + denominator)
def float(self):
while True:
try:
base = self._small_float(signed=False)
exp = self._small_float()
return base ** exp
except OverflowError:
pass
def string(self, n=None):
if not n:
n = random.randint(32, 128)
return u''.join([unichr(self.byte()) for i in range(n)]).strip()
def array(self, n, d):
if not n:
n = random.randint(0, 10)
return [self.json(d + 1) for i in range(n)]
def obj(self, n, d=0):
if not n:
n = random.randint(0, 8)
return AttrDict([(self.string(10), self.json(d+1)) for i in range(n)])
def json(self, d=0):
b = random.randint(0, 7)
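        # b selects the generated value type: 0/1 -> bool, 2 -> None, 3 -> int, 4 -> float, 5 -> str, 6 -> list, 7 -> AttrDict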
ret = None
        # Don't go more than 4 levels deep.  Cut it off by
# not allowing recursive structures at level 5.
if d > 4 and b > 5:
b = b % 5
if False:
pass
        elif b == 0:
            ret = False
        elif b == 1:
            ret = True
        elif b == 2:
            ret = None
        elif b == 3:
            ret = self.integer(True)
        elif b == 4:
            ret = self.float()
        elif b == 5:
            ret = self.string()
        elif b == 6:
            ret = self.array(None, d)
        elif b == 7:
            ret = self.obj(None, d)
return ret
class TestAttrDict(unittest.TestCase):
'''Test core orderedattrdict.AttrDict behaviour'''
def setUp(self):
self.gen = Generator()
self.klass = AttrDict
def test_attribute_access(self):
'Items can be accessed as attributes'
ad = self.klass()
ad['x'] = 1
self.assertEqual(ad.x, 1)
self.assertTrue('x' in ad)
ad._y = 2
self.assertEqual(ad['_y'], 2)
del ad['x']
with self.assertRaises(AttributeError):
ad.x
del ad._y
with self.assertRaises(KeyError):
ad['_y']
def test_ordereddict(self):
'AttrDict inherits all OrderedDict behaviour'
items = [('x', 1), ('_y', 2), (3, 3)]
ad = self.klass(items)
od = OrderedDict(items)
self.assertEqual(ad, od)
self.assertEqual(ad.keys(), od.keys())
self.assertEqual(ad.items(), od.items())
ad.pop(items[0][0])
od.pop(items[0][0])
self.assertEqual(ad, od)
ad.pop(items[1][0])
od.pop(items[1][0])
self.assertEqual(ad, od)
ad['x'] = od['x'] = 1
self.assertEqual(ad, od)
ad.setdefault('x', 10)
od.setdefault('x', 10)
ad.setdefault('new', 10)
od.setdefault('new', 10)
self.assertEqual(ad, od)
new_ad = ad.copy()
new_od = od.copy()
self.assertEqual(new_ad, new_od)
self.assertEqual(type(new_ad), type(ad))
ad.popitem()
od.popitem()
self.assertEqual(ad, od)
ad.clear()
od.clear()
self.assertEqual(ad, od)
ad = self.klass.fromkeys(range(10), 1)
od = OrderedDict.fromkeys(range(10), 1)
self.assertEqual(ad, od)
# Not tested:
# ad.iterkeys()
# ad.itervalues()
# ad.iteritems()
# ad.viewkeys()
# ad.viewvalues()
# ad.viewitems()
def test_str(self):
items = [('x', 1), ('_y', 2), (3, 3)]
ad = self.klass(items)
self.assertEqual(str(ad), "{'x': 1, '_y': 2, 3: 3}")
self.assertTrue(repr(ad).endswith("([('x', 1), ('_y', 2), (3, 3)])"))
def test_conflict(self):
data = [
('__format__', 1),
('__contains__', 1),
('keys', 1),
('values', 1),
('items', 1),
('get', 1),
('pop', 1),
]
ad = self.klass(data)
self.assertTrue(callable(ad.__format__))
self.assertTrue(callable(ad.__contains__))
self.assertTrue(callable(ad.keys))
self.assertTrue(callable(ad.values))
self.assertTrue(callable(ad.items))
self.assertTrue(callable(ad.get))
self.assertTrue(callable(ad.pop))
for key, val in data:
self.assertEqual(ad[key], val)
def test_yaml(self):
        'Load YAML with ordered AttrDict instead of dict'
for iteration in range(10):
ad = self.gen.obj(10)
self.assertEqual(
ad, yaml.load(yaml.dump(ad), Loader=AttrDictYAMLLoader))
self.assertEqual(
ad, yaml.load(yaml.safe_dump(ad), Loader=AttrDictYAMLLoader))
yaml.add_constructor(u'tag:yaml.org,2002:map', from_yaml)
yaml.add_constructor(u'tag:yaml.org,2002:omap', from_yaml)
for iteration in range(10):
ad = self.gen.obj(10)
self.assertEqual(ad, yaml.safe_load(yaml.dump(ad)))
self.assertEqual(ad, yaml.safe_load(yaml.safe_dump(ad)))
def test_mergetag(self):
'Check if YAML merge tag works'
folder = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(folder, 'test.mergetag.yaml')) as handle:
self.assertEqual(
{'base': {'key': 'value'}, 'derived': {'key': 'value'}},
yaml.load(handle, Loader=AttrDictYAMLLoader))
def test_json(self):
for iteration in range(10):
ad = self.gen.obj(10)
self.assertEqual(ad, json.loads(json.dumps(ad), object_pairs_hook=self.klass))
def test_files(self):
'Ensure that test JSON files have values in sorted order'
folder = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(folder, 'test.json')) as handle:
result = json.load(handle, object_pairs_hook=self.klass)
self.assertEqual(list(result.values()), sorted(result.values()))
with open(os.path.join(folder, 'test.yaml')) as handle:
result = yaml.load(handle, Loader=AttrDictYAMLLoader)
self.assertEqual(list(result.values()), sorted(result.values()))
class NoneDefaultAttrDict(DefaultAttrDict):
'A DefaultAttrDict that mimics an AttrDict'
def __init__(self, *args, **kwargs):
super(NoneDefaultAttrDict, self).__init__(None, *args, **kwargs)
def __copy__(self):
return type(self)(self)
class TestDefaultAttrDict(TestAttrDict):
'DefaultAttrDict with None constructor inherits all AttrDict behaviour'
def setUp(self):
super(TestDefaultAttrDict, self).setUp()
self.klass = NoneDefaultAttrDict
def test_defaultdict_counter(self):
'DefaultAttrDict as a list generator'
ad = DefaultAttrDict(int)
self.assertEqual(ad['x'], 0)
ad.x += 1
ad.y += 2
ad.z = ad.z + 3
self.assertEqual(ad, {'x': 1, 'y': 2, 'z': 3})
def test_defaultdict_with_list(self):
'DefaultAttrDict as a list generator'
ad = DefaultAttrDict(list)
self.assertEqual(ad['x'], [])
self.assertEqual(ad['y'], [])
self.assertEqual(ad, {'x': [], 'y': []})
self.assertFalse('z' in ad)
ad = DefaultAttrDict(list)
self.assertEqual(ad.x, [])
self.assertEqual(ad.y, [])
self.assertEqual(ad, {'x': [], 'y': []})
self.assertFalse('z' in ad)
def test_defaultdict_with_set(self):
'DefaultAttrDict as a set generator'
ad = DefaultAttrDict(set)
self.assertEqual(ad['x'], set())
self.assertEqual(ad['y'], set())
self.assertEqual(ad, {'x': set(), 'y': set()})
self.assertFalse('z' in ad)
ad = DefaultAttrDict(set)
self.assertEqual(ad.x, set())
self.assertEqual(ad.y, set())
self.assertEqual(ad, {'x': set(), 'y': set()})
self.assertFalse('z' in ad)
def test_defaultdict_tree(self):
'DefaultAttrDict can be used as a tree'
def tree():
return DefaultAttrDict(tree)
ad = tree()
self.assertEqual(ad['x'], {})
self.assertEqual(ad['y'], {})
self.assertEqual(ad['x']['1'], {})
self.assertEqual(ad, {'x': {'1': {}}, 'y': {}})
self.assertFalse('z' in ad)
ad = tree()
ad.a.b.c = 1
self.assertEqual(ad, {'a': {'b': {'c': 1}}})
class TestCounterAttrDict(unittest.TestCase):
def test_counterattrdict(self):
ad = CounterAttrDict()
self.assertEqual(ad.x, 0)
self.assertEqual(ad.y, 0)
self.assertEqual(ad, {})
ad.x += 1
ad.y += 2
ad.z += 3
self.assertEqual(ad, {'x': 1, 'y': 2, 'z': 3})
ad = CounterAttrDict()
self.assertEqual(ad['x'], 0)
self.assertEqual(ad['y'], 0)
self.assertEqual(ad, {})
ad['x'] += 1
ad['y'] += 2
ad['z'] += 3
self.assertEqual(ad, {'x': 1, 'y': 2, 'z': 3})
class TestTree(unittest.TestCase):
def test_tree(self):
tree = Tree()
tree.x.y.z = 1
self.assertEqual(tree, {'x': {'y': {'z': 1}}})
del tree.x.y
self.assertEqual(tree, {'x': {}})
tree.a.b = None
self.assertEqual(tree, {'x': {}, 'a': {'b': None}})
| true |
00ac488a687e2d6cbe2b921398ab5afaee0254f2 | Python | SonicXP/alfred-douban-suggest | /douban_movie.py | UTF-8 | 1,128 | 2.65625 | 3 | [] | no_license | import httplib
import urllib
import json
from xml.dom.minidom import Document
params = urllib.urlencode({"q": "{query}"})
conn = httplib.HTTPConnection("movie.douban.com", 80)
conn.request("GET", "/j/subject_suggest?"+params)
response = conn.getresponse()
data = response.read()
conn.close()
dataobj = json.loads(data)
doc = Document()
items = doc.createElement("items")
for item in dataobj:
title = item["title"]
if "year" in item:
title = title + " (" + item["year"] + ")"
xmlitem = doc.createElement("item")
xmlitem.setAttribute("uid", item["url"])
xmlitem.setAttribute("arg", item["url"])
xmlitem.setAttribute("autocomplete", item["title"])
xmlitem.setAttribute("valid", "YES")
attr = doc.createElement("title")
attr.appendChild(doc.createTextNode(title))
xmlitem.appendChild(attr)
attr = doc.createElement("subtitle")
attr.appendChild(doc.createTextNode(item["sub_title"]))
xmlitem.appendChild(attr)
attr = doc.createElement("icon")
attr.appendChild(doc.createTextNode("icon.png"))
xmlitem.appendChild(attr)
items.appendChild(xmlitem)
doc.appendChild(items)
print unicode(doc.toxml()).encode("utf-8")
| true |
3b409f9eb862ad6ee36b68cd4ae4ce787faeccf8 | Python | amaljyothicollegeaes/S1-A-JILSE-JACOB-43 | /PYTHON PROGRAMING LAB/17-2-2021/17-02-2021/CO3/Graphics/FindPerimeter.py | UTF-8 | 513 | 3.359375 | 3 | [] | no_license | import circle
from rectangle import *
from Graphics._3D_graphics import cuboid,sphere
a=float(input('Enter length of the rectangle: '))
b=float(input('Enter breadth of the rectangle: '))
perimeter(a,b)
r=float(input('Enter the radius of the circle: '))
circle.circumference(r)
l=float(input('Enter length of the cuboid: '))
b=float(input('Enter breadth of the cuboid: '))
h=float(input('Enter height of the cuboid: '))
cuboid.perimeter(l,b,h)
r=float(input('Enter the radius of the sphere: '))
sphere.perimeter(r) | true |
13ac50394bab2f4ba249615ee12c9f31c6114253 | Python | cyLeo2018/spider_python | /v27.py | UTF-8 | 114 | 3.21875 | 3 | [] | no_license | import re
hello = u"你好,世界"
pattern = re.compile(r'[\u4e00-\u9fa5]+')
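# Note: the class [\u4e00-\u9fa5] covers the common CJK unified ideographs, and
# match() anchors at the start of the string, so it captures the leading run of
# Chinese characters and stops at the first character outside that range
# (the full-width comma here).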
m = pattern.match(hello)
print(m) | true |
8e65f4a167f06129db2bf82b8a40dcd5a6c8fb1f | Python | MaudBoucherit/horse_colic | /src/data_import.py | UTF-8 | 2,215 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# data_import.py
# Maud Boucherit, Jan 2018
#
# This script import the data for the horse colic project
# and deal with the missing data before saving it in data/
#
# Dependencies: argparse, pandas
#
# Usage: python src/data_import.py
# import libraries
import argparse
import pandas as pd
# read in command line arguments
parser = argparse.ArgumentParser()
args = parser.parse_args()
def main():
# Names of my variables
names = ['surgery', 'age', 'hospital', 'rectal_temp', 'pulse', 'respiration', 'extreme_temp',
'peripheral_pulse', 'mucous', 'capillary_time', 'pain', 'peristalsis', 'abdominal_dist',
'nasogastric_tube', 'nasogastric_reflux', 'nasogastric_PH', 'feces', 'abdomen', 'cell_vol',
'protein', 'abdomino_appearance', 'abdom_protein', 'outcome', 'surgical_lesion', 'type1',
'type2', 'type3', 'cp_data', 'trash']
# Import the training data
## There is a 29th blank column for all rows except the first one, so I need to remove it
train = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/horse-colic/horse-colic.data",
sep=" ", header=None, names=names, skiprows=1, na_values='?',
usecols=[1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23])
# Add the first row
train = train.append(pd.DataFrame([[1, 38.50, 66, 28, 3, 3, 2, 5, 4, 4, 3, 5, 45.00, 8.40, 2]],
columns=['age', 'rectal_temp', 'pulse', 'respiration', 'extreme_temp',
'peripheral_pulse', 'capillary_time', 'pain', 'peristalsis', 'abdominal_dist',
'feces', 'abdomen', 'cell_vol', 'protein', 'surgical_lesion']))
train.to_csv("data/train.csv")
# Import the test data
test = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/horse-colic/horse-colic.test",
sep=" ", header=None, names = names[:-1], na_values='?',
usecols=[1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23])
test.to_csv("data/test.csv")
# call main function
if __name__ == "__main__":
main() | true |
802f5da634071de172c6263e3fc22902b33bb4ce | Python | shan18/Depth-Estimation-Segmentation | /tensornet/data/processing.py | UTF-8 | 6,720 | 3 | 3 | [
"MIT"
] | permissive | import numpy as np
import torch
import albumentations as A
from albumentations.pytorch import ToTensor
class Transformations:
"""Wrapper class to pass on albumentaions transforms into PyTorch."""
def __init__(
self, resize=(0, 0), padding=(0, 0), crop=(0, 0), horizontal_flip_prob=0.0,
vertical_flip_prob=0.0, gaussian_blur_prob=0.0, rotate_degree=0.0,
cutout_prob=0.0, cutout_dim=(8, 8), hue_saturation_prob=0.0, contrast_prob=0.0,
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), normalize=True, train=True
):
"""Create data transformation pipeline.
Args:
resize (tuple, optional): Resize the input to the given height and
width. (default: (0, 0))
padding (tuple, optional): Pad the image if the image size is less
                than the specified dimensions (height, width). (default: (0, 0))
crop (tuple, optional): Randomly crop the image with the specified
dimensions (height, width). (default: (0, 0))
horizontal_flip_prob (float, optional): Probability of an image
being horizontally flipped. (default: 0)
vertical_flip_prob (float, optional): Probability of an image
being vertically flipped. (default: 0)
            gaussian_blur_prob (float, optional): Probability of applying
                gaussian blur to the image. (default: 0)
rotate_degree (float, optional): Angle of rotation for image
augmentation. (default: 0)
cutout_prob (float, optional): Probability that cutout will be
performed. (default: 0)
cutout_dim (tuple, optional): Dimensions of the cutout box (height, width).
(default: (8, 8))
hue_saturation_prob (float, optional): Probability of randomly changing hue,
saturation and value of the input image. (default: 0)
contrast_prob (float, optional): Randomly changing contrast of the input image.
(default: 0)
mean (float or tuple, optional): Dataset mean. (default: 0.5 for each channel)
            std (float or tuple, optional): Dataset standard deviation. (default: 0.5 for each channel)
            normalize (bool, optional): If True, normalize the image with mean and std. (default: True)
            train (bool, optional): If True, apply the training-time augmentations above. (default: True)
        """
transforms_list = []
if sum(resize) > 0:
transforms_list += [A.Resize(
height=resize[0], width=resize[1], always_apply=True
)]
if train:
if sum(padding) > 0:
transforms_list += [A.PadIfNeeded(
min_height=padding[0], min_width=padding[1], always_apply=True
)]
if sum(crop) > 0:
transforms_list += [A.RandomCrop(crop[0], crop[1], always_apply=True)]
if horizontal_flip_prob > 0: # Horizontal Flip
transforms_list += [A.HorizontalFlip(p=horizontal_flip_prob)]
if vertical_flip_prob > 0: # Vertical Flip
transforms_list += [A.VerticalFlip(p=vertical_flip_prob)]
if gaussian_blur_prob > 0: # Patch Gaussian Augmentation
transforms_list += [A.GaussianBlur(p=gaussian_blur_prob)]
if rotate_degree > 0: # Rotate image
transforms_list += [A.Rotate(limit=rotate_degree)]
if cutout_prob > 0: # CutOut
if isinstance(mean, float):
fill_value = mean * 255.0
else:
fill_value = tuple([x * 255.0 for x in mean])
transforms_list += [A.CoarseDropout(
p=cutout_prob, max_holes=1, fill_value=fill_value,
max_height=cutout_dim[0], max_width=cutout_dim[1]
)]
if hue_saturation_prob > 0: # Hue Saturation
transforms_list += [A.HueSaturationValue(p=hue_saturation_prob)]
if contrast_prob > 0: # Random Contrast
transforms_list += [A.RandomContrast(p=contrast_prob)]
if normalize:
# normalize the data with mean and standard deviation to keep values in range [-1, 1]
# since there are 3 channels for each image,
# we have to specify mean and std for each channel
transforms_list += [
A.Normalize(mean=mean, std=std, always_apply=True),
]
# convert the data to torch.FloatTensor
transforms_list += [
ToTensor()
]
self.transform = A.Compose(transforms_list)
def __call__(self, image):
"""Process and image through the data transformation pipeline.
Args:
image: Image to process.
Returns:
Transformed image.
"""
if not isinstance(image, np.ndarray):
image = np.array(image)
if len(image.shape) == 2:
image = np.expand_dims(image, axis=-1)
image = self.transform(image=image)['image']
return image
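# Illustrative usage sketch (not part of the original module): the sizes, mean and
# std below are placeholder values, not settings prescribed by this project.
# train_transform = Transformations(
#     resize=(64, 64), horizontal_flip_prob=0.5, cutout_prob=0.3, cutout_dim=(8, 8),
#     mean=(0.49, 0.48, 0.45), std=(0.25, 0.24, 0.26), train=True
# )
# tensor_image = train_transform(pil_or_numpy_image)  # returns a torch.FloatTensor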
def data_loader(data, shuffle=True, batch_size=1, num_workers=1, cuda=False):
"""Create data loader
Args:
data (torchvision.datasets): Downloaded dataset.
shuffle (bool, optional): If True, shuffle the dataset.
(default: True)
batch_size (int, optional): Number of images to considered
in each batch. (default: 1)
num_workers (int, optional): How many subprocesses to use
for data loading. (default: 1)
cuda (bool, optional): True is GPU is available. (default: False)
Returns:
DataLoader instance.
"""
loader_args = {
'shuffle': shuffle,
'batch_size': batch_size
}
# If GPU exists
if cuda:
loader_args['num_workers'] = num_workers
loader_args['pin_memory'] = True
return torch.utils.data.DataLoader(data, **loader_args)
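# Illustrative usage sketch: `dataset` stands in for any torch-compatible dataset
# that already applies a Transformations instance to its images.
# train_loader = data_loader(dataset, shuffle=True, batch_size=64,
#                            num_workers=4, cuda=torch.cuda.is_available())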
class InfiniteDataLoader:
"""Create infinite loop in a data loader.
Args:
data_loader (torch.utils.data.DataLoader): DataLoader object.
auto_reset (bool, optional): Create an infinite loop data loader.
(default: True)
"""
def __init__(self, data_loader, auto_reset=True):
self.data_loader = data_loader
self.auto_reset = auto_reset
self._iterator = iter(data_loader)
def __next__(self):
# Get a new set of inputs and labels
try:
data, target = next(self._iterator)
except StopIteration:
if not self.auto_reset:
raise
self._iterator = iter(self.data_loader)
data, target = next(self._iterator)
return data, target
def get_batch(self):
return next(self)
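# Illustrative usage sketch: wrap an existing loader to iterate without worrying
# about epoch boundaries; `train_loader` is assumed to come from data_loader above.
# infinite_loader = InfiniteDataLoader(train_loader)
# data, target = infinite_loader.get_batch()  # restarts automatically when exhausted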
| true |
c004b0fe1f7ba82c4fea9de23e63c84103fc5455 | Python | nightqiuhua/selenium_webdriver | /轻松自动化---selenium-webdriver(python) (四)/selenium_exercise_8.py | UTF-8 | 353 | 2.921875 | 3 | [] | no_license | from selenium import webdriver
import time
import os
browser = webdriver.Firefox()
path = 'file://'+os.path.abspath('checkbox.html')
browser.get(path)
inputs = browser.find_elements_by_tag_name('input')
for in_put in inputs:
if in_put.get_attribute('type') == 'checkbox':
print('in_put=',in_put)
in_put.click()
time.sleep(2)
browser.quit() | true |
de7b5c40651009a6104c8d44292da2cce0354588 | Python | zhangweichina111/RPi-snake | /snake.py | UTF-8 | 7,277 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import string
import sys
import select
from time import sleep
import termios
import tty
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
import Image
import ImageDraw
import ImageFont
import random
import RPi.GPIO as GPIO
import linkList
class apple:
def __init__(self,x,y):
self.x = x
self.y = y
__snakeDir__ = random.randint(1,4)*3
__snakeBody__ = linkList.LinkList()
__score__ = 0
__eatFlag__ = 0
__goFlag__ = 0
__apple__ = apple(0,0)
'''
def dirTurnTo3(channel):
global __snakeDir__
if __snakeDir__ != 3 and __snakeDir__ != 9:
__snakeDir__ = 3
def dirTurnTo6(channel):
global __snakeDir__
if __snakeDir__ != 6 and __snakeDir__ != 12:
__snakeDir__ = 6
def dirTurnTo9(channel):
global __snakeDir__
if __snakeDir__ != 9 and __snakeDir__ != 3:
__snakeDir__ = 9
def dirTurnTo12(channel):
global __snakeDir__
if __snakeDir__ != 12 and __snakeDir__ != 6:
__snakeDir__ = 12
'''
def dirTurnLeft(channel):
global __snakeDir__
if __snakeDir__ == 3:
__snakeDir__ = 12
else:
__snakeDir__ = __snakeDir__ - 3
def dirTurnRight(channel):
global __snakeDir__
if __snakeDir__ == 12:
__snakeDir__ = 3
else:
__snakeDir__ = __snakeDir__ + 3
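# Direction encoding (inferred from the movement code in the main loop below):
# the snake's heading follows a clock face, 3 = right, 6 = down, 9 = left, 12 = up,
# and turning left/right steps the value by 3 with wrap-around between 3 and 12.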
def initSnakeBody(initlen):
global __snakeBody__
global __snakeDir__
#head_x = random.randint(1,32)*4
#head_y = random.randint(1,16)*4
head_x = 64
head_y = 40
for i in range(0,string.atoi(initlen)):
if __snakeDir__ == 3:
item = linkList.Node(head_x-4*i,head_y)
elif __snakeDir__ == 6:
item = linkList.Node(head_x,head_y-4*i)
elif __snakeDir__ == 9:
item = linkList.Node(head_x+4*i,head_y)
elif __snakeDir__ == 12:
item = linkList.Node(head_x,head_y+4*i)
__snakeBody__.append(item)
def setApple(image):
global __eatFlag__
global __apple__
if __eatFlag__ == 1:
__eatFlag__ = 0
x = random.randint(1,32)*4
y = random.randint(1,16)*4
if x >= 124:
x = 124
if x <= 0:
x = 0
if y >= 60:
y = 60
if y <= 20:
y = 20
# print "eat apple"
__apple__.x = x
__apple__.y = y
#print __apple__.x
#print __apple__.y
draw = ImageDraw.Draw(image)
x = __apple__.x
y = __apple__.y
draw.ellipse((x,y,x+4,y+4), outline=255, fill=0)
return image
def drawSnakeBody(image):
global __snakeBody__
draw = ImageDraw.Draw(image)
length = __snakeBody__.getlength()
for i in range(0,length):
t = __snakeBody__.getitem(i)
if i == 0:
draw.rectangle((t.cur_x,t.cur_y,t.cur_x+4,t.cur_y+4),outline=255,fill=0)
else:
draw.rectangle((t.cur_x,t.cur_y,t.cur_x+4,t.cur_y+4),outline=0,fill=255)
return image
def snakeMove(curHeadX,curHeadY):
global __snakeBody__
length = __snakeBody__.getlength()
for i in range(1,length):
__snakeBody__.getitem(length-i).cur_x = __snakeBody__.getitem(length-i-1).cur_x
__snakeBody__.getitem(length-i).cur_y = __snakeBody__.getitem(length-i-1).cur_y
__snakeBody__.getitem(0).cur_x = curHeadX
__snakeBody__.getitem(0).cur_y = curHeadY
def isGameOver():
global __snakeBody__
global __goFlag__
length = __snakeBody__.getlength()
dict = {}
for i in range(0,length):
t = __snakeBody__.getitem(i)
index = t.cur_x + (t.cur_y - 1)*128
if index in dict:
__goFlag__ = 1
break
else:
index = t.cur_x + (t.cur_y - 1)*128
dict[index] = 1
head_x = __snakeBody__.getitem(0).cur_x
head_y = __snakeBody__.getitem(0).cur_y
if head_x < 0 or head_y < 20 or head_x >= 128 or head_y >=64:
__goFlag__ = 1
if __name__ == "__main__":
    if len(sys.argv) < 3:
        print "Please input command like \"sudo python snake.py 1 4\""
        print "This means that level 1 and initial snake's length = 4"
        sys.exit(1)
GPIO.setmode(GPIO.BCM)
GPIO.setup(23,GPIO.IN,pull_up_down=GPIO.PUD_UP)
GPIO.setup(24,GPIO.IN,pull_up_down=GPIO.PUD_UP)
try:
GPIO.add_event_detect(23,GPIO.RISING,callback=dirTurnLeft,bouncetime=200)
GPIO.add_event_detect(24,GPIO.RISING,callback=dirTurnRight,bouncetime=200)
# Raspberry Pi pin configuration
RST = 17
# Note the following are only used with SPI:
DC = 27
SPI_PORT = 0
SPI_DEVICE = 0
# 128x64 display with hardware SPI
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# Initialize library
disp.begin()
# Clear display
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
except KeyboardInterrupt:
print "*** GPIO clean up ***"
GPIO.cleanup()
print "*** disp clear ***"
disp.clear()
disp.display()
# init snake body
initSnakeBody(sys.argv[2])
#for i in range(0,4):
# print "%i - %i"%(__snakeBody__.getitem(i).cur_x,__snakeBody__.getitem(i).cur_y)
__goFlag__ = 0
__eatFlag__ = 1
in_speed = sys.argv[1]
real_speed = 1.0/(string.atof(in_speed)*10.0)
    # non-blocking terminal input (cbreak mode), used to quit on ESC
old_settings = termios.tcgetattr(sys.stdin)
tty.setcbreak(sys.stdin.fileno())
while True:
if __goFlag__ == 1:
image = Image.new('1',(width,height))
draw = ImageDraw.Draw(image)
#font_zh = ImageFont.truetype('simsun.ttf',11)
font_zh = ImageFont.load_default()
font_en = ImageFont.truetype('robotastic.ttf',20)
draw.text((0,0),'http:ghost.micheal.cn',font=font_zh,fill=255)
draw.text((25,20),'GAME',font=font_en,fill=255)
draw.text((25,40),'OVER',font=font_en,fill=255)
disp.image(image)
disp.display()
break
sleep(real_speed)
        # drawing BEGIN
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)
# draw screen boder
draw.rectangle((0,20,127,63),outline=255,fill=0)
# draw apple
image = setApple(image)
cx = __snakeBody__.getitem(0).cur_x
cy = __snakeBody__.getitem(0).cur_y
if cx == __apple__.x and cy == __apple__.y:
__eatFlag__ = 1
__score__ += 10
last = __snakeBody__.getlength()
newNode = linkList.Node(__snakeBody__.getitem(last-1).cur_x,__snakeBody__.getitem(last-1).cur_y)
__snakeBody__.append(newNode)
# draw snake body
image = drawSnakeBody(image)
# info start
font_zh = ImageFont.truetype('simsun.ttf',14)
# output_text = 'cd=%i cx=%i cy=%i'%(__snakeDir__,cx,cy)
output_text = 'Score:%i'%__score__
draw.text((0,0),unicode(output_text ,'utf-8'),font=font_zh, fill=255)
# draw.text((0,10),'a.x=%i a.y=%i ef=%i'%(__apple__.x,__apple__.y,__eatFlag__),font=font_zh,fill = 255)
# info end
if __snakeDir__ == 3:
cx += 4
if cx > 128:
cx -= 4
if __snakeDir__ == 9:
cx -= 4
if cx < 0:
cx += 4
if __snakeDir__ == 6:
cy += 4
if cy > 64:
cy -= 4
if __snakeDir__ == 12:
cy -= 4
if cy < 0:
cy += 4
snakeMove(cx,cy)
#__snakeBody__.getitem(0).cur_x = cx
#__snakeBody__.getitem(0).cur_y = cy
# __snakeBody__.getitem(0).cur_dir = __snakeDir__
disp.image(image)
disp.display()
isGameOver()
        # drawing END
if select.select([sys.stdin],[],[],0) == ([sys.stdin],[],[]):
c = sys.stdin.read(1)
if c == '\x1b': break
sys.stdout.write(c)
sys.stdout.flush()
termios.tcsetattr(sys.stdin,termios.TCSADRAIN,old_settings)
if __goFlag__ == 1:
print "*** Game over. Your score: %i ***"%__score__
print "*** As always,have a nice day ***"
print "*** clean up ***"
raw_input("press enter to continue >>>")
#disp.clear()
#disp.display()
GPIO.cleanup()
| true |
497361e170cfac70ab5a07d105936f26ce4f4f48 | Python | shahineb/aerosols-vertical-profiles | /src/preprocessing/preprocess_modis.py | UTF-8 | 6,036 | 2.6875 | 3 | [] | no_license | import sys
import glob
import numpy as np
import pickle
import matplotlib.pyplot as plt
import xarray as xr
import dask
import netCDF4 as nc
from pprint import pprint
import pandas as pd
from functools import partial
import multiprocessing as mp
def standardise_coords_and_dims_modis(ds):
ds = ds.rename({'Cell_Along_Swath:mod04': 'x', 'Cell_Across_Swath:mod04': 'y',
'Latitude': 'lat', 'Longitude': 'lon', 'Scan_Start_Time': 'time'})
ds = ds.set_coords(['time', 'lon', 'lat'])
return ds
def preprocess_aod_data(ds,
aerosol_qa_cutoff = 1.0,
sza_cutoff = None,
verbose=False):
"""Takes modis level 2 dataset, filters using AOD 550 QA flag
### TO DO:
- check if low SZA should also be filtered
## NOTE: the problem with using cis here was that some data would be flattened without warning.
Args:
ds (xarray.Dataset): dataset from one modis l2 granule
aerosol_qa_cutoff (float): lower limit for Land_Ocean_Quality_Flag
sza_cutoff(float): upper limit for solar zenith angle
Returns:
ds (xr.Dataset)
"""
land_mask = (ds['Land_sea_Flag'] == 0)
# QA flags applied as recommended (Hsu et al., 2013; Levy et al., 2013; Sayer et al. 2013)
# if Dark Target algo used accept all points w flag > 2
aod_dt_qa_mask = (ds['AOD_550_Dark_Target_Deep_Blue_Combined_Algorithm_Flag'] == 0.) & (ds['AOD_550_Dark_Target_Deep_Blue_Combined_QA_Flag'] > 2.)
# if Deep Blue algo used accept all points w flag > 1
aod_db_qa_mask = (ds['AOD_550_Dark_Target_Deep_Blue_Combined_Algorithm_Flag'] == 1.) & (ds['AOD_550_Dark_Target_Deep_Blue_Combined_QA_Flag'] > 1.)
aod_dtdb_qa_mask = aod_dt_qa_mask | aod_db_qa_mask
aod_dtdb_mask = land_mask & aod_dtdb_qa_mask
# this product contains only AOD values for the filtered,
# quantitatively useful retrievals over dark targets
aod_lo_qa_mask = (ds['Land_Ocean_Quality_Flag'] >= aerosol_qa_cutoff)
aod_lo_mask = land_mask & aod_lo_qa_mask
if sza_cutoff:
sza_mask = (ds['Solar_Zenith'] <= sza_cutoff)
aod_dtdb_mask = aod_dtdb_mask & sza_mask
aod_lo_mask = aod_lo_mask & sza_mask
# ignore data from this file if no valid points
valid_points = np.count_nonzero(aod_lo_mask)
valid_points_dtdb = np.count_nonzero(aod_dtdb_mask)
if verbose:
print(f'The number of valid points in this dataset from AOD_Land_Ocean is {valid_points} and from DT_DB is {valid_points_dtdb}')
if min(valid_points, valid_points_dtdb) == 0:
return None
ds['AOD_550_Dark_Target_Deep_Blue_Combined'] = ds['AOD_550_Dark_Target_Deep_Blue_Combined'].where(aod_dtdb_mask)
ds['Optical_Depth_Land_And_Ocean'] = ds['Optical_Depth_Land_And_Ocean'].where(aod_lo_mask)
ds = standardise_coords_and_dims_modis(ds)
ds.attrs['description'] = f"""MODIS AOD 550 from Optical Depth Land Ocean over the oceans (Land_sea_flag = 0),
filtered using QA flag (see https://doi.org/10.1002/2014JD022453)
with lowest accepted value for Land_Ocean_Quality_Flag {aerosol_qa_cutoff}"""
return ds[['AOD_550_Dark_Target_Deep_Blue_Combined', 'AOD_550_Dark_Target_Deep_Blue_Combined_Algorithm_Flag',
'Optical_Depth_Land_And_Ocean', 'PSML003_Ocean', 'Solar_Zenith']]
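# Illustrative usage sketch: filter a single granule in isolation. The file name and
# the 75-degree zenith cutoff are placeholders; the open options mirror those used in
# read_and_process_hdfs below.
# with xr.open_dataset('MYD04_L2.A2017001.0000.hdf', decode_cf=True, decode_times=False,
#                      mask_and_scale=True) as granule:
#     filtered = preprocess_aod_data(granule, aerosol_qa_cutoff=1.0, sza_cutoff=75.0)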
def read_and_process_hdfs(date,
base_dir,
dim,
drop_variables=None,
preprocess_func=None,
verbose=False):
"""Creates processed daily dataset by applying the transform function on datasets from
each satellite swath and concatenating along the specified dimension.
Args:
date (str or timestamp): date of target hdf files
base_dir (str): base directory
dim (str): dimension to concatenate the datasets along
drop_variables (list): list of variables to drop when opening dataset
        preprocess_func (func): function used to preprocess each dataset before concatenation
Returns:
combined (xr.Dataset)
"""
def process_one_path(path):
open_kwargs = dict(decode_cf=True, decode_times=False,
drop_variables=drop_variables, mask_and_scale=True)
# use a context manager, to ensure the file gets closed after use
with xr.open_dataset(path, **open_kwargs) as ds:
# transform_func should do some sort of selection or aggregation
if preprocess_func is not None:
ds = preprocess_func(ds)
# load all data from the transformed dataset, to ensure we can
# use it after closing each original file
if ds is not None:
ds.load()
return ds
else:
return None
date = str(date).split(' ')[0].split('-')
year = date[0]
month = date[1]
day = date[2]
paths = sorted(glob.glob(base_dir + year + '/' + month + '/' + day + '/' + '*.hdf'))
if verbose:
print(f'Processing MODIS files for {day}/{month}/{year}')
datasets = [process_one_path(p) for p in paths]
datasets = [ds for ds in datasets if ds]
if len(datasets):
combined = xr.concat(datasets, dim)
combined.time.attrs['units'] = 'seconds since 1993-01-01'
combined = xr.decode_cf(combined)
combined.to_netcdf('/gws/nopw/j04/eo_shared_data_vol2/scratch/sofija/aodisaggregation/data/interim/' +
year + '/' + month + '/' + "Processed_MOD_AOD_MYD04_L2_" + day + '_' + month + '_'
+ year + ".nc")
return combined
else:
print(f'No valid data for {day}/{month}/{year}!')
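# Illustrative usage sketch: process one day of MODIS granules. The base_dir path is a
# placeholder, and concatenating along 'x' (the along-swath dimension) is an assumption.
# daily = read_and_process_hdfs('2017-01-01', base_dir='/path/to/MYD04_L2/', dim='x',
#                               preprocess_func=preprocess_aod_data, verbose=True)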
| true |
a967c5b2032e42dc9dc6a1eba651a7d40d9a7741 | Python | BeyondMark/ida-parse-trace-file-helper | /ida_parse_trace_line_helper/trace_line_parser.py | UTF-8 | 1,773 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from ida_parse_trace_line_helper.operand import Operand
from ida_parse_trace_line_helper.trace_line_info import TraceData
class TraceLine:
def __init__(self, trace_line: str):
self.__raw_trace_line = trace_line
self.__data = self.__parse_trace_line_to_list()
def get_changed_register(self) -> dict:
return self.__data.changed_registers
def get_source_operands(self):
return self.get_operands()[1:]
def get_dest_operand(self):
return self.get_operand(0)
def get_mnemonic(self) -> str:
return self.__data.mnemonic
def get_operands(self) -> list:
return [Operand(x) for x in self.__data.operands]
def get_operand(self, index):
return Operand(self.__data.operands[index]) if index < len(self.__data.operands) else None
def __parse_trace_line_to_list(self):
if self.__raw_trace_line:
temp_list = self.__raw_trace_line.split("\t")
if len(temp_list) >= 4 and self.__raw_trace_line.endswith("\n"):
return TraceData(
temp_list[0].strip(),
temp_list[1].split(":")[0].strip(),
temp_list[1].split(":")[1].strip() if len(temp_list[1].split(":")) >= 2 else "",
temp_list[2].strip(),
temp_list[3].strip()
)
return None
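    # Format assumption (inferred from the split logic above, not from any spec): a raw
    # trace line carries at least four tab-separated fields and ends with a newline, and
    # its second field may contain a ":" separating two sub-fields. Lines that do not
    # match this shape leave the parsed data as None.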
def __str__(self):
return "{module_name}:{address}\t{mnemonic} {operands}\t{changed_registers}".format(
module_name=self.__data.module_name,
address=self.__data.address,
mnemonic=self.__data.mnemonic,
operands=self.__data.operands,
changed_registers=self.__data.changed_registers
)
| true |