| text | meta |
|---|---|
import logging
import asyncio
import json
from typing import Dict, List, Any
from .socks_http import urlopen
CURRENCIES = [
"USD", "EUR", "GBP", "AUD",
"CAD", "JPY", "CNY"
] # type: List[str]
async def fetch_from_api(base_url: str, chain_1209k: str, loop=None) -> Dict[str, Any]:
fiats = ",".join(CURRENCIES) # type: str
url = base_url.format(chain_1209k.upper(), fiats) # type: str
logging.info("Fetching rates from URL: %s", url)
return json.loads(await urlopen(url, loop=loop))
async def fetch_exchange_rates(chain_1209k: str = "btc", loop=None) -> Dict[str, Dict]:
btcav_url = ("https://apiv2.bitcoinaverage.com/indices/" +
"global/ticker/short?crypto={}&fiat={}") # type: str
ccomp_url = ("https://min-api.cryptocompare.com/data/" +
"price?fsym={}&tsyms={}") # type: str
all_rates = {} # type: Dict[str, Dict[str, Any]]
btcav_json = await fetch_from_api(
btcav_url, chain_1209k, loop=loop) # type: Dict[str, Any]
btcav_rates = {} # type: Dict[str, float]
for key, value in btcav_json.items():
symbol = key.replace(chain_1209k.upper(), "") # type: str
if symbol in CURRENCIES:
btcav_rates[symbol] = value["last"]
all_rates["btcav"] = btcav_rates
ccomp_json = await fetch_from_api(
ccomp_url, chain_1209k, loop=loop) # type: Dict[str, Any]
all_rates["ccomp"] = ccomp_json
return all_rates
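# For reference, a sketch of the expected result shape (not verified against
# the live APIs): fetch_exchange_rates("btc") returns something like
#   {"btcav": {"USD": 6500.0, "EUR": 5600.0, ...},
#    "ccomp": {"USD": 6510.0, "EUR": 5610.0, ...}}
# where "btcav" maps each fiat symbol to the ticker's "last" price and
# "ccomp" is the raw CryptoCompare price response.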
def main():
loop = asyncio.get_event_loop() # type: asyncio.AbstractEventLoop
result = loop.run_until_complete(
        fetch_exchange_rates()) # type: Dict[str, Dict]
print(result)
loop.close()
if __name__ == "__main__":
main()
| {
"content_hash": "f707a676f1af914fc8f6c939f38ae8d7",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 87,
"avg_line_length": 32.24528301886792,
"alnum_prop": 0.6079578700994733,
"repo_name": "metamarcdw/nowallet",
"id": "31397dcd3f2c292958c4913823910daef8aa142e",
"size": "1709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nowallet/exchange_rate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "905"
},
{
"name": "Python",
"bytes": "104686"
}
],
"symlink_target": ""
} |
"""A module that provides support for the Covariance Matrix Adaptation
Evolution Strategy.
"""
import numpy
import copy
from math import sqrt, log, exp
import tools
class Strategy(object):
"""
A strategy that will keep track of the basic parameters of the CMA-ES
algorithm.
:param centroid: An iterable object that indicates where to start the
evolution.
:param sigma: The initial standard deviation of the distribution.
    :param parameter: One or more parameters to pass to the strategy as
described in the following table, optional.
+----------------+---------------------------+----------------------------+
| Parameter | Default | Details |
+================+===========================+============================+
| ``lambda_`` | ``int(4 + 3 * log(N))`` | Number of children to |
| | | produce at each generation,|
| | | ``N`` is the individual's |
| | | size (integer). |
+----------------+---------------------------+----------------------------+
| ``mu`` | ``int(lambda_ / 2)`` | The number of parents to |
| | | keep from the |
| | | lambda children (integer). |
+----------------+---------------------------+----------------------------+
| ``cmatrix`` | ``identity(N)`` | The initial covariance |
| | | matrix of the distribution |
| | | that will be sampled. |
+----------------+---------------------------+----------------------------+
| ``weights`` | ``"superlinear"`` | Decrease speed, can be |
| | | ``"superlinear"``, |
| | | ``"linear"`` or |
| | | ``"equal"``. |
+----------------+---------------------------+----------------------------+
| ``cs`` | ``(mueff + 2) / | Cumulation constant for |
| | (N + mueff + 3)`` | step-size. |
+----------------+---------------------------+----------------------------+
| ``damps`` | ``1 + 2 * max(0, sqrt(( | Damping for step-size. |
| | mueff - 1) / (N + 1)) - 1)| |
| | + cs`` | |
+----------------+---------------------------+----------------------------+
| ``ccum`` | ``4 / (N + 4)`` | Cumulation constant for |
| | | covariance matrix. |
+----------------+---------------------------+----------------------------+
| ``ccov1`` | ``2 / ((N + 1.3)^2 + | Learning rate for rank-one |
| | mueff)`` | update. |
+----------------+---------------------------+----------------------------+
| ``ccovmu`` | ``2 * (mueff - 2 + 1 / | Learning rate for rank-mu |
| | mueff) / ((N + 2)^2 + | update. |
| | mueff)`` | |
+----------------+---------------------------+----------------------------+
"""
def __init__(self, centroid, sigma, **kargs):
self.params = kargs
# Create a centroid as a numpy array
self.centroid = numpy.array(centroid)
self.dim = len(self.centroid)
self.sigma = sigma
self.pc = numpy.zeros(self.dim)
self.ps = numpy.zeros(self.dim)
self.chiN = sqrt(self.dim) * (1 - 1. / (4. * self.dim) +
1. / (21. * self.dim ** 2))
self.C = self.params.get("cmatrix", numpy.identity(self.dim))
self.diagD, self.B = numpy.linalg.eigh(self.C)
indx = numpy.argsort(self.diagD)
self.diagD = self.diagD[indx] ** 0.5
self.B = self.B[:, indx]
self.BD = self.B * self.diagD
self.cond = self.diagD[indx[-1]] / self.diagD[indx[0]]
self.lambda_ = self.params.get("lambda_", int(4 + 3 * log(self.dim)))
self.update_count = 0
self.computeParams(self.params)
def generate(self, ind_init):
"""Generate a population of :math:`\lambda` individuals of type
*ind_init* from the current strategy.
:param ind_init: A function object that is able to initialize an
individual from a list.
:returns: A list of individuals.
"""
arz = numpy.random.standard_normal((self.lambda_, self.dim))
arz = self.centroid + self.sigma * numpy.dot(arz, self.BD.T)
return map(ind_init, arz)
def update(self, population):
"""Update the current covariance matrix strategy from the
*population*.
:param population: A list of individuals from which to update the
parameters.
"""
population.sort(key=lambda ind: ind.fitness, reverse=True)
old_centroid = self.centroid
self.centroid = numpy.dot(self.weights, population[0:self.mu])
c_diff = self.centroid - old_centroid
# Cumulation : update evolution path
self.ps = (1 - self.cs) * self.ps \
+ sqrt(self.cs * (2 - self.cs) * self.mueff) / self.sigma \
* numpy.dot(self.B, (1. / self.diagD)
* numpy.dot(self.B.T, c_diff))
hsig = float((numpy.linalg.norm(self.ps) /
sqrt(1. - (1. - self.cs) ** (2. * (self.update_count + 1.))) / self.chiN
< (1.4 + 2. / (self.dim + 1.))))
self.update_count += 1
self.pc = (1 - self.cc) * self.pc + hsig \
* sqrt(self.cc * (2 - self.cc) * self.mueff) / self.sigma \
* c_diff
# Update covariance matrix
artmp = population[0:self.mu] - old_centroid
self.C = (1 - self.ccov1 - self.ccovmu + (1 - hsig)
* self.ccov1 * self.cc * (2 - self.cc)) * self.C \
+ self.ccov1 * numpy.outer(self.pc, self.pc) \
+ self.ccovmu * numpy.dot((self.weights * artmp.T), artmp) \
/ self.sigma ** 2
self.sigma *= numpy.exp((numpy.linalg.norm(self.ps) / self.chiN - 1.)
* self.cs / self.damps)
self.diagD, self.B = numpy.linalg.eigh(self.C)
indx = numpy.argsort(self.diagD)
self.cond = self.diagD[indx[-1]] / self.diagD[indx[0]]
self.diagD = self.diagD[indx] ** 0.5
self.B = self.B[:, indx]
self.BD = self.B * self.diagD
def computeParams(self, params):
"""Computes the parameters depending on :math:`\lambda`. It needs to
be called again if :math:`\lambda` changes during evolution.
:param params: A dictionary of the manually set parameters.
"""
self.mu = params.get("mu", int(self.lambda_ / 2))
rweights = params.get("weights", "superlinear")
if rweights == "superlinear":
self.weights = log(self.mu + 0.5) - \
numpy.log(numpy.arange(1, self.mu + 1))
elif rweights == "linear":
self.weights = self.mu + 0.5 - numpy.arange(1, self.mu + 1)
elif rweights == "equal":
self.weights = numpy.ones(self.mu)
else:
raise RuntimeError("Unknown weights : %s" % rweights)
self.weights /= sum(self.weights)
self.mueff = 1. / sum(self.weights ** 2)
self.cc = params.get("ccum", 4. / (self.dim + 4.))
self.cs = params.get("cs", (self.mueff + 2.) /
(self.dim + self.mueff + 3.))
self.ccov1 = params.get("ccov1", 2. / ((self.dim + 1.3) ** 2 +
self.mueff))
self.ccovmu = params.get("ccovmu", 2. * (self.mueff - 2. +
1. / self.mueff) /
((self.dim + 2.) ** 2 + self.mueff))
self.ccovmu = min(1 - self.ccov1, self.ccovmu)
self.damps = 1. + 2. * max(0, sqrt((self.mueff - 1.) /
(self.dim + 1.)) - 1.) + self.cs
self.damps = params.get("damps", self.damps)
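    # A minimal usage sketch (illustrative only, not part of the original
    # module): the strategy is driven by an external generate-update loop,
    # where *ind_init* builds individuals that carry a fitness attribute
    # (e.g. a class created with DEAP's creator module):
    #
    #   strategy = Strategy(centroid=[5.0] * 10, sigma=0.5, lambda_=20)
    #   for generation in range(250):
    #       population = strategy.generate(ind_init)
    #       # ... evaluate each individual and assign its fitness here ...
    #       strategy.update(population)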
class StrategyOnePlusLambda(object):
"""
    A CMA-ES strategy that uses the :math:`1 + \lambda` paradigm.
:param parent: An iterable object that indicates where to start the
evolution. The parent requires a fitness attribute.
:param sigma: The initial standard deviation of the distribution.
:param lambda_: Number of offspring to produce from the parent.
(optional, defaults to 1)
    :param parameter: One or more parameters to pass to the strategy as
described in the following table. (optional)
Other parameters can be provided as described in the next table
+----------------+---------------------------+----------------------------+
| Parameter | Default | Details |
+================+===========================+============================+
| ``d`` | ``1.0 + N / (2.0 * | Damping for step-size. |
| | lambda_)`` | |
+----------------+---------------------------+----------------------------+
    | ``ptarg``      | ``1.0 / (5 + sqrt(lambda_)| Target success rate.       |
| | / 2.0)`` | |
+----------------+---------------------------+----------------------------+
| ``cp`` | ``ptarg * lambda_ / (2.0 +| Step size learning rate. |
| | ptarg * lambda_)`` | |
+----------------+---------------------------+----------------------------+
| ``cc`` | ``2.0 / (N + 2.0)`` | Cumulation time horizon. |
+----------------+---------------------------+----------------------------+
| ``ccov`` | ``2.0 / (N**2 + 6.0)`` | Covariance matrix learning |
| | | rate. |
+----------------+---------------------------+----------------------------+
| ``pthresh`` | ``0.44`` | Threshold success rate. |
+----------------+---------------------------+----------------------------+
"""
def __init__(self, parent, sigma, **kargs):
self.parent = parent
self.sigma = sigma
self.dim = len(self.parent)
self.C = numpy.identity(self.dim)
self.A = numpy.identity(self.dim)
self.pc = numpy.zeros(self.dim)
self.computeParams(kargs)
self.psucc = self.ptarg
def computeParams(self, params):
"""Computes the parameters depending on :math:`\lambda`. It needs to
be called again if :math:`\lambda` changes during evolution.
:param params: A dictionary of the manually set parameters.
"""
# Selection :
self.lambda_ = params.get("lambda_", 1)
# Step size control :
self.d = params.get("d", 1.0 + self.dim / (2.0 * self.lambda_))
self.ptarg = params.get("ptarg", 1.0 / (5 + sqrt(self.lambda_) / 2.0))
self.cp = params.get("cp", self.ptarg * self.lambda_ / (2 + self.ptarg * self.lambda_))
# Covariance matrix adaptation
self.cc = params.get("cc", 2.0 / (self.dim + 2.0))
self.ccov = params.get("ccov", 2.0 / (self.dim ** 2 + 6.0))
self.pthresh = params.get("pthresh", 0.44)
def generate(self, ind_init):
"""Generate a population of :math:`\lambda` individuals of type
*ind_init* from the current strategy.
:param ind_init: A function object that is able to initialize an
individual from a list.
:returns: A list of individuals.
"""
# self.y = numpy.dot(self.A, numpy.random.standard_normal(self.dim))
arz = numpy.random.standard_normal((self.lambda_, self.dim))
arz = self.parent + self.sigma * numpy.dot(arz, self.A.T)
return map(ind_init, arz)
def update(self, population):
"""Update the current covariance matrix strategy from the
*population*.
:param population: A list of individuals from which to update the
parameters.
"""
population.sort(key=lambda ind: ind.fitness, reverse=True)
lambda_succ = sum(self.parent.fitness <= ind.fitness for ind in population)
p_succ = float(lambda_succ) / self.lambda_
self.psucc = (1 - self.cp) * self.psucc + self.cp * p_succ
if self.parent.fitness <= population[0].fitness:
x_step = (population[0] - numpy.array(self.parent)) / self.sigma
self.parent = copy.deepcopy(population[0])
if self.psucc < self.pthresh:
self.pc = (1 - self.cc) * self.pc + sqrt(self.cc * (2 - self.cc)) * x_step
self.C = (1 - self.ccov) * self.C + self.ccov * numpy.outer(self.pc, self.pc)
else:
self.pc = (1 - self.cc) * self.pc
self.C = (1 - self.ccov) * self.C + self.ccov * (numpy.outer(self.pc, self.pc) + self.cc * (2 - self.cc) * self.C)
self.sigma = self.sigma * exp(1.0 / self.d * (self.psucc - self.ptarg) / (1.0 - self.ptarg))
# We use Cholesky since for now we have no use of eigen decomposition
# Basically, Cholesky returns a matrix A as C = A*A.T
# Eigen decomposition returns two matrix B and D^2 as C = B*D^2*B.T = B*D*D*B.T
# So A == B*D
# To compute the new individual we need to multiply each vector z by A
# as y = centroid + sigma * A*z
# So the Cholesky is more straightforward as we don't need to compute
# the squareroot of D^2, and multiply B and D in order to get A, we directly get A.
# This can't be done (without cost) with the standard CMA-ES as the eigen decomposition is used
# to compute covariance matrix inverse in the step-size evolutionary path computation.
self.A = numpy.linalg.cholesky(self.C)
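    # A small numpy check of the relation used above (sketch, assuming a
    # symmetric positive-definite C):
    #   A = numpy.linalg.cholesky(C)           # lower-triangular factor
    #   numpy.allclose(numpy.dot(A, A.T), C)   # -> True, i.e. C = A*A.T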
class StrategyMultiObjective(object):
"""Multiobjective CMA-ES strategy based on the paper [Voss2010]_. It
is used similarly as the standard CMA-ES strategy with a generate-update
scheme.
    :param population: An initial population of individuals.
:param sigma: The initial step size of the complete system.
:param mu: The number of parents to use in the evolution. When not
provided it defaults to the length of *population*. (optional)
:param lambda_: The number of offspring to produce at each generation.
(optional, defaults to 1)
:param indicator: The indicator function to use. (optional, default to
:func:`~deap.tools.hypervolume`)
Other parameters can be provided as described in the next table
+----------------+---------------------------+----------------------------+
| Parameter | Default | Details |
+================+===========================+============================+
| ``d`` | ``1.0 + N / 2.0`` | Damping for step-size. |
+----------------+---------------------------+----------------------------+
    | ``ptarg``      | ``1.0 / (5 + 1.0 / 2.0)`` | Target success rate.       |
+----------------+---------------------------+----------------------------+
| ``cp`` | ``ptarg / (2.0 + ptarg)`` | Step size learning rate. |
+----------------+---------------------------+----------------------------+
| ``cc`` | ``2.0 / (N + 2.0)`` | Cumulation time horizon. |
+----------------+---------------------------+----------------------------+
| ``ccov`` | ``2.0 / (N**2 + 6.0)`` | Covariance matrix learning |
| | | rate. |
+----------------+---------------------------+----------------------------+
| ``pthresh`` | ``0.44`` | Threshold success rate. |
+----------------+---------------------------+----------------------------+
.. [Voss2010] Voss, Hansen, Igel, "Improved Step Size Adaptation
for the MO-CMA-ES", 2010.
"""
def __init__(self, population, sigma, **params):
self.parents = population
self.dim = len(self.parents[0])
# Selection
self.mu = params.get("mu", len(self.parents))
self.lambda_ = params.get("lambda_", 1)
# Step size control
self.d = params.get("d", 1.0 + self.dim / 2.0)
self.ptarg = params.get("ptarg", 1.0 / (5.0 + 0.5))
self.cp = params.get("cp", self.ptarg / (2.0 + self.ptarg))
# Covariance matrix adaptation
self.cc = params.get("cc", 2.0 / (self.dim + 2.0))
self.ccov = params.get("ccov", 2.0 / (self.dim ** 2 + 6.0))
self.pthresh = params.get("pthresh", 0.44)
# Internal parameters associated to the mu parent
self.sigmas = [sigma] * len(population)
# Lower Cholesky matrix (Sampling matrix)
self.A = [numpy.identity(self.dim) for _ in range(len(population))]
# Inverse Cholesky matrix (Used in the update of A)
self.invCholesky = [numpy.identity(self.dim) for _ in range(len(population))]
self.pc = [numpy.zeros(self.dim) for _ in range(len(population))]
self.psucc = [self.ptarg] * len(population)
self.indicator = params.get("indicator", tools.hypervolume)
def generate(self, ind_init):
"""Generate a population of :math:`\lambda` individuals of type
*ind_init* from the current strategy.
:param ind_init: A function object that is able to initialize an
individual from a list.
:returns: A list of individuals with a private attribute :attr:`_ps`.
                  This attribute is essential to the update function: it
                  records whether the individual is an offspring and the
                  index of its parent.
"""
arz = numpy.random.randn(self.lambda_, self.dim)
individuals = list()
# Make sure every parent has a parent tag and index
for i, p in enumerate(self.parents):
p._ps = "p", i
# Each parent produce an offspring
if self.lambda_ == self.mu:
for i in range(self.lambda_):
# print "Z", list(arz[i])
individuals.append(ind_init(self.parents[i] + self.sigmas[i] * numpy.dot(self.A[i], arz[i])))
individuals[-1]._ps = "o", i
# Parents producing an offspring are chosen at random from the first front
else:
ndom = tools.sortLogNondominated(self.parents, len(self.parents), first_front_only=True)
for i in range(self.lambda_):
j = numpy.random.randint(0, len(ndom))
_, p_idx = ndom[j]._ps
individuals.append(ind_init(self.parents[p_idx] + self.sigmas[p_idx] * numpy.dot(self.A[p_idx], arz[i])))
individuals[-1]._ps = "o", p_idx
return individuals
def _select(self, candidates):
if len(candidates) <= self.mu:
return candidates, []
pareto_fronts = tools.sortLogNondominated(candidates, len(candidates))
chosen = list()
mid_front = None
not_chosen = list()
        # Fill the next population (chosen) with the fronts until there is not enough space
        # When an entire front does not fit in the space left we rely on the hypervolume
        # for this front
        # The remaining fronts are explicitly not chosen
full = False
for front in pareto_fronts:
if len(chosen) + len(front) <= self.mu and not full:
chosen += front
elif mid_front is None and len(chosen) < self.mu:
mid_front = front
# With this front, we selected enough individuals
full = True
else:
not_chosen += front
# Separate the mid front to accept only k individuals
k = self.mu - len(chosen)
if k > 0:
# reference point is chosen in the complete population
# as the worst in each dimension +1
ref = numpy.array([ind.fitness.wvalues for ind in candidates]) * -1
ref = numpy.max(ref, axis=0) + 1
for i in range(len(mid_front) - k):
idx = self.indicator(mid_front, ref=ref)
not_chosen.append(mid_front.pop(idx))
chosen += mid_front
return chosen, not_chosen
def _rankOneUpdate(self, invCholesky, A, alpha, beta, v):
w = numpy.dot(invCholesky, v)
# Under this threshold, the update is mostly noise
if w.max() > 1e-20:
w_inv = numpy.dot(w, invCholesky)
norm_w2 = numpy.sum(w ** 2)
a = sqrt(alpha)
root = numpy.sqrt(1 + beta / alpha * norm_w2)
b = a / norm_w2 * (root - 1)
A = a * A + b * numpy.outer(v, w)
invCholesky = 1.0 / a * invCholesky - b / (a ** 2 + a * b * norm_w2) * numpy.outer(w, w_inv)
return invCholesky, A
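    # Note (added for clarity): with w = invCholesky * v, the update above
    # keeps A and invCholesky consistent so that the new A satisfies
    # A_new * A_new.T = alpha * A * A.T + beta * v * v.T, i.e. a Cholesky
    # rank-one update of the covariance factor without rebuilding C.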
def update(self, population):
"""Update the current covariance matrix strategies from the
*population*.
:param population: A list of individuals from which to update the
parameters.
"""
chosen, not_chosen = self._select(population + self.parents)
cp, cc, ccov = self.cp, self.cc, self.ccov
d, ptarg, pthresh = self.d, self.ptarg, self.pthresh
# Make copies for chosen offspring only
last_steps = [self.sigmas[ind._ps[1]] if ind._ps[0] == "o" else None for ind in chosen]
sigmas = [self.sigmas[ind._ps[1]] if ind._ps[0] == "o" else None for ind in chosen]
invCholesky = [self.invCholesky[ind._ps[1]].copy() if ind._ps[0] == "o" else None for ind in chosen]
A = [self.A[ind._ps[1]].copy() if ind._ps[0] == "o" else None for ind in chosen]
pc = [self.pc[ind._ps[1]].copy() if ind._ps[0] == "o" else None for ind in chosen]
psucc = [self.psucc[ind._ps[1]] if ind._ps[0] == "o" else None for ind in chosen]
# Update the internal parameters for successful offspring
for i, ind in enumerate(chosen):
t, p_idx = ind._ps
# Only the offspring update the parameter set
if t == "o":
# Update (Success = 1 since it is chosen)
psucc[i] = (1.0 - cp) * psucc[i] + cp
sigmas[i] = sigmas[i] * exp((psucc[i] - ptarg) / (d * (1.0 - ptarg)))
if psucc[i] < pthresh:
xp = numpy.array(ind)
x = numpy.array(self.parents[p_idx])
pc[i] = (1.0 - cc) * pc[i] + sqrt(cc * (2.0 - cc)) * (xp - x) / last_steps[i]
invCholesky[i], A[i] = self._rankOneUpdate(invCholesky[i], A[i], 1 - ccov, ccov, pc[i])
else:
pc[i] = (1.0 - cc) * pc[i]
pc_weight = cc * (2.0 - cc)
invCholesky[i], A[i] = self._rankOneUpdate(invCholesky[i], A[i], 1 - ccov + pc_weight, ccov, pc[i])
self.psucc[p_idx] = (1.0 - cp) * self.psucc[p_idx] + cp
self.sigmas[p_idx] = self.sigmas[p_idx] * exp((self.psucc[p_idx] - ptarg) / (d * (1.0 - ptarg)))
# It is unnecessary to update the entire parameter set for not chosen individuals
# Their parameters will not make it to the next generation
for ind in not_chosen:
t, p_idx = ind._ps
# Only the offspring update the parameter set
if t == "o":
self.psucc[p_idx] = (1.0 - cp) * self.psucc[p_idx]
self.sigmas[p_idx] = self.sigmas[p_idx] * exp((self.psucc[p_idx] - ptarg) / (d * (1.0 - ptarg)))
# Make a copy of the internal parameters
# The parameter is in the temporary variable for offspring and in the original one for parents
self.parents = chosen
self.sigmas = [sigmas[i] if ind._ps[0] == "o" else self.sigmas[ind._ps[1]] for i, ind in enumerate(chosen)]
self.invCholesky = [invCholesky[i] if ind._ps[0] == "o" else self.invCholesky[ind._ps[1]] for i, ind in enumerate(chosen)]
self.A = [A[i] if ind._ps[0] == "o" else self.A[ind._ps[1]] for i, ind in enumerate(chosen)]
self.pc = [pc[i] if ind._ps[0] == "o" else self.pc[ind._ps[1]] for i, ind in enumerate(chosen)]
self.psucc = [psucc[i] if ind._ps[0] == "o" else self.psucc[ind._ps[1]] for i, ind in enumerate(chosen)]
| {
"content_hash": "662173885c235663b92c5c9ffb47490a",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 130,
"avg_line_length": 48.4299424184261,
"alnum_prop": 0.47217818642993026,
"repo_name": "GrimRanger/GeneticAlgorithm",
"id": "5dd7b43483f1a1d27308662094d12a3cc37eda94",
"size": "26140",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "helps/deap/deap-master/deap/cma.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "48558"
},
{
"name": "C++",
"bytes": "24037"
},
{
"name": "Java",
"bytes": "15591"
},
{
"name": "Makefile",
"bytes": "3143"
},
{
"name": "Python",
"bytes": "622361"
},
{
"name": "R",
"bytes": "1032"
}
],
"symlink_target": ""
} |
matplotlib_installed = True
try:
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
except ImportError:
matplotlib_installed = False
class Plot(object):
def __init__(self, result_set):
if result_set.finalised:
self.result_set = result_set
else:
raise AttributeError("Result Set is not finalised")
self.nplayers = self.result_set.nplayers
self.matplotlib_installed = matplotlib_installed
def boxplot_dataset(self):
return [self.result_set.scores[ir] for ir in self.result_set.ranking]
def payoff_dataset(self):
return [[self.result_set.payoff_matrix[r1][r2]
for r2 in self.result_set.ranking]
for r1 in self.result_set.ranking]
def boxplot_xticks_locations(self):
return range(1, len(self.result_set.ranked_names) + 2)
def boxplot_xticks_labels(self):
return [str(n) for n in self.result_set.ranked_names]
def boxplot_title(self):
return ("Mean score per stage game over {} "
"rounds repeated {} times ({} strategies)").format(
self.result_set.turns,
self.result_set.repetitions,
len(self.result_set.ranking))
def boxplot(self):
if not self.matplotlib_installed:
return None
figure = plt.figure()
plt.boxplot(self.boxplot_dataset())
plt.xticks(
self.boxplot_xticks_locations(),
self.boxplot_xticks_labels(),
rotation=90)
plt.tick_params(axis='both', which='both', labelsize=8)
plt.title(self.boxplot_title())
return figure
def payoff(self):
if not self.matplotlib_installed:
return None
figure, ax = plt.subplots()
mat = ax.matshow(self.payoff_dataset())
plt.xticks(range(self.result_set.nplayers))
plt.yticks(range(self.result_set.nplayers))
ax.set_xticklabels(self.result_set.ranked_names, rotation=90)
ax.set_yticklabels(self.result_set.ranked_names)
plt.tick_params(axis='both', which='both', labelsize=8)
figure.colorbar(mat)
return figure
def stackplot(self, populations):
if not self.matplotlib_installed:
return None
figure, ax = plt.subplots()
turns = range(len(populations))
pops = [[populations[iturn][ir] for iturn in turns] for ir in self.result_set.ranking]
ax.stackplot(turns, *pops)
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
ax.yaxis.labelpad = 25.0
plt.ylim([0.0, 1.0])
plt.ylabel('Relative population size')
plt.xlabel('Turn')
plt.title("Strategy population dynamics based on average payoffs")
ax2 = ax.twinx()
trans = transforms.blended_transform_factory(ax.transAxes, ax.transData)
ticks = []
for i, n in enumerate(self.result_set.ranked_names):
x = -0.02
y = (i + 0.5) * 1.0 / self.nplayers
ax.annotate(n, xy=(x, y), xycoords=trans, clip_on=False, va='center', ha='right', fontsize=8)
ticks.append(y)
ax.set_yticks(ticks)
ax.tick_params(direction='out')
ax.set_yticklabels([])
return figure
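    # Minimal usage sketch (illustrative only): given a finalised result set
    # from a tournament,
    #
    #   plot = Plot(result_set)
    #   figure = plot.boxplot()
    #   if figure is not None:  # None is returned when matplotlib is missing
    #       figure.savefig("boxplot.png")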
| {
"content_hash": "ae321beea762efb23820bb91208891bf",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 105,
"avg_line_length": 32.84158415841584,
"alnum_prop": 0.6059692493216762,
"repo_name": "drvinceknight/Axelrod",
"id": "b4ef0b96dc2fd88029e2c565474f69e5b5a01655",
"size": "3317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "axelrod/plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171077"
}
],
"symlink_target": ""
} |
"""Voluptuous schemas for the KNX integration."""
import voluptuous as vol
from xknx.devices.climate import SetpointShiftMode
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_TYPE,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_STATE_ADDRESS,
CONF_SYNC_STATE,
OPERATION_MODES,
PRESET_MODES,
ColorTempModes,
)
class ConnectionSchema:
"""Voluptuous schema for KNX connection."""
CONF_KNX_LOCAL_IP = "local_ip"
TUNNELING_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_KNX_LOCAL_IP): cv.string,
vol.Optional(CONF_PORT): cv.port,
}
)
ROUTING_SCHEMA = vol.Schema({vol.Optional(CONF_KNX_LOCAL_IP): cv.string})
class CoverSchema:
"""Voluptuous schema for KNX covers."""
CONF_MOVE_LONG_ADDRESS = "move_long_address"
CONF_MOVE_SHORT_ADDRESS = "move_short_address"
CONF_STOP_ADDRESS = "stop_address"
CONF_POSITION_ADDRESS = "position_address"
CONF_POSITION_STATE_ADDRESS = "position_state_address"
CONF_ANGLE_ADDRESS = "angle_address"
CONF_ANGLE_STATE_ADDRESS = "angle_state_address"
CONF_TRAVELLING_TIME_DOWN = "travelling_time_down"
CONF_TRAVELLING_TIME_UP = "travelling_time_up"
CONF_INVERT_POSITION = "invert_position"
CONF_INVERT_ANGLE = "invert_angle"
DEFAULT_TRAVEL_TIME = 25
DEFAULT_NAME = "KNX Cover"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MOVE_LONG_ADDRESS): cv.string,
vol.Optional(CONF_MOVE_SHORT_ADDRESS): cv.string,
vol.Optional(CONF_STOP_ADDRESS): cv.string,
vol.Optional(CONF_POSITION_ADDRESS): cv.string,
vol.Optional(CONF_POSITION_STATE_ADDRESS): cv.string,
vol.Optional(CONF_ANGLE_ADDRESS): cv.string,
vol.Optional(CONF_ANGLE_STATE_ADDRESS): cv.string,
vol.Optional(
CONF_TRAVELLING_TIME_DOWN, default=DEFAULT_TRAVEL_TIME
): cv.positive_int,
vol.Optional(
CONF_TRAVELLING_TIME_UP, default=DEFAULT_TRAVEL_TIME
): cv.positive_int,
vol.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
vol.Optional(CONF_INVERT_ANGLE, default=False): cv.boolean,
}
)
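    # Validation sketch (group address values are illustrative): passing a
    # partial config dict through the schema fills in the declared defaults,
    # e.g.
    #   CoverSchema.SCHEMA({"move_long_address": "1/0/1"})
    #   # -> {"name": "KNX Cover", "move_long_address": "1/0/1",
    #   #     "travelling_time_down": 25, "travelling_time_up": 25,
    #   #     "invert_position": False, "invert_angle": False}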
class BinarySensorSchema:
"""Voluptuous schema for KNX binary sensors."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_SYNC_STATE = CONF_SYNC_STATE
CONF_IGNORE_INTERNAL_STATE = "ignore_internal_state"
CONF_AUTOMATION = "automation"
CONF_HOOK = "hook"
CONF_DEFAULT_HOOK = "on"
CONF_COUNTER = "counter"
CONF_DEFAULT_COUNTER = 1
CONF_ACTION = "action"
CONF_RESET_AFTER = "reset_after"
DEFAULT_NAME = "KNX Binary Sensor"
AUTOMATION_SCHEMA = vol.Schema(
{
vol.Optional(CONF_HOOK, default=CONF_DEFAULT_HOOK): cv.string,
vol.Optional(CONF_COUNTER, default=CONF_DEFAULT_COUNTER): cv.port,
vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
}
)
AUTOMATIONS_SCHEMA = vol.All(cv.ensure_list, [AUTOMATION_SCHEMA])
SCHEMA = vol.All(
cv.deprecated("significant_bit"),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): vol.Any(
vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
cv.boolean,
cv.string,
),
vol.Optional(CONF_IGNORE_INTERNAL_STATE, default=False): cv.boolean,
vol.Required(CONF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_DEVICE_CLASS): cv.string,
vol.Optional(CONF_RESET_AFTER): cv.positive_int,
vol.Optional(CONF_AUTOMATION): AUTOMATIONS_SCHEMA,
}
),
)
class LightSchema:
"""Voluptuous schema for KNX lights."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_BRIGHTNESS_ADDRESS = "brightness_address"
CONF_BRIGHTNESS_STATE_ADDRESS = "brightness_state_address"
CONF_COLOR_ADDRESS = "color_address"
CONF_COLOR_STATE_ADDRESS = "color_state_address"
CONF_COLOR_TEMP_ADDRESS = "color_temperature_address"
CONF_COLOR_TEMP_STATE_ADDRESS = "color_temperature_state_address"
CONF_COLOR_TEMP_MODE = "color_temperature_mode"
CONF_RGBW_ADDRESS = "rgbw_address"
CONF_RGBW_STATE_ADDRESS = "rgbw_state_address"
CONF_MIN_KELVIN = "min_kelvin"
CONF_MAX_KELVIN = "max_kelvin"
DEFAULT_NAME = "KNX Light"
DEFAULT_COLOR_TEMP_MODE = "absolute"
DEFAULT_MIN_KELVIN = 2700 # 370 mireds
DEFAULT_MAX_KELVIN = 6000 # 166 mireds
SCHEMA = vol.Schema(
{
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_BRIGHTNESS_ADDRESS): cv.string,
vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_STATE_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_TEMP_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_TEMP_STATE_ADDRESS): cv.string,
vol.Optional(
CONF_COLOR_TEMP_MODE, default=DEFAULT_COLOR_TEMP_MODE
): cv.enum(ColorTempModes),
vol.Optional(CONF_RGBW_ADDRESS): cv.string,
vol.Optional(CONF_RGBW_STATE_ADDRESS): cv.string,
vol.Optional(CONF_MIN_KELVIN, default=DEFAULT_MIN_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_MAX_KELVIN, default=DEFAULT_MAX_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
)
class ClimateSchema:
"""Voluptuous schema for KNX climate devices."""
CONF_SETPOINT_SHIFT_ADDRESS = "setpoint_shift_address"
CONF_SETPOINT_SHIFT_STATE_ADDRESS = "setpoint_shift_state_address"
CONF_SETPOINT_SHIFT_MODE = "setpoint_shift_mode"
CONF_SETPOINT_SHIFT_MAX = "setpoint_shift_max"
CONF_SETPOINT_SHIFT_MIN = "setpoint_shift_min"
CONF_TEMPERATURE_ADDRESS = "temperature_address"
CONF_TEMPERATURE_STEP = "temperature_step"
CONF_TARGET_TEMPERATURE_ADDRESS = "target_temperature_address"
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = "target_temperature_state_address"
CONF_OPERATION_MODE_ADDRESS = "operation_mode_address"
CONF_OPERATION_MODE_STATE_ADDRESS = "operation_mode_state_address"
CONF_CONTROLLER_STATUS_ADDRESS = "controller_status_address"
CONF_CONTROLLER_STATUS_STATE_ADDRESS = "controller_status_state_address"
CONF_CONTROLLER_MODE_ADDRESS = "controller_mode_address"
CONF_CONTROLLER_MODE_STATE_ADDRESS = "controller_mode_state_address"
CONF_HEAT_COOL_ADDRESS = "heat_cool_address"
CONF_HEAT_COOL_STATE_ADDRESS = "heat_cool_state_address"
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = (
"operation_mode_frost_protection_address"
)
CONF_OPERATION_MODE_NIGHT_ADDRESS = "operation_mode_night_address"
CONF_OPERATION_MODE_COMFORT_ADDRESS = "operation_mode_comfort_address"
CONF_OPERATION_MODE_STANDBY_ADDRESS = "operation_mode_standby_address"
CONF_OPERATION_MODES = "operation_modes"
CONF_ON_OFF_ADDRESS = "on_off_address"
CONF_ON_OFF_STATE_ADDRESS = "on_off_state_address"
CONF_ON_OFF_INVERT = "on_off_invert"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
DEFAULT_NAME = "KNX Climate"
DEFAULT_SETPOINT_SHIFT_MODE = "DPT6010"
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEFAULT_TEMPERATURE_STEP = 0.1
DEFAULT_ON_OFF_INVERT = False
SCHEMA = vol.All(
cv.deprecated("setpoint_shift_step", replacement_key=CONF_TEMPERATURE_STEP),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_SETPOINT_SHIFT_MODE, default=DEFAULT_SETPOINT_SHIFT_MODE
): cv.enum(SetpointShiftMode),
vol.Optional(
CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX
): vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(
CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN
): vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(
CONF_TEMPERATURE_STEP, default=DEFAULT_TEMPERATURE_STEP
): vol.All(float, vol.Range(min=0, max=2)),
vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_HEAT_COOL_ADDRESS): cv.string,
vol.Optional(CONF_HEAT_COOL_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STANDBY_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
vol.Optional(
CONF_ON_OFF_INVERT, default=DEFAULT_ON_OFF_INVERT
): cv.boolean,
vol.Optional(CONF_OPERATION_MODES): vol.All(
cv.ensure_list, [vol.In({**OPERATION_MODES, **PRESET_MODES})]
),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
}
),
)
class SwitchSchema:
"""Voluptuous schema for KNX switches."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
DEFAULT_NAME = "KNX Switch"
SCHEMA = vol.Schema(
{
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STATE_ADDRESS): cv.string,
}
)
class ExposeSchema:
"""Voluptuous schema for KNX exposures."""
CONF_KNX_EXPOSE_TYPE = CONF_TYPE
CONF_KNX_EXPOSE_ATTRIBUTE = "attribute"
CONF_KNX_EXPOSE_DEFAULT = "default"
CONF_KNX_EXPOSE_ADDRESS = CONF_ADDRESS
SCHEMA = vol.Schema(
{
vol.Required(CONF_KNX_EXPOSE_TYPE): vol.Any(int, float, str),
vol.Optional(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_KNX_EXPOSE_ATTRIBUTE): cv.string,
vol.Optional(CONF_KNX_EXPOSE_DEFAULT): cv.match_all,
vol.Required(CONF_KNX_EXPOSE_ADDRESS): cv.string,
}
)
class NotifySchema:
"""Voluptuous schema for KNX notifications."""
DEFAULT_NAME = "KNX Notify"
SCHEMA = vol.Schema(
{
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
class SensorSchema:
"""Voluptuous schema for KNX sensors."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_SYNC_STATE = CONF_SYNC_STATE
DEFAULT_NAME = "KNX Sensor"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): vol.Any(
vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
cv.boolean,
cv.string,
),
vol.Required(CONF_STATE_ADDRESS): cv.string,
vol.Required(CONF_TYPE): vol.Any(int, float, str),
}
)
class SceneSchema:
"""Voluptuous schema for KNX scenes."""
CONF_SCENE_NUMBER = "scene_number"
DEFAULT_NAME = "KNX SCENE"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_ADDRESS): cv.string,
vol.Required(CONF_SCENE_NUMBER): cv.positive_int,
}
)
class WeatherSchema:
"""Voluptuous schema for KNX weather station."""
CONF_SYNC_STATE = CONF_SYNC_STATE
CONF_KNX_TEMPERATURE_ADDRESS = "address_temperature"
CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS = "address_brightness_south"
CONF_KNX_BRIGHTNESS_EAST_ADDRESS = "address_brightness_east"
CONF_KNX_BRIGHTNESS_WEST_ADDRESS = "address_brightness_west"
CONF_KNX_WIND_SPEED_ADDRESS = "address_wind_speed"
CONF_KNX_RAIN_ALARM_ADDRESS = "address_rain_alarm"
CONF_KNX_FROST_ALARM_ADDRESS = "address_frost_alarm"
CONF_KNX_WIND_ALARM_ADDRESS = "address_wind_alarm"
CONF_KNX_DAY_NIGHT_ADDRESS = "address_day_night"
CONF_KNX_AIR_PRESSURE_ADDRESS = "address_air_pressure"
CONF_KNX_HUMIDITY_ADDRESS = "address_humidity"
CONF_KNX_EXPOSE_SENSORS = "expose_sensors"
DEFAULT_NAME = "KNX Weather Station"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): vol.Any(
vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
cv.boolean,
cv.string,
),
vol.Optional(CONF_KNX_EXPOSE_SENSORS, default=False): cv.boolean,
vol.Required(CONF_KNX_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS): cv.string,
vol.Optional(CONF_KNX_BRIGHTNESS_EAST_ADDRESS): cv.string,
vol.Optional(CONF_KNX_BRIGHTNESS_WEST_ADDRESS): cv.string,
vol.Optional(CONF_KNX_WIND_SPEED_ADDRESS): cv.string,
vol.Optional(CONF_KNX_RAIN_ALARM_ADDRESS): cv.string,
vol.Optional(CONF_KNX_FROST_ALARM_ADDRESS): cv.string,
vol.Optional(CONF_KNX_WIND_ALARM_ADDRESS): cv.string,
vol.Optional(CONF_KNX_DAY_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_KNX_AIR_PRESSURE_ADDRESS): cv.string,
vol.Optional(CONF_KNX_HUMIDITY_ADDRESS): cv.string,
}
)
| {
"content_hash": "0940fe3c9206c2314ca20d192a4add79",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 86,
"avg_line_length": 38.774025974025975,
"alnum_prop": 0.6241961414790996,
"repo_name": "tchellomello/home-assistant",
"id": "a436f2dcdc8e7a2a667e0498811f76b1d924ffd3",
"size": "14928",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/knx/schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "26713364"
},
{
"name": "Shell",
"bytes": "4528"
}
],
"symlink_target": ""
} |
from .baseClass import BaseClass
__all__ = ['BaseClass']
| {
"content_hash": "52fe25b28288c858bd7e1307a377cd16",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.6896551724137931,
"repo_name": "chrisbrake/PythonSandbox",
"id": "107a7a998f2f96f6e3f7b90ff8f3a2126e3a8b5b",
"size": "58",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "character/classes/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gherkin",
"bytes": "164"
},
{
"name": "HTML",
"bytes": "2519"
},
{
"name": "JavaScript",
"bytes": "3317"
},
{
"name": "Python",
"bytes": "35318"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
} |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-std=gnu99',
'-I', './',
'-isystem', '/home/konrad/Programming/eb/STM32/toolchain/firmware/CORE/inc/',
'-isystem', '/home/konrad/Programming/eb/STM32/toolchain/firmware/StdPeriph_Driver/inc/',
'-isystem', '/home/konrad/Programming/eb/STM32/toolchain/firmware/STM32Cube_FW_F1_V1.3.0/Drivers/STM32F1xx_HAL_Driver/Inc',
'-x', 'c'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
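# For reference, an entry in compile_commands.json looks roughly like this
# (paths are illustrative):
#   { "directory": "/path/to/build",
#     "command": "gcc -Wall -std=gnu99 -c /path/to/src/main.c",
#     "file": "/path/to/src/main.c" }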
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  # The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| {
"content_hash": "f9a6d2ac7e567e2df87f4f9bb6f9c8bb",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 123,
"avg_line_length": 33.7563025210084,
"alnum_prop": 0.6895693303460294,
"repo_name": "stsrc/RFID",
"id": "48e29aa2d38b0c8dcab673784937ef0994c65aa7",
"size": "5417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RFID_src/.ycm_extra_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "430654"
},
{
"name": "C++",
"bytes": "2160"
},
{
"name": "HTML",
"bytes": "6226"
},
{
"name": "JavaScript",
"bytes": "3002"
},
{
"name": "Makefile",
"bytes": "664"
},
{
"name": "Python",
"bytes": "5417"
}
],
"symlink_target": ""
} |
import requests
import xml.dom.minidom
# We need to import the JSON library just to handle our request to the APIC for login
import json
# We need to log in to the APIC and gather a token, before we can access any data
# Let's construct a request with a body
# We'll need to disable certificate warnings
requests.packages.urllib3.disable_warnings()
# We need to have a body of data consisting of a username and password to gather a cookie from APIC
encoded_body = json.dumps({
"aaaUser": {
"attributes": {
"name": "admin",
"pwd": "ciscopsdt"
}
}
})
# Now lets make the request and store the data
resp = requests.post("https://sandboxapicdc.cisco.com/api/aaaLogin.json", data=encoded_body, verify=False)
# This stores the received APIC-cookie from the login as a value to be used in subsequent REST calls
header = {"Cookie": "APIC-cookie=" + resp.cookies["APIC-cookie"]}
# Now we make a call towards the tenant class on the ACI fabric with the proper header value set.
# We leverage the .xml ending to receive the data back as XML
tenants = requests.get("https://sandboxapicdc.cisco.com/api/node/class/fvTenant.xml?rsp-subtree-include=health,faults", headers=header, verify=False)
# Now lets use DOM to clean up the XML from its completely raw format
dom = xml.dom.minidom.parseString(tenants.text)
pretty_xml = dom.toprettyxml()
print(pretty_xml)
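# The parsing below assumes the response is shaped roughly as follows
# (element and attribute names shown for illustration; the exact payload
# depends on the APIC version and query options):
#   <imdata ...>
#     <fvTenant name="common" ...>
#       <healthInst cur="95" .../>
#       ...
#     </fvTenant>
#   </imdata>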
# Now we want to parse the resulting XML and print only the tenant name and its current health score. We'll do this through iteration over the elements in the XML
tenant_objects = dom.firstChild
if tenant_objects.hasChildNodes():
tenant_element = tenant_objects.firstChild
while tenant_element is not None:
if tenant_element.tagName == 'fvTenant':
health_element = tenant_element.firstChild
output = "Tenant: " + tenant_element.getAttribute('name') + '\t Health Score: ' + health_element.getAttribute('cur')
print(output.expandtabs(40))
        tenant_element = tenant_element.nextSibling
| {
"content_hash": "c741b0bc560900f8a5ddb730bbba4c90",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 163,
"avg_line_length": 44.340425531914896,
"alnum_prop": 0.7034548944337812,
"repo_name": "CiscoDevNet/coding-skills-sample-code",
"id": "93b1cfde90b5be0a24ab71d6ed70eea07a1e4e20",
"size": "2084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coding201-parsing-xml/get-tenants-xml-3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "157"
},
{
"name": "C++",
"bytes": "9084"
},
{
"name": "CMake",
"bytes": "1491"
},
{
"name": "HTML",
"bytes": "2763"
},
{
"name": "Python",
"bytes": "95600"
}
],
"symlink_target": ""
} |
import feedparser
from functools import wraps
from xml.sax import SAXException
from cmsplugin_feed.utils import strip_tags, get_image, prioritize_jpeg
import re
def apply(f):
@wraps(f)
def wrapper(*args, **kwargs):
feed = f(*args, **kwargs)
if feed:
for fproc in FEED_PROCESSORS:
feed = fproc(feed)
return feed
return wrapper
def remove_invalid(feed):
entries = feed.get('entries', [])
feed['entries'] = [e for e in entries if isinstance(e, dict)]
return feed
def add_image_hrefs(feed):
""" WARNING! it changes the feed arg """
supported_image_types = ('image/jpeg', 'image/png')
entries = feed.get('entries', [])
for entry in entries:
if 'image' not in entry:
try:
for link in entry.get('links', []):
try:
if link.get('type') in supported_image_types:
entry['image'] = link.get('href')
break
except AttributeError:
continue
except (TypeError, AttributeError):
pass
image_getters = (
lambda e: e['image'] if isinstance(e['image'], basestring) else None,
lambda e: e['image']['href'] if e['image'] else None,
lambda e: prioritize_jpeg(e['media_thumbnail']),
lambda e: prioritize_jpeg(e['media_content']))
for getter in image_getters:
try:
image = getter(entry)
if image:
entry['image'] = image
break
except (KeyError, IndexError, TypeError):
pass
return feed
def add_image_from_content(feed):
entries = feed.get('entries', [])
for entry in entries:
if 'image' not in entry or not entry['image']:
text = entry.get('summary', '')
if 'content' in entry:
try:
text += ''.join([e.value for e in entry['content']])
except (TypeError, AttributeError):
pass
img = get_image(text)
if img:
entry['image'] = img
return feed
def fix_summary(feed):
entries = feed.get('entries', [])
for entry in entries:
summary = entry.get('summary', '')
entry['summary'] = re.sub(r"\s+", " ", strip_tags(summary))
return feed
# keep the order of the processors
FEED_PROCESSORS = (remove_invalid, add_image_hrefs, add_image_from_content, fix_summary)
@apply
def fetch_parsed_feed(feed_url):
"""Returns the parsed feed if not malformed,"""
feed = feedparser.parse(feed_url)
parse_error = hasattr(feed, 'bozo_exception') and (
isinstance(feed.bozo_exception, SAXException))
if not feed.bozo or not parse_error:
return feed
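# Minimal usage sketch (URL is illustrative): fetching a feed runs it through
# FEED_PROCESSORS, so each entry ends up with a plain-text 'summary' and,
# when one can be found, an 'image' href:
#
#   feed = fetch_parsed_feed("http://example.com/rss.xml")
#   if feed:
#       for entry in feed.entries:
#           print entry.get('title'), entry.get('image')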
| {
"content_hash": "a2745faa0d3a682cce6ed3e4aa088e97",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 88,
"avg_line_length": 30.893617021276597,
"alnum_prop": 0.5420110192837465,
"repo_name": "pbs/cmsplugin-feed",
"id": "230202e7a0ee744aae083532d68b104cfb11886f",
"size": "2904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_feed/processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "973"
},
{
"name": "HTML",
"bytes": "1702"
},
{
"name": "Python",
"bytes": "21620"
}
],
"symlink_target": ""
} |
"""Test specially formated cmdln_*.py files
Each cmdln_*.py implemented a cmdln.Cmdln subclass and its module
docstring is an 'expect' script to test running it.
Usage:
Run all cmdln_*.py tests:
python test_cmdln.py
As part of a large test suite:
import test_cmdln
test_cmdln.suite() # returns a unittest.TestSuite
Test just specified cmdln_* files:
python test_cmdln.py <file-pattern>...
"""
import sys
import os
import unittest
import difflib
import pprint
import shutil
import glob
PY3 = sys.version_info[0] == 3
#---- support stuff
def banner(text, ch='=', length=78):
"""Return a banner line centering the given text.
"text" is the text to show in the banner. None can be given to have
no text.
"ch" (optional, default '=') is the banner line character (can
also be a short string to repeat).
"length" (optional, default 78) is the length of banner to make.
Examples:
>>> banner("Peggy Sue")
'================================= Peggy Sue =================================='
>>> banner("Peggy Sue", ch='-', length=50)
'------------------- Peggy Sue --------------------'
>>> banner("Pretty pretty pretty pretty Peggy Sue", length=40)
'Pretty pretty pretty pretty Peggy Sue'
"""
if text is None:
return ch * length
elif len(text) + 2 + len(ch)*2 > length:
# Not enough space for even one line char (plus space) around text.
return text
else:
remain = length - (len(text) + 2)
        prefix_len = remain // 2
suffix_len = remain - prefix_len
if len(ch) == 1:
prefix = ch * prefix_len
suffix = ch * suffix_len
else:
            prefix = ch * (prefix_len//len(ch)) + ch[:prefix_len%len(ch)]
            suffix = ch * (suffix_len//len(ch)) + ch[:suffix_len%len(ch)]
return prefix + ' ' + text + ' ' + suffix
def indented(text, indent=' '*4):
lines = text.splitlines(1)
return indent + indent.join(lines)
#---- Expect shorthand to expect translation
SHELL_PROMPT = "$ "
class SpawnBlock:
def __init__(self, spawnline):
self._parse(spawnline) # sets self.cmd and self.options
self.lines = []
def _parse(self, line):
self.options = {}
parts = line[len(SHELL_PROMPT):].split("#", 1)
if len(parts) == 1:
self.cmd = parts[0]
else:
self.cmd, optstr = parts
landmark = "expecttest:"
if optstr.startswith(landmark):
for opt in optstr[len(landmark):].split(","):
opt = opt.strip()
if '=' in opt:
name, value = opt.split('=')
if value.startswith('"'):
value = value[1:-1]
else:
name, value = opt, True
self.options[name] = value
def addline(self, line):
self.lines.append(line)
def generate(self):
"""Return executable "expect" code for this spawn-block."""
expect = ["spawn "+self.cmd]
interactive = self.options.get("INTERACTIVE", False)
if interactive:
prompt = self.options["PROMPT"]
if sys.platform == "win32":
eol_expect = r"\n"
eol_expect_repr = r"\\n"
eof_expect = r"\032\r" # Ctrl-Z + return
else:
eol_expect = r"\r\n"
eol_expect_repr = r"\\r\\n"
eof_expect = r"\004" # Ctrl-D
for line in self.lines:
if interactive and line.startswith(prompt):
expect.append(r"""expect {
-i $spawn_id
-re "^%s" {}
default {
puts stderr {ERROR: expected "%s"}
puts stderr " got \"$expect_out(buffer)\""
exit 1
}
}""" % (prompt, prompt))
input = line[len(prompt):]
if input in ("^D", "^Z"):
#XXX Around the post-10.4 (Tiger) OS X release
# updates for 10.3 this 'expect' started failing.
# Adding the "(....)?" helps. I don't know enough
# Tcl to figure out exactly what those friggin'
# chars are.
expect += [r'send "%s"' % eof_expect,
r'expect -re "^(....)?%s$"' % eol_expect]
else:
expect += [r'send "%s\r"' % input,
r'expect -re "^%s%s"' % (input, eol_expect)]
else:
expected = tcl_escape(line)
if line == "<BLANKLINE>":
expected = r"\s*" # a "blank line" can have whitespace
expect.append(r"""expect {
-i $spawn_id
-re {^%s%s} {}
default {
puts stderr {ERROR: expected "%s%s"}
puts stderr " got \"$expect_out(buffer)\""
exit 1
}
}""" % (expected, eol_expect, expected.replace('\\', '\\\\'),
eol_expect_repr))
# Trap EOF for current process and make sure there isn't
# unexpected trailing output.
expect.append(r"""expect {
-i $spawn_id
eof {
} -re "^.+$" {
puts stderr "error: unexpected trailing output: '$expect_out(buffer)'\n"
exit 1
} timeout {
puts stderr {ERROR: timed out waiting for EOF from '%s'}
exit 1
}
}""" % self.cmd)
return '\n'.join(expect)
def tcl_escape(s):
"""Escape the given string as appropriate for using in a Tcl string
and regex.
"""
return s.replace("[", "\\[").replace("]", "\\]") \
.replace("$", "\\$") \
.replace("?", "\\?") \
.replace("(", "\\(").replace(")", "\\)")
def strip_prefix(line, prefix):
junk, content = line[:len(prefix)], line[len(prefix):].rstrip()
if junk.strip(): # line in block with short indentation
raise ValueError("too-short indentation on line: '%s'"
% line)
assert '\t' not in junk, \
"error: tab in expect-line prefix: '%s'" % line
return content
def parse_expect_content(content):
"""Generate parsed "expect" lines.
"Expect" blocks begin with a "spawn" line -- one that is prefixed
with a shell prompt -- and end with a blank line or the end of the
content. A "parsed" line is one with the indentation removed, if
any.
Generates 2-tuples
(<line-type>, <parsed-line>)
where <line-type> is "spawn" for spawn-lines or "other" for other
lines.
"""
if not content:
        return
prefix = None
for line in content.splitlines(0):
if not line.strip():
prefix = None # end of a block
elif line.lstrip().startswith(SHELL_PROMPT):
if prefix is None: # start of a new block
idx = line.index(SHELL_PROMPT)
prefix, content = line[:idx], line[idx:].rstrip()
assert '\t' not in prefix, \
"error: tab in expect-line prefix: '%s'" % line
else:
content = strip_prefix(line, prefix)
yield "spawn", content
elif prefix is not None:
yield "other", strip_prefix(line, prefix)
def generate_expect(content):
# Break into "spawn"-block. A new spawn block starts with what
# will become an expect "spawn" command. Specifically a block
# that begins with the '$ ' shell prompt.
blocks = []
block = None
for type, line in parse_expect_content(content):
assert type in ("spawn", "other"), \
"unexpected spawn line type: %r" % type
if type == "spawn":
block = SpawnBlock(line)
blocks.append(block)
else:
assert block is not None, \
"'other' spawn line without leading 'spawn' line: %r" % line
block.addline(line)
expect = ["#!/usr/bin/env tclsh",
"",
"package require Expect",
"set timeout 3",
"set send_slow {10 .001}",
""]
for block in blocks:
expect.append(block.generate())
return '\n'.join(expect) + '\n'
#----- test cases
class CmdlnTestCase(unittest.TestCase):
pass
def _testOneCmdln(self, modname, fname):
_debug = False # Set to true to dump status info for each test run.
mod = __import__(modname)
doc = mod.__doc__
if not PY3 and isinstance(doc, unicode):
doc = doc.encode("utf-8")
expect = generate_expect(doc)
if False:
tmpfname = ".%s.exp.tmp" % modname
open(tmpfname, 'w').write(expect)
retval = os.system("tclsh "+tmpfname)
if hasattr(os, "WEXITSTATUS"):
retval = os.WEXITSTATUS(retval)
stdout = stderr = ""
else:
if _debug:
tmpfname = ".%s.exp.tmp" % modname
open(tmpfname, 'w').write(expect)
import process
p = process.ProcessOpen("tclsh")
p.stdin.write(expect)
p.stdin.close()
retval = p.wait()
if hasattr(os, "WEXITSTATUS"):
retval = os.WEXITSTATUS(retval)
stdout = p.stdout.read()
stderr = p.stderr.read()
p.close()
self.failIf(retval, """\
'%s' did not behave as expected:
%s
%s
%s
%s
%s
%s
%s""" % (fname,
indented(banner("expect shorthand", length=72)),
indented(doc or ""),
indented(banner("stdout", length=72)),
indented(stdout),
indented(banner("stderr", length=72)),
indented(stderr),
indented(banner(None, length=72))))
if __name__ == "__main__" and sys.argv[1:]:
testfiles = []
for arg in sys.argv[1:]:
testfiles += glob.glob(arg)
else:
testfiles = glob.glob("cmdln_*.py")
if sys.version_info[:2] >= (2,4):
testfiles += glob.glob("cmdln24_*.py")
for fname in testfiles:
base = os.path.basename(os.path.splitext(fname)[0])
testfunc = lambda self, base=base, fname=fname: _testOneCmdln(self, base, fname)
if base.startswith("cmdln_"):
base = base[len("cmdln_"):]
testname = 'test_'+base
setattr(CmdlnTestCase, testname, testfunc)
#---- mainline
def suite():
"""Return a unittest.TestSuite to be used by test.py."""
return unittest.makeSuite(CmdlnTestCase)
if __name__ == "__main__":
runner = unittest.TextTestRunner(sys.stdout, verbosity=2)
result = runner.run(suite())
| {
"content_hash": "aa9ba4707885d47ad479750ee695ce16",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 88,
"avg_line_length": 32.24390243902439,
"alnum_prop": 0.5273260211800302,
"repo_name": "hfeeki/cmdln",
"id": "87cded0df95cdf148daec4aa8408bd0ba8745d40",
"size": "10654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_cmdln.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "465368"
}
],
"symlink_target": ""
} |
import re
import six
from typing import (
Any, Iterable, Iterator, Tuple, Sized, List, Optional, Dict,
Union, Callable, Pattern
)
import numpy as np
import scipy.sparse as sp
class FeatureNames(Sized, Iterable):
"""
A list-like object with feature names. It allows
feature names for unknown features to be generated using
a provided template, and to avoid making copies of large objects
in get_feature_names.
"""
def __init__(self,
feature_names=None,
bias_name=None, # type: str
unkn_template=None, # type: str
n_features=None, # type: int
):
# type: (...) -> None
if not (feature_names is not None or
(unkn_template is not None and n_features)):
raise ValueError(
'Pass feature_names or unkn_template and n_features')
if feature_names is not None:
if not isinstance(feature_names, (list, dict, np.ndarray)):
raise TypeError('Unexpected feature_names type')
if n_features is not None and n_features != len(feature_names):
if not isinstance(feature_names, dict):
raise ValueError(
'n_features should match feature_names length')
elif unkn_template is None:
raise ValueError(
'unkn_template should be set for sparse features')
self.feature_names = feature_names
self.unkn_template = unkn_template
self.n_features = n_features or len(feature_names) # type: int
self.bias_name = bias_name
def __repr__(self):
# type: () -> str
return '<FeatureNames: {} features {} bias>'.format(
self.n_features, 'with' if self.has_bias else 'without')
def __len__(self):
# type: () -> int
return self.n_features + int(self.has_bias)
def __iter__(self):
# type: () -> Iterator[str]
return (self[i] for i in range(len(self)))
def __getitem__(self, idx):
if isinstance(idx, slice):
return self._slice(idx)
if isinstance(idx, np.ndarray):
return [self[i] for i in idx]
if self.has_bias and idx == self.bias_idx:
return self.bias_name
if 0 <= idx < self.n_features:
try:
return self.feature_names[idx]
except (TypeError, KeyError, IndexError):
return self.unkn_template % idx
raise IndexError('Feature index out of range')
def _slice(self, aslice):
# type: (slice) -> Any
if isinstance(self.feature_names, (list, np.ndarray)):
# Fast path without going through __getitem__
if self.has_bias:
lst = list(self.feature_names)
lst.append(self.bias_name)
else:
lst = self.feature_names
return lst[aslice]
else:
indices = range(len(self))[aslice]
return [self[idx] for idx in indices]
@property
def has_bias(self):
# type: () -> bool
return self.bias_name is not None
@property
def bias_idx(self):
# type: () -> Optional[int]
if self.has_bias:
return self.n_features
return None
def filtered(self, feature_filter, x=None):
# type: (Callable, Any) -> Tuple[FeatureNames, List[int]]
""" Return feature names filtered by a regular expression
``feature_re``, and indices of filtered elements.
"""
indices = []
filtered_feature_names = []
indexed_names = None # type: Optional[Iterable[Tuple[int, Any]]]
if isinstance(self.feature_names, (np.ndarray, list)):
indexed_names = enumerate(self.feature_names)
elif isinstance(self.feature_names, dict):
indexed_names = six.iteritems(self.feature_names)
elif self.feature_names is None:
indexed_names = []
assert indexed_names is not None
if x is not None:
if sp.issparse(x) and len(x.shape) == 2:
assert x.shape[0] == 1
flt = lambda nm, i: feature_filter(nm, x[0, i])
else:
# FIXME: mypy warns about x[i] because it thinks x can be None
flt = lambda nm, i: feature_filter(nm, x[i]) # type: ignore
else:
flt = lambda nm, i: feature_filter(nm)
for idx, name in indexed_names:
if any(flt(nm, idx) for nm in _all_feature_names(name)):
indices.append(idx)
filtered_feature_names.append(name)
if self.has_bias and flt(self.bias_name, self.bias_idx):
assert self.bias_idx is not None # for mypy
bias_name = self.bias_name
indices.append(self.bias_idx)
else:
bias_name = None
return (
FeatureNames(
filtered_feature_names,
bias_name=bias_name,
unkn_template=self.unkn_template,
),
indices)
def handle_filter(self,
feature_filter,
feature_re, # type: Pattern[str]
x=None, # type: Any
):
# type: (...) -> Tuple[FeatureNames, Union[List[int], None]]
if feature_re is not None and feature_filter:
raise ValueError('pass either feature_filter or feature_re')
if feature_re is not None:
if x is not None:
feature_filter = lambda name, _: re.search(feature_re, name)
else:
feature_filter = lambda name: re.search(feature_re, name)
if feature_filter is not None:
return self.filtered(feature_filter, x)
else:
return self, None
def add_feature(self, feature):
# type: (Any) -> int
""" Add a new feature name, return it's index.
"""
# A copy of self.feature_names is always made, because it might be
# "owned" by someone else.
# It's possible to make the copy only at the first call to
# self.add_feature to improve performance.
idx = self.n_features
if isinstance(self.feature_names, (list, np.ndarray)):
self.feature_names = list(self.feature_names)
self.feature_names.append(feature)
elif isinstance(self.feature_names, dict):
self.feature_names = dict(self.feature_names)
self.feature_names[idx] = feature
elif self.feature_names is None:
self.feature_names = {idx: feature}
self.n_features += 1
return idx
def _all_feature_names(name):
# type: (Union[str, bytes, List[Dict]]) -> List[str]
""" All feature names for a feature: usually just the feature itself,
but can be several features for unhashed features with collisions.
"""
if isinstance(name, bytes):
return [name.decode('utf8')]
elif isinstance(name, list):
return [x['name'] for x in name]
else:
return [name]
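# Illustrative usage sketch (not part of the original module): FeatureNames
# mixes known names with a template for unknown indices, plus an optional
# bias term. The names, template and filter below are hypothetical.
def _example_feature_names():
    names = FeatureNames({0: 'age', 2: 'income'}, bias_name='<BIAS>',
                         unkn_template='x%d', n_features=4)
    full = list(names)  # ['age', 'x1', 'income', 'x3', '<BIAS>']
    kept, indices = names.filtered(lambda name: name != 'age')
    return full, list(kept), indices  # (..., ['income', '<BIAS>'], [2, 4])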
| {
"content_hash": "bdcc5b9e4588b7ea94cf8d685849bba2",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 78,
"avg_line_length": 37.494791666666664,
"alnum_prop": 0.5542436449506876,
"repo_name": "TeamHG-Memex/eli5",
"id": "ff1fd80c34731751a8049402d44223037554a47b",
"size": "7199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eli5/_feature_names.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8731"
},
{
"name": "Jupyter Notebook",
"bytes": "7973230"
},
{
"name": "Python",
"bytes": "503460"
},
{
"name": "Shell",
"bytes": "1302"
}
],
"symlink_target": ""
} |
import burnGPIO as IO
from time import sleep
import sys, termios, atexit
from intelhex import IntelHex
from select import select
from mydelay import mydelay
class PIC16F8X:
CpuTag=0
CpuId=0
CpuRevision=0
ProgramSize = 2048
ProgramBase = 0
DataSize = 256
DataBase = 0x2100
ConfigBase = 0x2000
PicFamily = "PIC12/16"
#cpu list dict. CpuId [Pic Name, ProgramSize]
CpuList = { 0x0720 : ['PIC16F87' , 4096] ,
0x0760: ['PIC16F88' , 4096] ,
}
# command definition
C_LOAD_CONFIG = 0
C_LOAD_PROGRAM = 2
C_LOAD_DATA = 3
C_READ_PROGRAM = 4
C_READ_DATA = 5
C_INC_ADDRESS = 6
C_BEGIN_ERASE = 8
C_BEGIN_PROGRAMMING =0x18
C_BULK_ERASE_PROGRAM = 9
C_BULK_ERASE_DATA = 0xB
C_END_PROGRAMMING = 0x17
C_CHIP_ERASE = 0x1F
def Set_LVP(self):
IO.GPIO.setup(IO.PIC_DATA,IO.GPIO.OUT)
IO.GPIO.output(IO.PIC_DATA, False)
IO.GPIO.output(IO.PIC_CLK, False)
#PGM LOW
IO.GPIO.output(IO.PIC_PGM,False)
#MCLR LOW
IO.GPIO.output(IO.PIC_MCLR, False)
#PGM HIGH
IO.GPIO.output(IO.PIC_PGM,True)
#held MCLR HIGH
IO.GPIO.output(IO.PIC_MCLR, True)
sleep(0.1)
#ok PIC_CLK=out& HIGH, PIC_DATA=out & LOW
IO.GPIO.output(IO.PIC_CLK, False)
# print("LVP ON")
sleep(0.3)
def Release_LVP(self):
IO.GPIO.output(IO.PIC_MCLR, False)
sleep(0.1)
IO.GPIO.output(IO.PIC_PGM, False)
IO.GPIO.output(IO.PIC_MCLR, True)
# print("LVP OFF")
def SendCommand(self,Command):
IO.GPIO.setup( IO.PIC_DATA, IO.GPIO.OUT)
for loop in range(6):
IO.GPIO.output(IO.PIC_CLK, True)
IO.GPIO.output(IO.PIC_DATA, (Command & 1) ==1)
mydelay()
IO.GPIO.output(IO.PIC_CLK, False)
mydelay()
Command = Command >> 1;
def ReadWord(self):
IO.GPIO.setup(IO.PIC_DATA, IO.GPIO.IN)
Value = 0
for loop in range(16):
IO.GPIO.output(IO.PIC_CLK, True)
mydelay()
if IO.GPIO.input(IO.PIC_DATA):
Value = Value + (1 << loop)
IO.GPIO.output(IO.PIC_CLK, False)
mydelay()
Value = (Value >> 1) & 0x3FFF;
return Value;
def LoadWord(self,Value):
IO.GPIO.setup(IO.PIC_DATA, IO.GPIO.OUT)
Value = (Value << 1) & 0x7FFE
for loop in range(16):
IO.GPIO.output(IO.PIC_CLK, True)
IO.GPIO.output(IO.PIC_DATA,(Value & 1)==1)
mydelay()
IO.GPIO.output(IO.PIC_CLK, False)
mydelay()
Value = Value >> 1;
def BulkErase(self):
self.Set_LVP();
self.SendCommand(self.C_LOAD_CONFIG)
self.LoadWord(0x3fff)
print("Chip Erase Chip",end='')
self.SendCommand(self.C_CHIP_ERASE)
sleep(0.2)
print(".... done.")
def ProgramBlankCheck(self):
print("Program blank check",end='')
#reset address
self.Set_LVP()
for l in range(self.ProgramSize):
self.SendCommand(self.C_READ_PROGRAM)
Value = self.ReadWord()
if Value != 0x3fff :
print("*** CPU program at Address ", hex(l), " = ", hex(Value), " Failed!")
return False
if (l % 128)==0 :
sys.stdout.write('.')
sys.stdout.flush()
self.SendCommand(self.C_INC_ADDRESS)
print("Passed!")
return True
def DataBlankCheck(self):
print("Data Blank check",end='')
self.Set_LVP()
for l in range(self.DataSize):
self.SendCommand(self.C_READ_DATA)
Value = self.ReadWord() & 0xff
if Value != 0xff :
print("*** CPU eeprom data at Address ", hex(l), " = ", hex(Value), "Failed!")
return False
if (l % 32)==0 :
sys.stdout.write('.')
sys.stdout.flush()
self.SendCommand(self.C_INC_ADDRESS)
print("Passed!")
return True
def ProgramBurn(self, pic_data):
print("Writing Program",end='')
self.Set_LVP()
for i in range(0,self.ProgramSize,4):
for DataCount in range(4):
l = i + DataCount
if pic_data.get(l*2+ self.ProgramBase) != None :
if pic_data.get(l*2+ self.ProgramBase+1) != None :
Value = pic_data.get(l*2+ self.ProgramBase) + ( 256 * pic_data.get(l*2+ self.ProgramBase+1))
Value = Value & 0x3fff
self.SendCommand(self.C_LOAD_PROGRAM)
self.LoadWord(Value)
if DataCount != 3:
self.SendCommand(self.C_INC_ADDRESS)
if (i % 128)==0 :
sys.stdout.write('.')
sys.stdout.flush()
#ok 4 words then write
self.SendCommand(self.C_BEGIN_PROGRAMMING)
sleep(0.005)
self.SendCommand(self.C_END_PROGRAMMING)
self.SendCommand(self.C_INC_ADDRESS)
print("Done.")
return True
def DataBurn(self,pic_data):
print("Writing Data",end='')
self.Set_LVP()
for l in range( self.DataSize):
if pic_data.get(l*2 + self.DataBase) != None :
if pic_data.get(l*2 + self.DataBase + 1) != None :
Value = pic_data.get(l*2 + self.DataBase)
self.SendCommand(self.C_LOAD_DATA)
self.LoadWord(Value)
self.SendCommand(self.C_BEGIN_PROGRAMMING)
sleep(0.003)
self.SendCommand(self.C_END_PROGRAMMING)
if (l % 32)==0 :
sys.stdout.write('.')
sys.stdout.flush()
self.SendCommand(self.C_INC_ADDRESS)
print("Done.")
return True
def ProgramCheck(self, pic_data):
print("Program check ",end='')
self.Set_LVP()
for l in range(self.ProgramSize):
if pic_data.get(l*2+ self.ProgramBase) != None :
if pic_data.get(l*2+ self.ProgramBase+1) != None :
Value = pic_data.get(l*2+ self.ProgramBase) + ( 256 * pic_data.get(l*2+ self.ProgramBase+1))
Value = Value & 0x3fff
self.SendCommand(self.C_READ_PROGRAM)
RValue = self.ReadWord()
if Value != RValue :
print("Program address:", hex(l) , " write ", hex(Value), " read ", hex(RValue))
return False
if (l % 128)==0 :
sys.stdout.write('.')
sys.stdout.flush()
self.SendCommand(self.C_INC_ADDRESS)
print("Passed!")
return True
def DataCheck(self,pic_data):
print("Data check ",end='')
self.Set_LVP()
for l in range(self.DataSize):
if pic_data.get(l*2 + self.DataBase) != None :
if pic_data.get(l*2 + self.DataBase+1) != None :
Value = pic_data.get(l*2+self.DataBase)
self.SendCommand(self.C_READ_DATA)
RValue = self.ReadWord()
if Value != RValue :
print("Data address:", hex(l) , " write ", hex(Value), " read ", hex(RValue))
return False
if (l % 32)==0 :
sys.stdout.write('.')
sys.stdout.flush()
self.SendCommand(self.C_INC_ADDRESS)
print("Passed!")
return True
def ConfigBurn(self,pic_data):
print("Writing Config",end='')
self.Set_LVP()
self.SendCommand(self.C_LOAD_CONFIG)
self.LoadWord(0x3fff)
#user id first
for l in range(4):
if pic_data.get(l*2+ self.ConfigBase) != None :
if pic_data.get(l*2+ self.ConfigBase+1) != None :
Value = pic_data.get(l*2+ self.ConfigBase) + ( 256 * pic_data.get(l*2+ self.ConfigBase+1))
Value = Value & 0x3fff
self.SendCommand(self.C_LOAD_PROGRAM)
self.LoadWord(Value)
if l != 3 :
self.SendCommand(self.C_INC_ADDRESS)
self.SendCommand(self.C_BEGIN_PROGRAMMING)
sleep(0.005)
self.SendCommand(self.C_END_PROGRAMMING)
self.SendCommand(self.C_INC_ADDRESS)
sys.stdout.write('.')
sys.stdout.flush()
        #ok we are at 0x2004
#skip 0x2004 .. 0x2006
self.SendCommand(self.C_INC_ADDRESS)
self.SendCommand(self.C_INC_ADDRESS)
self.SendCommand(self.C_INC_ADDRESS)
# now the configuration word 1& 2 at 0x2007 ( hex file at 0x1000E)
for l in range(7,9):
if pic_data.get(l*2+ self.ConfigBase) != None :
if pic_data.get(l*2+ self.ConfigBase+1) != None :
Value = pic_data.get(l*2+ self.ConfigBase) + ( 256 * pic_data.get(l*2+ self.ConfigBase+1))
Value = Value & 0x3fff
self.SendCommand(self.C_LOAD_PROGRAM)
if l == 8:
#catch21 force LVP programming to be always ON
Value = Value | 0x2000
self.LoadWord(Value)
self.SendCommand(self.C_BEGIN_PROGRAMMING)
sleep(0.005)
self.SendCommand(self.C_END_PROGRAMMING)
sys.stdout.write('.')
sys.stdout.flush()
self.SendCommand(self.C_INC_ADDRESS)
print("Done.")
return True
#just check if the user forget to set LVP flag enable
#if not just give a warning since we force LVP enable
def CheckLVP(self,pic_data):
#specify config word2
l=8
if pic_data.get(l*2+ self.ConfigBase) != None :
if pic_data.get(l*2+ self.ConfigBase+1) != None :
Value = pic_data.get(l*2+ self.ConfigBase) + ( 256 * pic_data.get(l*2+ self.ConfigBase+1))
Value = Value & 0x3fff
return((Value & 0x2000)== 0x2000)
return True
def ConfigCheck(self, pic_data):
print("Config Check",end='')
self.Set_LVP()
self.SendCommand(self.C_LOAD_CONFIG)
self.LoadWord(0x3fff)
#user id first
for l in range(4):
if pic_data.get(l*2+ self.ConfigBase) != None :
if pic_data.get(l*2+ self.ConfigBase+1) != None :
Value = pic_data.get(l*2+ self.ConfigBase) + ( 256 * pic_data.get(l*2+ self.ConfigBase+1))
Value = Value & 0x3fff
self.SendCommand(self.C_READ_PROGRAM)
RValue = self.ReadWord()
if Value != RValue :
print("User Id Location:", hex(l) , " write ", hex(Value), " read ", hex(RValue), " Failed!")
return False
sys.stdout.write('.')
sys.stdout.flush()
self.SendCommand(self.C_INC_ADDRESS)
#ok we are at 0x2004
#skip 0x2004 .. 0x2006
self.SendCommand(self.C_INC_ADDRESS)
self.SendCommand(self.C_INC_ADDRESS)
self.SendCommand(self.C_INC_ADDRESS)
# now the configuration word 1& 2 at 0x2007
for l in range(7,9):
if pic_data.get(l*2+ self.ConfigBase) != None :
if pic_data.get(l*2+ self.ConfigBase+1) != None :
Value = pic_data.get(l*2+ self.ConfigBase) + ( 256 * pic_data.get(l*2+ self.ConfigBase+1))
Value = Value & 0x3fff
if l == 8:
#catch21 force LVP programming to be always ON
Value = Value | 0x2000
self.SendCommand(self.C_READ_PROGRAM)
RValue = self.ReadWord()
if Value != RValue :
print("Config Word ", l-6 , " write ", hex(Value), " read ", hex(RValue), " Failed!")
return False
sys.stdout.write('.')
sys.stdout.flush()
self.SendCommand(self.C_INC_ADDRESS)
print("Passed!")
self.CheckLVP(pic_data)
return True
def IDBurn(self, pic_data):
#ID is with CONFIG
mydelay()
def IDCheck(self, pic_data):
#id is with CONFIG
return True
def ScanCpuTag(self):
print("Check PIC16F87/88...")
self.Set_LVP()
self.SendCommand(0)
self.LoadWord(0x3FFF)
for l in range(6):
self.SendCommand(self.C_INC_ADDRESS)
self.SendCommand(self.C_READ_PROGRAM)
self.CpuTag=self.ReadWord()
self.CpuRevision = self.CpuTag & 0x1f
self.CpuId = self.CpuTag & 0x3FE0
if ((self.CpuTag & 0x3FE0) == 0x3FE0):
self.CpuTag=0
return self.CpuTag
ListName=0
ListProgramSize=1
def FindCpu(self, Id):
_cpuInfo = self.CpuList.get(Id & 0xFFE0)
if _cpuInfo != None:
self.ProgramSize= _cpuInfo[self.ListProgramSize]
self.CpuId = Id & 0XFFE0
self.CpuRevision= Id & 0x1F
return _cpuInfo
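# Illustrative usage sketch (not part of the original module). It assumes the
# GPIO wiring expected by burnGPIO and an Intel HEX file built for a
# PIC16F87/88; the file name below is hypothetical.
def _example_burn(hex_path="firmware.hex"):
    pic = PIC16F8X()
    if pic.FindCpu(pic.ScanCpuTag()) is None:
        print("No supported PIC detected")
        return
    pic_data = IntelHex(hex_path).todict()  # {byte address: byte value}
    pic.BulkErase()
    if pic.ProgramBlankCheck() and pic.DataBlankCheck():
        pic.ProgramBurn(pic_data)
        pic.DataBurn(pic_data)
        pic.ConfigBurn(pic_data)
        pic.ProgramCheck(pic_data)
        pic.DataCheck(pic_data)
        pic.ConfigCheck(pic_data)
    pic.Release_LVP()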
| {
"content_hash": "459898c482b59d43bcd9b372ce6dd5da",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 105,
"avg_line_length": 29.813953488372093,
"alnum_prop": 0.594383775351014,
"repo_name": "danjperron/burnLVP",
"id": "8f111457eb62be2c2c34f4728b73caa1bab67421",
"size": "13063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CpuPIC16F8X.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126903"
}
],
"symlink_target": ""
} |
import sys
import importlib
from .git import get_current_branch
def import_from_str(module: str):
return importlib.import_module(module)
def get_current_task_type():
from gitbarry.config import settings
current_branch = get_current_branch()
for task_prefix in settings['TASKS'].keys():
if current_branch.startswith('%s/' % task_prefix):
return task_prefix
return False
def ensure_current_branch_is_taskbranch():
current_task_type = get_current_task_type()
if current_task_type is False:
print("Current branch not looks like barry task branch.")
sys.exit(5)
| {
"content_hash": "061afeca61cc2b64fc9f90ad299c2c44",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.6878980891719745,
"repo_name": "a1fred/git-barry",
"id": "00c665536b155d3250551ce583955ba35f0003b0",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gitbarry/utils/shortcuts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14780"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
} |
from tweepy import OAuthHandler
class SaitanOAuthHandler():
def __init__(self, config):
keys = config.OAuthKeys()
self.oauth = OAuthHandler(keys['consumer_key'],
keys['consumer_secret'],
secure = True
)
self.oauth.set_access_token(keys['access_token_key'], keys['access_token_secret'])
def authenticate(self):
return self.oauth
| {
"content_hash": "b659c1e96f034dbdfd87c39004c5b135",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 27.076923076923077,
"alnum_prop": 0.7017045454545454,
"repo_name": "chris-x86-64/Saitan-bot-py",
"id": "b3637b57c725f595b4928bffc845d8fc9e264682",
"size": "352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saitan/oauth_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "174"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class DomainValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(self, plotly_name="domain", parent_name="layout.yaxis", **kwargs):
super(DomainValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
items=kwargs.pop(
"items",
[
{"valType": "number", "min": 0, "max": 1, "editType": "plot"},
{"valType": "number", "min": 0, "max": 1, "editType": "plot"},
],
),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "a7153f7faa81ec73227caa74d8f602a1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 83,
"avg_line_length": 37.8421052631579,
"alnum_prop": 0.4965229485396384,
"repo_name": "plotly/python-api",
"id": "511ec578d2d082258261ed54e5d80ff4dbd55eda",
"size": "719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/yaxis/_domain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, request, session, redirect, url_for
from helpers import register_account, get_posts, make_post, get_post, login_user
import datetime
import re
app = Flask(__name__)
app.debug = True
app.secret_key = 'foobar'
@app.route("/")
def root():
if 'username' in session:
return redirect(url_for('home'))
return render_template("root.html")
@app.route("/home")
def home():
    if 'username' not in session:
return redirect("/")
posts = get_posts(session['username'])
return render_template("home.html", posts=posts)
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
else:
username = request.form["username"].lower()
password = request.form["password"]
reg = register_account(username, password)
if reg == True:
session['username'] = username
return redirect(url_for('home'))
else:
return render_template("register.html", error="Username is already taken.")
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == 'GET':
return render_template('login.html')
else:
username = request.form['username'].lower()
password = request.form['password']
log = login_user(username, password)
if log == True:
session['username'] = username
return redirect(url_for('home'))
else:
return render_template("login.html", error="Wrong username and/or password.")
@app.route("/logout")
def logout():
    session.pop('username', None)
return redirect("/")
@app.route("/new", methods=["GET", "POST"])
def new():
if request.method == 'GET':
        if 'username' not in session:
return redirect("/")
return render_template("new.html")
else:
content = request.form["content"]
post = make_post(session['username'], content)
return redirect(url_for("home"))
@app.route("/<id>")
def show(id):
post = get_post(id, session['username'])
if post == False:
return render_template('show.html', error='You can\'t view this entry.')
else:
return render_template('show.html', post=post)
if __name__ == '__main__':
app.run()
| {
"content_hash": "58f2e69adc1e0a6ea76d5452439ef157",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 80,
"avg_line_length": 25.337349397590362,
"alnum_prop": 0.6742748454588683,
"repo_name": "NotBlizzard/journaler",
"id": "9e9fb69a18748ac8d72634c9b07b6cee650be30c",
"size": "2103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "675"
},
{
"name": "HTML",
"bytes": "2205"
},
{
"name": "Python",
"bytes": "3334"
}
],
"symlink_target": ""
} |
from hypothesis import given
from hypothesis.strategies import integers, lists
from src.Algorithms.quicksort import quicksort
@given(lists(integers()))
def test_quicksort(xs):
assert quicksort(xs.copy()) == sorted(xs)
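# Illustrative sketch (not part of the original test module): a second
# property-based check that quicksort preserves the multiset of elements,
# using the same imports as above.
@given(lists(integers()))
def test_quicksort_preserves_elements(xs):
    assert sorted(quicksort(xs.copy())) == sorted(xs)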
| {
"content_hash": "c1d3deadf94aabd66ee2801ab4492a56",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 49,
"avg_line_length": 25,
"alnum_prop": 0.7733333333333333,
"repo_name": "roghu/py3_projects",
"id": "1e0c80bc8930a98f76fc443e6697f000017d4a26",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/Algorithms/test_quicksort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "387"
},
{
"name": "Python",
"bytes": "111612"
}
],
"symlink_target": ""
} |
""" commands for blogtopoid script
"""
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import sys
import codecs
import logging
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import pkg_resources
from .blogtopoid import (Config, generate_index, generate_feed,
prepare_style, tags, Post, Page, write_file)
def quickstart():
""" ask for all configuration options and write example files.
"""
# read config template
config_file = pkg_resources.resource_stream(
__name__,
'example/blogtopoid.config.example',
)
# query user for config options
config = ConfigParser.SafeConfigParser()
config.readfp(config_file)
for section in config.sections():
for option in config.options(section):
answer = raw_input(
"{} [default: {}] = ".format(option,
config.get(section, option))
)
if answer:
config.set(section, option, answer)
config.write(codecs.open('blogtopoid.config', 'w', 'utf8'))
# make post and pages dirs
if not os.path.exists(config.get('general', 'inputdir')):
os.makedirs(config.get('general', 'inputdir'))
if not os.path.exists(config.get('general', 'pagesdir')):
os.makedirs(config.get('general', 'pagesdir'))
if not os.path.exists(config.get('general', 'styledir')):
os.makedirs(config.get('general', 'styledir'))
if not os.path.exists(config.get('general', 'templatedir')):
os.makedirs(config.get('general', 'templatedir'))
# copy example post to inputdir/
hw_post = pkg_resources.resource_stream(
__name__,
'example/19700101 example.md',
)
codecs.open(
os.path.join(
config.get('general', 'inputdir'), '19700101 example.md'
), 'w', 'utf8'
).write(hw_post.read())
hw_image = pkg_resources.resource_stream(
__name__,
'example/19700101 pythonlogo.png',
)
open(os.path.join(
config.get('general', 'inputdir'), '19700101 pythonlogo.png'
), 'wb').write(hw_image.read())
# copy example template files
for template_filename in ['index.html', 'page.html', 'post.html']:
template_file = pkg_resources.resource_stream(
__name__,
'example/{}'.format(template_filename),
)
codecs.open(os.path.join(
config.get('general', 'templatedir'), template_filename
), 'w', 'utf8').write(template_file.read())
# done setting up
sys.exit(0)
def new_post():
""" ask for YAML front-matter options, create empty post
and start editor
"""
print('not yet implemented')
sys.exit(1)
def generate():
""" generate HTML
"""
config = Config()
# set up logging
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logging.getLogger('blogtopoid').addHandler(ch)
logging.getLogger('blogtopoid').setLevel(logging.DEBUG)
pages = []
for infile in os.listdir(unicode(config.pagesdir)):
if os.path.splitext(infile)[1].lower() in config.supported_blogtypes:
page = Page(infile)
pages.append(page)
for page in pages:
write_file(os.path.join(config.outputdir, page.outfile),
page.render(pages))
posts = []
for infile in os.listdir(unicode(config.inputdir)):
if os.path.splitext(infile)[1].lower() in config.supported_blogtypes:
post = Post(infile)
# if hashstore.get(infile) == post.hash:
# print("already processed")
# continue
posts.append(post)
# hashstore.set(infile, post.hash)
# sort posts by publish date
posts.sort(key=lambda p: p.date, reverse=True)
# render post htmls
for post in posts:
write_file(os.path.join(config.outputdir, post.outfile),
post.render(pages))
# generate index from index.md
write_file(os.path.join(config.outputdir, 'index.html'),
generate_index(posts, pages))
# generate rss feed
generate_feed(posts)
# generate tag pages
tagdir = os.path.join(config.outputdir, 'tags')
if not os.path.exists(tagdir):
os.makedirs(tagdir)
for tag in tags.values():
write_file(
os.path.join(tagdir, '{}.html'.format(tag.name)),
generate_index(tag.posts, pages)
)
# copy style dir to disk
write_file(
os.path.join(config.outputdir, 'style', 'style.css'),
prepare_style()
)
| {
"content_hash": "06fee0199c72cb4435aad1c9f22ed977",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 77,
"avg_line_length": 31.07741935483871,
"alnum_prop": 0.5982976956612,
"repo_name": "hansenerd/blogtopoid",
"id": "7df1fd6d63ba28dfbe240990bbc3cfbc3c9c0cf8",
"size": "4817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blogtopoid/commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30032"
},
{
"name": "Shell",
"bytes": "6718"
}
],
"symlink_target": ""
} |
import logging
from django.conf import settings
from django.db.models import Prefetch
from django.shortcuts import redirect
from django.views.generic import DetailView
from rdmo.core.constants import VALUE_TYPE_FILE
from rdmo.core.utils import render_to_format
from rdmo.core.views import ObjectPermissionMixin
from rdmo.questions.models import QuestionSet, Question
from rdmo.views.utils import ProjectWrapper
from ..models import Project, Snapshot
from ..utils import get_value_path
logger = logging.getLogger(__name__)
class ProjectAnswersView(ObjectPermissionMixin, DetailView):
model = Project
queryset = Project.objects.prefetch_related(
Prefetch('catalog__sections__questionsets', queryset=QuestionSet.objects.select_related('attribute')),
Prefetch('catalog__sections__questionsets__questions', queryset=Question.objects.select_related('attribute', 'questionset')),
Prefetch('catalog__sections__questionsets__questionsets', queryset=QuestionSet.objects.select_related('attribute')),
Prefetch('catalog__sections__questionsets__questionsets__questions', queryset=Question.objects.select_related('attribute', 'questionset')),
)
permission_required = 'projects.view_project_object'
template_name = 'projects/project_answers.html'
no_catalog_error_template = 'projects/project_error_no_catalog.html'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.catalog is None:
return redirect('project_error', pk=self.object.pk)
else:
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
try:
context['current_snapshot'] = context['project'].snapshots.get(pk=self.kwargs.get('snapshot_id'))
except Snapshot.DoesNotExist:
context['current_snapshot'] = None
# collect values with files, remove double files and order them.
context['attachments'] = context['project'].values.filter(snapshot=context['current_snapshot']) \
.filter(value_type=VALUE_TYPE_FILE) \
.order_by('file')
context.update({
'project_wrapper': ProjectWrapper(context['project'], context['current_snapshot']),
'snapshots': list(context['project'].snapshots.values('id', 'title')),
'export_formats': settings.EXPORT_FORMATS
})
return context
class ProjectAnswersExportView(ObjectPermissionMixin, DetailView):
model = Project
queryset = Project.objects.prefetch_related(
Prefetch('catalog__sections__questionsets', queryset=QuestionSet.objects.select_related('attribute')),
Prefetch('catalog__sections__questionsets__questions', queryset=Question.objects.select_related('attribute', 'questionset')),
Prefetch('catalog__sections__questionsets__questionsets', queryset=QuestionSet.objects.select_related('attribute')),
Prefetch('catalog__sections__questionsets__questionsets__questions', queryset=Question.objects.select_related('attribute', 'questionset')),
)
permission_required = 'projects.view_project_object'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
try:
context['current_snapshot'] = context['project'].snapshots.get(pk=self.kwargs.get('snapshot_id'))
except Snapshot.DoesNotExist:
context['current_snapshot'] = None
context.update({
'project_wrapper': ProjectWrapper(context['project'], context['current_snapshot']),
'title': context['project'].title,
'format': self.kwargs.get('format'),
'resource_path': get_value_path(context['project'], context['current_snapshot'])
})
return context
def render_to_response(self, context, **response_kwargs):
return render_to_format(self.request, context['format'], context['title'], 'projects/project_answers_export.html', context)
| {
"content_hash": "86f3843247b19795fb0c0f6a0787c2c0",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 147,
"avg_line_length": 46.16483516483517,
"alnum_prop": 0.6779338252796953,
"repo_name": "rdmorganiser/rdmo",
"id": "54a0d096ebae8a0e7e87090767e041422c4943bd",
"size": "4201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdmo/projects/views/project_answers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "426256"
},
{
"name": "JavaScript",
"bytes": "110821"
},
{
"name": "Python",
"bytes": "1265092"
},
{
"name": "SCSS",
"bytes": "20373"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Member
@admin.register(Member)
class AdminMember(admin.ModelAdmin):
list_display = ('name', 'last_name', 'phone', 'email', 'address',
'personal_skills', 'team_skills', 'weakness',
'under_presure',)
list_filter = ('email',)
| {
"content_hash": "038807f6baf5a81355d4ca21f690262d",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 69,
"avg_line_length": 36.111111111111114,
"alnum_prop": 0.6123076923076923,
"repo_name": "Jhonbeltran/information-layer8",
"id": "7bdecec39001f313fd784a88b38d0e389c3a8a30",
"size": "325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "members/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162378"
},
{
"name": "HTML",
"bytes": "17338"
},
{
"name": "JavaScript",
"bytes": "324493"
},
{
"name": "Python",
"bytes": "26785"
}
],
"symlink_target": ""
} |
"""Control Flow Operations.
See the @{$python/control_flow_ops} guide.
@@identity
@@identity_n
@@tuple
@@group
@@no_op
@@count_up_to
@@cond
@@case
@@while_loop
@@logical_and
@@logical_not
@@logical_or
@@logical_xor
@@equal
@@not_equal
@@less
@@less_equal
@@greater
@@greater_equal
@@where
@@is_finite
@@is_inf
@@is_nan
@@verify_tensor_all_finite
@@check_numerics
@@add_check_numerics_ops
@@Assert
@@Print
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import control_flow_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_should_use
# We override the 'tuple' for a control flow op, so we keep python's
# existing 'tuple' for later use in this module.
_basetuple = tuple
# pylint: disable=protected-access
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
@tf_should_use.should_use_result
def Assert(condition, data, summarize=None, name=None):
"""Asserts that the given condition is true.
If `condition` evaluates to false, print the list of tensors in `data`.
`summarize` determines how many entries of the tensors to print.
NOTE: To ensure that Assert executes, one usually attaches a dependency:
```python
# Ensure maximum element of x is smaller or equal to 1
assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])
with tf.control_dependencies([assert_op]):
... code using x ...
```
Args:
condition: The condition to evaluate.
data: The tensors to print out when condition is false.
summarize: Print this many entries of each tensor.
name: A name for this operation (optional).
Returns:
assert_op: An `Operation` that, when executed, raises a
`tf.errors.InvalidArgumentError` if `condition` is not true.
"""
with ops.name_scope(name, "Assert", [condition, data]) as name:
xs = ops.convert_n_to_tensor(data)
if all([x.dtype in {dtypes.string, dtypes.int32} for x in xs]):
# As a simple heuristic, we assume that string and int32 are
# on host to avoid the need to use cond. If it is not case,
# we will pay the price copying the tensor to host memory.
return gen_logging_ops._assert(
condition, data, summarize, name="Assert")
else:
condition = ops.convert_to_tensor(condition, name="Condition")
def true_assert():
return gen_logging_ops._assert(
condition, data, summarize, name="Assert")
guarded_assert = cond(
condition, no_op, true_assert, name="AssertGuard")
return guarded_assert.op
def _Identity(data, name=None):
"""Return a tensor with the same shape and contents as the input tensor.
Args:
data: A Tensor.
name: A name for this operation (optional).
Returns:
A Tensor with the same type and value as the input Tensor.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return gen_array_ops._ref_identity(data, name=name)
else:
return array_ops.identity(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Identity(data.values, name=name)
indices = array_ops.identity(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = array_ops.identity(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = array_ops.identity(data.dense_shape, name="dense_shape")
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _NextIteration(data, name=None):
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return ref_next_iteration(data, name=name)
else:
return next_iteration(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _NextIteration(data.values, name=name)
indices = next_iteration(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = next_iteration(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = next_iteration(data.dense_shape, name="dense_shape")
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
use_ref=True, use_input_shape=True, name=None):
"""Creates or finds a child frame, and makes `data` available to it.
The unique `frame_name` is used by the `Executor` to identify frames. If
`is_constant` is true, `data` is a constant in the child frame; otherwise
it may be changed in the child frame. At most `parallel_iterations`
iterations are run in parallel in the child frame.
Args:
data: The tensor to be made available to the child frame.
frame_name: The name of the child frame.
is_constant: If true, the output is constant within the child frame.
parallel_iterations: The number of iterations allowed to run in parallel.
    use_ref: If true, use ref_enter if data is of ref type.
    use_input_shape: If true, the static shape of `data` is copied onto the
      returned tensor.
    name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype and use_ref: # pylint: disable=protected-access
result = ref_enter(data, frame_name, is_constant, parallel_iterations,
name=name)
else:
result = enter(data, frame_name, is_constant, parallel_iterations,
name=name)
if use_input_shape:
result.set_shape(data.get_shape())
return result
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Enter(data.values, frame_name, is_constant,
parallel_iterations=parallel_iterations,
use_input_shape=use_input_shape, name=name)
indices = enter(data.indices, frame_name, is_constant,
parallel_iterations, name="indices")
if use_input_shape:
indices.set_shape(data.indices.get_shape())
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = enter(dense_shape, frame_name, is_constant,
parallel_iterations, name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.dense_shape.get_shape())
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = enter(data.dense_shape, frame_name, is_constant,
parallel_iterations, name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.dense_shape.get_shape())
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def exit(data, name=None):
"""Exits the current frame to its parent frame.
Exit makes its input `data` available to the parent frame.
Args:
data: The tensor to be made available to the parent frame.
name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return gen_control_flow_ops._ref_exit(data, name)
else:
return gen_control_flow_ops._exit(data, name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = exit(data.values, name=name)
indices = gen_control_flow_ops._exit(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = gen_control_flow_ops._exit(dense_shape, name)
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = gen_control_flow_ops._exit(data.dense_shape, name)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def switch(data, pred, dtype=None, name=None):
"""Forwards `data` to an output determined by `pred`.
If `pred` is false, the `data` input is forwarded to the first output.
Otherwise, the data goes to the second output.
This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
dtype: Optional element type for the returned tensor. If missing,
the type is inferred from the type of `value`.
name: A name for this operation (optional).
Returns:
`(output_false, output_true)`: If `pred` is true, data will be forwarded
to `output_true`, otherwise it goes to `output_false`.
"""
with ops.name_scope(name, "Switch", [data, pred]) as name:
data = ops.internal_convert_to_tensor_or_indexed_slices(
data, dtype=dtype, name="data", as_ref=True)
pred = ops.convert_to_tensor(pred, name="pred")
if isinstance(data, ops.Tensor):
return gen_control_flow_ops._switch(data, pred, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
val, ind = data.values, data.indices
val_f, val_t = gen_control_flow_ops._switch(val, pred, name=name)
ind_f, ind_t = gen_control_flow_ops._switch(ind, pred, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
dense_shape, pred, name="dense_shape")
else:
dense_shape_f, dense_shape_t = None, None
return (ops.IndexedSlices(val_f, ind_f, dense_shape_f),
ops.IndexedSlices(val_t, ind_t, dense_shape_t))
else:
dense_shape = data.dense_shape
dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
data.dense_shape, pred, name="dense_shape")
return (sparse_tensor.SparseTensor(ind_f, val_f, dense_shape_f),
sparse_tensor.SparseTensor(ind_t, val_t, dense_shape_t))
def _SwitchRefOrTensor(data, pred, name="Switch"):
"""Forwards `data` to an output determined by `pred`.
If `pred` is false, the `data` input is forwarded to the first output.
Otherwise, the data goes to the second output.
This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
name: A name for this operation (optional).
Returns:
`(output_false, output_true)`: If `pred` is true, data will be forwarded to
`output_true`, otherwise it goes to `output_false`.
Raises:
TypeError: if data is not a Tensor or IndexedSlices
"""
data = ops.convert_to_tensor_or_indexed_slices(data, name="data")
# NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below
# addresses the following scenario.
#
# Assume you execute Optimizer.apply_gradients() in a branch of a cond().
#
# 1. The update op is created inside a `with ops.colocate(var):` block
#
# 2. Some tensor `data` is captured and a switch is created in a
# `with ops.colocate_with(data):` block.
#
# with ops.colocate_with(var):
# with ops.colocate_with(data):
# op = ...
#
# var and data may be pinned to different devices, so we want to ops
# created within ops.colocate_with(data) to ignore the existing stack.
with ops.colocate_with(data, ignore_existing=True):
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return ref_switch(data, pred, name=name)
return switch(data, pred, name=name)
def merge(inputs, name=None):
"""Returns the value of an available element of `inputs`.
This op tests each of the tensors in `inputs` in turn to determine if any of
them is available. If it finds an available tensor, it returns it and its
index in `inputs`.
It is an error if more than one tensor in `inputs` is available. If no tensor
in `inputs` is available, the returned tensor and index are not set.
This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of
`Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices
before merging.
Args:
inputs: The input tensors, at most one of which is available.
name: A name for this operation (optional).
Returns:
A tuple containing the chosen input tensor and its index in `inputs`.
Raises:
ValueError: If any of the inputs is None, or inputs are IndexedSlices and
some but not all have a dense_shape property.
"""
if any([inp is None for inp in inputs]):
raise ValueError("At least one of the merge inputs is None: %s" % inputs)
with ops.name_scope(name, "Merge", inputs) as name:
inputs = [ops.internal_convert_to_tensor_or_indexed_slices(inp, as_ref=True)
for inp in inputs]
if all([isinstance(v, ops.Tensor) for v in inputs]):
if all([v.dtype._is_ref_dtype for v in inputs]): # pylint: disable=protected-access
return gen_control_flow_ops._ref_merge(inputs, name)
else:
return gen_control_flow_ops._merge(inputs, name)
elif all([isinstance(v, sparse_tensor.SparseTensor) for v in inputs]):
# Only handle the case when all inputs are SparseTensor.
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops._merge(
[inp.indices for inp in inputs], name="indices")
dense_shape, _ = gen_control_flow_ops._merge(
[inp.dense_shape for inp in inputs], name="dense_shape")
return (sparse_tensor.SparseTensor(indices, values, dense_shape),
chosen_index)
else:
# For now convert all the inputs as IndexedSlices.
inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops._merge(
[inp.indices for inp in inputs], name="indices")
if any(inp.dense_shape is not None for inp in inputs):
if any(inp.dense_shape is None for inp in inputs):
raise ValueError("Either all merged IndexedSlices must have a "
"dense_shape, or none must have a dense_shape.")
dense_shape, _ = gen_control_flow_ops._merge(
[inp.dense_shape for inp in inputs], name="dense_shape")
else:
dense_shape = None
return ops.IndexedSlices(values, indices, dense_shape), chosen_index
# pylint: enable=protected-access
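# Illustrative sketch (not part of the original module): `switch` and `merge`
# compose into a conditional, which is the core of `cond` defined later in
# this file. Assumes TF 1.x graph construction; evaluating `result` would
# require a Session.
def _example_switch_merge():
  pred = constant_op.constant(True)
  data = constant_op.constant(10)
  out_false, out_true = switch(data, pred)
  # Only the taken branch produces a live tensor at run time.
  result, value_index = merge([out_false + 1, out_true * 2])
  return result, value_index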
def _convert_tensorarray_to_flow(tensor_or_tensor_array):
if isinstance(tensor_or_tensor_array, tensor_array_ops.TensorArray):
return tensor_or_tensor_array.flow
else:
return tensor_or_tensor_array
def _make_tensor_array(ta, t_or_flow):
# pylint: disable=protected-access
new_ta = tensor_array_ops.TensorArray(
dtype=ta.dtype, handle=ta.handle, flow=t_or_flow,
infer_shape=ta._infer_shape,
colocate_with_first_write_call=ta._colocate_with_first_write_call)
new_ta._colocate_with = ta._colocate_with
new_ta._element_shape = ta._element_shape
# pylint: enable=protected-access
return new_ta
def _convert_flows_to_tensorarrays(tensors_or_tensorarrays, tensors_or_flows):
if len(tensors_or_tensorarrays) != len(tensors_or_flows):
raise ValueError(
"Lengths of original Tensor list and new list do not match: %d vs. %d"
% (len(tensors_or_tensorarrays), len(tensors_or_flows)))
return [
_make_tensor_array(ta, t_or_flow)
if isinstance(ta, tensor_array_ops.TensorArray)
else t_or_flow
for (ta, t_or_flow) in zip(tensors_or_tensorarrays, tensors_or_flows)]
def _IsLoopConstantEnter(op):
"""Return true iff op is a loop invariant."""
is_enter = (op.type == "Enter" or op.type == "RefEnter")
return is_enter and op.get_attr("is_constant")
def _GetLoopConstantEnter(value):
"""Return the enter op if we can infer `value` to be a loop invariant."""
id_ops = {"Switch", "RefSwitch", "Identity", "RefIdentity"}
op = value.op
while op.type in id_ops:
op = op.inputs[0].op
return op if _IsLoopConstantEnter(op) else None
def _GetOutputContext(op):
"""Return the control flow context for the output of an op."""
ctxt = op._get_control_flow_context()
if IsLoopExit(op):
ctxt = ctxt.outer_context
return ctxt
def _ShapeLessThanOrEqual(shape1, shape2):
if shape2.dims is None:
return True
if shape1.ndims != shape2.ndims:
return False
for dim1, dim2 in zip(shape1.dims, shape2.dims):
if dim2.value is not None and dim1.value != dim2.value:
return False
return True
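# Illustrative sketch (not part of the original module): the partial order
# checked above treats a fully defined shape as compatible with a less
# specific invariant, but not the other way around.
def _example_shape_less_than_or_equal():
  concrete = tensor_shape.TensorShape([3, 4])
  invariant = tensor_shape.TensorShape([None, 4])
  return (_ShapeLessThanOrEqual(concrete, invariant),   # True
          _ShapeLessThanOrEqual(invariant, concrete))   # False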
def _SetShapeInvariants(input_vars, enter_vars, shapes):
"""Set the shapes of the tensors in `enter_vars` to `shapes`.
Args:
input_vars: A list of tensors that are inputs to `enter_vars`.
enter_vars: A list of tensors whose shapes will be set.
shapes: A (possibly nested) list of shapes.
Raises:
ValueError: If any tensor in `enter_vars` has a less specific shape
than its corresponding shape in `shapes`.
"""
if shapes is None:
return
flat_shapes = nest.flatten(shapes)
if not all([isinstance(s, tensor_shape.TensorShape) for s in flat_shapes]):
raise ValueError("`shapes` must be a (possibly nested) list of shapes.")
# Check that the shapes of the inputs are less than the shape invariants,
# and set the shapes of `enter_vars` to the shape invariants.
for inp, var, shape in zip(input_vars, enter_vars, flat_shapes):
if isinstance(var, ops.Tensor):
if not _ShapeLessThanOrEqual(inp.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the loop variable. It enters the loop "
"with shape %s, but the specified shape invariant is %s."
% (inp.name, inp.get_shape(), shape))
var.set_shape(shape)
else:
if not isinstance(var, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(var))
if isinstance(var, ops.IndexedSlices):
if not _ShapeLessThanOrEqual(inp.values.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the values tensor of this IndexedSlices. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s."
% (inp.values.name, inp.values.get_shape(), shape))
var.values.set_shape(shape)
var.indices.set_shape(tensor_shape.TensorShape([shape[0]]))
if var.dense_shape is not None:
var.dense_shape.set_shape(tensor_shape.TensorShape([shape.ndims]))
else:
if not _ShapeLessThanOrEqual(inp.dense_shape.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the shape tensor of this SparseTensor. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s."
% (inp.dense_shape.name, inp.dense_shape.get_shape(), shape))
var.values.set_shape(tensor_shape.TensorShape([None]))
var.indices.set_shape(tensor_shape.TensorShape([None, shape.ndims]))
var.dense_shape.set_shape(shape)
def _EnforceShapeInvariant(merge_var, next_var):
"""Check if the shapes of the loops variables are invariants.
Args:
merge_vars: The list of tensors representing the initial values of the
loop variables.
next_vars: The list of tensors representing the values of the loop
variables after one loop iteration.
Raises:
ValueError: If any tensor in `merge_vars` has a more specific shape than
its correspnding tensor in `next_var`.
"""
if isinstance(merge_var, ops.Tensor):
m_shape = merge_var.get_shape()
n_shape = next_var.get_shape()
if not _ShapeLessThanOrEqual(n_shape, m_shape):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape %s, but has shape %s after one iteration. "
"Provide shape invariants using either the `shape_invariants` "
"argument of tf.while_loop or set_shape() on the loop variables."
% (merge_var.name, m_shape, n_shape))
else:
    if not isinstance(merge_var,
                      (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(merge_var))
    if isinstance(merge_var, ops.IndexedSlices):
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = tensor_shape.TensorShape(None)
if merge_var.dense_shape is not None:
m_shape_shape = merge_var.dense_shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = tensor_shape.TensorShape(None)
if next_var.dense_shape is not None:
n_shape_shape = next_var.dense_shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape)):
if not _ShapeLessThanOrEqual(n_values_shape, m_values_shape):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either the "
"`shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables."
% (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
else:
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = merge_var.dense_shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = next_var.dense_shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape) or
not _ShapeLessThanOrEqual(n_shape_shape, m_shape_shape)):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either "
"the `shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables."
% (merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
def _AddNextAndBackEdge(m, v):
"""Add NextIteration and back edge from v to m."""
if isinstance(m, ops.Tensor):
v = ops.convert_to_tensor(v)
v = _NextIteration(v)
m.op._update_input(1, v) # pylint: disable=protected-access
elif isinstance(m, ops.IndexedSlices):
# pylint: disable=protected-access
v = math_ops._as_indexed_slices(v, optimize=False)
v = _NextIteration(v)
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
# pylint: enable=protected-access
if m.dense_shape is not None:
if v.dense_shape is None:
raise ValueError("Must have dense shape: %s" % v.name)
m.dense_shape.op._update_input(1, v.dense_shape)
elif isinstance(m, sparse_tensor.SparseTensor):
if not isinstance(v, sparse_tensor.SparseTensor):
raise ValueError("Must be a sparse tensor: %s" % v.name)
v = _NextIteration(v)
# pylint: disable=protected-access
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
m.dense_shape.op._update_input(1, v.dense_shape)
# pylint: enable=protected-access
else:
raise TypeError("Type %s not supported" % type(m))
return v
class GradLoopState(object):
"""The state used for constructing the gradient graph for a while loop.
We create a GradLoopState for each while loop in forward and its
corresponding while loop in backprop. This gives us access to both
the forward and the backprop WhileContexts.
  During the construction of the gradient graph, whenever we detect a
  forward value that is needed for backprop, we create a history accumulator
  and add it to `history_map`. Whenever we backprop a loop switch op (in
  _SwitchGrad), we add the grad merge op to `switch_map`.
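  As a rough, user-level illustration (not code from this module), a single
  GradLoopState pairs the forward loop built by tf.while_loop with the
  backprop loop built while constructing tf.gradients:
  ```python
  i0 = tf.constant(1.0)
  y = tf.while_loop(lambda i: i < 10.0, lambda i: i * 2.0, [i0])
  dy_di0 = tf.gradients(y, i0)  # building this creates the backprop loop
  ```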
"""
def __init__(self, forward_ctxt, outer_grad_state):
# The grad loop state for the outer while loop.
self._outer_grad_state = None
# The while loop context for forward.
self._forward_context = None
# The loop counter added by AddForwardLoopCounter. It is the value
# of the loop counter for the next iteration.
self._forward_index = None
# A sync op for forward.
self._forward_sync = None
# The while loop context for backprop.
self._grad_context = None
# The loop counter added by AddBackpropLoopCounter. It is the value
# of the loop counter for the current iteration.
self._grad_index = None
# A sync op for backprop.
self._grad_sync = None
# Information needed by backprop.
self._history_map = {}
self._switch_map = {}
self._unused_exits = []
self._deferred_exits = []
self._forward_loop_exits = list(forward_ctxt.loop_exits)
self._pending_exits_count = len(forward_ctxt.loop_exits)
self._outer_grad_state = outer_grad_state
if outer_grad_state:
outer_forward_ctxt = outer_grad_state.forward_context
else:
outer_forward_ctxt = forward_ctxt.outer_context
# Add the forward loop counter.
if outer_forward_ctxt: outer_forward_ctxt.Enter()
cnt, forward_index = forward_ctxt.AddForwardLoopCounter(outer_grad_state)
if outer_forward_ctxt: outer_forward_ctxt.Exit()
self._forward_context = forward_ctxt
self._forward_index = forward_index
# Add the backprop WhileContext, and the backprop loop counter.
if outer_grad_state:
# This is a nested loop. Remember the iteration counts for each
# execution of this inner loop.
outer_forward_ctxt.AddName(cnt.name)
history_cnt = outer_grad_state.AddForwardAccumulator(cnt)
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
forward_ctxt.back_prop,
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
real_cnt = outer_grad_state.AddBackpropAccumulatedValue(history_cnt, cnt)
self._grad_index = self._grad_context.AddBackpropLoopCounter(
real_cnt, outer_grad_state)
outer_grad_ctxt.Exit()
else:
if outer_forward_ctxt: outer_forward_ctxt.Enter()
self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
forward_ctxt.back_prop,
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
self._grad_index = self._grad_context.AddBackpropLoopCounter(
cnt, outer_grad_state)
if outer_forward_ctxt: outer_forward_ctxt.Exit()
@property
def outer_grad_state(self):
"""The grad loop state for outer loop."""
return self._outer_grad_state
@property
def forward_context(self):
"""The while loop context for forward."""
return self._forward_context
@property
def forward_index(self):
"""The loop index of forward loop."""
return self._forward_index
@property
def forward_sync(self):
"""A control trigger node for synchronization in the forward loop.
One main use is to keep the push ops of a stack executed in the
iteration order.
"""
if self._forward_sync is None:
with ops.control_dependencies(None):
self._forward_sync = control_trigger(name="f_sync")
self._forward_sync._set_control_flow_context(self._forward_context)
self._forward_index.op._add_control_input(self._forward_sync)
return self._forward_sync
@property
def grad_context(self):
"""The corresponding WhileContext for gradient."""
return self._grad_context
@property
def grad_index(self):
"""The loop index of backprop loop."""
return self._grad_index
@property
def grad_sync(self):
"""A control trigger node for synchronization in the grad loop.
One main use is to keep the pop ops of a stack executed in the
iteration order.
"""
if self._grad_sync is None:
with ops.control_dependencies(None):
self._grad_sync = control_trigger(name="b_sync")
self._grad_sync._set_control_flow_context(self._grad_context)
self._grad_index.op._add_control_input(self._grad_sync)
if self._grad_context.outer_context:
self._grad_context.outer_context.AddInnerOp(self._grad_sync)
return self._grad_sync
@property
def history_map(self):
"""The map that records all the tensors needed for backprop."""
return self._history_map
@property
def switch_map(self):
"""The map that records all the Switch ops for the while loop."""
return self._switch_map
@property
def unused_exits(self):
"""The list of "unused" exits."""
return self._unused_exits
@property
def deferred_exits(self):
"""The list of "deferred" exits."""
return self._deferred_exits
@property
def forward_loop_exits(self):
"""The list of exits of the forward loop."""
return self._forward_loop_exits
@property
def pending_exits_count(self):
"""The number of exits we expect to see but haven't."""
return self._pending_exits_count
@pending_exits_count.setter
def pending_exits_count(self, cnt):
"""Set the pending count to cnt."""
self._pending_exits_count = cnt
def AddForwardAccumulator(self, value, dead_branch=False):
"""Add an accumulator for each forward tensor that is needed in backprop.
    This is added to the forward loop the first time a tensor in the forward
    loop is used by the backprop gradient computation loop. We create an
    accumulator that accumulates the value of the tensor at each iteration.
    Called in the control flow context where gradients() is called.
The pseudocode is:
```
acc = stack();
while (_pivot) {
acc = stack_push(acc, value);
}
```
We make sure that the stack push op in one iteration is executed before
next iteration. This is achieved by adding a control edge from
`forward_index.op.inputs[0].op` to the push op, and another control
edge from the push op to either `forward_index.op` or `forward_sync`.
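    Schematically, the resulting ordering is (illustrative only):
    ```
    add_op = forward_index.op.inputs[0].op
    add_op -> push -> forward_index.op   # value in the forward context
    add_op -> push -> forward_sync       # value in a nested cond context
    ```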
Args:
value: The source tensor in forward that is to be accumulated.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The stack that contains the accumulated history of the tensor.
Raises:
TypeError: For internal errors involving the value condition context.
"""
curr_ctxt = ops.get_default_graph()._get_control_flow_context()
with ops.control_dependencies(None):
if curr_ctxt: curr_ctxt.Enter()
with ops.colocate_with(value):
# pylint: disable=protected-access
acc = gen_data_flow_ops._stack_v2(-1, value.dtype.base_dtype,
name="f_acc")
# pylint: enable=protected-access
if curr_ctxt: curr_ctxt.Exit()
# Make acc available in the forward context.
enter_acc = self.forward_context.AddValue(acc)
# Add the stack_push op in the context of value.op.
swap_enabled = self.forward_context.swap_memory
value_ctxt = _GetOutputContext(value.op)
if value_ctxt == self.forward_context:
# value is not nested in the forward context.
self.forward_context.Enter()
# pylint: disable=protected-access
push = gen_data_flow_ops._stack_push_v2(
enter_acc, value, swap_memory=swap_enabled)
# pylint: enable=protected-access
self.forward_context.Exit()
# Protect stack push and order it before forward_index.
self.forward_index.op._add_control_input(push.op)
else:
# value is in a cond context within the forward context.
if not isinstance(value_ctxt, CondContext):
raise TypeError(
"value_ctxt is not a CondContext: %s" % value_ctxt)
if dead_branch:
# The special case for creating a zero tensor for a dead
# branch of a switch. See ControlFlowState.ZerosLike().
value_ctxt.outer_context.Enter()
# pylint: disable=protected-access
push = gen_data_flow_ops._stack_push_v2(
enter_acc, value, swap_memory=swap_enabled)
# pylint: enable=protected-access
value_ctxt.outer_context.Exit()
push.op._set_control_flow_context(value_ctxt)
else:
value_ctxt.Enter()
# pylint: disable=protected-access
push = gen_data_flow_ops._stack_push_v2(
enter_acc, value, swap_memory=swap_enabled)
# pylint: enable=protected-access
value_ctxt.Exit()
# Protect stack push and order it before forward_sync.
self.forward_sync._add_control_input(push.op)
# Order stack push after the successor of forward_index
add_op = self.forward_index.op.inputs[0].op
push.op._add_control_input(add_op)
return acc
def AddBackpropAccumulatedValue(self, history_value, value,
dead_branch=False):
"""Add the getter for an accumulated value in the grad context.
    This is added to the backprop loop. Called in the grad context to
    retrieve an accumulated value. The stack pop op must be guarded by the
    pred of the controlling cond.
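    The pseudocode is roughly:
    ```
    value = stack_pop(history_value);
    ```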
Args:
history_value: The history (a stack) of a value.
value: The value that is pushed onto the stack.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The current value (the top of the stack).
"""
history_ctxt = history_value.op._get_control_flow_context()
# Find the cond context that controls history_value if any.
cond_ctxt = None
value_ctxt = value.op._get_control_flow_context()
while value_ctxt and value_ctxt != history_ctxt:
if isinstance(value_ctxt, CondContext):
cond_ctxt = value_ctxt
break
value_ctxt = value_ctxt.outer_context
with ops.control_dependencies(None):
self.grad_context.Enter()
if cond_ctxt:
# Guard stack pop with a switch if it is controlled by a cond.
grad_state = self
pred = None
while pred is None and grad_state:
pred = grad_state.history_map.get(cond_ctxt.pred.name)
grad_state = grad_state.outer_grad_state
if pred is None:
pred = cond_ctxt.pred
branch = (1 - cond_ctxt.branch) if dead_branch else cond_ctxt.branch
history_value = _SwitchRefOrTensor(history_value, pred)[branch]
# pylint: disable=protected-access
pop = gen_data_flow_ops._stack_pop_v2(history_value,
value.dtype.base_dtype)
# pylint: enable=protected-access
pop.set_shape(value.get_shape())
self.grad_context.Exit()
parallel_iterations = self.grad_context.parallel_iterations
if parallel_iterations > 1:
# All pops are ordered after pivot_for_body and before grad_sync.
self.grad_sync._add_control_input(pop.op)
return pop
def GetRealValue(self, value):
"""Get the real value of `value`.
If backprop "uses" a value produced by forward inference, an accumulator
is added in the forward loop to accumulate its values. We use the
accumulated value. This method must be called in the grad loop context.
`value` must be in forward and needed for backprop.
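    For a non-constant forward value this roughly amounts to (illustrative):
    ```
    history = forward_grad_state.AddForwardAccumulator(value)          # forward loop
    real_value = grad_state.AddBackpropAccumulatedValue(history, value)  # grad loop
    ```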
Args:
value: A tensor to be captured.
Returns:
The same tensor obtained from the saved history.
"""
assert value.op.type not in ["Variable", "VariableV2"]
real_value = self._history_map.get(value.name)
if real_value is None:
cur_value = value
cur_grad_state = self
while True:
enter_op = _GetLoopConstantEnter(cur_value)
if enter_op:
# Special case: cur_value comes from a constant Enter node.
cur_value = enter_op.inputs[0]
cur_grad_state = cur_grad_state.outer_grad_state
if cur_grad_state is None:
# We are now outside all nested loops for this gradient(),
# so `value` is a loop invariant and there is no need to
            # save the history of value. Just make cur_value enter
# the right control flow context.
real_value = self._grad_context.AddValue(cur_value)
break
elif constant_op.is_constant(cur_value):
# If the value to be forwarded is a constant, clone the constant in
# the gradient loop rather than using a stack.
# TODO(phawkins): consider hoisting the constant out of the loop
# instead.
real_value = constant_op.constant(
tensor_util.constant_value(cur_value), dtype=cur_value.dtype)
break
else:
# Record the history of this value in forward_ctxt.
self._grad_context.Exit()
history_value = cur_grad_state.AddForwardAccumulator(cur_value)
self._grad_context.Enter()
break
if real_value is None:
# Add the stack pop op in the grad context.
real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value,
cur_value)
if cur_grad_state != self:
real_value = self._grad_context.AddValue(real_value)
self._history_map[value.name] = real_value
return real_value
def _GetWhileContext(op):
"""Get the WhileContext to which this op belongs."""
ctxt = op._get_control_flow_context()
if ctxt:
ctxt = ctxt.GetWhileContext()
return ctxt
class ControlFlowState(object):
"""Maintain the mapping from the loops to their grad states."""
def __init__(self):
self._map = {} # maps forward loop context to GradLoopState
def GetGradState(self, op, before):
"""Return the grad state for this op if it's in a forward loop context."""
if before and IsLoopExit(op):
forward_ctxt = op._get_control_flow_context()
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
else:
forward_ctxt = _GetWhileContext(op)
if forward_ctxt:
return self._map.get(forward_ctxt)
return None
def ProcessUnusedLoopExits(self, pending_count, to_ops_set):
"""Process all the "unused" loop exits.
The "unused" exits of the loops are added to `unused_exits`. An exit is
unused if its pending_count is 0. If there is an exit with real gradient,
all these deferred exits will enter the backprop loop with zero gradient.
Otherwise, they will enter the backprop loop with None. As an example,
people often write:
```python
v1, _ = tf.while_loop(p, b, [x1, x2])
result = gradients(v1, x1)
```
The exit node for x2 is not included by the betweenness analysis. But we
need to backprop x2 if x2 is involved in computing v1.
Args:
pending_count: The number of backprop inputs for every op.
to_ops_set: The set of ops for ys in gradients(ys, xs)
Returns:
The set of unused loop exits that we know at this point we need
to backprop.
"""
loop_exits = []
for _, grad_state in self._map.items():
# pylint: disable=protected-access
for y in grad_state.forward_loop_exits:
if pending_count[y.op._id] == 0:
grad_state.pending_exits_count -= 1
if y.op._id not in to_ops_set:
grad_state.unused_exits.append(y)
if grad_state.pending_exits_count == 0:
loop_exits.extend(grad_state.unused_exits)
# Need to include Enters in backprop for higher-order gradients.
for y in grad_state.forward_context.loop_enters:
if pending_count[y.op._id] == 0:
pending_count[y.op._id] = 1
# pylint: enable=protected-access
return loop_exits
def EnterGradWhileContext(self, op, before):
"""Enter the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Enter()
def ExitGradWhileContext(self, op, before):
"""Exit the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Exit()
def AddWhileContext(self, op, between_op_list, between_ops):
"""Add the grad state for the while loop that op belongs to.
Note that op is an Exit, and this method must be called in
the control flow context where gradients() is called.
Note that this method modifies `between_op_list` and `between_ops`.
"""
forward_ctxt = _GetWhileContext(op)
grad_state = self._map.get(forward_ctxt)
if grad_state is None:
# This is a new while loop so create a grad state for it.
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
grad_state = GradLoopState(forward_ctxt, outer_grad_state)
self._map[forward_ctxt] = grad_state
# We need to include all exits of a loop for backprop.
for loop_exit in grad_state.forward_loop_exits:
if not between_ops[loop_exit.op._id]:
between_ops[loop_exit.op._id] = True
between_op_list.append(loop_exit.op)
def ZerosLikeForExit(self, val):
"""Create zeros_like gradient for a loop exit.
If the result of a loop variable is not used but is involved in
computing the result of some needed loop variable, we create a
zero-valued tensor that is fed as gradient for the Exit node of that
loop variable. Note that val.op is an Exit, and this method must be
called in the control flow context where gradients() is called.
Args:
val: The output tensor of an Exit op.
Returns:
      A zero tensor with the same shape as val.
"""
val_shape = val.get_shape()
forward_ctxt = val.op._get_control_flow_context()
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
if outer_grad_state:
# This is a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape in the right context.
outer_grad_state.grad_context.Enter()
result = array_ops.zeros(val_shape.dims, val.dtype)
outer_grad_state.grad_context.Exit()
else:
# Only the shape of value is needed for backprop.
forward_ctxt.outer_context.Enter()
shape = array_ops.shape_internal(val, optimize=False)
forward_ctxt.outer_context.Exit()
# Save the shape to a stack.
history_shape = outer_grad_state.AddForwardAccumulator(shape)
# Get the shape back from the stack.
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
real_shape = outer_grad_state.AddBackpropAccumulatedValue(
history_shape, shape)
result = array_ops.zeros(real_shape, val.dtype)
outer_grad_ctxt.Exit()
else:
# This is not a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape.
result = array_ops.zeros(val_shape.dims, val.dtype)
else:
result = array_ops.zeros_like(val, optimize=False)
return result
def ZerosLike(self, op, index):
"""Create zeros_like for the specified output of an op.
If op is in a while loop that is part of gradients(), this method
must be called in its grad loop context.
Args:
op: A tensorflow operation.
index: the index for a specific output of the op.
Returns:
      A zero tensor with the same shape as op.outputs[index].
"""
if IsLoopSwitch(op): return None
dead_branch = IsSwitch(op)
forward_ctxt = _GetWhileContext(op)
grad_state = self._map.get(forward_ctxt)
if grad_state is None:
# op is not in a while loop that is part of gradients().
return ZerosLikeOutsideLoop(op, index)
op_ctxt = op._get_control_flow_context()
val = ops.convert_to_tensor(op.outputs[index], name="tensor")
shape = val.get_shape()
if shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor with
# the right shape in the grad loop context.
result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
if dead_branch:
# op is a cond switch. Guard the zero tensor with a switch.
pred = grad_state.history_map.get(op_ctxt.pred.name)
branch = op_ctxt.branch
result = _SwitchRefOrTensor(result, pred)[1 - branch]
else:
# Unknown shape so keep a history of the shape at runtime.
if dead_branch:
# Need to add a special switch to guard the value.
pred = op_ctxt.pred
branch = op_ctxt.branch
op_ctxt.outer_context.Enter()
val = _SwitchRefOrTensor(op.inputs[0], pred)[1 - branch]
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.outer_context.Exit()
val.op._set_control_flow_context(op_ctxt)
zeros_shape.op._set_control_flow_context(op_ctxt)
else:
op_ctxt.Enter()
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.Exit()
# Add forward accumulator for shape.
grad_state.grad_context.Exit()
history_zeros_shape = grad_state.AddForwardAccumulator(
zeros_shape, dead_branch=dead_branch)
grad_state.grad_context.Enter()
# Create a zero tensor with the right shape.
shape = grad_state.AddBackpropAccumulatedValue(
history_zeros_shape, zeros_shape, dead_branch)
result = array_ops.zeros(shape, val.dtype)
return result
def PostProcessing(self):
"""Perform postprocessing at the end of gradients().
We have created the gradient graph at this point. So this function
can be used to perform any postprocessing on the gradient graph.
We currently perform the following postprocessing:
1. Patch the gradient graph if the output of a loop variable
doesn't depend on its input.
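    For case 1, this roughly amounts to (illustrative; `loop_var_grad` is a
    placeholder name for the loop variable's gradient):
    ```
    grad_val = zeros_like(loop_var_grad)   # created in the proper grad context
    b_merge.op._update_input(1, _NextIteration(grad_val))
    ```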
"""
for _, grad_state in self._map.items():
for _, b_merge in grad_state.switch_map.items():
if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
# The value of this loop variable at iteration i+1 doesn't
# depend on its value at iteration i. So use zeros as the
# gradients for all iterations > 0.
dtype = b_merge.op.inputs[0].dtype
shape = b_merge.op.inputs[0].get_shape()
# pylint: disable=protected-access
if shape.is_fully_defined():
grad_state.grad_context.Enter()
# Create a zeros and use it for iterations > 0.
grad_val = constant_op.constant(0, dtype=dtype, shape=shape)
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
else:
# Create a zeros in the outer grad context.
outer_grad_ctxt = grad_state.grad_context.outer_context
if outer_grad_ctxt: outer_grad_ctxt.Enter()
enter_grad_op = b_merge.op.inputs[0].op
enter_grad = enter_grad_op.inputs[0]
grad_shape = array_ops.shape_internal(enter_grad, optimize=False)
grad_val = array_ops.zeros(grad_shape)
if outer_grad_ctxt: outer_grad_ctxt.Exit()
# Use the zeros for iterations > 0.
grad_state.grad_context.Enter()
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
b_merge.op._update_input(1, next_grad_val)
# pylint: enable=protected-access
def MaybeCreateControlFlowState(between_op_list, between_ops,
colocate_gradients_with_ops):
"""Create the state for all the while loops involved in one gradients().
We create a ControlFlowState when there are while loops involved in
gradients(). In gradients(), control flow logic is only invoked when
the ControlFlowState is not None.
Note that this method modifies `between_op_list` and `between_ops`.
"""
loop_state = None
for op in between_op_list:
if IsLoopExit(op):
if loop_state is None:
loop_state = ControlFlowState()
if colocate_gradients_with_ops:
with ops.colocate_with(op):
loop_state.AddWhileContext(op, between_op_list, between_ops)
else:
loop_state.AddWhileContext(op, between_op_list, between_ops)
return loop_state
def IsSwitch(op):
"""Return true if `op` is a Switch."""
return op.type == "Switch" or op.type == "RefSwitch"
def IsLoopExit(op):
"""Return true if `op` is an Exit."""
return op.type == "Exit" or op.type == "RefExit"
def IsLoopSwitch(op):
"""Return true if `op` is the Switch for a while loop."""
if IsSwitch(op):
ctxt = op._get_control_flow_context()
return ctxt and isinstance(ctxt, WhileContext)
return False
def ZerosLikeOutsideLoop(op, index):
"""Create zeros_like for the specified output of an op."""
val = op.outputs[index]
if not IsSwitch(op):
return array_ops.zeros_like(val, optimize=False)
else:
op_ctxt = op._get_control_flow_context()
if op_ctxt:
# We are in a cond context. Use a switch to create zeros only when needed.
pred = op_ctxt.pred
branch = op_ctxt.branch
switch_val = switch(op.inputs[0], pred)[1 - branch]
zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
return array_ops.zeros(zeros_shape, dtype=val.dtype)
else:
return array_ops.zeros_like(val, optimize=False)
class ControlFlowContext(object):
"""The base class for control flow context.
The usage pattern is a sequence of (Enter, Exit) followed by a final
ExitResult.
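  A minimal sketch of this pattern (illustrative):
  ```
  ctxt.Enter()
  # ... construct ops inside the context ...
  ctxt.Exit()
  ctxt.ExitResult(outputs)  # make outputs visible to the outer context
  ```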
We maintain the following state for control flow contexts during graph
construction:
1. graph has _control_flow_context: the current context used to
construct new nodes. Changed by ctxt.Enter() and ctxt.Exit()
2. op has _control_flow_context: the context to which the op belongs.
Set at the time the op is created. Immutable.
3. A ControlFlowContext has _outer_context: the context in which this
context is created. Set at the time a context is created. Immutable.
4. A ControlFlowContext has _context_stack.
Pushed and popped by ctxt.Enter() and ctxt.Exit()
"""
def __init__(self, values_def=None, import_scope=None):
self._outer_context = ops.get_default_graph()._get_control_flow_context()
self._context_stack = []
if values_def:
self._init_values_from_proto(values_def,
import_scope=import_scope)
else:
# Values that have been already seen in this context.
self._values = set()
# Values referenced by but external to this context.
self._external_values = {}
def _init_values_from_proto(self, values_def, import_scope=None):
"""Initializes values and external_values from `ValuesDef` protocol buffer.
Args:
values_def: `ValuesDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(values_def, control_flow_pb2.ValuesDef)
self._values = set(
ops.prepend_name_scope(value, import_scope)
for value in values_def.values)
g = ops.get_default_graph()
self._external_values = {}
for k, v in values_def.external_values.items():
k = ops.prepend_name_scope(k, import_scope)
self._external_values[k] = g.as_graph_element(
ops.prepend_name_scope(v, import_scope))
op_names = set([
op.split(":")[0]
for op in self._values - set(self._external_values.keys())
])
for op in op_names:
# pylint: disable=protected-access
g.as_graph_element(op)._set_control_flow_context(self)
# pylint: enable=protected-access
@property
def outer_context(self):
"""Return the context containing this context."""
return self._outer_context
@property
def grad_state(self):
raise NotImplementedError("Abstract method")
@property
def back_prop(self):
raise NotImplementedError("Abstract method")
def _to_proto(self, export_scope=None):
"""Converts the values to a `ValuesDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `ValuesDef` protocol buffer.
"""
values_def = control_flow_pb2.ValuesDef()
values_def.values.extend(
[ops.strip_name_scope(v, export_scope)
for v in sorted(self._values)])
for k, v in self._external_values.items():
k = ops.strip_name_scope(k, export_scope)
values_def.external_values[k] = ops.strip_name_scope(
v.name, export_scope)
return values_def
@staticmethod
def _from_proto(values_def, import_scope=None):
"""Returns a `ControlFlowContext` created from `values_def`."""
return ControlFlowContext(values_def=values_def,
import_scope=import_scope)
def AddName(self, name):
self._values.add(name)
# pylint: disable=protected-access
def Enter(self):
"""Enter this control flow context."""
graph = ops.get_default_graph()
self._context_stack.append(graph._get_control_flow_context())
graph._set_control_flow_context(self)
def Exit(self):
"""Exit this control flow context."""
graph = ops.get_default_graph()
last_context = self._context_stack.pop()
graph._set_control_flow_context(last_context)
def ExitResult(self, result):
"""Make a list of tensors available in the outer context."""
if self._outer_context:
nest.map_structure(lambda x: self._outer_context.AddName(x.name), result)
def GetWhileContext(self):
"""Return the while context containing this context."""
if self._outer_context:
return self._outer_context.GetWhileContext()
return None
def _IsInOuterContext(self, op):
op_ctxt = _GetOutputContext(op)
outer_ctxt = self.outer_context
while outer_ctxt != op_ctxt:
if outer_ctxt is None:
return False
outer_ctxt = outer_ctxt.outer_context
return True
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
while_ctxt = self.GetWhileContext()
# A control input of `op` is internal if it is in the same while
# loop context as the enclosing while loop context of self.
if while_ctxt is None:
internal_control_inputs = op.control_inputs
else:
internal_control_inputs = []
for x in op.control_inputs:
ctxt = _GetOutputContext(x)
if ctxt is not None and ctxt.GetWhileContext() == while_ctxt:
internal_control_inputs.append(x)
if len(internal_control_inputs) != len(op.control_inputs):
del op.control_inputs[:]
op._add_control_inputs(internal_control_inputs)
return internal_control_inputs
# pylint: enable=protected-access
def AddInnerOp(self, op):
"""Notifies a scope about an operator added to an inner scope."""
if self._outer_context:
self._outer_context.AddInnerOp(op)
def GetControlPivot(self):
"""Returns the pivot node for this context, or None."""
return None
class CondContext(ControlFlowContext):
"""The context for the conditional construct."""
def __init__(self, pred=None, pivot=None, branch=None,
name="cond_text", context_def=None, import_scope=None):
"""Creates a `CondContext`.
Args:
pred: The `boolean` tensor for the conditional predicate.
pivot: The predicate tensor in this branch.
branch: 0 or 1 representing this branch.
name: Name of the `CondContext` python object.
context_def: Optional `ContextDef` protocol buffer to initialize the
`CondContext` object from.
import_scope: Optional `string`. Name scope to add. Only used when
        initializing from protocol buffer.
"""
self._name = ops.get_default_graph().unique_name(name)
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
# Initializes the default fields.
ControlFlowContext.__init__(self)
self._pred = pred # The boolean tensor for the cond predicate
self._pivot = pivot # The predicate tensor in this branch
self._branch = branch # 0 or 1 representing this branch
# Values considered to have been already seen in this context.
self._values.add(pred.name)
self._values.add(pivot.name)
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `CondContext` from protocol buffer.
Args:
context_def: `CondContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.CondContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(
context_def.context_name, import_scope)
self._pred = g.as_graph_element(ops.prepend_name_scope(
context_def.pred_name, import_scope))
self._pivot = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_name, import_scope))
self._branch = context_def.branch
super(CondContext, self).__init__(values_def=context_def.values_def,
import_scope=import_scope)
@property
def name(self):
return self._name
@property
def pred(self):
return self._pred
@property
def pivot(self):
return self._pivot
@property
def branch(self):
return self._branch
@property
def grad_state(self):
if self.GetWhileContext():
return self.GetWhileContext().grad_state
return None
@property
def back_prop(self):
if self.GetWhileContext():
      return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
def to_proto(self, export_scope=None):
"""Converts a `CondContext` to a `CondContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `CondContextDef` protocol buffer.
"""
if (export_scope is None or
self.name.startswith(export_scope)):
context_def = control_flow_pb2.CondContextDef()
context_def.context_name = ops.strip_name_scope(
self.name, export_scope)
context_def.pred_name = ops.strip_name_scope(
self._pred.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(
self._pivot.name, export_scope)
context_def.branch = self._branch
context_def.values_def.MergeFrom(super(CondContext, self)._to_proto(
export_scope))
return context_def
else:
return None
@staticmethod
def from_proto(context_def, import_scope=None):
"""Returns a `CondContext` object created from `context_def`."""
return CondContext(context_def=context_def,
import_scope=import_scope)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context. This is needed in
# particular for nested conds.
result = self._external_values.get(val.name)
result = val if result is None else result
else:
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
with ops.control_dependencies(None):
result = _SwitchRefOrTensor(result, self._pred)[self._branch]
result.op.graph.prevent_fetching(result.op)
# pylint: disable=protected-access
result.op._set_control_flow_context(self)
# pylint: enable=protected-access
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context."""
if not op.inputs:
# Remove any external control dependency on this op
self._RemoveExternalControlEdges(op)
# pylint: disable=protected-access
op._add_control_input(self._pivot.op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
# pylint: disable=protected-access
op._update_input(index, real_x)
# pylint: enable=protected-access
# Remove any external control dependency on this op.
self._RemoveExternalControlEdges(op)
for x in op.outputs:
self._values.add(x.name)
# pylint: disable=protected-access
if op.graph._is_function(op.type) or op.type == "SymbolicGradient":
op._add_control_input(self._pivot.op)
# pylint: enable=protected-access
if self._outer_context or not IsLoopExit(op):
op.graph.prevent_fetching(op)
def _ProcessOutputTensor(self, val):
"""Process an output tensor of a conditional branch."""
real_val = val
if val.name not in self._values:
# Handle the special case of lambda: x
self._values.add(val.name)
if self._outer_context:
real_val = self._outer_context.AddValue(val)
self._values.add(real_val.name)
real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch]
self._external_values[val.name] = real_val
else:
external_val = self._external_values.get(val.name)
if external_val is not None:
real_val = external_val
return real_val
def _BuildCondTensor(self, v):
if isinstance(v, ops.Operation):
# Use pivot as the proxy for this op.
return with_dependencies([v], self._pivot)
elif isinstance(v, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
values = self._ProcessOutputTensor(v.values)
indices = self._ProcessOutputTensor(v.indices)
if isinstance(v, ops.IndexedSlices):
dense_shape = v.dense_shape
if dense_shape is not None:
dense_shape = self._ProcessOutputTensor(dense_shape)
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = self._ProcessOutputTensor(v.dense_shape)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
else:
v = nest.map_structure(_convert_tensorarray_to_flow, v)
return self._ProcessOutputTensor(ops.convert_to_tensor(v))
def BuildCondBranch(self, fn):
"""Add the subgraph defined by fn() to the graph."""
original_result = fn()
if original_result is None:
return None, None
result = nest.map_structure(self._BuildCondTensor, original_result)
if not isinstance(result, (list, _basetuple)):
result = [result]
return original_result, result
def _UnpackIfSingleton(res):
if isinstance(res, (list, _basetuple)) and len(res) == 1:
return res[0]
else:
return res
# pylint: disable=g-doc-args
@deprecation.deprecated_args(
None,
"fn1/fn2 are deprecated in favor of the true_fn/false_fn arguments.",
"fn1", "fn2")
def cond(pred, true_fn=None, false_fn=None, strict=False, name=None,
fn1=None, fn2=None):
"""Return `true_fn()` if the predicate `pred` is true else `false_fn()`.
`true_fn` and `false_fn` both return lists of output tensors. `true_fn` and
`false_fn` must have the same non-zero number and type of outputs.
Note that the conditional execution applies only to the operations defined in
`true_fn` and `false_fn`. Consider the following simple program:
```python
z = tf.multiply(a, b)
result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))
```
If `x < y`, the `tf.add` operation will be executed and `tf.square`
operation will not be executed. Since `z` is needed for at least one
branch of the `cond`, the `tf.multiply` operation is always executed,
unconditionally.
Although this behavior is consistent with the dataflow model of TensorFlow,
it has occasionally surprised some users who expected a lazier semantics.
Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the
call to `cond`, and not at all during `Session.run()`). `cond`
stitches together the graph fragments created during the `true_fn` and
`false_fn` calls with some additional graph nodes to ensure that the right
branch gets executed depending on the value of `pred`.
`tf.cond` supports nested structures as implemented in
`tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the
same (possibly nested) value structure of lists, tuples, and/or named tuples.
Singleton lists and tuples form the only exceptions to this: when returned by
`true_fn` and/or `false_fn`, they are implicitly unpacked to single values.
This behavior is disabled by passing `strict=True`.
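  For example (illustrative):
  ```python
  r = tf.cond(tf.less(x, y),
              lambda: [tf.add(x, 1)],
              lambda: [tf.square(y)])
  # With the default strict=False, r is a single Tensor.
  # With strict=True, r would instead be a one-element list.
  ```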
Args:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
strict: A boolean that enables/disables 'strict' mode; see above.
name: Optional name prefix for the returned tensors.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`. If the
callables return a singleton list, the element is extracted from the list.
Raises:
TypeError: if `true_fn` or `false_fn` is not callable.
ValueError: if `true_fn` and `false_fn` do not return the same number of
tensors, or return tensors of different types.
Example:
```python
x = tf.constant(2)
y = tf.constant(5)
def f1(): return tf.multiply(x, 17)
def f2(): return tf.add(y, 23)
r = tf.cond(tf.less(x, y), f1, f2)
# r is set to f1().
# Operations in f2 (e.g., tf.add) are not executed.
```
"""
# We needed to make true_fn/false_fn keyword arguments for
# backwards-compatibility. This check exists so that we can convert back to
# having them be positional arguments.
# TODO(josh11b): Make `true_fn` and `false_fn` positional arguments after
# `fn1` and `fn2` are deleted.
if fn1 is not None:
if true_fn is not None:
raise TypeError("cond(): true_fn and fn1 may not be set simultaneously.")
true_fn = fn1
elif true_fn is None:
raise TypeError("cond(): true_fn argument required")
if fn2 is not None:
if false_fn is not None:
raise TypeError("cond(): false_fn and fn2 may not be set simultaneously.")
false_fn = fn2
elif false_fn is None:
raise TypeError("cond(): false_fn argument required")
if not callable(true_fn):
raise TypeError("true_fn must be callable.")
if not callable(false_fn):
raise TypeError("false_fn must be callable.")
with ops.name_scope(name, "cond", [pred]):
if context.in_eager_mode():
if pred:
return true_fn()
return false_fn()
# Add the Switch to the graph.
if isinstance(pred, bool):
raise TypeError("pred must not be a Python bool")
p_2, p_1 = switch(pred, pred)
pivot_1 = array_ops.identity(p_1, name="switch_t")
pivot_2 = array_ops.identity(p_2, name="switch_f")
pred = array_ops.identity(pred, name="pred_id")
# Disable the fetching of tensors that are only on one branch of cond.
for tensor in [p_1, p_2, pivot_1, pivot_2, pred]:
tensor.op.graph.prevent_fetching(tensor.op)
# Build the graph for the true branch in a new context.
context_t = CondContext(pred, pivot_1, branch=1)
context_t.Enter()
orig_res_t, res_t = context_t.BuildCondBranch(true_fn)
if orig_res_t is None:
raise ValueError("true_fn must have a return value.")
context_t.ExitResult(res_t)
context_t.Exit()
# Build the graph for the false branch in a new context.
context_f = CondContext(pred, pivot_2, branch=0)
context_f.Enter()
orig_res_f, res_f = context_f.BuildCondBranch(false_fn)
if orig_res_f is None:
raise ValueError("false_fn must have a return value.")
context_f.ExitResult(res_f)
context_f.Exit()
if not strict:
orig_res_t = _UnpackIfSingleton(orig_res_t)
orig_res_f = _UnpackIfSingleton(orig_res_f)
# Check that the return values of the two branches have the same structure.
try:
nest.assert_same_structure(orig_res_t, orig_res_f)
except TypeError as e:
raise TypeError(
"Incompatible return types of true_fn and false_fn: {}".format(e))
except ValueError as e:
raise ValueError(
"Incompatible return values of true_fn and false_fn: {}".format(e))
# Add the final merge to the graph.
if not res_t:
raise ValueError("true_fn and false_fn must return at least one result.")
res_t_flat = nest.flatten(res_t)
res_f_flat = nest.flatten(res_f)
for x, y in zip(res_t_flat, res_f_flat):
assert ((isinstance(x, ops.IndexedSlices) and
isinstance(y, ops.IndexedSlices)) or
(isinstance(x, sparse_tensor.SparseTensor) and
isinstance(y, sparse_tensor.SparseTensor)) or
(isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)))
val_x = x if isinstance(x, ops.Tensor) else x.values
val_y = y if isinstance(y, ops.Tensor) else y.values
if val_x.dtype.base_dtype != val_y.dtype.base_dtype:
raise ValueError(
"Outputs of true_fn and false_fn must have the same type: %s, %s" %
(val_x.dtype.name, val_y.dtype.name))
merges = [merge(pair)[0] for pair in zip(res_f_flat, res_t_flat)]
merges = _convert_flows_to_tensorarrays(nest.flatten(orig_res_t), merges)
# Add to collections
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_t)
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_f)
merges = nest.pack_sequence_as(structure=orig_res_t, flat_sequence=merges)
# Singleton lists and tuples are automatically unpacked if strict == False.
if not strict:
merges = _UnpackIfSingleton(merges)
return merges
# pylint: enable=g-doc-args
def _resource_safe_shape(t):
"""Returns the shape of t or the variable it points to."""
if t.dtype == dtypes.resource:
while t.op.inputs:
t = t.op.inputs[0]
return tensor_shape.TensorShape(t.op.get_attr("shape"))
return array_ops.shape_internal(t, optimize=False)
# TODO(yuanbyu): Consider having a unified notion of context for
# not only conditionals and loops but also control dependency and
# subgraphs.
class WhileContext(ControlFlowContext):
"""The context for the loop construct."""
def __init__(self, parallel_iterations=10, back_prop=True, swap_memory=False,
name="while_context", grad_state=None, context_def=None,
import_scope=None):
""""Creates a `WhileContext`.
Args:
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
grad_state: The gradient loop state.
context_def: Optional `WhileContextDef` protocol buffer to initialize
        the `WhileContext` python object from.
import_scope: Optional `string`. Name scope to add. Only used when
        initializing from protocol buffer.
"""
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
ControlFlowContext.__init__(self)
self._init_from_args(parallel_iterations, back_prop, swap_memory,
name)
# The gradient loop state.
self._grad_state = grad_state
def _init_from_args(self, parallel_iterations, back_prop, swap_memory,
name):
"""Creates a new `WhileContext` from arguments.
Args:
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Raises:
ValueError: If `parallel_iterations` has invalid value.
"""
if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0):
raise ValueError("`parallel_iterations` must be a positive integer: "
"%s" % parallel_iterations)
self._name = ops.get_default_graph().unique_name(name)
self._parallel_iterations = parallel_iterations
self._back_prop = back_prop
self._swap_memory = swap_memory
# We use this node to control constants created by the pred lambda.
self._pivot_for_pred = None
# We use this node to control constants created by the body lambda.
self._pivot_for_body = None
# The boolean tensor for loop termination condition. Used in code
    # generation for gradient computation.
self._pivot = None
# The list of exit tensors for loop variables.
self._loop_exits = []
# The list of enter tensors for loop variables.
self._loop_enters = []
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `WhileContext` from protocol buffer.
Args:
context_def: `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.WhileContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(
context_def.context_name, import_scope)
self._parallel_iterations = context_def.parallel_iterations
self._back_prop = context_def.back_prop
self._swap_memory = context_def.swap_memory
self._pivot_for_pred = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_for_pred_name, import_scope))
# We use this node to control constants created by the body lambda.
self._pivot_for_body = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_for_body_name, import_scope))
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation.
self._pivot = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_name, import_scope))
# The list of exit tensors for loop variables.
self._loop_exits = [g.as_graph_element(
ops.prepend_name_scope(exit_name, import_scope))
for exit_name in context_def.loop_exit_names]
# The list of enter tensors for loop variables.
self._loop_enters = [g.as_graph_element(
ops.prepend_name_scope(enter_name, import_scope))
for enter_name in context_def.loop_enter_names]
super(WhileContext, self).__init__(values_def=context_def.values_def,
import_scope=import_scope)
@property
def name(self):
return self._name
@property
def parallel_iterations(self):
"""The number of iterations allowed to run in parallel."""
return self._parallel_iterations
@property
def back_prop(self):
"""True iff backprop is enabled for this while loop."""
return self._back_prop
@property
def swap_memory(self):
"""True iff GPU-CPU memory swap is enabled for this while loop."""
return self._swap_memory
@property
def pivot(self):
"""The boolean tensor representing the loop termination condition."""
return self._pivot
@property
def loop_enters(self):
"""The list of enter tensors for loop variables."""
return self._loop_enters
@property
def loop_exits(self):
"""The list of exit tensors for loop variables."""
return self._loop_exits
@property
def grad_state(self):
"""The gradient loop state."""
return self._grad_state
def to_proto(self, export_scope=None):
"""Converts a `WhileContext` to a `WhileContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `WhileContextDef` protocol buffer.
"""
if (export_scope is None or
self.name.startswith(export_scope)):
context_def = control_flow_pb2.WhileContextDef()
context_def.context_name = ops.strip_name_scope(
self.name, export_scope)
context_def.parallel_iterations = self._parallel_iterations
context_def.back_prop = self._back_prop
context_def.swap_memory = self._swap_memory
context_def.pivot_for_pred_name = ops.strip_name_scope(
self._pivot_for_pred.name, export_scope)
context_def.pivot_for_body_name = ops.strip_name_scope(
self._pivot_for_body.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(
self._pivot.name, export_scope)
context_def.loop_exit_names.extend(
[ops.strip_name_scope(l.name, export_scope)
for l in self._loop_exits])
context_def.loop_enter_names.extend(
[ops.strip_name_scope(l.name, export_scope)
for l in self._loop_enters])
context_def.values_def.MergeFrom(
super(WhileContext, self)._to_proto(
export_scope=export_scope))
return context_def
else:
return None
@staticmethod
def from_proto(context_def, import_scope=None):
"""Returns a `WhileContext` object created from `context_def`.
Args:
context_def: A `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
Returns:
A `WhileContext` Python object.
"""
return WhileContext(context_def=context_def,
import_scope=import_scope)
def GetWhileContext(self):
return self
def GetControlPivot(self):
if self._pivot_for_body is not None:
return self._pivot_for_body
return self._pivot_for_pred
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
result = val
if val.name not in self._values:
self._values.add(val.name)
# If we are in a grad context and val is from its forward context,
# use GetRealValue(), which adds the logic to save the history of
# val in forward.
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
forward_ctxt = _GetWhileContext(val.op)
if IsLoopExit(val.op):
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
if forward_ctxt == grad_ctxt.grad_state.forward_context:
real_val = grad_ctxt.grad_state.GetRealValue(val)
self._external_values[val.name] = real_val
return real_val
if self._outer_context is not None:
result = self._outer_context.AddValue(val)
# Create an Enter to make `result` known to this loop context.
with ops.control_dependencies(None):
enter = _Enter(result, self._name, is_constant=True,
parallel_iterations=self._parallel_iterations)
enter.graph.prevent_feeding(enter)
if self._outer_context:
self._outer_context.AddInnerOp(enter.op)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext([enter])
# Add `enter` in this context.
self._values.add(enter.name)
self._external_values[val.name] = enter
result = enter
else:
actual_val = self._external_values.get(val.name)
if actual_val is not None:
result = actual_val
return result
def AddOp(self, op):
"""Add `op` to the current context."""
# For a reduction op, if op is in a grad context and its input is from
# its forward context, moving op to the forward context means we would
# store the tensor after the reduction as opposed to the tensor before
# reduction, and therefore could significantly reduce memory consumption.
# For now, we do this only for a few ops.
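    # For example (illustrative): if backprop only needs tf.shape(x) of a
    # forward tensor x, keeping the Shape op in the forward context means we
    # accumulate the small shape vector rather than the full tensor x.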
if op.type in {"Shape", "Size", "Rank"}:
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
op_input_forward_ctxt = _GetWhileContext(op.inputs[0].op)
if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:
op_input_ctxt = op.inputs[0].op._get_control_flow_context()
op._set_control_flow_context(op_input_ctxt)
op_input_ctxt._AddOpInternal(op)
return
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context.
In the case that op has only external data inputs, we remove all of its
external control inputs so all its inputs are in the same while loop
context. This is valid because op now has an Enter input that has all
    the right control dependencies.
"""
if not op.inputs:
# Remove any external control dependency on this op
control_inputs = self._RemoveExternalControlEdges(op)
# Add a control edge from the control pivot to this op.
if not control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x)
# Remove any external control dependency on this op.
self._RemoveExternalControlEdges(op)
# Add a control dependency to prevent loop invariants from
# enabling ops that should not be executed.
self._MaybeAddControlDependency(op)
for x in op.outputs:
self._values.add(x.name)
if self._outer_context or not IsLoopExit(op):
op.graph.prevent_fetching(op)
for x in op.outputs:
op.graph.prevent_feeding(x)
if self._outer_context:
self._outer_context.AddInnerOp(op)
def _MaybeAddControlDependency(self, op):
"""Add a control input to the op if it only depends on loop invariants."""
def _IsOpFree(op):
"""Determines if `op` needs a control dependency."""
if op.control_inputs:
return False
# pylint: disable=protected-access
if op.graph._is_function(op.type) or op.type == "SymbolicGradient":
return True
# pylint: enable=protected-access
for x in op.inputs:
if not _IsLoopConstantEnter(x.op):
return False
return True
if _IsOpFree(op):
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
def AddForwardLoopCounter(self, outer_grad_state):
"""Adds a loop that counts the number of iterations.
This is added to the forward loop at the time when we start to
create the loop for backprop gradient computation. Called in
the outer context of this forward context.
The pseudocode is:
`n = 0; while (_pivot) { n++; }`
Note that a control dependency is added to `n` to ensure the correct
execution order of stack push ops.
Args:
outer_grad_state: The outer grad state. None if not nested.
Returns:
The number of iterations taken by the forward loop and the loop index.
"""
n = constant_op.constant(0, name="f_count")
if outer_grad_state is not None:
# Force the stack pushes of i-th execution of an inner loop to be ordered
# before the pushes of (i+1)-th execution of the same inner loop.
outer_add_op = outer_grad_state.forward_index.op.inputs[0].op
n.op._add_control_input(outer_add_op) # pylint: disable=protected-access
self.Enter()
self.AddName(n.name)
enter_n = _Enter(n, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="f_count")
self.loop_enters.append(enter_n)
merge_n = merge([enter_n, enter_n])[0]
switch_n = switch(merge_n, self._pivot)
index = math_ops.add(switch_n[1], 1)
next_n = _NextIteration(index)
merge_n.op._update_input(1, next_n)
total_iterations = exit(switch_n[0], name="f_count")
self.loop_exits.append(total_iterations)
self.ExitResult([total_iterations])
self.Exit()
return total_iterations, next_n
def AddBackpropLoopCounter(self, count, outer_grad_state):
"""Add the backprop loop that controls the iterations.
This is added to the backprop loop. It is used to control the loop
termination of the backprop loop. Called in the outer context of
this grad context.
The pseudocode is:
`n = count; while (n >= 1) { n--; }`
Note that a control dependency is added to `final_zero` to ensure the
correct execution order of stack pop ops.
Args:
count: The number of iterations for backprop.
outer_grad_state: The outer grad state. None if not nested.
Returns:
The loop index.
"""
one = constant_op.constant(1, name="b_count")
self.Enter()
self.AddName(count.name)
enter_count = _Enter(count, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_count")
self.loop_enters.append(enter_count)
merge_count = merge([enter_count, enter_count])[0]
self._pivot_for_pred = merge_count
pred = math_ops.greater_equal(merge_count, one)
self._pivot = loop_cond(pred, name="b_count")
switch_count = switch(merge_count, self._pivot)
index = math_ops.subtract(switch_count[1], one)
self._pivot_for_body = index
next_count = _NextIteration(index)
merge_count.op._update_input(1, next_count)
final_zero = exit(switch_count[0], name="b_count")
self.loop_exits.append(final_zero)
if outer_grad_state is not None:
# Force the stack pops of i-th execution of an inner loop to be ordered
# before the pops of (i+1)-th execution of the same inner loop.
# pylint: disable=protected-access
outer_grad_state.grad_sync._add_control_input(final_zero.op)
# pylint: enable=protected-access
self.ExitResult([final_zero])
self.Exit()
return next_count
def AddBackpropAccumulator(self, op, grad):
"""Add an accumulation loop for every loop invariant.
This is added to the backprop loop. It is used to accumulate partial
gradients within each loop iteration. Called when in the gradient while
context.
The pseudocode is:
```
acc = 0.0;
while (_pivot) {
acc += grad;
}
```
Args:
op: The Enter op for a loop invariant.
grad: The partial gradient of an iteration for a loop invariant.
Returns:
The gradient for a loop invariant.
"""
self.Exit()
# Create a zeros tensor with the right shape for acc. If we don't
# know the full shape statically, we will have to get the shape
# dynamically from the forward inference. Getting the shape right
# for the zeros is only needed for the base case when the loop exits
# without running any iterations.
shape = grad.get_shape()
if shape.is_fully_defined():
if self.outer_context: self.outer_context.Enter()
acc = constant_op.constant(0, grad.dtype, shape=shape, name="b_acc")
if self.outer_context: self.outer_context.Exit()
else:
value = op.inputs[0]
if (isinstance(self.outer_context, WhileContext) and
self.outer_context.grad_state is not None):
# We are in a nested while loop.
forward_ctxt = self.grad_state.forward_context
forward_ctxt.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
forward_ctxt.outer_context.Exit()
outer_grad_state = self.grad_state.outer_grad_state
history_zeros_shape = outer_grad_state.AddForwardAccumulator(
zeros_shape)
self.outer_context.Enter()
real_shape = outer_grad_state.AddBackpropAccumulatedValue(
history_zeros_shape, zeros_shape)
acc = array_ops.zeros(real_shape, grad.dtype)
self.outer_context.Exit()
else:
if self.outer_context: self.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
acc = array_ops.zeros(zeros_shape, grad.dtype)
if self.outer_context: self.outer_context.Exit()
acc._shape = grad.get_shape() # pylint: disable=protected-access
self.Enter()
self.AddName(acc.name)
enter_acc = _Enter(acc, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc")
self.loop_enters.append(enter_acc)
merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0]
switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)
add_acc = math_ops.add(switch_acc_true, grad)
next_acc = _NextIteration(add_acc)
merge_acc.op._update_input(1, next_acc) # pylint: disable=protected-access
result_acc = exit(switch_acc_false, name="b_acc")
self.loop_exits.append(result_acc)
self.ExitResult([result_acc])
return result_acc
def AddBackpropIndexedSlicesAccumulator(self, op, grad):
"""This is used for accumulating gradients that are IndexedSlices.
This is essentially the equivalent of AddBackpropAccumulator but optimized
for things like updating embeddings from within a while loop.
Args:
op: The Enter op for a loop invariant.
grad: The partial gradients represented as an IndexedSlices.
Returns:
The accumulated IndexedSlices gradient of the loop invariant.
"""
values = grad.values
indices = grad.indices
dense_shape = grad.dense_shape
self.Exit()
if self.outer_context: self.outer_context.Enter()
if values.get_shape().is_fully_defined():
values_shape = tensor_shape.TensorShape(
[tensor_shape.Dimension(1)] + values.get_shape().dims[1:])
if self.outer_context: self.outer_context.Enter()
values_acc = constant_op.constant(0, values.dtype, shape=values_shape,
name="b_acc")
if self.outer_context: self.outer_context.Exit()
else:
values_shape = _resource_safe_shape(op.inputs[0])[1:]
values_shape = array_ops.concat([[1], values_shape], 0)
values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
indices_acc = constant_op.constant([0], indices.dtype)
shape_acc = None
if dense_shape is not None:
if dense_shape.get_shape().is_fully_defined():
if self.outer_context: self.outer_context.Enter()
shape_acc = constant_op.constant(0, dense_shape.dtype,
shape=dense_shape.get_shape())
if self.outer_context: self.outer_context.Exit()
else:
shape_acc = array_ops.zeros_like(
array_ops.shape_internal(op.inputs[0], optimize=False),
optimize=False)
if self.outer_context: self.outer_context.Exit()
self.Enter()
self.AddName(values_acc.name)
self.AddName(indices_acc.name)
init_acc = [indices_acc, values_acc]
if shape_acc is not None:
self.AddName(shape_acc.name)
init_acc.append(shape_acc)
enter_acc = [_Enter(x, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc") for x in init_acc]
self.loop_enters.extend(enter_acc)
merge_acc = [merge([x, x], name="b_acc")[0] for x in enter_acc]
switch_acc = [switch(x, self._pivot) for x in merge_acc]
# The actual accumulation.
acc_indexed_slices = [
array_ops.concat([xa[1], xv], 0)
for xa, xv in zip(switch_acc[:2], [indices, values])
]
if shape_acc is not None:
# For the shape we just keep the maximum
acc_indexed_slices.append(
math_ops.maximum(dense_shape, switch_acc[2][1]))
next_acc = [_NextIteration(x) for x in acc_indexed_slices]
for xm, xn in zip(merge_acc, next_acc):
xm.op._update_input(1, xn) # pylint: disable=protected-access
exit_acc = [exit(x[0], name="b_acc") for x in switch_acc]
self.loop_exits.extend(exit_acc)
self.ExitResult(exit_acc)
return ops.IndexedSlices(
indices=exit_acc[0], values=exit_acc[1],
dense_shape=exit_acc[2] if shape_acc is not None else None)
def _InitializeValues(self, values):
"""Makes the values known to this context."""
self._values = set()
for x in values:
if isinstance(x, ops.Tensor):
self._values.add(x.name)
else:
self._values.add(x.values.name)
self._values.add(x.indices.name)
if isinstance(x, ops.IndexedSlices):
dense_shape = x.dense_shape
elif isinstance(x, sparse_tensor.SparseTensor):
dense_shape = x.dense_shape
else:
raise TypeError("Type %s not supported" % type(x))
if dense_shape is not None:
self._values.add(dense_shape.name)
def _BuildLoop(self, pred, body, original_loop_vars, loop_vars,
shape_invariants):
"""Core: Add the loop termination condition and body to the graph."""
flat_loop_vars = nest.flatten(original_loop_vars)
# Let the context know the loop variables so the loop variables
# would be added in the outer contexts properly.
self._InitializeValues(loop_vars)
real_vars = loop_vars
if self._outer_context:
real_vars = [self._outer_context.AddValue(x) for x in loop_vars]
with ops.control_dependencies(None):
enter_vars = [_Enter(x, self._name, is_constant=False,
parallel_iterations=self._parallel_iterations,
use_input_shape=(shape_invariants is None))
for x in real_vars]
for x in enter_vars:
x.graph.prevent_feeding(x)
if self._outer_context:
self._outer_context.AddInnerOp(x.op)
# Finds the closest enclosing non-None control pivot.
outer_context = self._outer_context
control_pivot = None
while outer_context is not None and control_pivot is None:
control_pivot = outer_context.GetControlPivot()
# pylint: disable=protected-access
outer_context = outer_context._outer_context
# pylint: enable=protected-access
if control_pivot is not None:
for var in enter_vars:
if _IsLoopConstantEnter(var.op.inputs[0].op):
# pylint: disable=protected-access
var.op._add_control_input(control_pivot.op)
# pylint: enable=protected-access
_SetShapeInvariants(real_vars, enter_vars, shape_invariants)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext(enter_vars)
self._InitializeValues(enter_vars)
self._loop_enters = enter_vars
merge_vars = [merge([x, x])[0] for x in enter_vars]
self._pivot_for_pred = merge_vars[0]
# Build the graph for pred.
merge_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, merge_vars))
packed_vars = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=merge_vars_with_tensor_arrays)
c = ops.convert_to_tensor(pred(*packed_vars))
self._pivot = loop_cond(c, name="LoopCond")
switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars]
# Build the graph for body.
vars_for_body = [_Identity(x[1]) for x in switch_vars]
self._pivot_for_body = vars_for_body[0]
# Convert TensorArray flow variables inside the context back into
# their associated TensorArrays for calling the body.
vars_for_body_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, vars_for_body))
packed_vars_for_body = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=vars_for_body_with_tensor_arrays)
body_result = body(*packed_vars_for_body)
if not nest.is_sequence(body_result):
body_result = [body_result]
# Compare the structure types of input and output of body.
# For backwards compatibility, the first layer is forced to a list
# during this comparison, because inputs are typically lists and
# outputs of the body are typically tuples.
nest.assert_same_structure(list(packed_vars_for_body), list(body_result))
# Store body_result to keep track of TensorArrays returned by body
original_body_result = body_result
# Convert TensorArrays returned by body into their flow variables
result = nest.map_structure(_convert_tensorarray_to_flow,
nest.flatten(body_result))
result = ops.convert_n_to_tensor_or_indexed_slices(result)
# Add NextIteration and the back edges to complete the loop.
if len(merge_vars) != len(result):
raise ValueError("Number of inputs and outputs of body must match "
"loop_vars: %d, %d" % (len(merge_vars), len(result)))
next_vars = []
for m, v in zip(merge_vars, result):
next_vars.append(_AddNextAndBackEdge(m, v))
# Add the exit ops.
exit_vars = [exit(x[0]) for x in switch_vars]
self._loop_exits = exit_vars
# Make sure the shapes of loop outputs are correct.
for m_var, n_var in zip(merge_vars, next_vars):
if isinstance(m_var, ops.Tensor):
_EnforceShapeInvariant(m_var, n_var)
# Exit the loop.
self.ExitResult(exit_vars)
return original_body_result, exit_vars
def BuildLoop(self, pred, body, loop_vars, shape_invariants):
"""Add the loop termination condition and body to the graph."""
# Keep original_loop_vars to identify which are TensorArrays
original_loop_vars = loop_vars
# Convert TensorArrays to their flow variables
loop_vars = nest.map_structure(_convert_tensorarray_to_flow,
nest.flatten(loop_vars))
loop_vars = ops.convert_n_to_tensor_or_indexed_slices(loop_vars)
try:
self.Enter()
original_body_result, exit_vars = self._BuildLoop(
pred, body, original_loop_vars, loop_vars, shape_invariants)
finally:
self.Exit()
flat_result = nest.flatten(original_body_result)
# Convert TensorArray flow variables outside the context back into
# their associated TensorArrays for returning to caller.
exit_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_result, exit_vars))
packed_exit_vars = nest.pack_sequence_as(
structure=original_body_result,
flat_sequence=exit_vars_with_tensor_arrays)
return (packed_exit_vars[0] if len(exit_vars) == 1
else packed_exit_vars)
def _FixControlInputsAndContext(self, enters):
graph = ops.get_default_graph()
# pylint: disable=protected-access
for e in enters:
if isinstance(e, ops.Tensor):
xs = [e]
else:
if not isinstance(e, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(e))
xs = [e.values, e.indices]
shape = e.dense_shape
if shape is not None:
xs.append(shape)
for x in xs:
inp_op = x.op.inputs[0]
control_inputs = graph._control_dependencies_for_inputs([inp_op])
outer_control_inputs = [op for op in control_inputs
if self._IsInOuterContext(op)]
x.op._set_control_flow_context(self)
x.op._add_control_inputs(outer_control_inputs)
graph._record_op_seen_by_control_dependencies(x.op)
# pylint: enable=protected-access
def while_loop(cond, body, loop_vars, shape_invariants=None,
parallel_iterations=10, back_prop=True, swap_memory=False,
name=None):
"""Repeat `body` while the condition `cond` is true.
`cond` is a callable returning a boolean scalar tensor. `body` is a callable
returning a (possibly nested) tuple, namedtuple or list of tensors of the same
arity (length and structure) and types as `loop_vars`. `loop_vars` is a
(possibly nested) tuple, namedtuple or list of tensors that is passed to both
`cond` and `body`. `cond` and `body` both take as many arguments as there are
`loop_vars`.
In addition to regular Tensors or IndexedSlices, the body may accept and
return TensorArray objects. The flows of the TensorArray objects will
be appropriately forwarded between loops and during gradient calculations.
Note that `while_loop` calls `cond` and `body` *exactly once* (inside the
call to `while_loop`, and not at all during `Session.run()`). `while_loop`
stitches together the graph fragments created during the `cond` and `body`
calls with some additional graph nodes to create the graph flow that
repeats `body` until `cond` returns false.
For correctness, `tf.while_loop()` strictly enforces shape invariants for
the loop variables. A shape invariant is a (possibly partial) shape that
is unchanged across the iterations of the loop. An error will be raised
if the shape of a loop variable after an iteration is determined to be more
general than or incompatible with its shape invariant. For example, a shape
of [11, None] is more general than a shape of [11, 17], and [11, 21] is not
compatible with [11, 17]. By default (if the argument `shape_invariants` is
not specified), it is assumed that the initial shape of each tensor in
`loop_vars` is the same in every iteration. The `shape_invariants` argument
allows the caller to specify a less specific shape invariant for each loop
variable, which is needed if the shape varies between iterations. The
@{tf.Tensor.set_shape}
function may also be used in the `body` function to indicate that
  the output loop variable has a particular shape. The shape invariants for
  SparseTensor and IndexedSlices are treated specially as follows:
a) If a loop variable is a SparseTensor, the shape invariant must be
TensorShape([r]) where r is the rank of the dense tensor represented
by the sparse tensor. It means the shapes of the three tensors of the
SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here
is the shape of the SparseTensor.dense_shape property. It must be the shape of
a vector.
b) If a loop variable is an IndexedSlices, the shape invariant must be
a shape invariant of the values tensor of the IndexedSlices. It means
the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]],
[shape.ndims]).
`while_loop` implements non-strict semantics, enabling multiple iterations
to run in parallel. The maximum number of parallel iterations can be
controlled by `parallel_iterations`, which gives users some control over
memory consumption and execution order. For correct programs, `while_loop`
should return the same result for any parallel_iterations > 0.
For training, TensorFlow stores the tensors that are produced in the
forward inference and are needed in back propagation. These tensors are a
main source of memory consumption and often cause OOM errors when training
on GPUs. When the flag swap_memory is true, we swap out these tensors from
GPU to CPU. This for example allows us to train RNN models with very long
sequences and large batches.
Args:
cond: A callable that represents the termination condition of the loop.
body: A callable that represents the loop body.
loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array,
`Tensor`, and `TensorArray` objects.
shape_invariants: The shape invariants for the loop variables.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Returns:
The output tensors for the loop variables after the loop. When the length
    of `loop_vars` is 1 this is a Tensor, TensorArray or IndexedSlices, and when
the length of `loop_vars` is greater than 1 it returns a list.
Raises:
TypeError: if `cond` or `body` is not callable.
ValueError: if `loop_vars` is empty.
Example:
```python
i = tf.constant(0)
c = lambda i: tf.less(i, 10)
b = lambda i: tf.add(i, 1)
r = tf.while_loop(c, b, [i])
```
Example with nesting and a namedtuple:
```python
import collections
Pair = collections.namedtuple('Pair', 'j, k')
ijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2)))
c = lambda i, p: i < 10
b = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k)))
ijk_final = tf.while_loop(c, b, ijk_0)
```
Example using shape_invariants:
```python
i0 = tf.constant(0)
m0 = tf.ones([2, 2])
c = lambda i, m: i < 10
b = lambda i, m: [i+1, tf.concat([m, m], axis=0)]
tf.while_loop(
c, b, loop_vars=[i0, m0],
shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])
```
"""
with ops.name_scope(name, "while", loop_vars):
if not loop_vars:
raise ValueError("No loop variables provided")
if not callable(cond):
raise TypeError("cond must be callable.")
if not callable(body):
raise TypeError("body must be callable.")
if parallel_iterations < 1:
raise TypeError("parallel_iterations must be a positive integer.")
if context.in_eager_mode():
while cond(*loop_vars):
loop_vars = body(*loop_vars)
return loop_vars
if shape_invariants is not None:
nest.assert_same_structure(loop_vars, shape_invariants)
loop_context = WhileContext(parallel_iterations, back_prop, swap_memory) # pylint: disable=redefined-outer-name
ops.add_to_collection(ops.GraphKeys.WHILE_CONTEXT, loop_context)
result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
return result
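# Editor's note: the helper below is an illustrative sketch added for this
# document and is not part of the original module. It demonstrates the
# `parallel_iterations` and `swap_memory` flags discussed in the while_loop
# docstring above; the shapes and iteration count are made up.
def _example_while_loop_memory_flags():
  i0 = constant_op.constant(0)
  acc0 = array_ops.zeros([128, 1024])
  c = lambda i, acc: math_ops.less(i, 1000)
  b = lambda i, acc: (math_ops.add(i, 1), math_ops.add(acc, 1.0))
  # parallel_iterations=1 serializes the iterations; swap_memory=True lets the
  # runtime spill forward-pass tensors from GPU to CPU on long loops.
  return while_loop(c, b, [i0, acc0],
                    parallel_iterations=1, swap_memory=True)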
def _AsTensorList(x, p):
"""Return x as a list of Tensors or IndexedSlices.
For entries of `x` that are Operations, this returns an Identity of `p`
with a dependency on the operation.
Args:
x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
p: A Tensor to return for entries in `x` that are Operations.
Returns:
A list of Tensors or IndexedSlices.
"""
if not isinstance(x, (list, _basetuple)):
x = [x]
l = []
for v in x:
if isinstance(v, ops.Operation):
v = with_dependencies([v], p)
v = ops.convert_to_tensor_or_indexed_slices(v)
if isinstance(v, ops.Tensor):
l.append(array_ops.identity(v))
else:
l.append(ops.IndexedSlices(array_ops.identity(v.values),
array_ops.identity(v.indices)))
return l
def _CheckResults(a, b):
assert len(a) == len(b), (
"Values returned by a() and b() must have the same length.")
for x, y in zip(a, b):
assert x.dtype == y.dtype, (
"Values returned by a() [%s] and b() [%s] must have "
"the same type: %s, %s." %
(x.name, y.name, x.dtype.name, y.dtype.name))
def with_dependencies(dependencies, output_tensor, name=None):
"""Produces the content of `output_tensor` only after `dependencies`.
In some cases, a user may want the output of an operation to be
consumed externally only after some other dependencies have run
  first. This function returns `output_tensor`, but only after all
  operations in `dependencies` have run. Note that this does not gate
  `output_tensor` itself: only the tensor returned by this function is
  guaranteed to be produced after `dependencies` have run.
See also @{tf.tuple$tuple} and @{tf.group$group}.
Args:
dependencies: Iterable of operations to run before this op finishes.
output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
name: (Optional) A name for this operation.
Returns:
Same as `output_tensor`.
Raises:
TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
"""
if context.in_eager_mode():
return output_tensor
with ops.name_scope(name, "control_dependency",
list(dependencies) + [output_tensor]) as name:
with ops.colocate_with(output_tensor):
with ops.control_dependencies(dependencies):
output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
if isinstance(output_tensor, ops.Tensor):
return _Identity(output_tensor, name=name)
else:
return ops.IndexedSlices(_Identity(output_tensor.values, name=name),
output_tensor.indices,
output_tensor.dense_shape)
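# Editor's note: an illustrative sketch added for this document, not part of
# the original module. It shows the gating behaviour described in the
# with_dependencies docstring: the returned tensor is only produced after the
# Assert has run, while the original `x` is not gated. Values are made up.
def _example_with_dependencies():
  x = constant_op.constant([1.0, 2.0])
  check = Assert(math_ops.reduce_all(math_ops.greater(x, 0.0)), [x])
  gated_x = with_dependencies([check], x)
  return gated_x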
def _GroupControlDeps(dev, deps, name=None):
with ops.control_dependencies(deps):
if dev is None:
return no_op(name=name)
else:
with ops.device(dev):
return no_op(name=name)
# TODO(touts): Accept "inputs" as a list.
def group(*inputs, **kwargs):
"""Create an op that groups multiple operations.
  When this op finishes, all ops in `inputs` have finished. This op has no
output.
See also @{tf.tuple$tuple} and
@{tf.control_dependencies$control_dependencies}.
Args:
*inputs: Zero or more tensors to group.
**kwargs: Optional parameters to pass when constructing the NodeDef.
name: A name for this operation (optional).
Returns:
An Operation that executes all its inputs.
Raises:
ValueError: If an unknown keyword argument is provided.
"""
if context.in_eager_mode():
return None
name = kwargs.pop("name", None)
if kwargs:
raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
with ops.name_scope(name, "group_deps", inputs) as name:
# Grouping no inputs means do nothing
if not inputs:
return no_op(name=name)
# Sorts *inputs according to their devices.
ops_on_device = {} # device -> operations specified on the device.
for inp in inputs:
dev = inp.device
if dev in ops_on_device:
ops_on_device[dev].append(inp)
else:
ops_on_device[dev] = [inp]
if len(ops_on_device) == 1:
# 1-level tree. The root node is the returned NoOp node.
(dev, deps), = ops_on_device.items()
return _GroupControlDeps(dev, deps, name=name)
# 2-level tree. The root node is the returned NoOp node.
# deps contains 1 NoOp node for each device.
deps = []
def device_key(dev):
"""A sort key that allows None to be compared to strings."""
return "" if dev is None else dev
for dev in sorted(six.iterkeys(ops_on_device), key=device_key):
deps.append(_GroupControlDeps(dev, ops_on_device[dev]))
with ops.control_dependencies(deps):
return no_op(name=name)
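# Editor's note: an illustrative sketch added for this document, not part of
# the original module. It shows `group` used as a single handle that runs
# several ops; the two constants stand in for e.g. variable update ops.
def _example_group():
  update_a = constant_op.constant(1.0, name="update_a")
  update_b = constant_op.constant(2.0, name="update_b")
  # The returned Operation has no outputs; running it runs both inputs.
  return group(update_a, update_b, name="run_all_updates")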
def tuple(tensors, name=None, control_inputs=None):
"""Group tensors together.
This creates a tuple of tensors with the same values as the `tensors`
argument, except that the value of each tensor is only returned after the
values of all tensors have been computed.
`control_inputs` contains additional ops that have to finish before this op
finishes, but whose outputs are not returned.
This can be used as a "join" mechanism for parallel computations: all the
argument tensors can be computed in parallel, but the values of any tensor
returned by `tuple` are only available after all the parallel computations
are done.
See also @{tf.group$group} and
@{tf.control_dependencies$control_dependencies}.
Args:
tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
name: (optional) A name to use as a `name_scope` for the operation.
control_inputs: List of additional ops to finish before returning.
Returns:
Same as `tensors`.
Raises:
ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
objects.
"""
if context.in_eager_mode():
return tensors
with ops.name_scope(name, "tuple", tensors) as name:
gating_ops = [t.op for t in tensors if t is not None]
if control_inputs:
for c in control_inputs:
if isinstance(c, ops.Tensor):
c = c.op
elif not isinstance(c, ops.Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
gating_ops.append(c)
    # Note: sort the ops here so that the serialized graph (pbtxt) has a
    # deterministic order.
gating_ops = sorted(set(gating_ops), key=lambda op: op._id) # Uniquify ops.
if not gating_ops:
raise ValueError("Must have at least one Tensor: %s" % tensors)
gate = group(*gating_ops)
tpl = []
for t in tensors:
if t is not None:
tpl.append(with_dependencies([gate], t))
else:
tpl.append(None)
return tpl
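# Editor's note: an illustrative sketch added for this document, not part of
# the original module. It shows `tuple` as the "join" described above: both
# branches may run in parallel, but neither returned tensor is available until
# both have been computed. The operands are made up.
def _example_tuple_join():
  left = math_ops.add(constant_op.constant(1), constant_op.constant(2))
  right = math_ops.multiply(constant_op.constant(3), constant_op.constant(4))
  joined_left, joined_right = tuple([left, right])
  return joined_left, joined_right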
def _assert_exclusive(preds):
"""Returns an Assert op that checks that the predicates are exclusive."""
preds_c = array_ops.stack(preds, name="preds_c")
num_true_conditions = math_ops.reduce_sum(
math_ops.cast(preds_c, dtypes.int32), name="num_true_conds")
at_most_one_true_condition = math_ops.less(
num_true_conditions, constant_op.constant(2, name="two_true_conds"))
error_msg = [("More than one condition evaluated as True but "
"exclusive=True. Conditions: (%s), Values:"
% ", ".join([p.name for p in preds])),
preds_c]
return Assert(condition=at_most_one_true_condition, data=error_msg,
summarize=len(preds))
def case(pred_fn_pairs, default=None, exclusive=False, strict=False,
name="case"):
"""Create a case operation.
The `pred_fn_pairs` parameter is a dict or list of pairs of size N.
Each pair contains a boolean scalar tensor and a python callable that
creates the tensors to be returned if the boolean evaluates to True.
`default` is a callable generating a list of tensors. All the callables
in `pred_fn_pairs` as well as `default` (if provided) should return the same
number and types of tensors.
If `exclusive==True`, all predicates are evaluated, and an exception is
thrown if more than one of the predicates evaluates to `True`.
If `exclusive==False`, execution stops at the first predicate which
evaluates to True, and the tensors generated by the corresponding function
are returned immediately. If none of the predicates evaluate to True, this
operation returns the tensors generated by `default`.
`tf.case` supports nested structures as implemented in
`tensorflow.python.util.nest`. All of the callables must return the same
(possibly nested) value structure of lists, tuples, and/or named tuples.
Singleton lists and tuples form the only exceptions to this: when returned by
a callable, they are implicitly unpacked to single values. This
behavior is disabled by passing `strict=True`.
If an unordered dictionary is used for `pred_fn_pairs`, the order of the
conditional tests is not guaranteed. However, the order is guaranteed to be
deterministic, so that variables created in conditional branches are created
in fixed order across runs.
**Example 1:**
Pseudocode:
```
if (x < y) return 17;
else return 23;
```
Expressions:
```python
f1 = lambda: tf.constant(17)
f2 = lambda: tf.constant(23)
r = case([(tf.less(x, y), f1)], default=f2)
```
**Example 2:**
Pseudocode:
```
if (x < y && x > z) raise OpError("Only one predicate may evaluate true");
if (x < y) return 17;
else if (x > z) return 23;
else return -1;
```
Expressions:
```python
def f1(): return tf.constant(17)
def f2(): return tf.constant(23)
def f3(): return tf.constant(-1)
r = case({tf.less(x, y): f1, tf.greater(x, z): f2},
default=f3, exclusive=True)
```
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
strict: A boolean that enables/disables 'strict' mode; see above.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
ValueError: If in eager mode and all predicates are false and no
default is provided.
ValueError: If in eager mode and is passed a dictionary.
"""
pfp = pred_fn_pairs # For readability
if not (isinstance(pfp, list) or isinstance(pfp, _basetuple)
or isinstance(pfp, dict)):
raise TypeError("fns must be a list, tuple, or dict")
if isinstance(pfp, dict):
if context.in_eager_mode():
raise ValueError(
"In eager mode the predicates must be a list, not a dictionary.")
if isinstance(pfp, collections.OrderedDict):
pfp = pfp.items()
else:
pfp = sorted(pfp.items(), key=lambda item: item[0].name)
if not exclusive:
logging.warn("%s: An unordered dictionary of predicate/fn pairs was "
"provided, but exclusive=False. The order of conditional "
"tests is deterministic but not guaranteed.", name)
for tup in pfp:
if not isinstance(tup, _basetuple) or len(tup) != 2:
raise TypeError("Each entry in pred_fn_pairs must be a 2-tuple")
pred, fn = tup
if pred.dtype != dtypes.bool:
raise TypeError("pred must be of type bool: %s", pred.name)
if not callable(fn):
raise TypeError("fn for pred %s must be callable." % pred.name)
if default is not None and not callable(default):
raise TypeError("default must be callable.")
if context.in_eager_mode():
for pred, fn in pfp:
if pred:
return fn()
if default is None:
raise ValueError("tf.case received all false predicates and no default.")
return default()
preds, fns = map(list, zip(*pfp))
del pfp # From now on, preds and fns form the source of truth.
with ops.name_scope(name, "case", [preds]):
exclusivity_assert = _assert_exclusive(preds) if exclusive else None
# If no default is provided, then we remove one of the (predicate, function)
# pairs and define the default to be the removed function with an additional
# control dependency that asserts that the removed predicate holds.
if default is None:
all_preds = _basetuple(preds) # For the error message.
last_pred, last_fn = preds.pop(), fns.pop()
def new_default():
preds_c = array_ops.stack(all_preds, name="preds_c")
error_msg = [
("None of the conditions evaluated as True. Conditions: (%s), "
"Values:" % ", ".join([p.name for p in all_preds])),
preds_c]
assertion = Assert(condition=last_pred,
data=error_msg, summarize=len(all_preds))
with ops.control_dependencies([assertion]):
return last_fn()
default = new_default
if not preds:
return default()
not_preds = []
for i, p in enumerate(preds):
with ops.name_scope("not_%d" % i):
not_preds.append(math_ops.logical_not(p))
and_not_preds = [constant_op.constant(True, name="always_true")]
for i, notp in enumerate(not_preds):
with ops.name_scope("and_not_%d" % i):
and_not_preds.append(math_ops.logical_and(and_not_preds[-1], notp))
# preds = [p1, p2, p3]
# fns = [f1, f2, f3]
# not_preds = [~p1, ~p2, ~p3]
# and_not_preds = [True, ~p1, ~p1 & ~p2, ~p1 & ~p2 & ~p3]
# case_preds = [p1,
# p2 & ~p1,
# p3 & ~p2 & ~p1,
# ~p3 & ~p2 & ~p1]
case_preds = []
for i, (p, and_not_p_prev) in enumerate(zip(preds, and_not_preds[:-1])):
with ops.name_scope("case_%d" % i):
case_preds.append(math_ops.logical_and(p, and_not_p_prev))
with ops.name_scope("case_none_are_true"):
case_preds.append(and_not_preds[-1])
# Create an empty tensor, or list, with the right type and shape
with ops.name_scope("case_create_empty"):
def _create_empty_constant(dtype, shape):
value = ("" if dtype == dtypes.string else dtype.as_numpy_dtype())
if shape.ndims is None:
return array_ops.constant(value, dtype=dtype)
else:
temp_shape = [1 if x.value is None else x.value for x in shape]
result = array_ops.constant(value, shape=temp_shape, dtype=dtype)
result._shape = shape # pylint: disable=protected-access
return result
def _correct_empty(v):
if isinstance(v, ops.Operation):
return no_op()
elif isinstance(v, tensor_array_ops.TensorArray):
return v
elif not hasattr(v, "dtype"):
return ops.convert_to_tensor(v)
elif isinstance(v, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(indices=[[0] * len(v.get_shape())],
values=[v.dtype.as_numpy_dtype()],
dense_shape=v.get_shape())
else:
return _create_empty_constant(v.dtype, v.get_shape())
empty = lambda: nest.map_structure(_correct_empty, default())
# case_sequence = [
# cond(~p3 & ~p2 & ~p1, default, empty),
# cond(p3 & ~p2 & ~p1, f3, lambda: case_sequence[0]),
# cond(p2 & ~p1, f2, lambda: case_sequence[1]),
# cond(p1, f1, lambda: case_sequence[2])
# ]
#
# And the return value will be case_sequence[-1]
def _build_case():
all_fns = [fn for fn in fns]
all_fns.append(default)
prev_case = None
for i, (cp, fn) in enumerate(list(zip(case_preds, all_fns))[::-1]):
prev_case = cond(
cp, fn,
empty if i == 0 else lambda: prev_case,
strict=strict, name="If_%d" % i)
return prev_case
if exclusivity_assert is not None:
with ops.control_dependencies([exclusivity_assert]):
case_seq = _build_case()
else:
case_seq = _build_case()
if not strict:
case_seq = _UnpackIfSingleton(case_seq)
return case_seq
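# Editor's note: an illustrative sketch added for this document, not part of
# the original module. It demonstrates the singleton-unpacking rule in the
# case docstring above: with the default strict=False a branch returning a
# one-element list yields a single tensor, while strict=True keeps the list.
def _example_case_strict():
  x = constant_op.constant(2)
  y = constant_op.constant(5)
  f1 = lambda: [constant_op.constant(17)]
  f2 = lambda: [constant_op.constant(23)]
  unpacked = case([(math_ops.less(x, y), f1)], default=f2)
  kept_as_list = case([(math_ops.less(x, y), f1)], default=f2, strict=True)
  return unpacked, kept_as_list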
ops.register_proto_function(ops.GraphKeys.COND_CONTEXT,
proto_type=control_flow_pb2.CondContextDef,
to_proto=CondContext.to_proto,
from_proto=CondContext.from_proto)
ops.register_proto_function(ops.GraphKeys.WHILE_CONTEXT,
proto_type=control_flow_pb2.WhileContextDef,
to_proto=WhileContext.to_proto,
from_proto=WhileContext.from_proto)
| {
"content_hash": "40e1948c32cef28ceb1c997c7133992e",
"timestamp": "",
"source": "github",
"line_count": 3258,
"max_line_length": 116,
"avg_line_length": 38.85328422344997,
"alnum_prop": 0.6601624217910637,
"repo_name": "Mazecreator/tensorflow",
"id": "b341eab7ce2de5adb2c8ae0cd0975370503619d7",
"size": "127274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/control_flow_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "175403"
},
{
"name": "C++",
"bytes": "21737608"
},
{
"name": "CMake",
"bytes": "130644"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "786880"
},
{
"name": "HTML",
"bytes": "558790"
},
{
"name": "Java",
"bytes": "279355"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36991"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64656"
},
{
"name": "Protocol Buffer",
"bytes": "199996"
},
{
"name": "Python",
"bytes": "17935555"
},
{
"name": "Shell",
"bytes": "320192"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
} |
from django.template.defaultfilters import divisibleby
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_true(self):
self.assertEqual(divisibleby(4, 2), True)
def test_false(self):
self.assertEqual(divisibleby(4, 3), False)
| {
"content_hash": "c4df318ba0494130c17a099e1394d8bb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 54,
"avg_line_length": 27.09090909090909,
"alnum_prop": 0.7080536912751678,
"repo_name": "yephper/django",
"id": "59156853196d12f24a3bcfd9e932c61b29422c20",
"size": "298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/template_tests/filter_tests/test_divisibleby.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, JsonResponse
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
import random
import json
from amr.forms import UserRegistrationForm, UserLoginForm, AmrGenerationForm
from amr.models import *
from amr.amr_reader.amr import AMR
def index(request):
return render(request, 'amr/index.html')
def signup(request):
login_form = UserLoginForm()
registration_form = UserRegistrationForm()
if request.method == "POST":
        # Process their login or registration submission
if request.POST.get('login', None):
login_form = UserLoginForm(request.POST)
if login_form.is_valid():
user = authenticate(username=login_form.cleaned_data['email'],
password=login_form.cleaned_data['password'])
if user is not None:
login(request, user)
if request.GET.get('next', None) is None:
return redirect('index')
else:
return redirect(request.GET.get('next'))
else:
messages.error(request, "Invalid email/password combination")
elif request.POST.get('registration', None):
# They are trying to register
registration_form = UserRegistrationForm(request.POST)
if registration_form.is_valid():
if registration_form.cleaned_data['email'] != registration_form.cleaned_data['confirm_email']:
messages.error(request, "Email addresses do not match")
return redirect('signup')
if registration_form.cleaned_data['password'] != registration_form.cleaned_data['confirm_password']:
messages.error(request, "Passwords do not match")
return redirect('signup')
if User.objects.filter(username=registration_form.cleaned_data['email']).count() != 0:
messages.error(request, "Email is already registered")
return redirect('signup')
user = User.objects.create_user(registration_form.cleaned_data['email'],
registration_form.cleaned_data['email'],
registration_form.cleaned_data['password'])
login(request, user)
if request.GET.get('next', None) is None:
return redirect('index')
else:
return redirect(request.GET.get('next'))
else:
# They messed something up so just direct them back to this page so they can fix it
messages.error(request, 'Something went wrong. Please try again.')
return redirect('signup')
# This is the default fall through area
return render(request, 'amr/signup.html', {
'registration_form': registration_form,
'login_form': login_form,
})
def signout(request):
if request.user.is_authenticated():
logout(request)
messages.success(request, 'Successfully logged out')
return redirect('index')
def contact(request):
return render(request, 'amr/contact.html')
@login_required
def generate(request):
amr_form = AmrGenerationForm()
if request.method == "POST":
# They are submitting a generation
amr_form = AmrGenerationForm(request.POST)
if amr_form.is_valid():
amr = AmrEntry.objects.get(id=amr_form.cleaned_data['amr_id'])
generation = Generation(amr=amr, human_sentence=amr_form.cleaned_data['generation'], user=request.user)
generation.save()
messages.success(request, 'Generation saved. Thanks! Here\'s another')
return redirect('generate')
# Select a random AMR for them to generate
amr_ids = AmrEntry.objects.values_list('id', flat=True)
seen_ids = Generation.objects.filter(user=request.user).values_list('amr_id', flat=True)
amr_ids = list(set(amr_ids) - set(seen_ids))
if len(amr_ids) == 0:
messages.info(request, "You have made a generation for all available AMRs.")
return redirect('index')
random_id = random.sample(list(amr_ids), 1)
amr = AmrEntry.objects.get(id__in=random_id)
return render(request, 'amr/generate.html', {'amr': AMR(amr.amr), 'amr_id': amr.id, 'amr_form': amr_form})
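# Editor's note: a toy sketch added for this document, not part of the original
# views module. It mirrors the "unseen AMR" selection in generate() above with
# made-up ids.
def _example_unseen_amr_selection():
    amr_ids = {1, 2, 3, 4}
    seen_ids = {2, 4}
    unseen = list(set(amr_ids) - set(seen_ids))  # e.g. [1, 3] (order may vary)
    return random.sample(unseen, 1)[0] if unseen else None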
@login_required
def easy_generate(request):
amr_form = AmrGenerationForm()
if request.method == "POST":
# They are submitting a generation
amr_form = AmrGenerationForm(request.POST)
if amr_form.is_valid():
amr = AmrEntry.objects.get(id=amr_form.cleaned_data['amr_id'])
generation = Generation(amr=amr, human_sentence=amr_form.cleaned_data['generation'], user=request.user)
generation.save()
messages.success(request, 'Generation saved. Thanks! Here\'s another')
return redirect('easy_generate')
# Select the easiest AMR available
amr_ids = AmrEntry.objects.values_list('id', flat=True)
seen_ids = Generation.objects.filter(user=request.user).values_list('amr_id', flat=True)
amr_ids = list(set(amr_ids) - set(seen_ids))
if len(amr_ids) == 0:
messages.info(request, "You have made a generation for all available AMRs.")
return redirect('index')
amrs = AmrEntry.objects.filter(id__in=amr_ids)
amrs = sorted(amrs, key=lambda x: len(x.sentence))
amr = amrs[0]
return render(request, 'amr/generate.html', {'amr': AMR(amr.amr), 'amr_id': amr.id, 'amr_form': amr_form})
def not_found(request):
raise Http404("Page does not exist")
@login_required
def get_sentence_pairs(request):
generations = Generation.objects.all()
response = []
for generation in generations:
temp = {}
temp['reference'] = generation.amr.sentence
temp['hypothesis'] = generation.human_sentence
temp['user'] = generation.user_id
temp['amr'] = generation.amr.amr
temp['amr_id'] = generation.amr_id
response.append(temp)
return JsonResponse(response, safe=False)
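# Editor's note: a hypothetical sketch added for this document, not part of the
# original views module. It spells out the shape of one item in the JSON list
# returned by get_sentence_pairs above; every value below is made up.
def _example_sentence_pair():
    return {
        "reference": "The boy wants to go.",       # gold sentence for the AMR
        "hypothesis": "The boy wants to leave.",   # a user's generation
        "user": 3,                                 # id of the generating user
        "amr": "(w / want-01 :ARG0 (b / boy))",    # the AMR annotation text
        "amr_id": 42,                              # id of the AmrEntry
    }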
| {
"content_hash": "d7bfeff2568b1830813135a15f6de025",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 116,
"avg_line_length": 44.19444444444444,
"alnum_prop": 0.6239786297925832,
"repo_name": "NickChapman/humangen",
"id": "faec882d2319e56c5ff70f2a333b9436e39bff17",
"size": "6364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amr/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6720"
},
{
"name": "Python",
"bytes": "56553"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
def truncate_sentence(text, max_chars, break_words=False, padding=0):
"""Truncates a sentence.
:param max_chars: The maximum characters of truncated sentence.
:param break_words: If you wish to truncate given sentence strictly even
if it breaks a word, set it to ``True``. It defaults
to ``False`` which means truncating given sentence
shorter but never breaking words.
:param padding: The padding size for truncating. It is usually used to
keep spaces for some ending characters such as ``"..."``.
:return: The truncated sentence.
"""
    if break_words:
        # NOTE: when the text is longer than max_chars, this slice is
        # equivalent to text[:max_chars - padding].
        return text[:-abs(max_chars - len(text)) - padding]
words = []
for word in text.split():
predicted_len = (
sum(map(len, words)) + # length of words
len(word) + # length of next word
len(words) - 1 + # length of spaces
padding)
if predicted_len >= max_chars:
break
words.append(word)
return ' '.join(words)
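# Editor's note: a small usage sketch added for this document, not part of the
# original module. The expected values follow the word-boundary logic above.
def _example_truncate_sentence():
    assert truncate_sentence('hello world', max_chars=10) == 'hello'
    assert truncate_sentence('hello world', max_chars=11) == 'hello world'
    # Reserving padding (e.g. for an appended ellipsis) makes the cut earlier.
    truncated = truncate_sentence('hello brave new world', max_chars=16, padding=3)
    assert truncated == 'hello brave'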
| {
"content_hash": "e6f8f519b2482a5d6a4facea7f4e38d0",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 38.93103448275862,
"alnum_prop": 0.5828166519043402,
"repo_name": "tonyseek/html5lib-truncation",
"id": "f9df93e02aecced1bcb1dc459babc0b14a00b252",
"size": "1129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "html5lib_truncation/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9271"
}
],
"symlink_target": ""
} |
import saml2
from saml2 import SamlBase
NAMESPACE = 'http://schemas.xmlsoap.org/wsdl/'
class TDocumentation_(SamlBase):
"""The http://schemas.xmlsoap.org/wsdl/:tDocumentation element """
c_tag = 'tDocumentation'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
def t_documentation__from_string(xml_string):
return saml2.create_class_from_xml_string(TDocumentation_, xml_string)
class TDocumented_documentation(TDocumentation_):
c_tag = 'documentation'
c_namespace = NAMESPACE
c_children = TDocumentation_.c_children.copy()
c_attributes = TDocumentation_.c_attributes.copy()
c_child_order = TDocumentation_.c_child_order[:]
c_cardinality = TDocumentation_.c_cardinality.copy()
def t_documented_documentation_from_string(xml_string):
return saml2.create_class_from_xml_string(TDocumented_documentation, xml_string)
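# Editor's note: an illustrative sketch added for this document, not part of
# the generated bindings. Every element class in this file is paired with a
# *_from_string helper like the one above; the XML snippet here is made up.
def _example_parse_documentation():
    xml = ('<documentation xmlns="http://schemas.xmlsoap.org/wsdl/">'
           'Service level documentation.</documentation>')
    # Returns a TDocumented_documentation instance, or None if the element
    # name or namespace does not match.
    return t_documented_documentation_from_string(xml)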
class TDocumented_(SamlBase):
"""The http://schemas.xmlsoap.org/wsdl/:tDocumented element """
c_tag = 'tDocumented'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://schemas.xmlsoap.org/wsdl/}documentation'] = ('documentation', TDocumented_documentation)
c_cardinality['documentation'] = {"min":0, "max":1}
c_child_order.extend(['documentation'])
def __init__(self,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.documentation=documentation
def t_documented__from_string(xml_string):
return saml2.create_class_from_xml_string(TDocumented_, xml_string)
class TExtensibleAttributesDocumented_(TDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tExtensibleAttributesDocumented element """
c_tag = 'tExtensibleAttributesDocumented'
c_namespace = NAMESPACE
c_children = TDocumented_.c_children.copy()
c_attributes = TDocumented_.c_attributes.copy()
c_child_order = TDocumented_.c_child_order[:]
c_cardinality = TDocumented_.c_cardinality.copy()
class TExtensibleDocumented_(TDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tExtensibleDocumented element """
c_tag = 'tExtensibleDocumented'
c_namespace = NAMESPACE
c_children = TDocumented_.c_children.copy()
c_attributes = TDocumented_.c_attributes.copy()
c_child_order = TDocumented_.c_child_order[:]
c_cardinality = TDocumented_.c_cardinality.copy()
class TImport_(TExtensibleAttributesDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tImport element """
c_tag = 'tImport'
c_namespace = NAMESPACE
c_children = TExtensibleAttributesDocumented_.c_children.copy()
c_attributes = TExtensibleAttributesDocumented_.c_attributes.copy()
c_child_order = TExtensibleAttributesDocumented_.c_child_order[:]
c_cardinality = TExtensibleAttributesDocumented_.c_cardinality.copy()
c_attributes['namespace'] = ('namespace', 'anyURI', True)
c_attributes['location'] = ('location', 'anyURI', True)
def __init__(self,
namespace=None,
location=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleAttributesDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.namespace=namespace
self.location=location
def t_import__from_string(xml_string):
return saml2.create_class_from_xml_string(TImport_, xml_string)
class TTypes_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tTypes element """
c_tag = 'tTypes'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
def t_types__from_string(xml_string):
return saml2.create_class_from_xml_string(TTypes_, xml_string)
class TPart_(TExtensibleAttributesDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tPart element """
c_tag = 'tPart'
c_namespace = NAMESPACE
c_children = TExtensibleAttributesDocumented_.c_children.copy()
c_attributes = TExtensibleAttributesDocumented_.c_attributes.copy()
c_child_order = TExtensibleAttributesDocumented_.c_child_order[:]
c_cardinality = TExtensibleAttributesDocumented_.c_cardinality.copy()
c_attributes['name'] = ('name', 'NCName', True)
c_attributes['element'] = ('element', 'QName', False)
c_attributes['type'] = ('type', 'QName', False)
def __init__(self,
name=None,
element=None,
type=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleAttributesDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.name=name
self.element=element
self.type=type
def t_part__from_string(xml_string):
return saml2.create_class_from_xml_string(TPart_, xml_string)
class TOperation_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tOperation element """
c_tag = 'tOperation'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
c_attributes['name'] = ('name', 'NCName', True)
c_attributes['parameterOrder'] = ('parameter_order', 'NMTOKENS', False)
def __init__(self,
name=None,
parameter_order=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.name=name
self.parameter_order=parameter_order
def t_operation__from_string(xml_string):
return saml2.create_class_from_xml_string(TOperation_, xml_string)
class TParam_(TExtensibleAttributesDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tParam element """
c_tag = 'tParam'
c_namespace = NAMESPACE
c_children = TExtensibleAttributesDocumented_.c_children.copy()
c_attributes = TExtensibleAttributesDocumented_.c_attributes.copy()
c_child_order = TExtensibleAttributesDocumented_.c_child_order[:]
c_cardinality = TExtensibleAttributesDocumented_.c_cardinality.copy()
c_attributes['name'] = ('name', 'NCName', False)
c_attributes['message'] = ('message', 'QName', True)
def __init__(self,
name=None,
message=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleAttributesDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.name=name
self.message=message
def t_param__from_string(xml_string):
return saml2.create_class_from_xml_string(TParam_, xml_string)
class TFault_(TExtensibleAttributesDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tFault element """
c_tag = 'tFault'
c_namespace = NAMESPACE
c_children = TExtensibleAttributesDocumented_.c_children.copy()
c_attributes = TExtensibleAttributesDocumented_.c_attributes.copy()
c_child_order = TExtensibleAttributesDocumented_.c_child_order[:]
c_cardinality = TExtensibleAttributesDocumented_.c_cardinality.copy()
c_attributes['name'] = ('name', 'NCName', True)
c_attributes['message'] = ('message', 'QName', True)
def __init__(self,
name=None,
message=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleAttributesDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.name=name
self.message=message
def t_fault__from_string(xml_string):
return saml2.create_class_from_xml_string(TFault_, xml_string)
class TBindingOperationMessage_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tBindingOperationMessage element """
c_tag = 'tBindingOperationMessage'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
c_attributes['name'] = ('name', 'NCName', False)
def __init__(self,
name=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.name=name
def t_binding_operation_message__from_string(xml_string):
return saml2.create_class_from_xml_string(TBindingOperationMessage_, xml_string)
class TBindingOperationFault_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tBindingOperationFault element """
c_tag = 'tBindingOperationFault'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
c_attributes['name'] = ('name', 'NCName', True)
def __init__(self,
name=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.name=name
def t_binding_operation_fault__from_string(xml_string):
return saml2.create_class_from_xml_string(TBindingOperationFault_, xml_string)
class TBindingOperation_input(TBindingOperationMessage_):
c_tag = 'input'
c_namespace = NAMESPACE
c_children = TBindingOperationMessage_.c_children.copy()
c_attributes = TBindingOperationMessage_.c_attributes.copy()
c_child_order = TBindingOperationMessage_.c_child_order[:]
c_cardinality = TBindingOperationMessage_.c_cardinality.copy()
def t_binding_operation_input_from_string(xml_string):
return saml2.create_class_from_xml_string(TBindingOperation_input, xml_string)
class TBindingOperation_output(TBindingOperationMessage_):
c_tag = 'output'
c_namespace = NAMESPACE
c_children = TBindingOperationMessage_.c_children.copy()
c_attributes = TBindingOperationMessage_.c_attributes.copy()
c_child_order = TBindingOperationMessage_.c_child_order[:]
c_cardinality = TBindingOperationMessage_.c_cardinality.copy()
def t_binding_operation_output_from_string(xml_string):
return saml2.create_class_from_xml_string(TBindingOperation_output, xml_string)
class TBindingOperation_fault(TBindingOperationFault_):
c_tag = 'fault'
c_namespace = NAMESPACE
c_children = TBindingOperationFault_.c_children.copy()
c_attributes = TBindingOperationFault_.c_attributes.copy()
c_child_order = TBindingOperationFault_.c_child_order[:]
c_cardinality = TBindingOperationFault_.c_cardinality.copy()
def t_binding_operation_fault_from_string(xml_string):
return saml2.create_class_from_xml_string(TBindingOperation_fault, xml_string)
class TBindingOperation_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tBindingOperation element """
c_tag = 'tBindingOperation'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
c_children['{http://schemas.xmlsoap.org/wsdl/}input'] = ('input', TBindingOperation_input)
c_cardinality['input'] = {"min":0, "max":1}
c_children['{http://schemas.xmlsoap.org/wsdl/}output'] = ('output', TBindingOperation_output)
c_cardinality['output'] = {"min":0, "max":1}
c_children['{http://schemas.xmlsoap.org/wsdl/}fault'] = ('fault', [TBindingOperation_fault])
c_cardinality['fault'] = {"min":0}
c_attributes['name'] = ('name', 'NCName', True)
c_child_order.extend(['input', 'output', 'fault'])
def __init__(self,
input=None,
output=None,
fault=None,
name=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.input=input
self.output=output
self.fault=fault or []
self.name=name
def t_binding_operation__from_string(xml_string):
return saml2.create_class_from_xml_string(TBindingOperation_, xml_string)
class TPort_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tPort element """
c_tag = 'tPort'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
c_attributes['name'] = ('name', 'NCName', True)
c_attributes['binding'] = ('binding', 'QName', True)
def __init__(self,
name=None,
binding=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.name=name
self.binding=binding
def t_port__from_string(xml_string):
return saml2.create_class_from_xml_string(TPort_, xml_string)
class TExtensibilityElement_(SamlBase):
"""The http://schemas.xmlsoap.org/wsdl/:tExtensibilityElement element """
c_tag = 'tExtensibilityElement'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_attributes['required'] = ('required', 'None', False)
def __init__(self,
required=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.required=required
class Import(TImport_):
"""The http://schemas.xmlsoap.org/wsdl/:import element """
c_tag = 'import'
c_namespace = NAMESPACE
c_children = TImport_.c_children.copy()
c_attributes = TImport_.c_attributes.copy()
c_child_order = TImport_.c_child_order[:]
c_cardinality = TImport_.c_cardinality.copy()
def import_from_string(xml_string):
return saml2.create_class_from_xml_string(Import, xml_string)
class Types(TTypes_):
"""The http://schemas.xmlsoap.org/wsdl/:types element """
c_tag = 'types'
c_namespace = NAMESPACE
c_children = TTypes_.c_children.copy()
c_attributes = TTypes_.c_attributes.copy()
c_child_order = TTypes_.c_child_order[:]
c_cardinality = TTypes_.c_cardinality.copy()
def types_from_string(xml_string):
return saml2.create_class_from_xml_string(Types, xml_string)
class TMessage_part(TPart_):
c_tag = 'part'
c_namespace = NAMESPACE
c_children = TPart_.c_children.copy()
c_attributes = TPart_.c_attributes.copy()
c_child_order = TPart_.c_child_order[:]
c_cardinality = TPart_.c_cardinality.copy()
def t_message_part_from_string(xml_string):
return saml2.create_class_from_xml_string(TMessage_part, xml_string)
class TMessage_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tMessage element """
c_tag = 'tMessage'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
c_children['{http://schemas.xmlsoap.org/wsdl/}part'] = ('part', [TMessage_part])
c_cardinality['part'] = {"min":0}
c_attributes['name'] = ('name', 'NCName', True)
c_child_order.extend(['part'])
def __init__(self,
part=None,
name=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.part=part or []
self.name=name
def t_message__from_string(xml_string):
return saml2.create_class_from_xml_string(TMessage_, xml_string)
class TPortType_operation(TOperation_):
c_tag = 'operation'
c_namespace = NAMESPACE
c_children = TOperation_.c_children.copy()
c_attributes = TOperation_.c_attributes.copy()
c_child_order = TOperation_.c_child_order[:]
c_cardinality = TOperation_.c_cardinality.copy()
def t_port_type_operation_from_string(xml_string):
return saml2.create_class_from_xml_string(TPortType_operation, xml_string)
class TPortType_(TExtensibleAttributesDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tPortType element """
c_tag = 'tPortType'
c_namespace = NAMESPACE
c_children = TExtensibleAttributesDocumented_.c_children.copy()
c_attributes = TExtensibleAttributesDocumented_.c_attributes.copy()
c_child_order = TExtensibleAttributesDocumented_.c_child_order[:]
c_cardinality = TExtensibleAttributesDocumented_.c_cardinality.copy()
c_children['{http://schemas.xmlsoap.org/wsdl/}operation'] = ('operation', [TPortType_operation])
c_cardinality['operation'] = {"min":0}
c_attributes['name'] = ('name', 'NCName', True)
c_child_order.extend(['operation'])
def __init__(self,
operation=None,
name=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleAttributesDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.operation=operation or []
self.name=name
def t_port_type__from_string(xml_string):
return saml2.create_class_from_xml_string(TPortType_, xml_string)
class TBinding_operation(TBindingOperation_):
c_tag = 'operation'
c_namespace = NAMESPACE
c_children = TBindingOperation_.c_children.copy()
c_attributes = TBindingOperation_.c_attributes.copy()
c_child_order = TBindingOperation_.c_child_order[:]
c_cardinality = TBindingOperation_.c_cardinality.copy()
def t_binding_operation_from_string(xml_string):
return saml2.create_class_from_xml_string(TBinding_operation, xml_string)
class TBinding_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tBinding element """
c_tag = 'tBinding'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
c_children['{http://schemas.xmlsoap.org/wsdl/}operation'] = ('operation', [TBinding_operation])
c_cardinality['operation'] = {"min":0}
c_attributes['name'] = ('name', 'NCName', True)
c_attributes['type'] = ('type', 'QName', True)
c_child_order.extend(['operation'])
def __init__(self,
operation=None,
name=None,
type=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.operation=operation or []
self.name=name
self.type=type
def t_binding__from_string(xml_string):
return saml2.create_class_from_xml_string(TBinding_, xml_string)
class TService_port(TPort_):
c_tag = 'port'
c_namespace = NAMESPACE
c_children = TPort_.c_children.copy()
c_attributes = TPort_.c_attributes.copy()
c_child_order = TPort_.c_child_order[:]
c_cardinality = TPort_.c_cardinality.copy()
def t_service_port_from_string(xml_string):
return saml2.create_class_from_xml_string(TService_port, xml_string)
class TService_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tService element """
c_tag = 'tService'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
c_children['{http://schemas.xmlsoap.org/wsdl/}port'] = ('port', [TService_port])
c_cardinality['port'] = {"min":0}
c_attributes['name'] = ('name', 'NCName', True)
c_child_order.extend(['port'])
def __init__(self,
port=None,
name=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.port=port or []
self.name=name
def t_service__from_string(xml_string):
return saml2.create_class_from_xml_string(TService_, xml_string)
class Message(TMessage_):
"""The http://schemas.xmlsoap.org/wsdl/:message element """
c_tag = 'message'
c_namespace = NAMESPACE
c_children = TMessage_.c_children.copy()
c_attributes = TMessage_.c_attributes.copy()
c_child_order = TMessage_.c_child_order[:]
c_cardinality = TMessage_.c_cardinality.copy()
def message_from_string(xml_string):
return saml2.create_class_from_xml_string(Message, xml_string)
class PortType(TPortType_):
"""The http://schemas.xmlsoap.org/wsdl/:portType element """
c_tag = 'portType'
c_namespace = NAMESPACE
c_children = TPortType_.c_children.copy()
c_attributes = TPortType_.c_attributes.copy()
c_child_order = TPortType_.c_child_order[:]
c_cardinality = TPortType_.c_cardinality.copy()
def port_type_from_string(xml_string):
return saml2.create_class_from_xml_string(PortType, xml_string)
class Binding(TBinding_):
"""The http://schemas.xmlsoap.org/wsdl/:binding element """
c_tag = 'binding'
c_namespace = NAMESPACE
c_children = TBinding_.c_children.copy()
c_attributes = TBinding_.c_attributes.copy()
c_child_order = TBinding_.c_child_order[:]
c_cardinality = TBinding_.c_cardinality.copy()
def binding_from_string(xml_string):
return saml2.create_class_from_xml_string(Binding, xml_string)
class Service(TService_):
"""The http://schemas.xmlsoap.org/wsdl/:service element """
c_tag = 'service'
c_namespace = NAMESPACE
c_children = TService_.c_children.copy()
c_attributes = TService_.c_attributes.copy()
c_child_order = TService_.c_child_order[:]
c_cardinality = TService_.c_cardinality.copy()
def service_from_string(xml_string):
return saml2.create_class_from_xml_string(Service, xml_string)
class TDefinitions_(TExtensibleDocumented_):
"""The http://schemas.xmlsoap.org/wsdl/:tDefinitions element """
c_tag = 'tDefinitions'
c_namespace = NAMESPACE
c_children = TExtensibleDocumented_.c_children.copy()
c_attributes = TExtensibleDocumented_.c_attributes.copy()
c_child_order = TExtensibleDocumented_.c_child_order[:]
c_cardinality = TExtensibleDocumented_.c_cardinality.copy()
c_children['{http://schemas.xmlsoap.org/wsdl/}import'] = ('import', Import)
c_cardinality['import'] = {"min":0, "max":1}
c_children['{http://schemas.xmlsoap.org/wsdl/}types'] = ('types', Types)
c_cardinality['types'] = {"min":0, "max":1}
c_children['{http://schemas.xmlsoap.org/wsdl/}message'] = ('message', Message)
c_cardinality['message'] = {"min":0, "max":1}
c_children['{http://schemas.xmlsoap.org/wsdl/}portType'] = ('port_type', PortType)
c_cardinality['port_type'] = {"min":0, "max":1}
c_children['{http://schemas.xmlsoap.org/wsdl/}binding'] = ('binding', Binding)
c_cardinality['binding'] = {"min":0, "max":1}
c_children['{http://schemas.xmlsoap.org/wsdl/}service'] = ('service', Service)
c_cardinality['service'] = {"min":0, "max":1}
c_attributes['targetNamespace'] = ('target_namespace', 'anyURI', False)
c_attributes['name'] = ('name', 'NCName', False)
c_child_order.extend(['import', 'types', 'message', 'port_type', 'binding', 'service'])
def __init__(self,
import_=None,
types=None,
message=None,
port_type=None,
binding=None,
service=None,
target_namespace=None,
name=None,
documentation=None,
text=None,
extension_elements=None,
extension_attributes=None,
):
TExtensibleDocumented_.__init__(self,
documentation=documentation,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
)
self.import_=import_
self.types=types
self.message=message
self.port_type=port_type
self.binding=binding
self.service=service
self.target_namespace=target_namespace
self.name=name
def t_definitions__from_string(xml_string):
return saml2.create_class_from_xml_string(TDefinitions_, xml_string)
class Definitions(TDefinitions_):
"""The http://schemas.xmlsoap.org/wsdl/:definitions element """
c_tag = 'definitions'
c_namespace = NAMESPACE
c_children = TDefinitions_.c_children.copy()
c_attributes = TDefinitions_.c_attributes.copy()
c_child_order = TDefinitions_.c_child_order[:]
c_cardinality = TDefinitions_.c_cardinality.copy()
def definitions_from_string(xml_string):
return saml2.create_class_from_xml_string(Definitions, xml_string)
#..................
# []
ELEMENT_FROM_STRING = {
TDocumentation_.c_tag: t_documentation__from_string,
TDocumented_.c_tag: t_documented__from_string,
Definitions.c_tag: definitions_from_string,
TDefinitions_.c_tag: t_definitions__from_string,
TImport_.c_tag: t_import__from_string,
TTypes_.c_tag: t_types__from_string,
TMessage_.c_tag: t_message__from_string,
TPart_.c_tag: t_part__from_string,
TPortType_.c_tag: t_port_type__from_string,
TOperation_.c_tag: t_operation__from_string,
TParam_.c_tag: t_param__from_string,
TFault_.c_tag: t_fault__from_string,
TBinding_.c_tag: t_binding__from_string,
TBindingOperationMessage_.c_tag: t_binding_operation_message__from_string,
TBindingOperationFault_.c_tag: t_binding_operation_fault__from_string,
TBindingOperation_.c_tag: t_binding_operation__from_string,
TService_.c_tag: t_service__from_string,
TPort_.c_tag: t_port__from_string,
TDocumented_documentation.c_tag: t_documented_documentation_from_string,
TBindingOperation_input.c_tag: t_binding_operation_input_from_string,
TBindingOperation_output.c_tag: t_binding_operation_output_from_string,
TBindingOperation_fault.c_tag: t_binding_operation_fault_from_string,
Import.c_tag: import_from_string,
Types.c_tag: types_from_string,
TMessage_part.c_tag: t_message_part_from_string,
TPortType_operation.c_tag: t_port_type_operation_from_string,
TService_port.c_tag: t_service_port_from_string,
Message.c_tag: message_from_string,
PortType.c_tag: port_type_from_string,
Binding.c_tag: binding_from_string,
Service.c_tag: service_from_string,
}
ELEMENT_BY_TAG = {
'tDocumentation': TDocumentation_,
'tDocumented': TDocumented_,
'definitions': Definitions,
'tDefinitions': TDefinitions_,
'tImport': TImport_,
'tTypes': TTypes_,
'tMessage': TMessage_,
'tPart': TPart_,
'tPortType': TPortType_,
'tOperation': TOperation_,
'tParam': TParam_,
'tFault': TFault_,
'tBinding': TBinding_,
'tBindingOperationMessage': TBindingOperationMessage_,
'tBindingOperationFault': TBindingOperationFault_,
'tBindingOperation': TBindingOperation_,
'tService': TService_,
'tPort': TPort_,
'documentation': TDocumented_documentation,
'input': TBindingOperation_input,
'output': TBindingOperation_output,
'fault': TBindingOperation_fault,
'import': Import,
'types': Types,
'part': TMessage_part,
'operation': TPortType_operation,
'port': TService_port,
'message': Message,
'portType': PortType,
'binding': Binding,
'service': Service,
'tExtensibleAttributesDocumented': TExtensibleAttributesDocumented_,
'tExtensibleDocumented': TExtensibleDocumented_,
'tExtensibilityElement': TExtensibilityElement_,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
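# Illustrative only (not part of the generated schema module): a sketch of how
# these bindings are typically consumed. The file name and message name below
# are hypothetical placeholders, not taken from this repository.
#
#     defs = definitions_from_string(open("service.wsdl").read())
#     msg = factory("message", name="GetQuoteRequest")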
| {
"content_hash": "a017ae94e8ecc0c85b23f2e34fc0e51d",
"timestamp": "",
"source": "github",
"line_count": 895,
"max_line_length": 112,
"avg_line_length": 35.924022346368716,
"alnum_prop": 0.6527743219706394,
"repo_name": "rohe/pysaml2-3",
"id": "99f22668049e6024b55211968208b990f8d121c4",
"size": "32350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/schema/wsdl.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5367558"
},
{
"name": "Shell",
"bytes": "6973"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.http import HttpResponseBadRequest, JsonResponse
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.generic import FormView, TemplateView
from shop.modifiers.pool import cart_modifiers_pool
from shopit.forms import shop as shop_forms
from shopit.models.cart import Cart, CartItem
from shopit.models.modifier import DiscountCode
from shopit.models.order import Order
class CartObjectMixin(object):
"""
Cart object mixin adds cart, cart items and watch items to the context.
    Resets the values in the `extra` dict that are populated by the checkout.
"""
def dispatch(self, request, *args, **kwargs):
self.cart = Cart.objects.get_or_create_from_request(request)
for key in ['payment_modifier', 'shipping_modifier', 'annotation']:
self.cart.extra.pop(key, None)
self.update_cart()
self.cart.save()
return super(CartObjectMixin, self).dispatch(request, *args, **kwargs)
def update_cart(self):
self.cart._dirty = True
self.cart._cached_cart_items = None
self.cart.update(self.request)
def get_cart_data(self):
return {
'cart': self.cart,
'cart_items': self.cart._cached_cart_items,
'watch_items': CartItem.objects.filter_watch_items(self.cart, self.request),
}
def get_context_data(self, **kwargs):
context = super(CartObjectMixin, self).get_context_data(**kwargs)
context.update(self.get_cart_data())
return context
class CartView(CartObjectMixin, FormView):
"""
    Cart view displays the cart and handles updating item quantities,
    emptying the cart and applying discount codes to the cart.
"""
empty = False
form_class = shop_forms.CartDiscountCodeForm
template_name = 'shopit/shop/cart.html'
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
if self.empty:
cart = Cart.objects.get_from_request(request)
for item in cart.items.all():
item.delete()
codes = []
for code in cart.get_discount_codes():
codes.append(code.code)
code.delete()
for dc in DiscountCode.objects.filter(code__in=codes):
dc.use(-1)
return super(CartView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
code = form.cleaned_data.get('code', None)
if code and self.request.POST.get('validate', None):
msg = _('Discount code is valid.')
else:
if code:
form.save()
msg = _('Discount code has been applied successfully.')
else:
msg = _('Cart has been updated successfully.')
messages.success(self.request, msg)
if self.request.is_ajax():
return JsonResponse({'success': msg})
return redirect('shopit-cart')
def form_invalid(self, form):
if self.request.is_ajax():
return JsonResponse(dict(form.errors), status=400)
return super(CartView, self).form_invalid(form)
def get_form_kwargs(self):
kwargs = super(CartView, self).get_form_kwargs()
kwargs['cart'] = self.cart
return kwargs
@method_decorator(csrf_protect)
def post(self, request, *args, **kwargs):
for item, quantity in [x for x in request.POST.items() if x[0].startswith('quantity') and x[1]]:
item, quantity = int(item.split('-').pop()), int(quantity)
item = CartItem.objects.get(pk=item)
if quantity > 0:
available, diff = item.product.is_available(quantity)
if available:
item.quantity = quantity
else:
item.quantity = quantity + diff
item.save()
else:
item.delete()
self.update_cart()
return super(CartView, self).post(request, *args, **kwargs)
class WatchView(CartObjectMixin, TemplateView):
template_name = 'shopit/shop/watch.html'
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
return super(WatchView, self).dispatch(request, *args, **kwargs)
class CheckoutView(CartObjectMixin, FormView):
"""
    Checkout view that handles the customer selection forms and redirects to
    the purchase URL of the selected payment provider.
"""
template_name = 'shopit/shop/checkout.html'
def forms_valid(self, **forms):
"""
        All the forms are valid; make the purchase happen. Returns JSON.
"""
forms['customer_form'].save()
self.cart.shipping_address = forms['shipping_form'].save()
self.cart.billing_address = forms['billing_form'].save()
self.cart.extra.update(forms['payment_form'].cleaned_data)
self.cart.extra.update(forms['delivery_form'].cleaned_data)
self.cart.extra.update(forms['extra_form'].cleaned_data)
self.update_cart()
self.cart.save()
for modifier in cart_modifiers_pool.get_payment_modifiers():
if modifier.is_active(self.cart):
payment_provider = getattr(modifier, 'payment_provider', None)
if payment_provider:
expression = payment_provider.get_payment_request(self.cart, self.request)
return JsonResponse({'expression': expression})
return HttpResponseBadRequest()
def forms_invalid(self, **forms):
self.cart.save()
errors = dict([('%s-%s' % (x.prefix, y[0]), y[1]) for x in forms.values() for y in x.errors.items()])
return JsonResponse(errors, status=400)
@method_decorator(never_cache)
def get(self, request, *args, **kwargs):
response = super(CheckoutView, self).get(request, *args, **kwargs)
return response if not self.cart.is_empty else redirect('shopit-cart')
def get_context_data(self, **kwargs):
context = {'view': self}
context.update(self.get_cart_data())
context.update(self.get_forms())
context.update(kwargs)
return context
def get_forms(self, **kwargs):
kwargs['request'] = self.request
kwargs['cart'] = self.cart
forms = {
'shipping_form': shop_forms.ShippingAddressForm(prefix='shipping', **kwargs),
'billing_form': shop_forms.BillingAddressForm(prefix='billing', **kwargs),
'payment_form': shop_forms.PaymentMethodForm(prefix='payment', **kwargs),
'delivery_form': shop_forms.DeliveryMethodForm(prefix='delivery', **kwargs),
'extra_form': shop_forms.ExtraAnnotationForm(prefix='extra', **kwargs),
'accept_form': shop_forms.AcceptConditionForm(prefix='accept', **kwargs),
}
if self.request.customer.is_registered():
forms['customer_form'] = shop_forms.CustomerForm(prefix='customer', **kwargs)
else:
forms['customer_form'] = shop_forms.GuestForm(prefix='guest', **kwargs)
return forms
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def post(self, request, *args, **kwargs):
"""
        Meant to be accessed via AJAX, since the `get_payment_request` method
        of the payment provider returns a JavaScript expression that needs
        to be evaluated client-side.
"""
if not request.is_ajax():
return HttpResponseBadRequest()
forms = self.get_forms(data=request.POST)
if all([x.is_valid() for x in forms.values()]):
return self.forms_valid(**forms)
return self.forms_invalid(**forms)
class ThanksView(TemplateView):
"""
    A generic thank-you view that adds the most recently updated order to the
    context and redirects to the cart if no order exists.
"""
template_name = 'shopit/shop/thanks.html'
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
self.order = Order.objects.filter_from_request(request).first()
if not self.order:
return redirect('shopit-cart')
return super(ThanksView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ThanksView, self).get_context_data(**kwargs)
context['order'] = self.order
return context
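# A minimal URL-wiring sketch (illustrative; the route names are inferred from
# the redirect('shopit-cart') calls above, and the real shopit urls.py may
# differ — `url` would come from django.conf.urls):
#
#     urlpatterns = [
#         url(r'^$', CartView.as_view(), name='shopit-cart'),
#         url(r'^empty/$', CartView.as_view(empty=True), name='shopit-cart-empty'),
#         url(r'^checkout/$', CheckoutView.as_view(), name='shopit-checkout'),
#         url(r'^thanks/$', ThanksView.as_view(), name='shopit-thanks'),
#     ]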
| {
"content_hash": "c9e089a76bfbef142aabb67f195119c9",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 109,
"avg_line_length": 39.63470319634703,
"alnum_prop": 0.6278801843317973,
"repo_name": "dinoperovic/djangoshop-shopit",
"id": "29b412bd11a20816b341f60bfc3f8ff467e89ba8",
"size": "8704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shopit/views/shop.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1186"
},
{
"name": "HTML",
"bytes": "36912"
},
{
"name": "JavaScript",
"bytes": "1704"
},
{
"name": "Makefile",
"bytes": "251"
},
{
"name": "Python",
"bytes": "369105"
},
{
"name": "Shell",
"bytes": "548"
}
],
"symlink_target": ""
} |
def extractWwwNekoseireiCom(item):
'''
Parser for 'www.nekoseirei.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Godly Alchemist', 'Godly Alchemist', 'translated'),
('Kein no Zenkou', 'Kein no Zenkou', 'translated'),
('God of Music', 'God of Music', 'translated'),
('king of skill', 'King of Skill', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | {
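# Hedged example of the expected input shape (a made-up feed item; the parser
# only inspects 'title' and 'tags'):
#
#     item = {'title': 'God of Music - Chapter 12', 'tags': ['God of Music']}
#     extractWwwNekoseireiCom(item)
#     # -> a release message for 'God of Music', chapter 12, tl_type 'translated'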
"content_hash": "41b9839d7f0179a8616c909ab12b0bb3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 104,
"avg_line_length": 36.541666666666664,
"alnum_prop": 0.5438996579247435,
"repo_name": "fake-name/ReadableWebProxy",
"id": "9d329311b79237b2793ad2da708d36d4c62e3adb",
"size": "877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractWwwNekoseireiCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_pod_security_policy_list import V1beta1PodSecurityPolicyList
class TestV1beta1PodSecurityPolicyList(unittest.TestCase):
""" V1beta1PodSecurityPolicyList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1PodSecurityPolicyList(self):
"""
Test V1beta1PodSecurityPolicyList
"""
model = kubernetes.client.models.v1beta1_pod_security_policy_list.V1beta1PodSecurityPolicyList()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "df31f1321457b27fd166744634ecffae",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 105,
"avg_line_length": 23.75,
"alnum_prop": 0.7221052631578947,
"repo_name": "skuda/client-python",
"id": "0d2050ef7a6a906fe1790b72e14755fedf8ff26f",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/test/test_v1beta1_pod_security_policy_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
} |
DOCUMENTATION = '''
---
module: bigip_provision
short_description: Manage BIG-IP module provisioning
description:
- Manage BIG-IP module provisioning. This module will only provision at the
standard levels of Dedicated, Nominal, and Minimum. While iControl SOAP
additionally supports a Custom level, this level is not supported by this
module.
version_added: "2.2"
options:
server:
description:
- BIG-IP host
required: true
module:
description:
- The module to provision in BIG-IP
required: true
choices:
- afm
- am
- sam
- asm
- avr
- fps
- gtm
- lc
- ltm
- pem
- swg
password:
description:
- BIG-IP password
required: true
default: admin
level:
description:
- Sets the provisioning level for the requested modules. Changing the
level for one module may require modifying the level of another module.
For example, changing one module to C(dedicated) requires setting all
others to C(none). Setting the level of a module to C(none) means that
the module is not run.
required: false
default: nominal
choices:
- dedicated
- nominal
- minimum
user:
description:
- BIG-IP username
required: false
default: admin
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
used on personally controlled sites using self-signed certificates.
required: false
default: yes
choices:
- yes
- no
state:
description:
- The state of the provisioned module on the system. When C(present),
guarantees that the specified module is provisioned at the requested
level provided that there are sufficient resources on the device (such
as physical RAM) to support the provisioned module. When C(absent),
unprovisions the module.
required: false
default: present
choices:
- present
- absent
notes:
- Requires the bigsuds Python package on the host if using the iControl
interface. This is as easy as pip install bigsuds
requirements:
- bigsuds
- requests
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Provision PEM at "nominal" level
bigip_provision:
server: "big-ip"
module: "pem"
level: "nominal"
delegate_to: localhost
- name: Provision a dedicated SWG. This will unprovision every other module
bigip_provision:
server: "big-ip"
module: "swg"
level: "dedicated"
delegate_to: localhost
'''
import json
import socket
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
try:
import requests
except ImportError:
requests_found = False
else:
requests_found = True
class BigIpCommon(object):
def __init__(self, module):
self._username = module.params.get('user')
self._password = module.params.get('password')
self._hostname = module.params.get('server')
self._level = module.params.get('level')
self._module = module.params.get('module')
self._validate_certs = module.params.get('validate_certs')
class BigIpIControl(BigIpCommon):
def __init__(self, module):
super(BigIpIControl, self).__init__(module)
self._client = bigsuds.BIGIP(
hostname=self._hostname,
username=self._username,
password=self._password,
debug=True
)
        # TODO: make the REST interface to this work
"""
module_map = {
'afm': 'TMOS_MODULE_AFM',
'am': 'TMOS_MODULE_AM',
'sam': 'TMOS_MODULE_SAM',
'asm': 'TMOS_MODULE_ASM',
'avr': 'TMOS_MODULE_AVR',
'fps': 'TMOS_MODULE_FPS',
'gtm': 'TMOS_MODULE_GTM',
'lc': 'TMOS_MODULE_LC',
'ltm': 'TMOS_MODULE_LTM',
'pem': 'TMOS_MODULE_PEM',
'swg': 'TMOS_MODULE_SWG'
}
level_map = {
'PROVISION_LEVEL_NONE',
'PROVISION_LEVEL_NOMINAL',
'PROVISION_LEVEL_MINIMAL',
'PROVISION_LEVEL_DEDICATED'
}
"""
def exists(self):
try:
self._client.Management.Provision.get_level(
modules=[self._module]
)
except bigsuds.ServerError:
            return False
        return True
def read(self):
try:
response = self._client.Management.DBVariable.query(
variables=[self._key]
)
except bigsuds.ServerError:
return {}
return response[0]
def present(self):
changed = False
current = self.read()
if current and current['name'].lower() == self._key:
if current['value'] != self._value:
try:
params = dict(
name=self._key,
value=self._value
)
self._client.Management.DBVariable.modify(
variables=[params]
)
changed = True
except Exception:
raise Exception('Failed to set the provided key')
return changed
class BigIpRest(BigIpCommon):
def __init__(self, module):
super(BigIpRest, self).__init__(module)
self._uri = 'https://%s/mgmt/tm/sys/db/%s' % (self._hostname, self._key)
self._headers = {
'Content-Type': 'application/json'
}
def read(self):
resp = requests.get(self._uri,
auth=(self._username, self._password),
verify=self._validate_certs)
if resp.status_code != 200:
return {}
else:
return resp.json()
def present(self):
changed = False
current = self.read()
if current and current['name'] == self._key:
if current['value'] != self._value:
resp = requests.put(self._uri,
auth=(self._username, self._password),
data=json.dumps(self._payload),
verify=self._validate_certs)
if resp.status_code == 200:
changed = True
else:
raise Exception('Failed to set the provided key')
return changed
def main():
changed = False
module_choices = [
'afm', 'am', 'sam', 'asm', 'avr', 'fps',
'gtm', 'lc', 'ltm', 'pem', 'swg'
]
module = AnsibleModule(
argument_spec=dict(
connection=dict(default='rest', choices=['icontrol', 'rest']),
server=dict(required=True),
module=dict(required=True, choices=module_choices),
level=dict(default='nominal', choices=['nominal', 'dedicated', 'minimal']),
password=dict(default='admin'),
            state=dict(default='present', choices=['present', 'absent']),
user=dict(default='admin'),
validate_certs=dict(default='yes', type='bool', choices=['yes', 'no']),
)
)
connection = module.params.get('connection')
hostname = module.params.get('server')
password = module.params.get('password')
username = module.params.get('user')
state = module.params.get('state')
try:
if connection == 'icontrol':
if not bigsuds_found:
raise Exception("The python bigsuds module is required")
icontrol = test_icontrol(username, password, hostname)
if icontrol:
obj = BigIpIControl(module)
elif connection == 'rest':
if not requests_found:
raise Exception("The python requests module is required")
obj = BigIpRest(module)
if state == "present":
if obj.present():
changed = True
elif state == "absent":
if obj.absent():
changed = True
except bigsuds.ConnectionError:
module.fail_json(msg="Could not connect to BIG-IP host %s" % hostname)
except socket.timeout:
module.fail_json(msg="Timed out connecting to the BIG-IP")
except bigsuds.ConnectionError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=changed)
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
| {
"content_hash": "78fb008b1780951a8a43087741e080b7",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 87,
"avg_line_length": 28.64666666666667,
"alnum_prop": 0.5609727717011869,
"repo_name": "buzzsurfr/f5-ansible",
"id": "cd644fa870104dfb92647ee0cc702830b4d9f2f1",
"size": "9291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/bigip_provision.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2693"
},
{
"name": "Python",
"bytes": "518917"
},
{
"name": "Shell",
"bytes": "1371"
},
{
"name": "Tcl",
"bytes": "2122"
}
],
"symlink_target": ""
} |
import os
from catapult_base import binary_manager
from dependency_manager import base_config
from dependency_manager import exceptions as dependency_manager_exceptions
from devil import devil_env
from telemetry.core import exceptions
from telemetry.core import util
TELEMETRY_PROJECT_CONFIG = os.path.join(
util.GetTelemetryDir(), 'telemetry', 'internal', 'binary_dependencies.json')
CHROME_BINARY_CONFIG = os.path.join(util.GetCatapultDir(), 'catapult_base',
'catapult_base', 'chrome_binaries.json')
NoPathFoundError = dependency_manager_exceptions.NoPathFoundError
CloudStorageError = dependency_manager_exceptions.CloudStorageError
_binary_manager = None
def NeedsInit():
return not _binary_manager
def InitDependencyManager(environment_config):
global _binary_manager
if _binary_manager:
raise exceptions.InitializationError(
'Trying to re-initialize the binary manager with config %s'
% environment_config)
configs = [base_config.BaseConfig(TELEMETRY_PROJECT_CONFIG),
base_config.BaseConfig(CHROME_BINARY_CONFIG)]
if environment_config:
configs.insert(0, base_config.BaseConfig(environment_config))
_binary_manager = binary_manager.BinaryManager(configs)
devil_env.config.Initialize()
def FetchPath(binary_name, arch, os_name, os_version=None):
""" Return a path to the appropriate executable for <binary_name>, downloading
from cloud storage if needed, or None if it cannot be found.
"""
if _binary_manager is None:
raise exceptions.InitializationError(
'Called FetchPath with uninitialized binary manager.')
return _binary_manager.FetchPath(binary_name, arch, os_name, os_version)
def LocalPath(binary_name, arch, os_name, os_version=None):
""" Return a local path to the given binary name, or None if an executable
cannot be found. Will not download the executable.
"""
if _binary_manager is None:
raise exceptions.InitializationError(
'Called LocalPath with uninitialized binary manager.')
return _binary_manager.LocalPath(binary_name, arch, os_name, os_version)
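# Typical call sequence (a sketch; 'crash_service' is an illustrative binary
# name and not necessarily present in the config files referenced above):
#
#     binary_manager.InitDependencyManager(None)
#     path = binary_manager.FetchPath('crash_service', 'x86_64', 'win')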
| {
"content_hash": "e36735bc8b2c2610ce6d7f012ea668d3",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 80,
"avg_line_length": 33.95238095238095,
"alnum_prop": 0.7419354838709677,
"repo_name": "SummerLW/Perf-Insight-Report",
"id": "6af0c97c948266a9580bcba2cc12b8de28e4eb82",
"size": "2302",
"binary": false,
"copies": "1",
"ref": "refs/heads/test",
"path": "telemetry/telemetry/internal/util/binary_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6411"
},
{
"name": "CSS",
"bytes": "14952"
},
{
"name": "HTML",
"bytes": "27508823"
},
{
"name": "JavaScript",
"bytes": "75587"
},
{
"name": "Python",
"bytes": "4638631"
},
{
"name": "Shell",
"bytes": "2124"
}
],
"symlink_target": ""
} |
from dallinger import nodes, information, models
class TestEnvironments(object):
def test_create_environment(self, db_session):
"""Create an environment"""
net = models.Network()
db_session.add(net)
environment = nodes.Environment(network=net)
information.State(origin=environment, contents="foo")
db_session.commit()
assert isinstance(environment.id, int)
assert environment.type == "environment"
assert environment.creation_time
assert environment.state().contents == "foo"
def test_create_environment_get_observed(self, db_session):
net = models.Network()
db_session.add(net)
environment = nodes.Environment(network=net)
information.State(origin=environment, contents="foo")
agent = nodes.ReplicatorAgent(network=net)
environment.connect(direction="to", whom=agent)
environment.transmit(to_whom=agent)
agent.receive()
assert agent.infos()[0].contents == "foo"
def test_environment_update(self, db_session):
net = models.Network()
db_session.add(net)
environment = nodes.Environment(network=net)
environment.update("some content")
db_session.commit()
state = environment.state()
assert state.contents == u'some content'
| {
"content_hash": "cdd1d6decc536f291f4751a683119984",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 63,
"avg_line_length": 32.214285714285715,
"alnum_prop": 0.6504065040650406,
"repo_name": "jcpeterson/Dallinger",
"id": "61d790428fcebd2c1da8c56480371963ed672bfc",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_environments.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "366"
},
{
"name": "HTML",
"bytes": "32554"
},
{
"name": "JavaScript",
"bytes": "36269"
},
{
"name": "Python",
"bytes": "715512"
},
{
"name": "Ruby",
"bytes": "1769"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
} |
import math
# external vertices selection
def extPoints(pointsLayer):
# read points from layer
points = [x.geometry().asPoint() for x in pointsLayer.getFeatures()]
angles = {}
externalPoints = []
# start at the most bottom point
currentVertex = startVertex = min(points, key=lambda x: x[1])
externalPoints.append(currentVertex)
    # calculate angles as azimuth differences
for x in points:
if x == currentVertex: continue
currAz = math.atan2(x[1]-currentVertex[1],x[0]-currentVertex[0])
if currAz < 0: currAz += 2 * math.pi
angles[x] = currAz
# choose next vertex as the one with lowest angle
previousVertex = currentVertex
currentVertex = min(angles.items(), key=lambda x: x[1])[0]
# add recent vertex to the result
externalPoints.append(currentVertex)
# analyze remaining vertices
while(True):
angles.clear()
points.remove(currentVertex)
prevAz = math.atan2(previousVertex[1]-currentVertex[1],previousVertex[0]-currentVertex[0])
if prevAz < 0: prevAz += 2 * math.pi
# same as above
for x in points:
if x == previousVertex: continue
currAz = math.atan2(x[1]-currentVertex[1],x[0]-currentVertex[0])
if currAz < 0: currAz += 2 * math.pi
angles[x] = currAz - prevAz
if angles[x] < 0: angles[x] += 2 * math.pi
previousVertex = currentVertex
currentVertex = min(angles.items(), key=lambda x: x[1])[0]
# break the loop when it reaches starting vertex
if (currentVertex == startVertex):
break
externalPoints.append(currentVertex)
return externalPoints
# layer validation
def validLayer(pointsLayer):
if not isinstance(pointsLayer, QgsVectorLayer):
print('Not a vector layer')
return False
if (pointsLayer.geometryType() != QGis.Point):
print('Not a point layer')
return False
if (pointsLayer.featureCount() < 3):
print('At least 3 points required')
return False
return True
# main function
def pointsToPoly(pointsLayer):
if not validLayer(pointsLayer):
return
externalPoints = extPoints(pointsLayer)
# create new vector layer
polygonLayer = QgsVectorLayer('Polygon', 'extPoly', 'memory')
polygonLayer.setCrs(pointsLayer.crs())
QgsMapLayerRegistry.instance().addMapLayer(polygonLayer)
# add polygon feature to the layer
polygonLayer.startEditing()
polyFeature = QgsFeature()
polyFeature.setGeometry(QgsGeometry.fromPolygon([externalPoints]))
polygonLayer.dataProvider().addFeatures([polyFeature])
# confirm modification
polygonLayer.commitChanges()
polygonLayer.updateExtents()
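# Usage from the QGIS Python console (sketch; assumes a point layer with at
# least three features is the active layer, and `iface` is the console global):
#
#     pointsToPoly(iface.activeLayer())   # adds a memory layer named 'extPoly'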
| {
"content_hash": "b910cc066b6ff0ec8ddd0cdb404de8d9",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 98,
"avg_line_length": 30.835164835164836,
"alnum_prop": 0.6514611546685674,
"repo_name": "marekstrzelecki/pyqgis-recipes",
"id": "ec43e63d5309d99e336dd762721a78f671405c12",
"size": "2847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/points_to_polygon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16535"
}
],
"symlink_target": ""
} |
"""Tests for the noisy 2D Rosenbrock loss function."""
import os
import sys
import unittest
import tensorflow as tf
import numpy as np
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from deepobs.tensorflow import testproblems
class Two_d_RosenbrockTest(unittest.TestCase):
"""Tests for the noisy 2D Rosenbrock loss function."""
def setUp(self):
"""Sets up the 2D dataset for the tests."""
self.batch_size = 100
self.two_d_rosenbrock = testproblems.two_d_rosenbrock(self.batch_size)
def test_init_ops(self):
"""Tests all three initialization operations."""
tf.reset_default_graph()
tf.set_random_seed(42)
self.two_d_rosenbrock.set_up()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_param = [
np.prod(v.get_shape().as_list())
for v in tf.trainable_variables()
]
# Check if number of parameters per "layer" is equal to what we expect
# We will write them in the following form:
# - Conv layer: [input_filter*output_filter*kernel[0]*kernel[1]]
# - Batch norm: [input, input] (for beta and gamma)
# - Fully connected: [input*output]
# - Bias: [dim]
self.assertEqual(num_param, [
1, 1
])
for init_op in [
self.two_d_rosenbrock.train_init_op,
self.two_d_rosenbrock.test_init_op,
self.two_d_rosenbrock.train_eval_init_op
]:
sess.run(init_op)
losses_, regularizer_ = sess.run([
self.two_d_rosenbrock.losses, self.two_d_rosenbrock.regularizer
])
self.assertEqual(losses_.shape, (self.batch_size, ))
self.assertIsInstance(regularizer_, np.float32)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "e629b7c028b6e4b2e5f56730ed3fb41d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 96,
"avg_line_length": 36.142857142857146,
"alnum_prop": 0.566699604743083,
"repo_name": "fsschneider/DeepOBS",
"id": "5a5ee5483a1446cc79d601ab54d206a150faa01f",
"size": "2048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testproblems/test_two_d_rosenbrock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "368026"
},
{
"name": "Shell",
"bytes": "8516"
}
],
"symlink_target": ""
} |
from openerp import tools
import openerp.addons.decimal_precision as dp
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
_name = "account.invoice.report"
_description = "Invoices Statistics"
_auto = False
_rec_name = 'date'
def _compute_amounts_in_user_currency(self, cr, uid, ids, field_names, args, context=None):
"""Compute the amounts in the currency of the user
"""
if context is None:
context={}
currency_obj = self.pool.get('res.currency')
currency_rate_obj = self.pool.get('res.currency.rate')
user_currency_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
currency_rate_id = currency_rate_obj.search(cr, uid, [('rate', '=', 1)], limit=1, context=context)[0]
base_currency_id = currency_rate_obj.browse(cr, uid, currency_rate_id, context=context).currency_id.id
res = {}
ctx = context.copy()
for item in self.browse(cr, uid, ids, context=context):
ctx['date'] = item.date
price_total = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_total, context=ctx)
price_average = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_average, context=ctx)
residual = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.residual, context=ctx)
res[item.id] = {
'user_currency_price_total': price_total,
'user_currency_price_average': price_average,
'user_currency_residual': residual,
}
return res
_columns = {
'date': fields.date('Date', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_qty':fields.float('Product Quantity', readonly=True),
'uom_name': fields.char('Reference Unit of Measure', size=128, readonly=True),
'payment_term': fields.many2one('account.payment.term', 'Payment Term', readonly=True),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], readonly=True),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'commercial_partner_id': fields.many2one('res.partner', 'Partner Company', help="Commercial Entity"),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Without Tax', readonly=True),
'user_currency_price_total': fields.function(_compute_amounts_in_user_currency, string="Total Without Tax", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'user_currency_price_average': fields.function(_compute_amounts_in_user_currency, string="Average Price", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'currency_rate': fields.float('Currency Rate', readonly=True),
'nbr': fields.integer('# of Invoices', readonly=True), # TDE FIXME master: rename into nbr_lines
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Done'),
('cancel','Cancelled')
], 'Invoice Status', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'account_id': fields.many2one('account.account', 'Account',readonly=True),
'account_line_id': fields.many2one('account.account', 'Account Line',readonly=True),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',readonly=True),
'residual': fields.float('Total Residual', readonly=True),
'user_currency_residual': fields.function(_compute_amounts_in_user_currency, string="Total Residual", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'country_id': fields.many2one('res.country', 'Country of the Partner Company'),
}
_order = 'date desc'
_depends = {
'account.invoice': [
'account_id', 'amount_total', 'commercial_partner_id', 'company_id',
'currency_id', 'date_due', 'date_invoice', 'fiscal_position',
'journal_id', 'partner_bank_id', 'partner_id', 'payment_term',
'period_id', 'residual', 'state', 'type', 'user_id',
],
'account.invoice.line': [
'account_id', 'invoice_id', 'price_subtotal', 'product_id',
'quantity', 'uos_id',
],
'product.product': ['product_tmpl_id'],
'product.template': ['categ_id'],
'product.uom': ['category_id', 'factor', 'name', 'uom_type'],
'res.currency.rate': ['currency_id', 'name'],
'res.partner': ['country_id'],
}
def _select(self):
select_str = """
SELECT sub.id, sub.date, sub.product_id, sub.partner_id, sub.country_id,
sub.payment_term, sub.period_id, sub.uom_name, sub.currency_id, sub.journal_id,
sub.fiscal_position, sub.user_id, sub.company_id, sub.nbr, sub.type, sub.state,
sub.categ_id, sub.date_due, sub.account_id, sub.account_line_id, sub.partner_bank_id,
sub.product_qty, sub.price_total / cr.rate as price_total, sub.price_average /cr.rate as price_average,
cr.rate as currency_rate, sub.residual / cr.rate as residual, sub.commercial_partner_id as commercial_partner_id
"""
return select_str
def _sub_select(self):
select_str = """
SELECT min(ail.id) AS id,
ai.date_invoice AS date,
ail.product_id, ai.partner_id, ai.payment_term, ai.period_id,
CASE
WHEN u.uom_type::text <> 'reference'::text
THEN ( SELECT product_uom.name
FROM product_uom
WHERE product_uom.uom_type::text = 'reference'::text
AND product_uom.active
AND product_uom.category_id = u.category_id LIMIT 1)
ELSE u.name
END AS uom_name,
ai.currency_id, ai.journal_id, ai.fiscal_position, ai.user_id, ai.company_id,
count(ail.*) AS nbr,
ai.type, ai.state, pt.categ_id, ai.date_due, ai.account_id, ail.account_id AS account_line_id,
ai.partner_bank_id,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN (- ail.quantity) / u.factor
ELSE ail.quantity / u.factor
END) AS product_qty,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ail.price_subtotal
ELSE ail.price_subtotal
END) AS price_total,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM(- ail.price_subtotal)
ELSE SUM(ail.price_subtotal)
END / CASE
WHEN SUM(ail.quantity / u.factor) <> 0::numeric
THEN CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM((- ail.quantity) / u.factor)
ELSE SUM(ail.quantity / u.factor)
END
ELSE 1::numeric
END AS price_average,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ai.residual
ELSE ai.residual
END / CASE
WHEN (( SELECT count(l.id) AS count
FROM account_invoice_line l
LEFT JOIN account_invoice a ON a.id = l.invoice_id
WHERE a.id = ai.id)) <> 0
THEN ( SELECT count(l.id) AS count
FROM account_invoice_line l
LEFT JOIN account_invoice a ON a.id = l.invoice_id
WHERE a.id = ai.id)
ELSE 1::bigint
END::numeric AS residual,
ai.commercial_partner_id as commercial_partner_id,
partner.country_id
"""
return select_str
def _from(self):
from_str = """
FROM account_invoice_line ail
JOIN account_invoice ai ON ai.id = ail.invoice_id
JOIN res_partner partner ON ai.commercial_partner_id = partner.id
LEFT JOIN product_product pr ON pr.id = ail.product_id
left JOIN product_template pt ON pt.id = pr.product_tmpl_id
LEFT JOIN product_uom u ON u.id = ail.uos_id
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY ail.product_id, ai.date_invoice, ai.id,
ai.partner_id, ai.payment_term, ai.period_id, u.name, ai.currency_id, ai.journal_id,
ai.fiscal_position, ai.user_id, ai.company_id, ai.type, ai.state, pt.categ_id,
ai.date_due, ai.account_id, ail.account_id, ai.partner_bank_id, ai.residual,
ai.amount_total, u.uom_type, u.category_id, ai.commercial_partner_id, partner.country_id
"""
return group_by_str
def init(self, cr):
# self._table = account_invoice_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
FROM (
%s %s %s
) AS sub
JOIN res_currency_rate cr ON (cr.currency_id = sub.currency_id)
WHERE
cr.id IN (SELECT id
FROM res_currency_rate cr2
WHERE (cr2.currency_id = sub.currency_id)
AND ((sub.date IS NOT NULL AND cr2.name <= sub.date)
OR (sub.date IS NULL AND cr2.name <= NOW()))
ORDER BY name DESC LIMIT 1)
)""" % (
self._table,
self._select(), self._sub_select(), self._from(), self._group_by()))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| {
"content_hash": "744cb7e779f0e2033861c4b069558ad1",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 200,
"avg_line_length": 56.39150943396226,
"alnum_prop": 0.5424508573818486,
"repo_name": "diogocs1/comps",
"id": "0639bfca2039b3c29f1535eaffd562bd2c08d088",
"size": "12934",
"binary": false,
"copies": "60",
"ref": "refs/heads/master",
"path": "web/addons/account/report/account_invoice_report.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
} |
import collections
# Settings
Settings = collections.namedtuple('Settings', 'opts version rt name_to_address address_to_name binary capstone text_offset data_offset rodata_offset')
Runtime = collections.namedtuple('Runtime', 'halfword word stack_register heap_register main_register arg_registers')
WordDesc = collections.namedtuple('WordDesc', 'size lg_size struct')
# Machine Values
Tagged = collections.namedtuple('Tagged', 'untagged tag')
UnknownValue = collections.namedtuple('UnknownValue', '')
# Pointers
Offset = collections.namedtuple('Offset', 'base index')
StaticValue = collections.namedtuple('StaticValue', 'value')
Argument = collections.namedtuple('Argument', 'func index')
CaseArgument = collections.namedtuple('CaseArgument', 'inspection matched_tag index')
HeapPointer = collections.namedtuple('HeapPointer', 'id owner')
StackPointer = collections.namedtuple('StackPointer', '')
CasePointer = collections.namedtuple('CasePointer', 'inspection matched_tag')
# Interpretations
Pointer = collections.namedtuple('Pointer', 'pointer')
Apply = collections.namedtuple('Apply', 'func func_type args pattern')
Case = collections.namedtuple('Case', 'scrutinee bound_ptr arms tags')
Lambda = collections.namedtuple('Lambda', 'func arg_pattern body')
UnknownInterpretation = collections.namedtuple('UnknownInterpretation', '')
# Tags
NamedTag = collections.namedtuple('NamedTag', 'name value')
NumericTag = collections.namedtuple('NumericTag', 'value')
DefaultTag = collections.namedtuple('DefaultTag', '')
# Types
UnknownType = collections.namedtuple('UnknownType', '')
StateType = collections.namedtuple('StateType', '')
FunctionType = collections.namedtuple('FunctionType', 'arg result')
EnumType = collections.namedtuple('EnumType', 'constructor_names complete')
# Work
ClosureWork = collections.namedtuple('ClosureWork', 'heaps pointer')
FunctionThunkWork = collections.namedtuple('FunctionThunkWork', 'heaps address main_register arg_pattern')
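# Rough illustration of how these records nest (field values are invented for
# the example and do not come from a real binary):
#
#     tag = NamedTag(name='Just', value=1)
#     ptr = HeapPointer(id=0, owner=None)
#     val = Tagged(untagged=ptr, tag=tag)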
| {
"content_hash": "2ecfe34a3fa63871fa89c99282dd27e0",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 150,
"avg_line_length": 45.69767441860465,
"alnum_prop": 0.7816793893129771,
"repo_name": "gereeter/hsdecomp",
"id": "cab69f1cf1e7e8ce06d0a258cb80378f6df993c3",
"size": "1965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hsdecomp/types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Haskell",
"bytes": "653"
},
{
"name": "Python",
"bytes": "52408"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import know_me.models
import permission_utils.model_mixins
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="KMUser",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_at",
models.DateTimeField(
auto_now_add=True,
help_text="The time that the Know Me user was created.",
verbose_name="created at",
),
),
(
"image",
models.ImageField(
blank=True,
help_text="The image to use as the user's hero image.",
max_length=255,
null=True,
upload_to=know_me.models.get_km_user_image_upload_path,
verbose_name="image",
),
),
(
"quote",
models.TextField(
blank=True,
help_text="A quote to introduce the user.",
null=True,
verbose_name="quote",
),
),
(
"updated_at",
models.DateTimeField(
auto_now=True,
help_text="The time that the Know Me user was last updated.",
verbose_name="updated at",
),
),
(
"user",
models.OneToOneField(
help_text="The user who owns the Know Me app account.",
on_delete=django.db.models.deletion.CASCADE,
related_name="km_user",
to=settings.AUTH_USER_MODEL,
verbose_name="user",
),
),
],
options={
"verbose_name": "Know Me user",
"verbose_name_plural": "Know Me users",
},
bases=(
permission_utils.model_mixins.IsAuthenticatedMixin,
models.Model,
),
)
]
| {
"content_hash": "6f6b622b9cc3fb76447206561e71e890",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 85,
"avg_line_length": 33.54117647058823,
"alnum_prop": 0.3910908453174325,
"repo_name": "knowmetools/km-api",
"id": "80e0c90c99edfcc642bc33c78c6e36f7e85b0ede",
"size": "2925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "km_api/know_me/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "746"
},
{
"name": "HTML",
"bytes": "7830"
},
{
"name": "JavaScript",
"bytes": "7009"
},
{
"name": "Python",
"bytes": "635776"
},
{
"name": "SCSS",
"bytes": "4662"
},
{
"name": "Shell",
"bytes": "1671"
}
],
"symlink_target": ""
} |
import sys
import os.path
from libsbml import *
def main (args):
  # Creates an SBMLNamespaces object with the given SBML level, version,
  # package name and package version.
sbmlns = SBMLNamespaces(3, 1, "qual", 1);
# Creates an SBMLDocument object
document = SBMLDocument(sbmlns);
# mark qual as required
document.setPackageRequired("qual", True);
# create the Model
model = document.createModel();
# create the Compartment
compartment = model.createCompartment();
compartment.setId("c");
compartment.setConstant(True);
# Get a QualModelPlugin object plugged in the model object.
mplugin = model.getPlugin("qual");
# create the QualitativeSpecies
qs = mplugin.createQualitativeSpecies();
qs.setId("s1");
qs.setCompartment("c");
qs.setConstant(False);
qs.setInitialLevel(1);
qs.setMaxLevel(4);
qs.setName("sss");
# create the Transition
t = mplugin.createTransition();
t.setId("d");
t.setSBOTerm(1);
i = t.createInput();
i.setId("RD");
i.setQualitativeSpecies("s1");
i.setTransitionEffect("none");
i.setSign("negative");
i.setThresholdLevel(2);
i.setName("aa");
o = t.createOutput();
o.setId("wd");
o.setQualitativeSpecies("s1");
o.setTransitionEffect("production");
o.setOutputLevel(2);
o.setName("aa");
ft = t.createFunctionTerm();
math = parseL3Formula("geq(s1, 2)");
ft.setResultLevel(1);
ft.setMath(math);
dt = t.createDefaultTerm();
dt.setResultLevel(2);
writeSBML(document, "qual_example1.xml");
if __name__ == '__main__':
main(sys.argv)
| {
"content_hash": "1e01be580c90cc8df2bca4bc934a6f68",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 73,
"avg_line_length": 24.055555555555557,
"alnum_prop": 0.6125866050808314,
"repo_name": "TheCoSMoCompany/biopredyn",
"id": "4dd710205f30939e3c87b7a9ff5f014265d19891",
"size": "3899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Prototype/src/libsbml-5.10.0/examples/python/qual/qual_example1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3535918"
},
{
"name": "C++",
"bytes": "26120778"
},
{
"name": "CMake",
"bytes": "455400"
},
{
"name": "CSS",
"bytes": "49020"
},
{
"name": "Gnuplot",
"bytes": "206"
},
{
"name": "HTML",
"bytes": "193068"
},
{
"name": "Java",
"bytes": "66517"
},
{
"name": "JavaScript",
"bytes": "3847"
},
{
"name": "Makefile",
"bytes": "30905"
},
{
"name": "Perl",
"bytes": "3018"
},
{
"name": "Python",
"bytes": "7891301"
},
{
"name": "Shell",
"bytes": "247654"
},
{
"name": "TeX",
"bytes": "22566"
},
{
"name": "XSLT",
"bytes": "55564"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages
from setuptools import setup
# lfs imports
from lfs_theme import __version__
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.txt')).read()
setup(name='lfs-theme',
version=__version__,
description='The default theme for LFS',
long_description=README,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
keywords='django e-commerce online-shop',
author='Kai Diefenbach',
author_email='kai.diefenbach@iqpp.de',
url='http://www.getlfs.com',
license='BSD',
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
],
)
| {
"content_hash": "a02cbf8bed081cb9d4d7bc7705713618",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 54,
"avg_line_length": 28.96969696969697,
"alnum_prop": 0.6182008368200836,
"repo_name": "pigletto/lfs-theme",
"id": "9ddf54436f50f1a30e8ce165e4a631a160b59eef",
"size": "973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "68596"
},
{
"name": "HTML",
"bytes": "171525"
},
{
"name": "Python",
"bytes": "3695"
}
],
"symlink_target": ""
} |
'''OpenStack Keystone BVOX extension.'''
__version__ = '0.0.4'
| {
"content_hash": "ed1584d1edce87fe70e427fd658d72b4",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 40,
"avg_line_length": 31.5,
"alnum_prop": 0.6349206349206349,
"repo_name": "bvox/keystone-bvox-extension",
"id": "def516603d59595ac29db6fb404ac8d4c378ef88",
"size": "107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bvoxextension/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4476"
}
],
"symlink_target": ""
} |
"""Utility functions with internet connections"""
import socket
import urllib.error
from ftplib import FTP, all_errors, error_temp
from time import sleep
from typing import Tuple
from urllib.request import urlopen
import requests
from tqdm.auto import tqdm
from genomepy.exceptions import GenomeDownloadError
def download_file(url, filename) -> str:
"""
Helper method handling downloading large files from `url` to `filename`.
Parameters
----------
url : str
download target url
filename : str
file to download to
Returns
-------
str
filename
"""
def decorated_pbar(total):
"""Displays a progress bar with download speeds in MB/s."""
return tqdm(
desc="Download",
unit_scale=True,
unit_divisor=1024,
total=total,
unit="B",
)
def write_n_update_pbar(data):
pbar.update(len(data))
f.write(data)
if url.startswith("ftp"):
ftp, target = connect_ftp_link(url)
file_size = ftp.size(target)
with open(filename, "wb") as f:
pbar = decorated_pbar(file_size)
ftp.retrbinary(f"RETR {target}", write_n_update_pbar)
ftp.quit() # logout
else:
r = requests.get(url, stream=True)
file_size = int(r.headers.get("Content-Length", 0))
with open(filename, "wb") as f:
pbar = decorated_pbar(file_size)
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
write_n_update_pbar(chunk)
pbar.close() # stop updating pbar
return filename
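# Illustrative sketch only (not part of the genomepy API): shows how
# download_file is typically called. The URL and output path below are
# hypothetical placeholders, and this helper is never invoked by the module.
def _example_download_usage():
    url = "https://example.org/genomes/sample.fa.gz"  # hypothetical URL
    local_path = download_file(url, "/tmp/sample.fa.gz")
    print(f"saved to {local_path}")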
def connect_ftp_link(link, timeout=None) -> Tuple[FTP, str]:
"""
Anonymous login to ftp.
Accepts link in the form of ftp://ftp.name.domain/...
and ftp.name.domain/...
Parameters
----------
link : str
FTP link
timeout : int, optional
number of idle seconds before the connection closes
Returns
-------
tuple
ftp: FTP
object with connection established
target: str
target file
"""
link = link.replace("ftp://", "")
host, target = link.split("/", 1)
try:
ftp = FTP(host, timeout=timeout)
except socket.gaierror:
raise GenomeDownloadError(f"FTP host not found: {host}")
ftp.login()
return ftp, target
def read_url(url):
"""Read a text-based URL."""
response = urlopen(url)
data = response.read()
text = data.decode("utf-8")
return text
def retry(func, tries, *args, **kwargs):
"""
Retry functions with potential connection errors.
args and kwargs are passed to func.
"""
for n in range(tries):
try:
answer = func(*args, **kwargs)
return answer
except (urllib.error.HTTPError, error_temp):
# HTTPError 404: URL not found
# FTP error_temp 450: file not found
return
except all_errors + (urllib.error.URLError, socket.timeout):
# connection errors: wait and try again
if n < tries - 1:
sleep(1)
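# Illustrative sketch only: retry() forwards *args/**kwargs to the wrapped
# callable, so a flaky remote read can be expressed as below. The URL is a
# hypothetical placeholder; nothing in this module calls this helper.
def _example_retry_usage():
    # up to 3 attempts; returns None on a 404/450 or if every attempt fails
    return retry(read_url, 3, "https://example.org/README.txt")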
def check_url(url, max_tries=1, timeout=15) -> bool:
"""Check if URL works. Returns bool"""
def _check_url(_url, _timeout):
if _url.startswith("ftp"):
ftp, target = connect_ftp_link(_url, timeout=_timeout)
listing = retry(ftp.nlst, 1, target)
ftp.quit() # logout
if listing:
return True
else:
ret = urlopen(_url, timeout=_timeout)
if ret.getcode() == 200:
return True
return False
return retry(_check_url, max_tries, url, timeout)
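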
| {
"content_hash": "29f50dda7d4a82c68cf9b423b7de9183",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 76,
"avg_line_length": 25.89041095890411,
"alnum_prop": 0.5703703703703704,
"repo_name": "simonvh/genomepy",
"id": "a6a7ae4cf2a01adaafe79d5a309aca1f79f55f35",
"size": "3780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genomepy/online.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80475"
},
{
"name": "TeX",
"bytes": "2505"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name='CFAnalyze',
version='0.0.1',
packages=find_packages(),
install_requires=['boto>=2.5.2'],
entry_points={
'console_scripts': ['cfa = cfanalyze.__main__:main']
},
author='MinYoung Jung',
author_email='kkungkkung@gmail.com',
    description='A simple Amazon CloudFront access log analyzer',
long_description=open('README').read(),
url='https://github.com/kkung/cfanalyze',
keywords='aws cloudfront cf analyze',
license='MIT License',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'])
| {
"content_hash": "65d1407f1aefd5ef2d2d5dd5db248dcf",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 68,
"avg_line_length": 33.523809523809526,
"alnum_prop": 0.6377840909090909,
"repo_name": "kkung/cfanalyze",
"id": "dd9e4a98640b4e03db80a897bfb71ee725311ad3",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7409"
}
],
"symlink_target": ""
} |
"""
sentry-ldap-auth
================
An extension for Sentry which authenticates users from
an LDAP server and auto-adds them to an organization in Sentry.
"""
from setuptools import setup, find_packages
with open("README.md", "r") as readme:
long_description = readme.read()
install_requires = [
'django-auth-ldap==1.2.*',
'sentry>=8.0.0',
]
setup(
name='sentry-ldap-auth',
version='2.8.1',
author='Chad Killingsworth <chad.killingsworth@banno.com>, Barron Hagerman <barron.hagerman@banno.com>',
author_email='chad.killingsworth@banno.com',
url='http://github.com/banno/getsentry-ldap-auth',
description='A Sentry extension to add an LDAP server as an authentication source.',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
license='Apache-2.0',
zip_safe=False,
install_requires=install_requires,
include_package_data=True,
download_url='https://github.com/banno/getsentry-ldap-auth/tarball/2.8.1',
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
| {
"content_hash": "b3c571ed285086eb392dd42ba7a923ca",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 108,
"avg_line_length": 31.925,
"alnum_prop": 0.6773688332028192,
"repo_name": "Banno/getsentry-ldap-auth",
"id": "7275c313afdb17dce6f44e357bdb04032b7e6595",
"size": "1299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5061"
}
],
"symlink_target": ""
} |
from pytest import raises
from mio.errors import ImportError
def test_importer(mio, tmpdir, capfd):
with tmpdir.ensure("foo.mio").open("w") as f:
f.write("""
hello = block(
print("Hello World!")
)
""")
mio.eval("""Importer paths insert(0, "{0:s}")""".format(str(tmpdir)))
assert str(tmpdir) in list(mio.eval("Importer paths"))
mio.eval("foo = import(foo)")
mio.eval("foo hello()")
out, err = capfd.readouterr()
assert out == "Hello World!\n"
mio.eval("""del("foo")""")
def test_import_failure(mio):
with raises(ImportError):
mio.eval("import(blah)", reraise=True)
| {
"content_hash": "104ffb112129cf03c07d186db172e417",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 73,
"avg_line_length": 23.24137931034483,
"alnum_prop": 0.5741839762611276,
"repo_name": "prologic/mio",
"id": "6189405207cb0a32323bdb15b2c2256bc0306010",
"size": "674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/test_importer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15260"
},
{
"name": "Python",
"bytes": "191874"
},
{
"name": "Shell",
"bytes": "5303"
},
{
"name": "VimL",
"bytes": "1477"
}
],
"symlink_target": ""
} |
import os
TEMP_TAR_GZ_FILENAME = '/tmp/fairing.layer.tar.gz'
DEFAULT_IMAGE_NAME = 'fairing-job'
DEFAULT_BASE_IMAGE = 'gcr.io/kubeflow-images-public/fairing:dev'
DEFAULT_REGISTRY = 'index.docker.io'
DEFAULT_DEST_PREFIX = '/app/'
DEFAULT_CONTEXT_FILENAME = '/tmp/fairing.context.tar.gz'
DEFAULT_GENERATED_DOCKERFILE_FILENAME = '/tmp/Dockerfile'
GOOGLE_CREDS_ENV = 'GOOGLE_APPLICATION_CREDENTIALS'
GCP_SERVICE_ACCOUNT_NAME = 'default-editor'
GCP_CREDS_SECRET_NAME = 'user-gcp-sa'
AWS_CREDS_SECRET_NAME = 'aws-secret'
DOCKER_CREDS_SECRET_NAME = "docker-secret"
# IBM Cloud Constants
IBM_COS_CREDS_FILE_NAME = 'cos_credentials'
IBM_COS_CREDS_SECRET_NAME = 'ibm-cos-creds'
IBM_COS_DEFAULT_ENDPOINT = 'https://s3.us.cloud-object-storage.appdomain.cloud'
# See https://github.com/kubeflow/website/issues/1033 for documentation about these secrets.
AZURE_CREDS_SECRET_NAME = 'azcreds'
AZURE_ACR_CREDS_SECRET_NAME = 'acrcreds'
# The secret containing credentials to access a specific storage account is dynamically generated
# by using Azure credentials to get those storage credentials.
AZURE_STORAGE_CREDS_SECRET_NAME_PREFIX = 'storage-credentials-'
AZURE_FILES_SHARED_FOLDER = 'fairing-builds'
DEFAULT_USER_AGENT = 'kubeflow-fairing/{VERSION}'
# Job Constants
JOB_DEFAULT_NAME = 'fairing-job-'
JOB_DEPLOYER_TYPE = 'job'
# Serving Constants
SERVING_DEPLOPYER_TYPE = 'serving'
# TFJob Constants
TF_JOB_VERSION = os.environ.get('TF_JOB_VERSION', 'v1')
TF_JOB_GROUP = "kubeflow.org"
TF_JOB_KIND = "TFJob"
TF_JOB_PLURAL = "tfjobs"
TF_JOB_DEFAULT_NAME = 'fairing-tfjob-'
TF_JOB_DEPLOYER_TYPE = 'tfjob'
# PyTorchJob Constants
PYTORCH_JOB_VERSION = os.environ.get('PYTORCH_JOB_VERSION', 'v1')
PYTORCH_JOB_GROUP = "kubeflow.org"
PYTORCH_JOB_KIND = "PyTorchJob"
PYTORCH_JOB_PLURAL = "pytorchjobs"
PYTORCH_JOB_DEFAULT_NAME = 'fairing-pytorchjob-'
PYTORCH_JOB_DEPLOYER_TYPE = 'pytorchjob'
# KFServing constants
KFSERVING_GROUP = "serving.kubeflow.org"
KFSERVING_KIND = "InferenceService"
KFSERVING_PLURAL = "inferenceservices"
KFSERVING_VERSION = 'v1alpha2'
KFSERVING_DEFAULT_NAME = 'fairing-kfserving-'
KFSERVING_DEPLOYER_TYPE = 'kfservice'
KFSERVING_CONTAINER_NAME = 'user-container'
# persistent volume claim constants
PVC_DEFAULT_MOUNT_PATH = '/mnt'
DEFAULT_VOLUME_NAME = 'fairing-volume-'
# Kaniko Constants
KANIKO_IMAGE = 'gcr.io/kaniko-project/executor:v0.22.0'
# Fairing Logging Constants
FAIRING_LOG_LEVEL = os.environ.get('FAIRING_LOG_LEVEL', 'INFO').upper()
FAIRING_LOG_FORMAT = '%(levelname)s|%(asctime)s|%(pathname)s|%(lineno)d| %(message)s'
FAIRING_LOG_DATEFMT = '%Y-%m-%d %H:%M:%S'
| {
"content_hash": "d6c43afcd770ddaaa003276a67d10998",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 97,
"avg_line_length": 34.078947368421055,
"alnum_prop": 0.7544401544401544,
"repo_name": "kubeflow/fairing",
"id": "c5f73e7221d4ecd07648ea3d1464b1829a12066a",
"size": "2590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubeflow/fairing/constants/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2103"
},
{
"name": "Jsonnet",
"bytes": "2440311"
},
{
"name": "Jupyter Notebook",
"bytes": "1573"
},
{
"name": "Python",
"bytes": "523314"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
import time
import cv2
from ev3control.rpc import Robot
from rick.controllers import euclidian_move_to_brick, rotation_search_brick,move_to_brick_simple, move_to_brick_blind_and_grip
from rick.core import State
from rick.core import main_loop
from detection.marker_localization import get_marker_pose, load_camera_params
import cv2.aruco as aruco
import numpy as np
from rick.motion_control import euclidian_kalman
print("Creating robot...")
def euclidian_move_with_kalman(robot, frame,
path=[], iteration=0, ltrack_pos=0, rtrack_pos=0, TIME=0, P=np.identity(3), marker_list = []):
img_res = np.asarray((640,480))
brick_position = robot.map[0]
t0 = time.time()
print('t0 ', t0, 'TIME', TIME)
time_diff = t0 - TIME
print('time',time_diff)
new_ltrack_pos = robot.left_track.position
new_rtrack_pos = robot.right_track.position
odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos
print("odometry: ", odom_r,odom_l)
    # Marker information coming from the computer vision stuff
frame,marker_list = camera_related(frame = frame)
marker_map = np.array([[200,100,0],[50, 0 , 0],[100,0,0],[0,100,0],[100,100,0],[200,0,0]])
#print("marker_list1: ",marker_list)
    estim_rob_pos, vel_wheels, new_path, P = euclidian_kalman(robot.position,
brick_position, robot.sampling_rate,
odom_r= odom_r,odom_l=odom_l,
iteration=iteration, path=path,
P=P, marker_list = marker_list ,marker_map=marker_map)
print("estimated position: ",estim_rob_pos)
print("{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{")
robot.position = estim_rob_pos
robot.move(vel_left=vel_wheels[1], vel_right=vel_wheels[0])
iteration += 1
    # print("Path: ", path, iteration)
    # print("Robot position: ", robot.position)
# print("Velocities rl: ", vel_wheels)
# print("##" *20)
return "MOVE_BY_MAP", frame, {"iteration" : iteration, "path" : new_path, "ltrack_pos": new_ltrack_pos, "rtrack_pos": new_rtrack_pos, "TIME": t0 , "P": P , "marker_list": marker_list}
def camera_related(frame):
arucoParams = aruco.DetectorParameters_create()
mtx,dist = load_camera_params()
image,marker_pos = get_marker_pose(frame, mtx, dist,arucoParams=arucoParams, marker_list=[0,1,2,3,4,5], markerLength = 3.3)
#print("Output marco function:",marker_pos)
return image,marker_pos
with Robot(cv2.VideoCapture(1)) as robot:
robot.map = [(200, 100)]
robot.sampling_rate = 0.1
print("These are the robot motor positions before planning:", robot.left_track.position, robot.right_track.position)
# Define the state graph, we can do this better, currently each method
# returns the next state name
states = [
State(
name="MOVE_BY_MAP",
act=euclidian_move_with_kalman,
default_args={
"ltrack_pos": robot.left_track.position,
"rtrack_pos": robot.right_track.position,
"TIME": time.time(),
"P" : np.identity(3)
}
),
State(
name="MOVE_TO_BRICK",
act=move_to_brick_simple,
default_args={"atol": 30,
"atol_move_blind" : 30,
}
),
State(
name="SEARCH",
act=rotation_search_brick
),
State(
name="MOVE_TO_BRICK_BLIND_AND_GRIP",
act=move_to_brick_blind_and_grip,
default_args={}
),
State(
name="FINAL_STATE",
act=lambda robot, frame, **args: time.sleep(.5)
)
]
print(states[0])
state_dict = {}
for state in states:
state_dict[state.name] = state
start_state = states[0]
main_loop(robot, start_state, state_dict, delay=0.05)
| {
"content_hash": "55254c2da876c0b8d58ce991b37658f6",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 187,
"avg_line_length": 32.51162790697674,
"alnum_prop": 0.5536480686695279,
"repo_name": "TheCamusean/DLRCev3",
"id": "f67c45572b1891dd895575b68e4c928f0f2ebea4",
"size": "4264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/benchmarks/euclidean_with_kalman.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3570356"
},
{
"name": "Python",
"bytes": "558758"
},
{
"name": "Shell",
"bytes": "1722"
}
],
"symlink_target": ""
} |
from toscaparser.functions import GetInput
from translator.hot.syntax.hot_resource import HotResource
# Name used to dynamically load appropriate map class.
TARGET_CLASS_NAME = 'ToscaBlockStorageAttachment'
class ToscaBlockStorageAttachment(HotResource):
'''Translate TOSCA relationship AttachesTo for Compute and BlockStorage.'''
toscatype = 'tosca.nodes.BlockStorageAttachment'
def __init__(self, template, nodetemplates, instance_uuid, volume_id):
super(ToscaBlockStorageAttachment,
self).__init__(template, type='OS::Cinder::VolumeAttachment')
self.nodetemplates = nodetemplates
self.instance_uuid = {'get_resource': instance_uuid}
self.volume_id = {'get_resource': volume_id}
def handle_properties(self):
tosca_props = {}
for prop in self.nodetemplate.get_properties_objects():
if isinstance(prop.value, GetInput):
tosca_props[prop.name] = {'get_param': prop.value.input_name}
else:
tosca_props[prop.name] = prop.value
self.properties = tosca_props
# instance_uuid and volume_id for Cinder volume attachment
self.properties['instance_uuid'] = self.instance_uuid
self.properties['volume_id'] = self.volume_id
if 'location' in self.properties:
self.properties['mountpoint'] = self.properties.pop('location')
def handle_life_cycle(self):
pass
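# Illustrative sketch only: for a TOSCA AttachesTo relationship carrying a
# "location" property, handle_properties() above yields a HOT resource along
# these lines (resource names are hypothetical):
#
#     attach_volume:
#       type: OS::Cinder::VolumeAttachment
#       properties:
#         instance_uuid: { get_resource: my_server }
#         volume_id: { get_resource: my_volume }
#         mountpoint: /dev/vdb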
| {
"content_hash": "6471bcb1d1201e7c0b3734c3ba1f4c8f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 41.42857142857143,
"alnum_prop": 0.6751724137931034,
"repo_name": "obulpathi/cloud-translator",
"id": "715d5b3d67aeb1364b1d0b7989a62dfa30bb8424",
"size": "1993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transformer/hot/tosca/tosca_block_storage_attachment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "195222"
},
{
"name": "Shell",
"bytes": "7040"
}
],
"symlink_target": ""
} |
from django.core import serializers
from djspace.core.models import GenericChoice, UserProfile
from djspace.registration.models import Faculty
from taggit.models import Tag, TaggedItem
import django
django.setup()
#with open("profile.json", "w") as out:
#with open("faculty.json", "w") as out:
with open("generic_choice.json", "w") as out:
#with open("tag.json", "w") as out:
#with open("tagged_item.json", "w") as out:
json_serializer = serializers.get_serializer('json')()
#json_serializer.serialize(Faculty.objects.all(), stream=out)
#json_serializer.serialize(UserProfile.objects.all(), stream=out)
json_serializer.serialize(GenericChoice.objects.all(), stream=out)
#json_serializer.serialize(Tag.objects.all(), stream=out)
#json_serializer.serialize(TaggedItem.objects.all(), stream=out)
| {
"content_hash": "c71e278d65e71b76849760f81ad1ba35",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 39.095238095238095,
"alnum_prop": 0.7405602923264312,
"repo_name": "carthagecollege/django-djspace",
"id": "74022934cd7653f8cea45a90093522d8b6ee0a9e",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djspace/bin/cereal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "148465"
},
{
"name": "Python",
"bytes": "199807"
},
{
"name": "Shell",
"bytes": "672"
}
],
"symlink_target": ""
} |
from marshmallow import pre_dump
from marshmallow_jsonapi import SchemaOpts
__all__ = [
'IncludableViewMixin',
'IncludableSchemaMixin'
]
def extract_requested_includes(query_key, request):
requested_includes = []
for key, val in request.params.items():
if key == query_key:
requested_includes.extend(val.split(',')) # Allow for a comma separated list of include values
return requested_includes
class IncludableViewMixin:
"""
    Parses query string params for related objects that should be included in the serialized response.
If includable_relationships is None then all relationships can be included.
includable_relationships = A dictionary where the key is the mapped querystring value and the value is a dict of:
{'rel': '<relationship name from schema>',
'join': <optional, sqla join method to use>,
'options': <optional, sqla query options>}
"""
QUERY_KEY = 'include'
includable_relationships = None
def get_schema(self, *args, **kwargs):
includable_names = self.includable_relationships.keys() if self.includable_relationships else None
requested_includes = extract_requested_includes(self.QUERY_KEY, self.request)
includes = []
for requested_include in requested_includes:
if includable_names is None or requested_include in includable_names:
includes.append(self.includable_relationships[requested_include]['rel'])
if includes:
kwargs['include_data'] = includes
return super(IncludableViewMixin, self).get_schema(*args, **kwargs)
def get_query(self):
"""
Allows for the query to be dynamically updated based on any included relationships.
        This lets data be pre-joined or pre-fetched, cutting down on the number of db
        queries required for the request.
"""
query = super(IncludableViewMixin, self).get_query()
includables = self.includable_relationships if self.includable_relationships else []
if includables:
requested_includes = extract_requested_includes(self.QUERY_KEY, self.request)
if requested_includes:
available_includes = self.includable_relationships.keys()
for name in requested_includes:
if name in available_includes:
field = self.includable_relationships[name]
join = field.get('join')
if join:
query = getattr(query, join)(field['rel'])
# Apply optional options
options = field.get('options')
if options:
query = query.options(*options)
return query
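# Illustrative sketch only: a hypothetical view combining this mixin with a
# project view class would declare includable_relationships as described in the
# class docstring. The relationship names and join strategy below are
# placeholders; nothing in this module uses the class.
class _ExampleIncludableView(IncludableViewMixin):
    includable_relationships = {
        # "?include=books" exposes the schema's "books" relationship and
        # pre-joins it so serializing the included data avoids extra queries
        'books': {'rel': 'books', 'join': 'outerjoin'},
        # plain include with no query changes
        'publisher': {'rel': 'publisher'},
    }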
class IncludableOpts(SchemaOpts):
"""
Adds includable_fields to Class Meta. `includable_fields` should be a dict of key = field name
that the attribute should be replaced with and val = the new attribute name.
Example:
        includable_fields = {'paymentmethod': 'paymentmethod_rel'}
"""
def __init__(self, meta, *args, **kwargs):
super(IncludableOpts, self).__init__(meta, *args, **kwargs)
self.includable_fields = getattr(meta, 'includable_fields', {})
class IncludableSchemaMixin:
"""
Add support for replacing the attribute property of a relationship field when the relationship's data
should be included in the resulting data.
"""
OPTIONS_CLASS = IncludableOpts
QUERY_KEY = 'include'
@pre_dump
def update_includables(self, data):
"""
Swap the attribute value for requested includable relationships.
"""
request = self.context.get('request')
if request:
requested_includes = extract_requested_includes(self.QUERY_KEY, request)
available_includes = self.opts.includable_fields.keys()
for requested_include in requested_includes:
if requested_include in available_includes:
self.declared_fields[requested_include].attribute = self.opts.includable_fields[requested_include]
return data
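# Illustrative sketch only (hypothetical schema): Meta.includable_fields maps a
# relationship field name to the attribute that should back it when its data is
# requested, e.g. a schema whose "books" field normally serializes ids could be
# switched to the "books_rel" attribute holding the full objects:
#
#     class AuthorSchema(IncludableSchemaMixin, Schema):
#         ...
#         class Meta:
#             type_ = 'authors'
#             includable_fields = {'books': 'books_rel'}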
class NestableSchemaMixin:
"""
I forget where I left off on this but I think this will only work for a single level of nesting.
Used when you need to use a Nested field with a JSONAPI Schema.
It formats the errors so that the nested schemas errors pointers have the correct attribute path.
For this to work, if a nested schema needs to have many=True, set it on an instance of the schema, not
    as a Nested argument (you will get an Invalid Type error).
GOOD: fields.Nested(MySchema(many=True), required=True)
BAD: fields.Nested(MySchema,many=True, required=True)
"""
def format_nested_errors(self, key, errors):
"""
Updates the pointer attribute in an errors object so that it points to the nested attribute.
:param key: The nested field name.
:param errors: An array of already jsonapi formatted errors.
:return: The formatted errors array.
"""
for error in errors:
source = error['source']
source['pointer'] = '/data/attributes/{}'.format(key) + source['pointer']
return errors
def format_errors(self, errors, many):
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {'errors': errors}
formatted_errors = []
if many: # won't ever be a nested jsonapi schema
for index, errors in errors.items():
for field_name, field_errors in errors.items():
formatted_errors.extend(
[self.format_error(field_name, message, index=index) for message in field_errors])
else:
for field_name, field_errors in errors.items():
if isinstance(field_errors, dict):
errors = field_errors.get('errors')
if not errors:
# Not JSON API formatted, happens when a required nested attribute does not exist in the data
errors = self.format_errors(field_errors, False)['errors']
formatted_errors.extend(self.format_nested_errors(field_name, errors))
else:
formatted_errors.extend([self.format_error(field_name, message) for message in field_errors])
return {'errors': formatted_errors}
| {
"content_hash": "e39cec3da8710f26cf7fe2428641f995",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 118,
"avg_line_length": 36.68156424581006,
"alnum_prop": 0.6286932683521169,
"repo_name": "danpoland/pyramid-restful-jsonapi",
"id": "1f7068fcdb158e988f1a7e2683c1012a22aad728",
"size": "6566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyramid_restful_jsonapi/mixins.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "15273"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from backports.configparser import NoSectionError
import logging
import os
import subprocess
import textwrap
import random
import string
from importlib import import_module
import getpass
import reprlib
import argparse
from builtins import input
from collections import namedtuple
from airflow.utils.timezone import parse as parsedate
import json
from tabulate import tabulate
import daemon
from daemon.pidfile import TimeoutPIDLockFile
import signal
import sys
import threading
import traceback
import time
import psutil
import re
from urllib.parse import urlunparse
import airflow
from airflow import api
from airflow import jobs, settings
from airflow import configuration as conf
from airflow.exceptions import AirflowException, AirflowWebServerTimeout
from airflow.executors import GetDefaultExecutor
from airflow.models import (DagModel, DagBag, TaskInstance,
DagPickle, DagRun, Variable, DagStat,
Connection, DAG)
from airflow.ti_deps.dep_context import (DepContext, SCHEDULER_DEPS)
from airflow.utils import cli as cli_utils
from airflow.utils import db as db_utils
from airflow.utils.net import get_hostname
from airflow.utils.log.logging_mixin import (LoggingMixin, redirect_stderr,
redirect_stdout)
from airflow.www.app import (cached_app, create_app)
from airflow.www_rbac.app import cached_app as cached_app_rbac
from airflow.www_rbac.app import create_app as create_app_rbac
from airflow.www_rbac.app import cached_appbuilder
from sqlalchemy import func
from sqlalchemy.orm import exc
api.load_auth()
api_module = import_module(conf.get('cli', 'api_client'))
api_client = api_module.Client(api_base_url=conf.get('cli', 'endpoint_url'),
auth=api.api_auth.client_auth)
log = LoggingMixin().log
def sigint_handler(sig, frame):
sys.exit(0)
def sigquit_handler(sig, frame):
"""Helps debug deadlocks by printing stacktraces when this gets a SIGQUIT
e.g. kill -s QUIT <PID> or CTRL+\
"""
print("Dumping stack traces for all threads in PID {}".format(os.getpid()))
id_to_name = dict([(th.ident, th.name) for th in threading.enumerate()])
code = []
for thread_id, stack in sys._current_frames().items():
code.append("\n# Thread: {}({})"
.format(id_to_name.get(thread_id, ""), thread_id))
for filename, line_number, name, line in traceback.extract_stack(stack):
code.append('File: "{}", line {}, in {}'
.format(filename, line_number, name))
if line:
code.append(" {}".format(line.strip()))
print("\n".join(code))
def setup_logging(filename):
root = logging.getLogger()
handler = logging.FileHandler(filename)
formatter = logging.Formatter(settings.SIMPLE_LOG_FORMAT)
handler.setFormatter(formatter)
root.addHandler(handler)
root.setLevel(settings.LOGGING_LEVEL)
return handler.stream
def setup_locations(process, pid=None, stdout=None, stderr=None, log=None):
if not stderr:
stderr = os.path.join(os.path.expanduser(settings.AIRFLOW_HOME),
'airflow-{}.err'.format(process))
if not stdout:
stdout = os.path.join(os.path.expanduser(settings.AIRFLOW_HOME),
'airflow-{}.out'.format(process))
if not log:
log = os.path.join(os.path.expanduser(settings.AIRFLOW_HOME),
'airflow-{}.log'.format(process))
if not pid:
pid = os.path.join(os.path.expanduser(settings.AIRFLOW_HOME),
'airflow-{}.pid'.format(process))
return pid, stdout, stderr, log
def process_subdir(subdir):
if subdir:
subdir = subdir.replace('DAGS_FOLDER', settings.DAGS_FOLDER)
subdir = os.path.abspath(os.path.expanduser(subdir))
return subdir
def get_dag(args):
dagbag = DagBag(process_subdir(args.subdir))
if args.dag_id not in dagbag.dags:
raise AirflowException(
'dag_id could not be found: {}. Either the dag did not exist or it failed to '
'parse.'.format(args.dag_id))
return dagbag.dags[args.dag_id]
def get_dags(args):
if not args.dag_regex:
return [get_dag(args)]
dagbag = DagBag(process_subdir(args.subdir))
matched_dags = [dag for dag in dagbag.dags.values() if re.search(
args.dag_id, dag.dag_id)]
if not matched_dags:
raise AirflowException(
'dag_id could not be found with regex: {}. Either the dag did not exist '
'or it failed to parse.'.format(args.dag_id))
return matched_dags
@cli_utils.action_logging
def backfill(args, dag=None):
logging.basicConfig(
level=settings.LOGGING_LEVEL,
format=settings.SIMPLE_LOG_FORMAT)
dag = dag or get_dag(args)
if not args.start_date and not args.end_date:
raise AirflowException("Provide a start_date and/or end_date")
# If only one date is passed, using same as start and end
args.end_date = args.end_date or args.start_date
args.start_date = args.start_date or args.end_date
if args.task_regex:
dag = dag.sub_dag(
task_regex=args.task_regex,
include_upstream=not args.ignore_dependencies)
run_conf = None
if args.conf:
run_conf = json.loads(args.conf)
if args.dry_run:
print("Dry run of DAG {0} on {1}".format(args.dag_id,
args.start_date))
for task in dag.tasks:
print("Task {0}".format(task.task_id))
ti = TaskInstance(task, args.start_date)
ti.dry_run()
else:
dag.run(
start_date=args.start_date,
end_date=args.end_date,
mark_success=args.mark_success,
local=args.local,
donot_pickle=(args.donot_pickle or
conf.getboolean('core', 'donot_pickle')),
ignore_first_depends_on_past=args.ignore_first_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
pool=args.pool,
delay_on_limit_secs=args.delay_on_limit,
verbose=args.verbose,
conf=run_conf,
)
@cli_utils.action_logging
def trigger_dag(args):
"""
Creates a dag run for the specified dag
:param args:
:return:
"""
log = LoggingMixin().log
try:
message = api_client.trigger_dag(dag_id=args.dag_id,
run_id=args.run_id,
conf=args.conf,
execution_date=args.exec_date)
except IOError as err:
log.error(err)
raise AirflowException(err)
log.info(message)
@cli_utils.action_logging
def delete_dag(args):
"""
Deletes all DB records related to the specified dag
:param args:
:return:
"""
log = LoggingMixin().log
if args.yes or input(
"This will drop all existing records related to the specified DAG. "
"Proceed? (y/n)").upper() == "Y":
try:
message = api_client.delete_dag(dag_id=args.dag_id)
except IOError as err:
log.error(err)
raise AirflowException(err)
log.info(message)
else:
print("Bail.")
@cli_utils.action_logging
def pool(args):
log = LoggingMixin().log
def _tabulate(pools):
return "\n%s" % tabulate(pools, ['Pool', 'Slots', 'Description'],
tablefmt="fancy_grid")
try:
if args.get is not None:
pools = [api_client.get_pool(name=args.get)]
elif args.set:
pools = [api_client.create_pool(name=args.set[0],
slots=args.set[1],
description=args.set[2])]
elif args.delete:
pools = [api_client.delete_pool(name=args.delete)]
else:
pools = api_client.get_pools()
except (AirflowException, IOError) as err:
log.error(err)
else:
log.info(_tabulate(pools=pools))
@cli_utils.action_logging
def variables(args):
if args.get:
try:
var = Variable.get(args.get,
deserialize_json=args.json,
default_var=args.default)
print(var)
except ValueError as e:
print(e)
if args.delete:
session = settings.Session()
session.query(Variable).filter_by(key=args.delete).delete()
session.commit()
session.close()
if args.set:
Variable.set(args.set[0], args.set[1])
# Work around 'import' as a reserved keyword
imp = getattr(args, 'import')
if imp:
if os.path.exists(imp):
import_helper(imp)
else:
print("Missing variables file.")
if args.export:
export_helper(args.export)
if not (args.set or args.get or imp or args.export or args.delete):
# list all variables
session = settings.Session()
vars = session.query(Variable)
msg = "\n".join(var.key for var in vars)
print(msg)
def import_helper(filepath):
with open(filepath, 'r') as varfile:
var = varfile.read()
try:
d = json.loads(var)
except Exception:
print("Invalid variables file.")
else:
try:
n = 0
for k, v in d.items():
if isinstance(v, dict):
Variable.set(k, v, serialize_json=True)
else:
Variable.set(k, v)
n += 1
except Exception:
pass
finally:
print("{} of {} variables successfully updated.".format(n, len(d)))
def export_helper(filepath):
session = settings.Session()
qry = session.query(Variable).all()
session.close()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
except Exception:
val = var.val
var_dict[var.key] = val
with open(filepath, 'w') as varfile:
varfile.write(json.dumps(var_dict, sort_keys=True, indent=4))
print("{} variables successfully exported to {}".format(len(var_dict), filepath))
@cli_utils.action_logging
def pause(args, dag=None):
set_is_paused(True, args, dag)
@cli_utils.action_logging
def unpause(args, dag=None):
set_is_paused(False, args, dag)
def set_is_paused(is_paused, args, dag=None):
dag = dag or get_dag(args)
session = settings.Session()
dm = session.query(DagModel).filter(
DagModel.dag_id == dag.dag_id).first()
dm.is_paused = is_paused
session.commit()
msg = "Dag: {}, paused: {}".format(dag, str(dag.is_paused))
print(msg)
def _run(args, dag, ti):
if args.local:
run_job = jobs.LocalTaskJob(
task_instance=ti,
mark_success=args.mark_success,
pickle_id=args.pickle,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=args.ignore_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool)
run_job.run()
elif args.raw:
ti._run_raw_task(
mark_success=args.mark_success,
job_id=args.job_id,
pool=args.pool,
)
else:
pickle_id = None
if args.ship_dag:
try:
# Running remotely, so pickling the DAG
session = settings.Session()
pickle = DagPickle(dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
# TODO: This should be written to a log
print('Pickled dag {dag} as pickle_id:{pickle_id}'
.format(**locals()))
except Exception as e:
print('Could not pickle the DAG')
print(e)
raise e
executor = GetDefaultExecutor()
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
ti,
mark_success=args.mark_success,
pickle_id=pickle_id,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=args.ignore_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool)
executor.heartbeat()
executor.end()
@cli_utils.action_logging
def run(args, dag=None):
if dag:
args.dag_id = dag.dag_id
log = LoggingMixin().log
# Load custom airflow config
if args.cfg_path:
with open(args.cfg_path, 'r') as conf_file:
conf_dict = json.load(conf_file)
if os.path.exists(args.cfg_path):
os.remove(args.cfg_path)
# Do not log these properties since some may contain passwords.
# This may also set default values for database properties like
# core.sql_alchemy_pool_size
# core.sql_alchemy_pool_recycle
for section, config in conf_dict.items():
for option, value in config.items():
try:
conf.set(section, option, value)
except NoSectionError:
log.error('Section {section} Option {option} '
'does not exist in the config!'.format(section=section,
option=option))
settings.configure_vars()
# IMPORTANT, have to use the NullPool, otherwise, each "run" command may leave
# behind multiple open sleeping connections while heartbeating, which could
# easily exceed the database connection limit when
# processing hundreds of simultaneous tasks.
settings.configure_orm(disable_connection_pool=True)
if not args.pickle and not dag:
dag = get_dag(args)
elif not dag:
session = settings.Session()
log.info('Loading pickle id {args.pickle}'.format(args=args))
dag_pickle = session.query(
DagPickle).filter(DagPickle.id == args.pickle).first()
if not dag_pickle:
raise AirflowException("Who hid the pickle!? [missing pickle]")
dag = dag_pickle.pickle
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
ti.refresh_from_db()
ti.init_run_context(raw=args.raw)
hostname = get_hostname()
log.info("Running %s on host %s", ti, hostname)
if args.interactive:
_run(args, dag, ti)
else:
with redirect_stdout(ti.log, logging.INFO), redirect_stderr(ti.log, logging.WARN):
_run(args, dag, ti)
logging.shutdown()
@cli_utils.action_logging
def task_failed_deps(args):
"""
Returns the unmet dependencies for a task instance from the perspective of the
scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
scheduler, and then run by an executor).
>>> airflow task_failed_deps tutorial sleep 2015-01-01
Task instance dependencies not met:
Dagrun Running: Task instance's dagrun did not exist: Unknown reason
Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
to have succeeded, but found 1 non-success(es).
"""
dag = get_dag(args)
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
dep_context = DepContext(deps=SCHEDULER_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO, Do we want to print or log this
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print("{}: {}".format(dep.dep_name, dep.reason))
else:
print("Task instance dependencies are all met.")
@cli_utils.action_logging
def task_state(args):
"""
Returns the state of a TaskInstance at the command line.
>>> airflow task_state tutorial sleep 2015-01-01
success
"""
dag = get_dag(args)
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
print(ti.current_state())
@cli_utils.action_logging
def dag_state(args):
"""
Returns the state of a DagRun at the command line.
>>> airflow dag_state tutorial 2015-01-01T00:00:00.000000
running
"""
dag = get_dag(args)
dr = DagRun.find(dag.dag_id, execution_date=args.execution_date)
print(dr[0].state if len(dr) > 0 else None)
@cli_utils.action_logging
def list_dags(args):
dagbag = DagBag(process_subdir(args.subdir))
s = textwrap.dedent("""\n
-------------------------------------------------------------------
DAGS
-------------------------------------------------------------------
{dag_list}
""")
dag_list = "\n".join(sorted(dagbag.dags))
print(s.format(dag_list=dag_list))
if args.report:
print(dagbag.dagbag_report())
@cli_utils.action_logging
def list_tasks(args, dag=None):
dag = dag or get_dag(args)
if args.tree:
dag.tree_view()
else:
tasks = sorted([t.task_id for t in dag.tasks])
print("\n".join(sorted(tasks)))
@cli_utils.action_logging
def test(args, dag=None):
dag = dag or get_dag(args)
task = dag.get_task(task_id=args.task_id)
# Add CLI provided task_params to task.params
if args.task_params:
passed_in_params = json.loads(args.task_params)
task.params.update(passed_in_params)
ti = TaskInstance(task, args.execution_date)
if args.dry_run:
ti.dry_run()
else:
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
@cli_utils.action_logging
def render(args):
dag = get_dag(args)
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
ti.render_templates()
for attr in task.__class__.template_fields:
print(textwrap.dedent("""\
# ----------------------------------------------------------
# property: {}
# ----------------------------------------------------------
{}
""".format(attr, getattr(task, attr))))
@cli_utils.action_logging
def clear(args):
logging.basicConfig(
level=settings.LOGGING_LEVEL,
format=settings.SIMPLE_LOG_FORMAT)
dags = get_dags(args)
if args.task_regex:
for idx, dag in enumerate(dags):
dags[idx] = dag.sub_dag(
task_regex=args.task_regex,
include_downstream=args.downstream,
include_upstream=args.upstream)
DAG.clear_dags(
dags,
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
confirm_prompt=not args.no_confirm,
include_subdags=not args.exclude_subdags)
def get_num_ready_workers_running(gunicorn_master_proc):
workers = psutil.Process(gunicorn_master_proc.pid).children()
def ready_prefix_on_cmdline(proc):
try:
cmdline = proc.cmdline()
if len(cmdline) > 0:
return settings.GUNICORN_WORKER_READY_PREFIX in cmdline[0]
except psutil.NoSuchProcess:
pass
return False
ready_workers = [proc for proc in workers if ready_prefix_on_cmdline(proc)]
return len(ready_workers)
def get_num_workers_running(gunicorn_master_proc):
workers = psutil.Process(gunicorn_master_proc.pid).children()
return len(workers)
def restart_workers(gunicorn_master_proc, num_workers_expected, master_timeout):
"""
Runs forever, monitoring the child processes of @gunicorn_master_proc and
restarting workers occasionally.
Each iteration of the loop traverses one edge of this state transition
diagram, where each state (node) represents
[ num_ready_workers_running / num_workers_running ]. We expect most time to
be spent in [n / n]. `bs` is the setting webserver.worker_refresh_batch_size.
The horizontal transition at ? happens after the new worker parses all the
dags (so it could take a while!)
V ────────────────────────────────────────────────────────────────────────┐
[n / n] ──TTIN──> [ [n, n+bs) / n + bs ] ────?───> [n + bs / n + bs] ──TTOU─┘
^ ^───────────────┘
│
│ ┌────────────────v
└──────┴────── [ [0, n) / n ] <─── start
We change the number of workers by sending TTIN and TTOU to the gunicorn
master process, which increases and decreases the number of child workers
respectively. Gunicorn guarantees that on TTOU workers are terminated
gracefully and that the oldest worker is terminated.
"""
def wait_until_true(fn, timeout=0):
"""
Sleeps until fn is true
"""
t = time.time()
while not fn():
if 0 < timeout and timeout <= time.time() - t:
raise AirflowWebServerTimeout(
"No response from gunicorn master within {0} seconds"
.format(timeout))
time.sleep(0.1)
def start_refresh(gunicorn_master_proc):
batch_size = conf.getint('webserver', 'worker_refresh_batch_size')
log.debug('%s doing a refresh of %s workers', state, batch_size)
sys.stdout.flush()
sys.stderr.flush()
excess = 0
for _ in range(batch_size):
gunicorn_master_proc.send_signal(signal.SIGTTIN)
excess += 1
wait_until_true(lambda: num_workers_expected + excess ==
get_num_workers_running(gunicorn_master_proc),
master_timeout)
try:
wait_until_true(lambda: num_workers_expected ==
get_num_workers_running(gunicorn_master_proc),
master_timeout)
while True:
num_workers_running = get_num_workers_running(gunicorn_master_proc)
num_ready_workers_running = \
get_num_ready_workers_running(gunicorn_master_proc)
state = '[{0} / {1}]'.format(num_ready_workers_running, num_workers_running)
# Whenever some workers are not ready, wait until all workers are ready
if num_ready_workers_running < num_workers_running:
log.debug('%s some workers are starting up, waiting...', state)
sys.stdout.flush()
time.sleep(1)
# Kill a worker gracefully by asking gunicorn to reduce number of workers
elif num_workers_running > num_workers_expected:
excess = num_workers_running - num_workers_expected
log.debug('%s killing %s workers', state, excess)
for _ in range(excess):
gunicorn_master_proc.send_signal(signal.SIGTTOU)
excess -= 1
wait_until_true(lambda: num_workers_expected + excess ==
get_num_workers_running(gunicorn_master_proc),
master_timeout)
# Start a new worker by asking gunicorn to increase number of workers
elif num_workers_running == num_workers_expected:
refresh_interval = conf.getint('webserver', 'worker_refresh_interval')
log.debug(
                    '%s sleeping for %ss before doing a refresh...',
state, refresh_interval
)
time.sleep(refresh_interval)
start_refresh(gunicorn_master_proc)
else:
# num_ready_workers_running == num_workers_running < num_workers_expected
                log.error((
                    "%s some workers seem to have died and gunicorn "
                    "did not restart them as expected"
), state)
time.sleep(10)
if len(
psutil.Process(gunicorn_master_proc.pid).children()
) < num_workers_expected:
start_refresh(gunicorn_master_proc)
except (AirflowWebServerTimeout, OSError) as err:
log.error(err)
log.error("Shutting down webserver")
try:
gunicorn_master_proc.terminate()
gunicorn_master_proc.wait()
finally:
sys.exit(1)
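# Illustrative sketch only: the TTIN/TTOU mechanism documented in
# restart_workers() can be exercised by hand against a running gunicorn master.
# `master_pid` is a hypothetical pid; nothing in this module calls this helper.
def _example_gunicorn_scaling(master_pid):
    master = psutil.Process(master_pid)
    master.send_signal(signal.SIGTTIN)  # ask gunicorn to spawn one extra worker
    master.send_signal(signal.SIGTTOU)  # gracefully retire the oldest worker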
@cli_utils.action_logging
def webserver(args):
print(settings.HEADER)
access_logfile = args.access_logfile or conf.get('webserver', 'access_logfile')
error_logfile = args.error_logfile or conf.get('webserver', 'error_logfile')
num_workers = args.workers or conf.get('webserver', 'workers')
worker_timeout = (args.worker_timeout or
conf.get('webserver', 'web_server_worker_timeout'))
ssl_cert = args.ssl_cert or conf.get('webserver', 'web_server_ssl_cert')
ssl_key = args.ssl_key or conf.get('webserver', 'web_server_ssl_key')
if not ssl_cert and ssl_key:
raise AirflowException(
'An SSL certificate must also be provided for use with ' + ssl_key)
if ssl_cert and not ssl_key:
raise AirflowException(
'An SSL key must also be provided for use with ' + ssl_cert)
if args.debug:
print(
"Starting the web server on port {0} and host {1}.".format(
args.port, args.hostname))
app = create_app_rbac(conf) if settings.RBAC else create_app(conf)
app.run(debug=True, port=args.port, host=args.hostname,
ssl_context=(ssl_cert, ssl_key) if ssl_cert and ssl_key else None)
else:
app = cached_app_rbac(conf) if settings.RBAC else cached_app(conf)
pid, stdout, stderr, log_file = setup_locations(
"webserver", args.pid, args.stdout, args.stderr, args.log_file)
if args.daemon:
handle = setup_logging(log_file)
stdout = open(stdout, 'w+')
stderr = open(stderr, 'w+')
print(
textwrap.dedent('''\
Running the Gunicorn Server with:
Workers: {num_workers} {args.workerclass}
Host: {args.hostname}:{args.port}
Timeout: {worker_timeout}
Logfiles: {access_logfile} {error_logfile}
=================================================================\
'''.format(**locals())))
run_args = [
'gunicorn',
'-w', str(num_workers),
'-k', str(args.workerclass),
'-t', str(worker_timeout),
'-b', args.hostname + ':' + str(args.port),
'-n', 'airflow-webserver',
'-p', str(pid),
'-c', 'python:airflow.www.gunicorn_config',
]
if args.access_logfile:
run_args += ['--access-logfile', str(args.access_logfile)]
if args.error_logfile:
run_args += ['--error-logfile', str(args.error_logfile)]
if args.daemon:
run_args += ['-D']
if ssl_cert:
run_args += ['--certfile', ssl_cert, '--keyfile', ssl_key]
webserver_module = 'www_rbac' if settings.RBAC else 'www'
run_args += ["airflow." + webserver_module + ".app:cached_app()"]
gunicorn_master_proc = None
def kill_proc(dummy_signum, dummy_frame):
gunicorn_master_proc.terminate()
gunicorn_master_proc.wait()
sys.exit(0)
def monitor_gunicorn(gunicorn_master_proc):
# These run forever until SIG{INT, TERM, KILL, ...} signal is sent
if conf.getint('webserver', 'worker_refresh_interval') > 0:
master_timeout = conf.getint('webserver', 'web_server_master_timeout')
restart_workers(gunicorn_master_proc, num_workers, master_timeout)
else:
while True:
time.sleep(1)
if args.daemon:
base, ext = os.path.splitext(pid)
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(base + "-monitor" + ext, -1),
files_preserve=[handle],
stdout=stdout,
stderr=stderr,
signal_map={
signal.SIGINT: kill_proc,
signal.SIGTERM: kill_proc
},
)
with ctx:
subprocess.Popen(run_args, close_fds=True)
# Reading pid file directly, since Popen#pid doesn't
# seem to return the right value with DaemonContext.
while True:
try:
with open(pid) as f:
gunicorn_master_proc_pid = int(f.read())
break
except IOError:
log.debug("Waiting for gunicorn's pid file to be created.")
time.sleep(0.1)
gunicorn_master_proc = psutil.Process(gunicorn_master_proc_pid)
monitor_gunicorn(gunicorn_master_proc)
stdout.close()
stderr.close()
else:
gunicorn_master_proc = subprocess.Popen(run_args, close_fds=True)
signal.signal(signal.SIGINT, kill_proc)
signal.signal(signal.SIGTERM, kill_proc)
monitor_gunicorn(gunicorn_master_proc)
@cli_utils.action_logging
def scheduler(args):
print(settings.HEADER)
job = jobs.SchedulerJob(
dag_id=args.dag_id,
subdir=process_subdir(args.subdir),
run_duration=args.run_duration,
num_runs=args.num_runs,
do_pickle=args.do_pickle)
if args.daemon:
pid, stdout, stderr, log_file = setup_locations("scheduler",
args.pid,
args.stdout,
args.stderr,
args.log_file)
handle = setup_logging(log_file)
stdout = open(stdout, 'w+')
stderr = open(stderr, 'w+')
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
files_preserve=[handle],
stdout=stdout,
stderr=stderr,
)
with ctx:
job.run()
stdout.close()
stderr.close()
else:
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
signal.signal(signal.SIGQUIT, sigquit_handler)
job.run()
@cli_utils.action_logging
def serve_logs(args):
print("Starting flask")
import flask
flask_app = flask.Flask(__name__)
@flask_app.route('/log/<path:filename>')
def serve_logs(filename): # noqa
log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
return flask.send_from_directory(
log,
filename,
mimetype="application/json",
as_attachment=False)
WORKER_LOG_SERVER_PORT = \
int(conf.get('celery', 'WORKER_LOG_SERVER_PORT'))
flask_app.run(
host='0.0.0.0', port=WORKER_LOG_SERVER_PORT)
@cli_utils.action_logging
def worker(args):
env = os.environ.copy()
env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME
# Celery worker
from airflow.executors.celery_executor import app as celery_app
from celery.bin import worker
worker = worker.worker(app=celery_app)
options = {
'optimization': 'fair',
'O': 'fair',
'queues': args.queues,
'concurrency': args.concurrency,
'hostname': args.celery_hostname,
}
if args.daemon:
pid, stdout, stderr, log_file = setup_locations("worker",
args.pid,
args.stdout,
args.stderr,
args.log_file)
handle = setup_logging(log_file)
stdout = open(stdout, 'w+')
stderr = open(stderr, 'w+')
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
files_preserve=[handle],
stdout=stdout,
stderr=stderr,
)
with ctx:
sp = subprocess.Popen(['airflow', 'serve_logs'], env=env, close_fds=True)
worker.run(**options)
sp.kill()
stdout.close()
stderr.close()
else:
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
sp = subprocess.Popen(['airflow', 'serve_logs'], env=env, close_fds=True)
worker.run(**options)
sp.kill()
def initdb(args): # noqa
print("DB: " + repr(settings.engine.url))
db_utils.initdb(settings.RBAC)
print("Done.")
@cli_utils.action_logging
def resetdb(args):
print("DB: " + repr(settings.engine.url))
if args.yes or input("This will drop existing tables "
"if they exist. Proceed? "
"(y/n)").upper() == "Y":
db_utils.resetdb(settings.RBAC)
else:
print("Bail.")
@cli_utils.action_logging
def upgradedb(args): # noqa
print("DB: " + repr(settings.engine.url))
db_utils.upgradedb()
# Populate DagStats table
session = settings.Session()
ds_rows = session.query(DagStat).count()
if not ds_rows:
qry = (
session.query(DagRun.dag_id, DagRun.state, func.count('*'))
.group_by(DagRun.dag_id, DagRun.state)
)
for dag_id, state, count in qry:
session.add(DagStat(dag_id=dag_id, state=state, count=count))
session.commit()
@cli_utils.action_logging
def version(args): # noqa
print(settings.HEADER + " v" + airflow.__version__)
alternative_conn_specs = ['conn_type', 'conn_host',
'conn_login', 'conn_password', 'conn_schema', 'conn_port']
@cli_utils.action_logging
def connections(args):
if args.list:
# Check that no other flags were passed to the command
invalid_args = list()
for arg in ['conn_id', 'conn_uri', 'conn_extra'] + alternative_conn_specs:
if getattr(args, arg) is not None:
invalid_args.append(arg)
if invalid_args:
msg = ('\n\tThe following args are not compatible with the ' +
'--list flag: {invalid!r}\n')
msg = msg.format(invalid=invalid_args)
print(msg)
return
session = settings.Session()
conns = session.query(Connection.conn_id, Connection.conn_type,
Connection.host, Connection.port,
Connection.is_encrypted,
Connection.is_extra_encrypted,
Connection.extra).all()
conns = [map(reprlib.repr, conn) for conn in conns]
msg = tabulate(conns, ['Conn Id', 'Conn Type', 'Host', 'Port',
'Is Encrypted', 'Is Extra Encrypted', 'Extra'],
tablefmt="fancy_grid")
if sys.version_info[0] < 3:
msg = msg.encode('utf-8')
print(msg)
return
if args.delete:
# Check that only the `conn_id` arg was passed to the command
invalid_args = list()
for arg in ['conn_uri', 'conn_extra'] + alternative_conn_specs:
if getattr(args, arg) is not None:
invalid_args.append(arg)
if invalid_args:
msg = ('\n\tThe following args are not compatible with the ' +
'--delete flag: {invalid!r}\n')
msg = msg.format(invalid=invalid_args)
print(msg)
return
if args.conn_id is None:
            print('\n\tTo delete a connection, you must provide a value for ' +
'the --conn_id flag.\n')
return
session = settings.Session()
try:
to_delete = (session
.query(Connection)
.filter(Connection.conn_id == args.conn_id)
.one())
except exc.NoResultFound:
msg = '\n\tDid not find a connection with `conn_id`={conn_id}\n'
msg = msg.format(conn_id=args.conn_id)
print(msg)
return
except exc.MultipleResultsFound:
msg = ('\n\tFound more than one connection with ' +
'`conn_id`={conn_id}\n')
msg = msg.format(conn_id=args.conn_id)
print(msg)
return
else:
deleted_conn_id = to_delete.conn_id
session.delete(to_delete)
session.commit()
msg = '\n\tSuccessfully deleted `conn_id`={conn_id}\n'
msg = msg.format(conn_id=deleted_conn_id)
print(msg)
return
if args.add:
# Check that the conn_id and conn_uri args were passed to the command:
missing_args = list()
invalid_args = list()
if not args.conn_id:
missing_args.append('conn_id')
if args.conn_uri:
for arg in alternative_conn_specs:
if getattr(args, arg) is not None:
invalid_args.append(arg)
elif not args.conn_type:
missing_args.append('conn_uri or conn_type')
if missing_args:
msg = ('\n\tThe following args are required to add a connection:' +
' {missing!r}\n'.format(missing=missing_args))
print(msg)
if invalid_args:
msg = ('\n\tThe following args are not compatible with the ' +
'--add flag and --conn_uri flag: {invalid!r}\n')
msg = msg.format(invalid=invalid_args)
print(msg)
if missing_args or invalid_args:
return
if args.conn_uri:
new_conn = Connection(conn_id=args.conn_id, uri=args.conn_uri)
else:
new_conn = Connection(conn_id=args.conn_id,
conn_type=args.conn_type,
host=args.conn_host,
login=args.conn_login,
password=args.conn_password,
schema=args.conn_schema,
port=args.conn_port)
if args.conn_extra is not None:
new_conn.set_extra(args.conn_extra)
session = settings.Session()
if not (session.query(Connection)
.filter(Connection.conn_id == new_conn.conn_id).first()):
session.add(new_conn)
session.commit()
msg = '\n\tSuccessfully added `conn_id`={conn_id} : {uri}\n'
msg = msg.format(conn_id=new_conn.conn_id,
uri=args.conn_uri or
urlunparse((args.conn_type,
'{login}:{password}@{host}:{port}'
.format(login=args.conn_login or '',
password=args.conn_password or '',
host=args.conn_host or '',
port=args.conn_port or ''),
args.conn_schema or '', '', '', '')))
print(msg)
else:
msg = '\n\tA connection with `conn_id`={conn_id} already exists\n'
msg = msg.format(conn_id=new_conn.conn_id)
print(msg)
return
@cli_utils.action_logging
def flower(args):
broka = conf.get('celery', 'BROKER_URL')
address = '--address={}'.format(args.hostname)
port = '--port={}'.format(args.port)
api = ''
if args.broker_api:
api = '--broker_api=' + args.broker_api
url_prefix = ''
if args.url_prefix:
url_prefix = '--url-prefix=' + args.url_prefix
flower_conf = ''
if args.flower_conf:
flower_conf = '--conf=' + args.flower_conf
if args.daemon:
pid, stdout, stderr, log_file = setup_locations("flower",
args.pid,
args.stdout,
args.stderr,
args.log_file)
stdout = open(stdout, 'w+')
stderr = open(stderr, 'w+')
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
stdout=stdout,
stderr=stderr,
)
with ctx:
os.execvp("flower", ['flower', '-b',
broka, address, port, api, flower_conf, url_prefix])
stdout.close()
stderr.close()
else:
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
os.execvp("flower", ['flower', '-b',
broka, address, port, api, flower_conf, url_prefix])
@cli_utils.action_logging
def kerberos(args): # noqa
print(settings.HEADER)
import airflow.security.kerberos
if args.daemon:
pid, stdout, stderr, log_file = setup_locations("kerberos",
args.pid,
args.stdout,
args.stderr,
args.log_file)
stdout = open(stdout, 'w+')
stderr = open(stderr, 'w+')
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
stdout=stdout,
stderr=stderr,
)
with ctx:
airflow.security.kerberos.run()
stdout.close()
stderr.close()
else:
airflow.security.kerberos.run()
@cli_utils.action_logging
def create_user(args):
fields = {
'role': args.role,
'username': args.username,
'email': args.email,
'firstname': args.firstname,
'lastname': args.lastname,
}
empty_fields = [k for k, v in fields.items() if not v]
if empty_fields:
raise SystemExit('Required arguments are missing: {}.'.format(
', '.join(empty_fields)))
appbuilder = cached_appbuilder()
role = appbuilder.sm.find_role(args.role)
if not role:
raise SystemExit('{} is not a valid role.'.format(args.role))
if args.use_random_password:
password = ''.join(random.choice(string.printable) for _ in range(16))
elif args.password:
password = args.password
else:
password = getpass.getpass('Password:')
password_confirmation = getpass.getpass('Repeat for confirmation:')
if password != password_confirmation:
raise SystemExit('Passwords did not match!')
user = appbuilder.sm.add_user(args.username, args.firstname, args.lastname,
args.email, role, password)
if user:
print('{} user {} created.'.format(args.role, args.username))
else:
raise SystemExit('Failed to create user.')
Arg = namedtuple(
'Arg', ['flags', 'help', 'action', 'default', 'nargs', 'type', 'choices', 'metavar'])
Arg.__new__.__defaults__ = (None, None, None, None, None, None, None)
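# Assigning a tuple of Nones to Arg.__new__.__defaults__ makes every field
# after 'flags' optional, so the entries below can pass only the pieces they
# need (e.g. just the flags and a help string).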
class CLIFactory(object):
args = {
# Shared
'dag_id': Arg(("dag_id",), "The id of the dag"),
'task_id': Arg(("task_id",), "The id of the task"),
'execution_date': Arg(
("execution_date",), help="The execution date of the DAG",
type=parsedate),
'task_regex': Arg(
("-t", "--task_regex"),
"The regex to filter specific task_ids to backfill (optional)"),
'subdir': Arg(
("-sd", "--subdir"),
"File location or directory from which to look for the dag",
default=settings.DAGS_FOLDER),
'start_date': Arg(
("-s", "--start_date"), "Override start_date YYYY-MM-DD",
type=parsedate),
'end_date': Arg(
("-e", "--end_date"), "Override end_date YYYY-MM-DD",
type=parsedate),
'dry_run': Arg(
("-dr", "--dry_run"), "Perform a dry run", "store_true"),
'pid': Arg(
("--pid",), "PID file location",
nargs='?'),
'daemon': Arg(
("-D", "--daemon"), "Daemonize instead of running "
"in the foreground",
"store_true"),
'stderr': Arg(
("--stderr",), "Redirect stderr to this file"),
'stdout': Arg(
("--stdout",), "Redirect stdout to this file"),
'log_file': Arg(
("-l", "--log-file"), "Location of the log file"),
'yes': Arg(
("-y", "--yes"),
"Do not prompt to confirm reset. Use with care!",
"store_true",
default=False),
# backfill
'mark_success': Arg(
("-m", "--mark_success"),
"Mark jobs as succeeded without running them", "store_true"),
'verbose': Arg(
("-v", "--verbose"),
"Make logging output more verbose", "store_true"),
'local': Arg(
("-l", "--local"),
"Run the task using the LocalExecutor", "store_true"),
'donot_pickle': Arg(
("-x", "--donot_pickle"), (
"Do not attempt to pickle the DAG object to send over "
"to the workers, just tell the workers to run their version "
"of the code."),
"store_true"),
'bf_ignore_dependencies': Arg(
("-i", "--ignore_dependencies"),
(
"Skip upstream tasks, run only the tasks "
"matching the regexp. Only works in conjunction "
"with task_regex"),
"store_true"),
'bf_ignore_first_depends_on_past': Arg(
("-I", "--ignore_first_depends_on_past"),
(
"Ignores depends_on_past dependencies for the first "
"set of tasks only (subsequent executions in the backfill "
"DO respect depends_on_past)."),
"store_true"),
'pool': Arg(("--pool",), "Resource pool to use"),
'delay_on_limit': Arg(
("--delay_on_limit",),
help=("Amount of time in seconds to wait when the limit "
"on maximum active dag runs (max_active_runs) has "
"been reached before trying to execute a dag run "
"again."),
type=float,
default=1.0),
# list_tasks
'tree': Arg(("-t", "--tree"), "Tree view", "store_true"),
# list_dags
'report': Arg(
("-r", "--report"), "Show DagBag loading report", "store_true"),
# clear
'upstream': Arg(
("-u", "--upstream"), "Include upstream tasks", "store_true"),
'only_failed': Arg(
("-f", "--only_failed"), "Only failed jobs", "store_true"),
'only_running': Arg(
("-r", "--only_running"), "Only running jobs", "store_true"),
'downstream': Arg(
("-d", "--downstream"), "Include downstream tasks", "store_true"),
'no_confirm': Arg(
("-c", "--no_confirm"),
"Do not request confirmation", "store_true"),
'exclude_subdags': Arg(
("-x", "--exclude_subdags"),
"Exclude subdags", "store_true"),
'dag_regex': Arg(
("-dx", "--dag_regex"),
"Search dag_id as regex instead of exact string", "store_true"),
# trigger_dag
'run_id': Arg(("-r", "--run_id"), "Helps to identify this run"),
'conf': Arg(
('-c', '--conf'),
"JSON string that gets pickled into the DagRun's conf attribute"),
'exec_date': Arg(
("-e", "--exec_date"), help="The execution date of the DAG",
type=parsedate),
# pool
'pool_set': Arg(
("-s", "--set"),
nargs=3,
metavar=('NAME', 'SLOT_COUNT', 'POOL_DESCRIPTION'),
help="Set pool slot count and description, respectively"),
'pool_get': Arg(
("-g", "--get"),
metavar='NAME',
help="Get pool info"),
'pool_delete': Arg(
("-x", "--delete"),
metavar="NAME",
help="Delete a pool"),
# variables
'set': Arg(
("-s", "--set"),
nargs=2,
metavar=('KEY', 'VAL'),
help="Set a variable"),
'get': Arg(
("-g", "--get"),
metavar='KEY',
help="Get value of a variable"),
'default': Arg(
("-d", "--default"),
metavar="VAL",
default=None,
help="Default value returned if variable does not exist"),
'json': Arg(
("-j", "--json"),
help="Deserialize JSON variable",
action="store_true"),
'var_import': Arg(
("-i", "--import"),
metavar="FILEPATH",
help="Import variables from JSON file"),
'var_export': Arg(
("-e", "--export"),
metavar="FILEPATH",
help="Export variables to JSON file"),
'var_delete': Arg(
("-x", "--delete"),
metavar="KEY",
help="Delete a variable"),
# kerberos
'principal': Arg(
("principal",), "kerberos principal",
nargs='?', default=conf.get('kerberos', 'principal')),
'keytab': Arg(
("-kt", "--keytab"), "keytab",
nargs='?', default=conf.get('kerberos', 'keytab')),
# run
# TODO(aoen): "force" is a poor choice of name here since it implies it overrides
# all dependencies (not just past success), e.g. the ignore_depends_on_past
# dependency. This flag should be deprecated and renamed to 'ignore_ti_state' and
# the "ignore_all_dependencies" command should be called the"force" command
# instead.
'interactive': Arg(
('-int', '--interactive'),
help='Do not capture standard output and error streams '
'(useful for interactive debugging)',
action='store_true'),
'force': Arg(
("-f", "--force"),
"Ignore previous task instance state, rerun regardless if task already "
"succeeded/failed",
"store_true"),
'raw': Arg(("-r", "--raw"), argparse.SUPPRESS, "store_true"),
'ignore_all_dependencies': Arg(
("-A", "--ignore_all_dependencies"),
"Ignores all non-critical dependencies, including ignore_ti_state and "
"ignore_task_deps",
"store_true"),
# TODO(aoen): ignore_dependencies is a poor choice of name here because it is too
# vague (e.g. a task being in the appropriate state to be run is also a dependency
# but is not ignored by this flag), the name 'ignore_task_dependencies' is
# slightly better (as it ignores all dependencies that are specific to the task),
# so deprecate the old command name and use this instead.
'ignore_dependencies': Arg(
("-i", "--ignore_dependencies"),
"Ignore task-specific dependencies, e.g. upstream, depends_on_past, and "
"retry delay dependencies",
"store_true"),
'ignore_depends_on_past': Arg(
("-I", "--ignore_depends_on_past"),
"Ignore depends_on_past dependencies (but respect "
"upstream dependencies)",
"store_true"),
'ship_dag': Arg(
("--ship_dag",),
"Pickles (serializes) the DAG and ships it to the worker",
"store_true"),
'pickle': Arg(
("-p", "--pickle"),
"Serialized pickle object of the entire dag (used internally)"),
'job_id': Arg(("-j", "--job_id"), argparse.SUPPRESS),
'cfg_path': Arg(
("--cfg_path",), "Path to config file to use instead of airflow.cfg"),
# webserver
'port': Arg(
("-p", "--port"),
default=conf.get('webserver', 'WEB_SERVER_PORT'),
type=int,
help="The port on which to run the server"),
'ssl_cert': Arg(
("--ssl_cert",),
default=conf.get('webserver', 'WEB_SERVER_SSL_CERT'),
help="Path to the SSL certificate for the webserver"),
'ssl_key': Arg(
("--ssl_key",),
default=conf.get('webserver', 'WEB_SERVER_SSL_KEY'),
help="Path to the key to use with the SSL certificate"),
'workers': Arg(
("-w", "--workers"),
default=conf.get('webserver', 'WORKERS'),
type=int,
help="Number of workers to run the webserver on"),
'workerclass': Arg(
("-k", "--workerclass"),
default=conf.get('webserver', 'WORKER_CLASS'),
choices=['sync', 'eventlet', 'gevent', 'tornado'],
help="The worker class to use for Gunicorn"),
'worker_timeout': Arg(
("-t", "--worker_timeout"),
default=conf.get('webserver', 'WEB_SERVER_WORKER_TIMEOUT'),
type=int,
help="The timeout for waiting on webserver workers"),
'hostname': Arg(
("-hn", "--hostname"),
default=conf.get('webserver', 'WEB_SERVER_HOST'),
help="Set the hostname on which to run the web server"),
'debug': Arg(
("-d", "--debug"),
"Use the server that ships with Flask in debug mode",
"store_true"),
'access_logfile': Arg(
("-A", "--access_logfile"),
default=conf.get('webserver', 'ACCESS_LOGFILE'),
help="The logfile to store the webserver access log. Use '-' to print to "
"stderr."),
'error_logfile': Arg(
("-E", "--error_logfile"),
default=conf.get('webserver', 'ERROR_LOGFILE'),
help="The logfile to store the webserver error log. Use '-' to print to "
"stderr."),
# scheduler
'dag_id_opt': Arg(("-d", "--dag_id"), help="The id of the dag to run"),
'run_duration': Arg(
("-r", "--run-duration"),
default=None, type=int,
help="Set number of seconds to execute before exiting"),
'num_runs': Arg(
("-n", "--num_runs"),
default=-1, type=int,
help="Set the number of runs to execute before exiting"),
# worker
'do_pickle': Arg(
("-p", "--do_pickle"),
default=False,
help=(
"Attempt to pickle the DAG object to send over "
"to the workers, instead of letting workers run their version "
"of the code."),
action="store_true"),
'queues': Arg(
("-q", "--queues"),
help="Comma delimited list of queues to serve",
default=conf.get('celery', 'DEFAULT_QUEUE')),
'concurrency': Arg(
("-c", "--concurrency"),
type=int,
help="The number of worker processes",
default=conf.get('celery', 'worker_concurrency')),
'celery_hostname': Arg(
("-cn", "--celery_hostname"),
help=("Set the hostname of celery worker "
"if you have multiple workers on a single machine.")),
# flower
'broker_api': Arg(("-a", "--broker_api"), help="Broker api"),
'flower_hostname': Arg(
("-hn", "--hostname"),
default=conf.get('celery', 'FLOWER_HOST'),
help="Set the hostname on which to run the server"),
'flower_port': Arg(
("-p", "--port"),
default=conf.get('celery', 'FLOWER_PORT'),
type=int,
help="The port on which to run the server"),
'flower_conf': Arg(
("-fc", "--flower_conf"),
help="Configuration file for flower"),
'flower_url_prefix': Arg(
("-u", "--url_prefix"),
default=conf.get('celery', 'FLOWER_URL_PREFIX'),
help="URL prefix for Flower"),
'task_params': Arg(
("-tp", "--task_params"),
help="Sends a JSON params dict to the task"),
# connections
'list_connections': Arg(
('-l', '--list'),
help='List all connections',
action='store_true'),
'add_connection': Arg(
('-a', '--add'),
help='Add a connection',
action='store_true'),
'delete_connection': Arg(
('-d', '--delete'),
help='Delete a connection',
action='store_true'),
'conn_id': Arg(
('--conn_id',),
help='Connection id, required to add/delete a connection',
type=str),
'conn_uri': Arg(
('--conn_uri',),
help='Connection URI, required to add a connection without conn_type',
type=str),
'conn_type': Arg(
('--conn_type',),
help='Connection type, required to add a connection without conn_uri',
type=str),
'conn_host': Arg(
('--conn_host',),
help='Connection host, optional when adding a connection',
type=str),
'conn_login': Arg(
('--conn_login',),
help='Connection login, optional when adding a connection',
type=str),
'conn_password': Arg(
('--conn_password',),
help='Connection password, optional when adding a connection',
type=str),
'conn_schema': Arg(
('--conn_schema',),
help='Connection schema, optional when adding a connection',
type=str),
'conn_port': Arg(
('--conn_port',),
help='Connection port, optional when adding a connection',
type=str),
'conn_extra': Arg(
('--conn_extra',),
help='Connection `Extra` field, optional when adding a connection',
type=str),
# create_user
'role': Arg(
('-r', '--role',),
help='Role of the user. Existing roles include Admin, '
'User, Op, Viewer, and Public',
type=str),
'firstname': Arg(
('-f', '--firstname',),
help='First name of the user',
type=str),
'lastname': Arg(
('-l', '--lastname',),
help='Last name of the user',
type=str),
'email': Arg(
('-e', '--email',),
help='Email of the user',
type=str),
'username': Arg(
('-u', '--username',),
help='Username of the user',
type=str),
'password': Arg(
('-p', '--password',),
help='Password of the user',
type=str),
'use_random_password': Arg(
('--use_random_password',),
help='Do not prompt for password. Use random string instead',
default=False,
action='store_true'),
}
subparsers = (
{
'func': backfill,
'help': "Run subsections of a DAG for a specified date range",
'args': (
'dag_id', 'task_regex', 'start_date', 'end_date',
'mark_success', 'local', 'donot_pickle',
'bf_ignore_dependencies', 'bf_ignore_first_depends_on_past',
'subdir', 'pool', 'delay_on_limit', 'dry_run', 'verbose', 'conf'
)
}, {
'func': list_tasks,
'help': "List the tasks within a DAG",
'args': ('dag_id', 'tree', 'subdir'),
}, {
'func': clear,
'help': "Clear a set of task instance, as if they never ran",
'args': (
'dag_id', 'task_regex', 'start_date', 'end_date', 'subdir',
'upstream', 'downstream', 'no_confirm', 'only_failed',
'only_running', 'exclude_subdags', 'dag_regex'),
}, {
'func': pause,
'help': "Pause a DAG",
'args': ('dag_id', 'subdir'),
}, {
'func': unpause,
'help': "Resume a paused DAG",
'args': ('dag_id', 'subdir'),
}, {
'func': trigger_dag,
'help': "Trigger a DAG run",
'args': ('dag_id', 'subdir', 'run_id', 'conf', 'exec_date'),
}, {
'func': delete_dag,
'help': "Delete all DB records related to the specified DAG",
'args': ('dag_id', 'yes',),
}, {
'func': pool,
'help': "CRUD operations on pools",
"args": ('pool_set', 'pool_get', 'pool_delete'),
}, {
'func': variables,
'help': "CRUD operations on variables",
"args": ('set', 'get', 'json', 'default',
'var_import', 'var_export', 'var_delete'),
}, {
'func': kerberos,
'help': "Start a kerberos ticket renewer",
'args': ('principal', 'keytab', 'pid',
'daemon', 'stdout', 'stderr', 'log_file'),
}, {
'func': render,
'help': "Render a task instance's template(s)",
'args': ('dag_id', 'task_id', 'execution_date', 'subdir'),
}, {
'func': run,
'help': "Run a single task instance",
'args': (
'dag_id', 'task_id', 'execution_date', 'subdir',
'mark_success', 'force', 'pool', 'cfg_path',
'local', 'raw', 'ignore_all_dependencies', 'ignore_dependencies',
'ignore_depends_on_past', 'ship_dag', 'pickle', 'job_id', 'interactive',),
}, {
'func': initdb,
'help': "Initialize the metadata database",
'args': tuple(),
}, {
'func': list_dags,
'help': "List all the DAGs",
'args': ('subdir', 'report'),
}, {
'func': dag_state,
'help': "Get the status of a dag run",
'args': ('dag_id', 'execution_date', 'subdir'),
}, {
'func': task_failed_deps,
'help': (
"Returns the unmet dependencies for a task instance from the perspective "
"of the scheduler. In other words, why a task instance doesn't get "
"scheduled and then queued by the scheduler, and then run by an "
"executor)."),
'args': ('dag_id', 'task_id', 'execution_date', 'subdir'),
}, {
'func': task_state,
'help': "Get the status of a task instance",
'args': ('dag_id', 'task_id', 'execution_date', 'subdir'),
}, {
'func': serve_logs,
'help': "Serve logs generate by worker",
'args': tuple(),
}, {
'func': test,
'help': (
"Test a task instance. This will run a task without checking for "
"dependencies or recording its state in the database."),
'args': (
'dag_id', 'task_id', 'execution_date', 'subdir', 'dry_run',
'task_params'),
}, {
'func': webserver,
'help': "Start a Airflow webserver instance",
'args': ('port', 'workers', 'workerclass', 'worker_timeout', 'hostname',
'pid', 'daemon', 'stdout', 'stderr', 'access_logfile',
'error_logfile', 'log_file', 'ssl_cert', 'ssl_key', 'debug'),
}, {
'func': resetdb,
'help': "Burn down and rebuild the metadata database",
'args': ('yes',),
}, {
'func': upgradedb,
'help': "Upgrade the metadata database to latest version",
'args': tuple(),
}, {
'func': scheduler,
'help': "Start a scheduler instance",
'args': ('dag_id_opt', 'subdir', 'run_duration', 'num_runs',
'do_pickle', 'pid', 'daemon', 'stdout', 'stderr',
'log_file'),
}, {
'func': worker,
'help': "Start a Celery worker node",
'args': ('do_pickle', 'queues', 'concurrency', 'celery_hostname',
'pid', 'daemon', 'stdout', 'stderr', 'log_file'),
}, {
'func': flower,
'help': "Start a Celery Flower",
'args': ('flower_hostname', 'flower_port', 'flower_conf', 'flower_url_prefix',
'broker_api', 'pid', 'daemon', 'stdout', 'stderr', 'log_file'),
}, {
'func': version,
'help': "Show the version",
'args': tuple(),
}, {
'func': connections,
'help': "List/Add/Delete connections",
'args': ('list_connections', 'add_connection', 'delete_connection',
'conn_id', 'conn_uri', 'conn_extra') + tuple(alternative_conn_specs),
}, {
'func': create_user,
'help': "Create an admin account",
'args': ('role', 'username', 'email', 'firstname', 'lastname',
'password', 'use_random_password'),
},
)
subparsers_dict = {sp['func'].__name__: sp for sp in subparsers}
dag_subparsers = (
'list_tasks', 'backfill', 'test', 'run', 'pause', 'unpause')
@classmethod
def get_parser(cls, dag_parser=False):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
help='sub-command help', dest='subcommand')
subparsers.required = True
subparser_list = cls.dag_subparsers if dag_parser else cls.subparsers_dict.keys()
for sub in subparser_list:
sub = cls.subparsers_dict[sub]
sp = subparsers.add_parser(sub['func'].__name__, help=sub['help'])
for arg in sub['args']:
if 'dag_id' in arg and dag_parser:
continue
arg = cls.args[arg]
kwargs = {
f: getattr(arg, f)
for f in arg._fields if f != 'flags' and getattr(arg, f)}
sp.add_argument(*arg.flags, **kwargs)
sp.set_defaults(func=sub['func'])
return parser
def get_parser():
return CLIFactory.get_parser()
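# --- Illustrative note (not part of the original module) ---
# The tables above are purely declarative: each subparser entry names the
# callable in 'func' and the shared Arg definitions it accepts, and
# get_parser() wires them into argparse, binding the callable via
# set_defaults(func=...). A console entry point would typically drive it
# roughly like this (a sketch; the real airflow launcher script may differ):
#
#     parser = CLIFactory.get_parser()
#     args = parser.parse_args()
#     args.func(args)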
| {
"content_hash": "fb53183d9a96712672cc45294c8e26b6",
"timestamp": "",
"source": "github",
"line_count": 1831,
"max_line_length": 90,
"avg_line_length": 36.73020207536865,
"alnum_prop": 0.5264746554057068,
"repo_name": "yk5/incubator-airflow",
"id": "10c104a607a8fe6bd345f786d4552ef68c944ebf",
"size": "68367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/bin/cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "274912"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "3877246"
},
{
"name": "Shell",
"bytes": "47007"
}
],
"symlink_target": ""
} |
import sys
import unittest
from max_stack import MaxStack
from max_stack2 import MaxStack2
__author__ = 'Hagay Onn'
__version__ = '1.0'
class StackTestCase(unittest.TestCase):
def setUp(self):
# choose one below :-)
self.testStack = MaxStack() # more space effective
#self.testStack = MaxStack2()
def test_init_stack_state(self):
self.assertEqual(self.testStack.get_size(), 0, 'incorrect get_size() on init')
self.assertEqual(self.testStack.get_last(), None, 'incorrect get_last() on init')
self.assertEqual(self.testStack.get_max(), None, 'incorrect get_max() on init')
self.assertEqual(self.testStack.pop(), None, 'incorrect pop() on init')
def test_stack_functionality(self):
self.testStack.push(3)
self.testStack.push(7)
self.testStack.push(12)
self.assertEqual(self.testStack.get_max(), 12, 'incorrect get_max() Stop-Point 1')
self.assertEqual(self.testStack.get_size(), 3, 'incorrect get_size() Stop-Point 1')
self.assertEqual(self.testStack.get_last(), 12, 'incorrect get_last() Stop-Point 1')
self.assertEqual(self.testStack.pop(), 12, 'incorrect pop() Stop-Point 1')
self.assertEqual(self.testStack.get_max(), 7, 'incorrect get_max() Stop-Point 2')
self.testStack.push(9)
self.assertEqual(self.testStack.get_max(), 9, 'incorrect get_max() Stop-Point 3')
self.testStack.push(-5)
self.assertEqual(self.testStack.get_max(), 9, 'incorrect get_max() Stop-Point 4')
self.testStack.push(208)
self.assertEqual(self.testStack.get_max(), 208, 'incorrect get_max() Stop-Point 5')
self.testStack.push(2)
self.assertEqual(self.testStack.get_size(), 6, 'incorrect get_size() Stop-Point 2')
self.assertEqual(self.testStack.get_last(), 2, 'incorrect get_last() Stop-Point 2')
self.assertEqual(self.testStack.pop(), 2, 'incorrect pop() Stop-Point 2')
self.testStack.pop() # pop 208 out
self.assertEqual(self.testStack.get_max(), 9, 'incorrect get_max() Stop-Point 6')
self.assertEqual(self.testStack.get_size(), 4, 'incorrect get_size() Stop-Point 3')
self.assertEqual(self.testStack.get_last(), -5, 'incorrect get_last() Stop-Point 3')
self.assertEqual(self.testStack.pop(), -5, 'incorrect pop() Stop-Point 3')
self.testStack.push(222222222224)
self.assertEqual(self.testStack.get_max(), 222222222224, 'incorrect get_max() Stop-Point 7')
self.testStack.push(6)
self.assertEqual(self.testStack.get_size(), 5, 'incorrect get_size() Stop-Point 4')
self.assertEqual(self.testStack.get_last(), 6, 'incorrect get_last() Stop-Point 4')
self.assertEqual(self.testStack.pop(), 6, 'incorrect pop() Stop-Point 4')
self.testStack.pop()
self.testStack.pop()
self.testStack.pop()
self.testStack.pop()
self.assertEqual(self.testStack.get_size(), 0, 'incorrect get_size() Stop-Point 5')
self.assertEqual(self.testStack.get_last(), None, 'incorrect get_last() Stop-Point 5')
self.assertEqual(self.testStack.get_max(), None, 'incorrect get_max() Stop-Point 8')
self.assertEqual(self.testStack.pop(), None, 'incorrect pop() Stop-Point 5')
self.testStack.pop()
self.assertEqual(self.testStack.pop(), None, 'incorrect pop() Stop-Point 6')
self.assertEqual(self.testStack.get_last(), None, 'incorrect get_last() Stop-Point 6')
self.assertEqual(self.testStack.get_max(), None, 'incorrect get_max() Stop-Point 9')
pass
@unittest.skipUnless(sys.platform.startswith("ubuntu"), "requires Ubuntu machine")
def test_stack_on_ubuntu(self):
"""just playing with unittest options to verify kill"""
self.assertEqual(True, False, 'incorrect Stack on Ubuntu machine ;-)')
pass
def tearDown(self):
self.testStack = None
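# --- Illustrative sketch (not from this repository) ---
# The MaxStack/MaxStack2 classes imported above are defined elsewhere; the
# class below is only a minimal example of one way to satisfy the interface
# these tests exercise (push / pop / get_size / get_last / get_max, all O(1)),
# using a second list that mirrors the running maximum. The real
# implementations under test may differ (MaxStack is noted as more
# space-efficient).
class _SketchMaxStack(object):
    def __init__(self):
        self._items = []
        self._maxes = []  # running maximum alongside each pushed item

    def push(self, value):
        self._items.append(value)
        self._maxes.append(value if not self._maxes
                           else max(value, self._maxes[-1]))

    def pop(self):
        if not self._items:
            return None
        self._maxes.pop()
        return self._items.pop()

    def get_size(self):
        return len(self._items)

    def get_last(self):
        return self._items[-1] if self._items else None

    def get_max(self):
        return self._maxes[-1] if self._maxes else None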
| {
"content_hash": "a880701b0da2eb377bf0944e55446037",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 100,
"avg_line_length": 54.229729729729726,
"alnum_prop": 0.6459008223274358,
"repo_name": "hagayo/MaxStack_o1_Python",
"id": "54359d253ddaa2b4a9cc35f06dc9c1546d8f4e00",
"size": "4183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/test_stack_max_functionality.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16066"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
import random
import tempfile
import unittest
from chainer import serializers
from chainer import testing
from chainer.testing import condition
from chainer import training
@testing.parameterize(
# iteration
{
'iter_per_epoch': 5, 'interval': (2, 'iteration'), 'resume': 4,
'expected': [False, True, False, True, False, True, False]},
# basic epoch
{
'iter_per_epoch': 1, 'interval': (3, 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, True, False]},
# fractional epoch
{
'iter_per_epoch': 2, 'interval': (1.5, 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, True, False]},
# unaligned epoch
{
'iter_per_epoch': 2.5, 'interval': (1, 'epoch'), 'resume': 3,
'expected': [False, False, True, False, True, False, False]},
# tiny epoch
{
'iter_per_epoch': 0.5, 'interval': (1, 'epoch'), 'resume': 4,
'expected': [True, True, True, True, True, True, True]},
)
class TestIntervalTrigger(unittest.TestCase):
def test_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.IntervalTrigger(*self.interval)
# before the first iteration, trigger should be False
for expected in [False] + self.expected:
self.assertEqual(trigger(trainer), expected)
trainer.updater.update()
def test_resumed_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.IntervalTrigger(*self.interval)
for expected in self.expected[:self.resume]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
serializers.save_npz(f.name, trigger)
trigger = training.triggers.IntervalTrigger(*self.interval)
serializers.load_npz(f.name, trigger)
for expected in self.expected[self.resume:]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
@condition.repeat(10)
def test_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.IntervalTrigger(*self.interval)
accumulated = False
# before the first iteration, trigger should be False
for expected in [False] + self.expected:
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
trainer.updater.update()
@condition.repeat(10)
def test_resumed_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
accumulated = False
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.IntervalTrigger(*self.interval)
for expected in self.expected[:self.resume]:
trainer.updater.update()
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
serializers.save_npz(f.name, trigger)
trigger = training.triggers.IntervalTrigger(*self.interval)
serializers.load_npz(f.name, trigger)
for expected in self.expected[self.resume:]:
trainer.updater.update()
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
def test_resumed_trigger_backward_compat(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.IntervalTrigger(*self.interval)
for expected in self.expected[:self.resume]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
# old version does not save anything
np.savez(f, dummy=0)
trigger = training.triggers.IntervalTrigger(*self.interval)
serializers.load_npz(f.name, trigger)
for expected in self.expected[self.resume:]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
testing.run_module(__name__, __file__)
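# --- Illustrative note (not part of the original test module) ---
# Outside these tests, an IntervalTrigger is what backs the common
# trainer.extend(..., trigger=(n, 'epoch')) idiom; the parameterized cases
# above check that it keeps firing on schedule even after being serialized
# and resumed. A minimal sketch, assuming an already configured trainer:
#
#     from chainer.training import extensions
#     trainer.extend(extensions.LogReport(),
#                    trigger=training.triggers.IntervalTrigger(1, 'epoch'))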
| {
"content_hash": "e977dcf5de94f49fbff2f39e2999d7e6",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 71,
"avg_line_length": 41.66386554621849,
"alnum_prop": 0.6192012908430818,
"repo_name": "aonotas/chainer",
"id": "ea13f5f7d4b90e04beaefb0f865f6ce73ff1f0f4",
"size": "4958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/training_tests/triggers_tests/test_interval_trigger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3357320"
}
],
"symlink_target": ""
} |
"""Tests for volume and images."""
import datetime
import mock
import os
import tempfile
from oslo_utils import imageutils
from oslo_utils import units
from cinder import db
from cinder import exception
from cinder.message import message_field
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder.tests import fake_driver
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
import cinder.volume
from cinder.volume import manager as vol_manager
QUOTAS = quota.QUOTAS
NON_EXISTENT_IMAGE_ID = '003f540f-ec6b-4293-a3f9-7c68646b0f5c'
class FakeImageService(object):
def __init__(self, db_driver=None, image_service=None):
pass
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
def fake_local_path(self, volume):
return self.dst_path
def setUp(self):
super(CopyVolumeToImageTestCase, self).setUp()
self.dst_fd, self.dst_path = tempfile.mkstemp()
self.addCleanup(os.unlink, self.dst_path)
os.close(self.dst_fd)
self.mock_object(self.volume.driver, 'local_path',
self.fake_local_path)
self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
self.image_meta = {
'id': self.image_id,
'container_format': 'bare',
'disk_format': 'raw'
}
self.volume_id = fake.VOLUME_ID
self.addCleanup(db.volume_destroy, self.context, self.volume_id)
self.volume_attrs = {
'id': self.volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'uploading',
'host': 'dummy'
}
def test_copy_volume_to_image_status_available(self):
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_over_image_quota(self):
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
volume = db.volume_create(self.context, self.volume_attrs)
with mock.patch.object(self.volume.driver,
'copy_volume_to_image') as driver_copy_mock:
driver_copy_mock.side_effect = exception.ImageLimitExceeded
# test with image not in queued state
self.assertRaises(exception.ImageLimitExceeded,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
# Assert a user message was created
self.volume.message_api.create.assert_called_once_with(
self.context,
message_field.Action.COPY_VOLUME_TO_IMAGE,
resource_uuid=volume['id'],
exception=mock.ANY,
detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME)
def test_copy_volume_to_image_instance_deleted(self):
# During uploading volume to image if instance is deleted,
# volume should be in available status.
self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# Creating volume testdata
self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
'45b1161abb02'
db.volume_create(self.context, self.volume_attrs)
method = 'volume_update_status_based_on_attachment'
with mock.patch.object(db, method,
wraps=getattr(db, method)) as mock_update:
# Start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
            # Check 'volume_update_status_based_on_attachment'
            # is called exactly once
self.assertEqual(1, mock_update.call_count)
# Check volume status has changed to available because
# instance is deleted
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_status_use(self):
self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# creating volume testdata
db.volume_create(self.context, self.volume_attrs)
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_exception(self):
self.image_meta['id'] = NON_EXISTENT_IMAGE_ID
# creating volume testdata
self.volume_attrs['status'] = 'in-use'
db.volume_create(self.context, self.volume_attrs)
# start test
self.assertRaises(exception.ImageNotFound,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
def test_copy_volume_to_image_driver_not_initialized(self):
# creating volume testdata
db.volume_create(self.context, self.volume_attrs)
# set initialized to False
self.volume.driver._initialized = False
# start test
self.assertRaises(exception.DriverNotInitialized,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume.status)
def test_copy_volume_to_image_driver_exception(self):
self.image_meta['id'] = self.image_id
image_service = fake_image.FakeImageService()
# create new image in queued state
queued_image_id = 'd5133f15-f753-41bd-920a-06b8c49275d9'
queued_image_meta = image_service.show(self.context, self.image_id)
queued_image_meta['id'] = queued_image_id
queued_image_meta['status'] = 'queued'
image_service.create(self.context, queued_image_meta)
# create new image in saving state
saving_image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
saving_image_meta = image_service.show(self.context, self.image_id)
saving_image_meta['id'] = saving_image_id
saving_image_meta['status'] = 'saving'
image_service.create(self.context, saving_image_meta)
# create volume
self.volume_attrs['status'] = 'available'
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
with mock.patch.object(self.volume.driver,
'copy_volume_to_image') as driver_copy_mock:
driver_copy_mock.side_effect = exception.VolumeDriverException(
"Error")
# test with image not in queued state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
# Make sure we are passing an OVO instance and not an ORM instance
# to the driver
self.assertIsInstance(driver_copy_mock.call_args[0][1],
objects.Volume)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# image shouldn't be deleted if it is not in queued state
image_service.show(self.context, self.image_id)
# test with image in queued state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
queued_image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# queued image should be deleted
self.assertRaises(exception.ImageNotFound,
image_service.show,
self.context,
queued_image_id)
# test with image in saving state
self.assertRaises(exception.VolumeDriverException,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
saving_image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
# image in saving state should be deleted
self.assertRaises(exception.ImageNotFound,
image_service.show,
self.context,
saving_image_id)
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(vol_manager.VolumeManager, 'create_volume')
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
'copy_volume_to_image')
def _test_copy_volume_to_image_with_image_volume(
self, mock_copy, mock_create, mock_quota_commit,
mock_quota_reserve):
self.flags(glance_api_version=2)
self.volume.driver.configuration.image_upload_use_cinder_backend = True
self.addCleanup(fake_image.FakeImageService_reset)
image_service = fake_image.FakeImageService()
def add_location_wrapper(ctx, id, uri, metadata):
try:
volume = db.volume_get(ctx, id)
self.assertEqual(ctx.project_id,
volume['metadata']['image_owner'])
except exception.VolumeNotFound:
pass
return image_service.add_location_orig(ctx, id, uri, metadata)
image_service.add_location_orig = image_service.add_location
image_service.add_location = add_location_wrapper
image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2'
self.image_meta['id'] = image_id
self.image_meta['status'] = 'queued'
image_service.create(self.context, self.image_meta)
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
def fake_create(context, volume, **kwargs):
db.volume_update(context, volume.id, {'status': 'available'})
mock_create.side_effect = fake_create
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual('available', volume['status'])
        # return the created image
image = image_service.show(self.context, image_id)
image_service.delete(self.context, image_id)
return image
def test_copy_volume_to_image_with_image_volume(self):
image = self._test_copy_volume_to_image_with_image_volume()
self.assertTrue(image['locations'][0]['url'].startswith('cinder://'))
def test_copy_volume_to_image_with_image_volume_qcow2(self):
self.image_meta['disk_format'] = 'qcow2'
image = self._test_copy_volume_to_image_with_image_volume()
self.assertNotIn('locations', image)
@mock.patch.object(vol_manager.VolumeManager, 'delete_volume')
@mock.patch.object(fake_image._FakeImageService, 'add_location',
side_effect=exception.Invalid)
def test_copy_volume_to_image_with_image_volume_failure(
self, mock_add_location, mock_delete):
image = self._test_copy_volume_to_image_with_image_volume()
self.assertNotIn('locations', image)
self.assertTrue(mock_delete.called)
class ImageVolumeCacheTestCase(base.BaseVolumeTestCase):
def setUp(self):
super(ImageVolumeCacheTestCase, self).setUp()
self.volume.driver.set_initialized()
@mock.patch('oslo_utils.importutils.import_object')
def test_cache_configs(self, mock_import_object):
opts = {
'image_volume_cache_enabled': True,
'image_volume_cache_max_size_gb': 100,
'image_volume_cache_max_count': 20
}
def conf_get(option):
if option in opts:
return opts[option]
else:
return None
mock_driver = mock.Mock()
mock_driver.configuration.safe_get.side_effect = conf_get
mock_driver.configuration.extra_capabilities = 'null'
def import_obj(*args, **kwargs):
return mock_driver
mock_import_object.side_effect = import_obj
manager = vol_manager.VolumeManager(volume_driver=mock_driver)
self.assertIsNotNone(manager)
self.assertIsNotNone(manager.image_volume_cache)
self.assertEqual(100, manager.image_volume_cache.max_cache_size_gb)
self.assertEqual(20, manager.image_volume_cache.max_cache_size_count)
def test_delete_image_volume(self):
volume_params = {
'status': 'creating',
'host': 'some_host',
'cluster_name': 'some_cluster',
'size': 1
}
volume_api = cinder.volume.api.API()
volume = tests_utils.create_volume(self.context, **volume_params)
volume.status = 'available'
volume.save()
image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
db.image_volume_cache_create(self.context,
volume['host'],
volume_params['cluster_name'],
image_id,
datetime.datetime.utcnow(),
volume['id'],
volume['size'])
volume_api.delete(self.context, volume)
entry = db.image_volume_cache_get_by_volume_id(self.context,
volume['id'])
self.assertIsNone(entry)
def test_delete_volume_with_keymanager_exception(self):
volume_params = {
'host': 'some_host',
'size': 1
}
volume_api = cinder.volume.api.API()
volume = tests_utils.create_volume(self.context, **volume_params)
with mock.patch.object(
volume_api.key_manager, 'delete') as key_del_mock:
key_del_mock.side_effect = Exception("Key not found")
volume_api.delete(self.context, volume)
class ImageVolumeTestCases(base.BaseVolumeTestCase):
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_cloned_volume')
@mock.patch('cinder.quota.QUOTAS.rollback')
@mock.patch('cinder.quota.QUOTAS.commit')
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
def test_clone_image_volume(self, mock_reserve, mock_commit,
mock_rollback, mock_cloned_volume):
vol = tests_utils.create_volume(self.context,
**self.volume_params)
# unnecessary attributes should be removed from image volume
vol.consistencygroup = None
result = self.volume._clone_image_volume(self.context, vol,
{'id': fake.VOLUME_ID})
self.assertNotEqual(False, result)
mock_reserve.assert_called_once_with(self.context, volumes=1,
gigabytes=vol.size)
mock_commit.assert_called_once_with(self.context, ["RESERVATION"],
project_id=vol.project_id)
@mock.patch('cinder.quota.QUOTAS.rollback')
@mock.patch('cinder.quota.QUOTAS.commit')
@mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
def test_clone_image_volume_creation_failure(self, mock_reserve,
mock_commit, mock_rollback):
vol = tests_utils.create_volume(self.context, **self.volume_params)
with mock.patch.object(objects, 'Volume', side_effect=ValueError):
self.assertFalse(self.volume._clone_image_volume(
self.context, vol, {'id': fake.VOLUME_ID}))
mock_reserve.assert_called_once_with(self.context, volumes=1,
gigabytes=vol.size)
mock_rollback.assert_called_once_with(self.context, ["RESERVATION"])
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_cloned_status_available(
self, mock_qemu_info):
"""Test create volume from image via cloning.
Verify that after cloning image to volume, it is in available
state and is bootable.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
volume = self._create_volume_from_image()
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_not_cloned_status_available(
self, mock_qemu_info):
"""Test create volume from image via full copy.
Verify that after copying image to volume, it is in available
state and is bootable.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
volume = self._create_volume_from_image(fakeout_clone_image=True)
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume)
def test_create_volume_from_image_exception(self):
"""Test create volume from a non-existing image.
        Verify that when a volume is created from a non-existent image, its
        status is 'error' and it is not bootable.
"""
dst_fd, dst_path = tempfile.mkstemp()
os.close(dst_fd)
self.mock_object(self.volume.driver, 'local_path', lambda x: dst_path)
# creating volume testdata
kwargs = {'display_description': 'Test Desc',
'size': 20,
'availability_zone': 'fake_availability_zone',
'status': 'creating',
'attach_status': fields.VolumeAttachStatus.DETACHED,
'host': 'dummy'}
volume = objects.Volume(context=self.context, **kwargs)
volume.create()
self.assertRaises(exception.ImageNotFound,
self.volume.create_volume,
self.context,
volume,
{'image_id': NON_EXISTENT_IMAGE_ID})
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual("error", volume['status'])
self.assertFalse(volume['bootable'])
# cleanup
volume.destroy()
os.unlink(dst_path)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_copy_exception_rescheduling(
self, mock_qemu_info):
"""Test create volume with ImageCopyFailure
This exception should not trigger rescheduling and allocated_capacity
        should be incremented, so we assert that here.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
def fake_copy_image_to_volume(context, volume, image_service,
image_id):
raise exception.ImageCopyFailure()
self.mock_object(self.volume.driver, 'copy_image_to_volume',
fake_copy_image_to_volume)
mock_delete = self.mock_object(self.volume.driver, 'delete_volume')
self.assertRaises(exception.ImageCopyFailure,
self._create_volume_from_image)
# NOTE(dulek): Rescheduling should not occur, so lets assert that
# allocated_capacity is incremented.
self.assertDictEqual(self.volume.stats['pools'],
{'_pool0': {'allocated_capacity_gb': 1}})
# NOTE(dulek): As we haven't rescheduled, make sure no delete_volume
# was called.
self.assertFalse(mock_delete.called)
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.utils.brick_get_connector')
@mock.patch('cinder.volume.driver.BaseVD.secure_file_operations_enabled')
@mock.patch('cinder.volume.driver.BaseVD._detach_volume')
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_unavailable(
self, mock_qemu_info, mock_detach, mock_secure, *args):
"""Test create volume with ImageCopyFailure
We'll raise an exception inside _connect_device after volume has
already been attached to confirm that it detaches the volume.
"""
mock_secure.side_effect = NameError
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume
bound_copy_method = unbound_copy_method.__get__(self.volume.driver)
with mock.patch.object(self.volume.driver, 'copy_image_to_volume',
side_effect=bound_copy_method):
self.assertRaises(exception.ImageCopyFailure,
self._create_volume_from_image,
fakeout_copy_image_to_volume=False)
# We must have called detach method.
self.assertEqual(1, mock_detach.call_count)
@mock.patch('cinder.utils.brick_get_connector_properties')
@mock.patch('cinder.utils.brick_get_connector')
@mock.patch('cinder.volume.driver.BaseVD._connect_device')
@mock.patch('cinder.volume.driver.BaseVD._detach_volume')
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_unavailable_no_attach_info(
self, mock_qemu_info, mock_detach, mock_connect, *args):
"""Test create volume with ImageCopyFailure
We'll raise an exception on _connect_device call to confirm that it
detaches the volume even if the exception doesn't have attach_info.
"""
mock_connect.side_effect = NameError
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume
bound_copy_method = unbound_copy_method.__get__(self.volume.driver)
with mock.patch.object(self.volume.driver, 'copy_image_to_volume',
side_effect=bound_copy_method):
self.assertRaises(exception.ImageCopyFailure,
self._create_volume_from_image,
fakeout_copy_image_to_volume=False)
# We must have called detach method.
self.assertEqual(1, mock_detach.call_count)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_volume_from_image_clone_image_volume(self, mock_qemu_info):
"""Test create volume from image via image volume.
Verify that after cloning image to volume, it is in available
state and is bootable.
"""
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
volume = self._create_volume_from_image(clone_image_volume=True)
self.assertEqual('available', volume['status'])
self.assertTrue(volume['bootable'])
self.volume.delete_volume(self.context, volume)
def test_create_volume_from_exact_sized_image(self):
"""Test create volume from an image of the same size.
Verify that an image which is exactly the same size as the
        volume will work correctly.
"""
try:
volume_id = None
volume_api = cinder.volume.api.API(
image_service=FakeImageService())
volume = volume_api.create(self.context, 2, 'name', 'description',
image_id=self.FAKE_UUID)
volume_id = volume['id']
self.assertEqual('creating', volume['status'])
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
def test_create_volume_from_oversized_image(self):
"""Verify that an image which is too big will fail correctly."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi + 1,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_create_volume_with_mindisk_error(self):
"""Verify volumes smaller than image minDisk will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
'status': 'active'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
    def test_create_volume_with_deleted_image(self):
        """Verify that creating a volume from a deleted image causes an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
'status': 'deleted'}
volume_api = cinder.volume.api.API(
image_service=_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_copy_volume_to_image_maintenance(self):
"""Test copy volume to image in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.copy_volume_to_image,
self.context,
volume,
test_meta1,
force=True)
| {
"content_hash": "0f3467245571867a986cd4fbfa5a01ee",
"timestamp": "",
"source": "github",
"line_count": 679,
"max_line_length": 79,
"avg_line_length": 42.74521354933726,
"alnum_prop": 0.5825523704520397,
"repo_name": "eharney/cinder",
"id": "435d707c325b6cbbfcf5863aa08ad03f3c4d2730",
"size": "29755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/test_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "19839107"
},
{
"name": "Shell",
"bytes": "6453"
}
],
"symlink_target": ""
} |
from __future__ import division, absolute_import, unicode_literals
from collections import defaultdict
from django.db.models.deletion import Collector, ProtectedError
from django.db.models.sql.constants import QUERY_TERMS
from django.utils import six
from django.utils.encoding import force_bytes, force_text
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
This is adopted from the Django core. django-admin2 mandates that code
doesn't depend on imports from django.contrib.admin.
https://github.com/django/django/blob/1.9.6/django/contrib/admin/utils.py#L22
"""
lookup_fields = lookup_path.split('__')
# Remove the last item of the lookup path if it is a query term
if lookup_fields[-1] in QUERY_TERMS:
lookup_fields = lookup_fields[:-1]
# Now go through the fields (following all relations) and look for an m2m
for field_name in lookup_fields:
field = opts.get_field(field_name)
if hasattr(field, 'get_path_info'):
# This field is a relation, update opts to follow the relation
path_info = field.get_path_info()
opts = path_info[-1].to_opts
if any(path.m2m for path in path_info):
# This field is a m2m relation so we know we need to call distinct
return True
return False
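# Example (illustrative): a changelist search over 'groups__name__icontains'
# crosses a ManyToManyField, so one object can match several joined rows and
# the queryset must call .distinct(); a plain 'name__icontains' lookup on a
# local field never needs it.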
def model_options(model):
"""
Wrapper for accessing model._meta. If this access point changes in core
Django, this function allows django-admin2 to address the change with
what should hopefully be less disruption to the rest of the code base.
Works on model classes and objects.
"""
return model._meta
def admin2_urlname(view, action):
"""
Converts the view and the specified action into a valid namespaced URLConf name.
"""
return 'admin2:%s_%s_%s' % (view.app_label, view.model_name, action)
def model_verbose_name(model):
"""
Returns the verbose name of a model instance or class.
"""
return model_options(model).verbose_name
def model_verbose_name_plural(model):
"""
Returns the pluralized verbose name of a model instance or class.
"""
return model_options(model).verbose_name_plural
def model_field_verbose_name(model, field_name):
"""
Returns the verbose name of a model field.
"""
meta = model_options(model)
field = meta.get_field_by_name(field_name)[0]
return field.verbose_name
def model_method_verbose_name(model, method_name):
"""
Returns the verbose name / short description of a model field.
"""
method = getattr(model, method_name)
try:
return method.short_description
except AttributeError:
return method_name
def model_app_label(obj):
"""
Returns the app label of a model instance or class.
"""
return model_options(obj).app_label
def get_attr(obj, attr):
"""
Get the right value for the attribute. Handle special cases like callables
and the __str__ attribute.
"""
if attr == '__str__':
from builtins import str as text
value = text(obj)
else:
attribute = getattr(obj, attr)
value = attribute() if callable(attribute) else attribute
return value
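# Example (illustrative): for a model instance ``book``,
#   get_attr(book, '__str__')           -> str(book)
#   get_attr(book, 'title')             -> book.title
#   get_attr(book, 'get_absolute_url')  -> book.get_absolute_url()  (called)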
class NestedObjects(Collector):
"""
This is adopted from the Django core. django-admin2 mandates that code
doesn't depend on imports from django.contrib.admin.
https://github.com/django/django/blob/1.9.6/django/contrib/admin/utils.py#L171-L231
"""
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
self.model_objs = defaultdict(set)
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
self.model_objs[obj._meta.model].add(obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
        them to the user on the confirmation page.
"""
return False
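# Example usage of NestedObjects (illustrative; mirrors how a delete
# confirmation view would build its summary, assuming the default DB alias):
#
#   collector = NestedObjects(using='default')
#   collector.collect([some_instance])
#   tree = collector.nested(format_callback=str)   # nested list for display
#   blocked = collector.protected                   # objects kept by PROTECT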
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
This is adopted from the Django core. django-admin2 mandates that code
doesn't depend on imports from django.contrib.admin.
https://github.com/django/django/blob/1.9.6/django/contrib/admin/utils.py#L66-L73
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"[]<>%\n\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
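# Example (illustrative): quote('2014/books_new') -> '2014_2Fbooks_5Fnew';
# '/' becomes '_2F' and '_' becomes '_5F', so the value is safe inside a URL
# path segment.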
def type_str(text):
if six.PY2:
return force_bytes(text)
else:
return force_text(text)
| {
"content_hash": "f1f1514a4c521bac38efbc6429517b8d",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 94,
"avg_line_length": 31.99033816425121,
"alnum_prop": 0.6270009060706735,
"repo_name": "andrewsmedina/django-admin2",
"id": "a831864cfa8a9fa88897dbda8b493f198ce97119",
"size": "6646",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "djadmin2/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19219"
},
{
"name": "HTML",
"bytes": "42539"
},
{
"name": "JavaScript",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "229693"
}
],
"symlink_target": ""
} |
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return None
dummy = ListNode(0)
dummy.next = head
pp = dummy
p = head
        # Use a dummy node so duplicates at the head need no special casing:
        #   dummy -> node1 -> node2 -> node3 ... -> nodeN
        #              ^
        #              |
        #            head
        # pp (the predecessor) starts at dummy and p starts at head.
        # When p and p.next hold different values, both pointers simply advance.
        # Otherwise, probe skips past the whole run of equal values and
        # pp.next is re-linked to probe, dropping the entire run.
        # Since probe may become None, the loop checks both p and p.next.
while p and p.next:
if p.next.val == p.val:
# probe points to unchecked node.
probe = p.next.next
while probe and probe.val == p.val:
probe = probe.next
pp.next = probe
p = probe
else:
pp = p
p = p.next
return dummy.next
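# --- Illustrative usage (not part of the original solution) ---
# Build the sorted list 1 -> 1 -> 2 -> 3 -> 3; removing every value that
# appears more than once should leave just the node holding 2.
if __name__ == '__main__':
    head = ListNode(1)
    head.next = ListNode(1)
    head.next.next = ListNode(2)
    head.next.next.next = ListNode(3)
    head.next.next.next.next = ListNode(3)
    node = Solution().deleteDuplicates(head)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected: [2]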
| {
"content_hash": "325473babebc79510eb6bbce3f5f4d53",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 93,
"avg_line_length": 30.536585365853657,
"alnum_prop": 0.4728434504792332,
"repo_name": "kingsamchen/Eureka",
"id": "f8a5e0dda86e584dfa46578b74580713c9468bc0",
"size": "1289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crack-data-structures-and-algorithms/leetcode/remove_duplicates_from_sorted_list_II_q82.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AIDL",
"bytes": "320"
},
{
"name": "Assembly",
"bytes": "1523"
},
{
"name": "C",
"bytes": "115900"
},
{
"name": "C#",
"bytes": "21241"
},
{
"name": "C++",
"bytes": "1125565"
},
{
"name": "CMake",
"bytes": "645800"
},
{
"name": "Dockerfile",
"bytes": "717"
},
{
"name": "Go",
"bytes": "111089"
},
{
"name": "HTML",
"bytes": "3869"
},
{
"name": "Java",
"bytes": "101720"
},
{
"name": "Makefile",
"bytes": "110"
},
{
"name": "PowerShell",
"bytes": "9136"
},
{
"name": "Python",
"bytes": "210011"
},
{
"name": "Shell",
"bytes": "9338"
}
],
"symlink_target": ""
} |
from producteev import Producteev
from optparse import OptionParser
API_KEY = 'YOUR_API_KEY'
SECRET_KEY = 'YOUR_SECRET_KEY'
if __name__ == "__main__":
# Parse options
parser = OptionParser()
parser.add_option("-u", "--username", dest="username",
help="Set username", metavar="USERNAME")
parser.add_option("-p", "--password", dest="password",
help="Set password", metavar="PASSWORD")
parser.add_option("-d", "--debug",
action="store_true", dest="debug", default=False,
help="Print status messages to stdout")
(options, args) = parser.parse_args()
    # Make client
client = Producteev(API_KEY, SECRET_KEY)
client.login(options.username, options.password)
# Get time
print client.get_time()
# Get user
user = client.users.me
print user.timezone
print user.default_dashboard
print user.colleagues
# Get task with id 9909041
t = client.tasks.get(9909041)
print t.title
print t.labels
t.title = 'New task name'
# Get task list
tasks = client.tasks.list_all()
# Get dashboard list
dashboards = client.dashboards.list
d = dashboards[0]
print d.title
print d.users
print d.tasks
| {
"content_hash": "2dcadfe942a76b60129ddb6d2b2a6dae",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 71,
"avg_line_length": 27.25531914893617,
"alnum_prop": 0.6120218579234973,
"repo_name": "magarcia/python-producteev",
"id": "d86b9e09b59803b153e0c3b62fb756ff0d79cf70",
"size": "1304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57411"
}
],
"symlink_target": ""
} |
def execfile(file, glob=None, loc=None):
if glob is None:
import sys
glob = sys._getframe().f_back.f_globals
if loc is None:
loc = glob
stream = open(file, 'rb')
try:
encoding = None
#Get encoding!
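        # Illustrative example (not part of the original): a first line of
        # b"# -*- coding: latin-1 -*-" makes the regex below yield "latin-1",
        # per PEP 263's coding-declaration convention.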
for _i in range(2):
line = stream.readline() #Should not raise an exception even if there are no more contents
#Must be a comment line
if line.strip().startswith(b'#'):
#Don't import re if there's no chance that there's an encoding in the line
if b'coding' in line:
import re
p = re.search(br"coding[:=]\s*([-\w.]+)", line)
if p:
try:
encoding = p.group(1).decode('ascii')
break
except:
encoding = None
finally:
stream.close()
if encoding:
stream = open(file, encoding=encoding)
else:
stream = open(file)
try:
contents = stream.read()
finally:
stream.close()
    exec(compile(contents+"\n", file, 'exec'), glob, loc) #execute the script
| {
"content_hash": "7f64abeb62251b2c10014f91ae54fb60",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 102,
"avg_line_length": 33.189189189189186,
"alnum_prop": 0.4771986970684039,
"repo_name": "hinesmr/mica",
"id": "d60d7ed94bb08736c9a2d1f9dcad1afc22c486ea",
"size": "1283",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "util/pydevd/pysrc/_pydev_execfile.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "19796"
},
{
"name": "C++",
"bytes": "1653"
},
{
"name": "CSS",
"bytes": "718628"
},
{
"name": "HTML",
"bytes": "1110507"
},
{
"name": "JavaScript",
"bytes": "3180714"
},
{
"name": "Makefile",
"bytes": "70"
},
{
"name": "Python",
"bytes": "1379726"
},
{
"name": "Shell",
"bytes": "6032"
}
],
"symlink_target": ""
} |
'''
tunepk urlresolver plugin
Copyright (C) 2013 icharania
updated Copyright (C) 2017 gujal
updated Copyright (C) 2019 cache-sk
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import base64
import hashlib
import json
import time
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class TunePkResolver(UrlResolver):
name = "tune.pk"
domains = ["tune.pk", "tune.video"]
pattern = '(?://|\.)(tune\.(?:video|pk))/(?:player|video|play)/(?:[\w\.\?]+=)?(\d+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
apiurl = 'https://api.tune.pk/v3/videos/{}'.format(media_id)
currentTime = time.time()
x_req_time = time.strftime('%a, %d %b %Y %H:%M:%S GMT',time.gmtime(currentTime))
tunestring = 'videos/{} . {} . KH42JVbO'.format(media_id, int(currentTime))
token = hashlib.sha1(tunestring).hexdigest()
headers = {'Content-Type': 'application/json; charset=utf-8',
'User-Agent': common.FF_USER_AGENT,
'X-KEY': '777750fea4d3bd585bf47dc1873619fc',
                   #'X-REQ-APP': 'web'  # not needed; the hash returned with it is unusable anyway
'X-REQ-TIME': x_req_time,
'X-REQ-TOKEN': token}
try:
response = self.net.http_GET(apiurl, headers=headers)
jdata = json.loads(response.content)
if jdata['message'] == 'OK':
vids = jdata['data']['videos']['files']
sources = []
for key in vids.keys():
sources.append((vids[key]['label'], vids[key]['file']))
                sources.reverse()  # the API appears to list files from worst to best quality
video_url = helpers.pick_source(sources)
# hash recount
serverTime = long(jdata['timestamp']) + (int(time.time()) - int(currentTime))
hashLifeDuration = long(jdata['data']['duration']) * 5
if hashLifeDuration < 3600:
hashLifeDuration = 3600
expiryTime = serverTime + hashLifeDuration
try:
startOfPathUrl = video_url.index('/files/videos/')
pathUrl = video_url[startOfPathUrl:None]
except ValueError:
try:
startOfPathUrl = video_url.index('/files/streams/')
pathUrl = video_url[startOfPathUrl:None]
except ValueError:
raise ResolverError('This video cannot be played.')
htoken = hashlib.md5(str(expiryTime) + pathUrl + ' ' + 'c@ntr@lw3biutun3cb').digest()
htoken = base64.urlsafe_b64encode(htoken).replace('=', '').replace('\n', '')
video_url = video_url + '?h=' + htoken + '&ttl=' + str(expiryTime)
headers = {'Referer': web_url,
'User-Agent': common.FF_USER_AGENT}
return video_url + helpers.append_headers(headers)
except:
raise ResolverError('This video has been removed due to a copyright claim.')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://tune.pk/video/{media_id}/')
| {
"content_hash": "ecf8acbb78e203e3efb85e1dcf1e36e0",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 101,
"avg_line_length": 42.784946236559136,
"alnum_prop": 0.584820306609701,
"repo_name": "dbiesecke/dbiesecke.github.io",
"id": "e20f28d36d172fa6098b2a7efde608d72836b683",
"size": "3979",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "repo/script.module.urlresolver/lib/urlresolver/plugins/tunepk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23106"
},
{
"name": "HTML",
"bytes": "1689379"
},
{
"name": "JavaScript",
"bytes": "103456"
},
{
"name": "Makefile",
"bytes": "4554"
},
{
"name": "Perl",
"bytes": "2785"
},
{
"name": "Python",
"bytes": "14200477"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
} |
import logging
from framework.celery_tasks.handlers import enqueue_task
from website import settings
logger = logging.getLogger(__name__)
if settings.SEARCH_ENGINE == 'elastic':
import elastic_search as search_engine
else:
search_engine = None
logger.warn('Elastic search is not set to load')
def requires_search(func):
def wrapped(*args, **kwargs):
if search_engine is not None and not settings.RUNNING_MIGRATION:
return func(*args, **kwargs)
return wrapped
@requires_search
def search(query, index=None, doc_type=None, raw=None):
index = index or settings.ELASTIC_INDEX
return search_engine.search(query, index=index, doc_type=doc_type, raw=raw)
@requires_search
def update_node(node, index=None, bulk=False, async=True, saved_fields=None):
kwargs = {
'index': index,
'bulk': bulk
}
if async:
node_id = node._id
# We need the transaction to be committed before trying to run celery tasks.
# For example, when updating a Node's privacy, is_public must be True in the
# database in order for method that updates the Node's elastic search document
# to run correctly.
if settings.USE_CELERY:
enqueue_task(search_engine.update_node_async.s(node_id=node_id, **kwargs))
else:
search_engine.update_node_async(node_id=node_id, **kwargs)
else:
index = index or settings.ELASTIC_INDEX
return search_engine.update_node(node, **kwargs)
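# Illustrative sketch only (not part of this module): the "commit before the task
# runs" ordering described above can also be expressed with Django's
# transaction.on_commit, e.g.
#   transaction.on_commit(
#       lambda: enqueue_task(search_engine.update_node_async.s(node_id=node_id, **kwargs)))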
@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.bulk_update_nodes(serialize, nodes, index=index)
@requires_search
def delete_node(node, index=None):
index = index or settings.ELASTIC_INDEX
doc_type = node.project_or_component
if node.is_registration:
doc_type = 'registration'
search_engine.delete_doc(node._id, node, index=index, category=doc_type)
def update_contributors(nodes):
search_engine.bulk_update_contributors(nodes)
@requires_search
def update_user(user, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.update_user(user, index=index)
@requires_search
def update_file(file_, index=None, delete=False):
index = index or settings.ELASTIC_INDEX
search_engine.update_file(file_, index=index, delete=delete)
@requires_search
def update_institution(institution, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.update_institution(institution, index=index)
@requires_search
def delete_all():
search_engine.delete_all()
@requires_search
def delete_index(index):
search_engine.delete_index(index)
@requires_search
def create_index(index=None):
index = index or settings.ELASTIC_INDEX
search_engine.create_index(index=index)
@requires_search
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
exclude = exclude or []
result = search_engine.search_contributor(query=query, page=page, size=size,
exclude=exclude, current_user=current_user)
return result
| {
"content_hash": "9ea82d8fd1263add7d3c48df283eea6a",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 89,
"avg_line_length": 32.16326530612245,
"alnum_prop": 0.6986040609137056,
"repo_name": "monikagrabowska/osf.io",
"id": "b6216289949d4e3c6e7f2657da58766b7e912dc6",
"size": "3152",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/search/search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176566"
},
{
"name": "HTML",
"bytes": "183119"
},
{
"name": "JavaScript",
"bytes": "2017358"
},
{
"name": "Jupyter Notebook",
"bytes": "8510"
},
{
"name": "Makefile",
"bytes": "6905"
},
{
"name": "Mako",
"bytes": "755899"
},
{
"name": "PLpgSQL",
"bytes": "22144"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "9632033"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib.auth.models import User
from .models import useradd, tweet, comment
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
profile_picture = forms.FileField(required=False)
class Meta:
model = useradd
fields = ['username', 'email', 'password', 'country', 'profile_picture']
help_texts = None
class TweetForm(forms.ModelForm):
class Meta:
model = tweet
fields = ['text', 'tweetto']
class CommentForm(forms.ModelForm):
class Meta:
model = comment
fields = ['ctext']
| {
"content_hash": "7cc057c4efb1201c87dd989fbaf6ed21",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 27.08695652173913,
"alnum_prop": 0.666131621187801,
"repo_name": "Udayraj123/dashboard_IITG",
"id": "035158a8e952c77020c793a41bf976f53419c119",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Binder/Twitter/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "356"
},
{
"name": "CSS",
"bytes": "2484053"
},
{
"name": "HTML",
"bytes": "5650437"
},
{
"name": "JavaScript",
"bytes": "8566070"
},
{
"name": "PHP",
"bytes": "535968"
},
{
"name": "Python",
"bytes": "205490"
}
],
"symlink_target": ""
} |
from PyQt4.QtGui import (QVBoxLayout, QHBoxLayout, QTableWidgetItem,
QIcon, QGridLayout, QSplitter, QLineEdit, QFrame,
QPushButton, QMenu, QComboBox)
from PyQt4.QtCore import QDate, Qt, SIGNAL
from configuration import Config
from Common.ui.common import (FWidget, IntLineEdit,
FormLabel, FormatDate)
from Common.ui.util import is_int, date_to_datetime, date_on_or_end
from Common.ui.table import FTableWidget
from peewee import fn
from ui.reports_managers import GReportViewWidget
from GCommon.ui._product_detail import InfoTableWidget
from models import (Store, Product, Reports)
class StockOutputWidget(FWidget):
def __init__(self, store="", parent=0, *args, **kwargs):
super(StockOutputWidget, self).__init__(parent=parent, *args, **kwargs)
self.parentWidget().setWindowTitle(
"{} {}".format(Config.APP_NAME, "SORTIE STOCK "))
self.parent = parent
self.store = store
vbox = QVBoxLayout(self)
hbox = QHBoxLayout(self)
editbox = QGridLayout()
self.date_out = FormatDate(QDate.currentDate())
        # Combobox widget for choosing the store
self.liste_store = Store.all()
self.box_store = QComboBox()
for index in range(0, len(self.liste_store)):
op = self.liste_store[index]
self.box_store.addItem(op.name, op.id)
if self.store and self.store.name == op.name:
self.box_store.setCurrentIndex(index)
self.search_field = QLineEdit()
self.search_field.setPlaceholderText("Rechercher un article")
self.search_field.setMaximumSize(
200, self.search_field.maximumSize().height())
self.search_field.textChanged.connect(self.finder)
self.vline = QFrame()
self.vline.setFrameShape(QFrame.VLine)
self.vline.setFrameShadow(QFrame.Sunken)
self.table_resultat = ResultatTableWidget(parent=self)
self.table_info = InfoTableWidget(parent=self)
self.table_out = InproductTableWidget(parent=self)
self.box_store.connect(self.box_store,
SIGNAL("currentIndexChanged(int)"),
self.table_out.changed_value)
self.table_resultat.refresh_("")
# editbox.addWidget(FormLabel(u"Recherche:"), 0, 0)
editbox.addWidget(self.search_field, 0, 0)
editbox.addWidget(self.vline, 0, 2, 1, 1)
editbox.addWidget(FormLabel(u"Magasin:"), 0, 4)
editbox.addWidget(self.box_store, 0, 5)
editbox.addWidget(FormLabel(u"Date d'achat:"), 0, 6)
editbox.addWidget(self.date_out, 0, 7)
editbox.setColumnStretch(3, 3)
splitter = QSplitter(Qt.Horizontal)
splitter_left = QSplitter(Qt.Vertical)
# splitter_left.addWidget(FBoxTitle(u"Les products"))
splitter_left.addWidget(self.table_resultat)
splitter_down = QSplitter(Qt.Vertical)
splitter_down.resize(15, 20)
splitter_down.addWidget(self.table_info)
splitter_rigth = QSplitter(Qt.Vertical)
# splitter_rigth.addWidget(FBoxTitle(u"Les products achatés"))
splitter_rigth.addWidget(self.table_out)
splitter_rigth.resize(500, 900)
splitter_left.addWidget(splitter_down)
splitter.addWidget(splitter_left)
splitter.addWidget(splitter_rigth)
hbox.addWidget(splitter)
vbox.addLayout(editbox)
vbox.addLayout(hbox)
self.setLayout(vbox)
def finder(self):
value = str(self.search_field.text())
self.table_resultat.refresh_(value)
def save_report(self):
''' add operation '''
        # invoice header
self.table_out.changed_value()
if not self.table_out.isvalid:
return False
date_out = str(self.date_out.text())
datetime_ = date_to_datetime(date_out)
self.current_store = self.liste_store[self.box_store.currentIndex()]
values_t = self.table_out.get_table_items()
for qty, name in values_t:
rep = Reports(type_=Reports.S, store=self.current_store,
date=datetime_, product=Product.get(name=name),
qty_use=int(qty))
try:
rep.save()
except:
self.parent.Notify(
u"Ce mouvement n'a pas pu être enrgistré dans les raports",
"error")
return False
self.parent.change_context(GReportViewWidget)
self.parent.Notify(u"La sortie des articles avec succès", "success")
class ResultatTableWidget(FTableWidget):
"""docstring for ResultatTableWidget"""
def __init__(self, parent, *args, **kwargs):
FTableWidget.__init__(self, parent=parent, *args, **kwargs)
self.parent = parent
self.hheaders = [u"i", u"Resultat", u"Ajouter"]
self.stretch_columns = [1]
self.align_map = {1: 'l'}
# self.display_fixed = True
# self.display_vheaders = False
self.refresh_()
def refresh_(self, value=None):
""" """
pw = self.width()
self.setColumnWidth(0, 20)
self.setColumnWidth(1, pw)
self.setColumnWidth(2, 40)
self._reset()
self.set_data_for(value)
self.refresh()
def set_data_for(self, value):
products = [(Product.get(id=rpt.product_id).name) for rpt in
Reports.select(fn.Distinct(Reports.product))]
if value:
products = [(prod.name) for prod in Product.select(
).where(Product.name.contains(value)).where(
Product.name << products).order_by(Product.name.desc())]
self.data = [("", rpt, "") for rpt in products]
def _item_for_data(self, row, column, data, context=None):
if column == 2:
return QTableWidgetItem(
QIcon(u"{img_media}{img}".format(img_media=Config.img_cmedia,
img="go-next.png")), "")
if column == 0:
return QTableWidgetItem(
QIcon(u"{img_media}{img}".format(img_media=Config.img_cmedia,
img="info.png")), "")
return super(ResultatTableWidget, self)._item_for_data(row, column,
data, context)
def click_item(self, row, column, *args):
self.choix = Product.filter(name=self.data[row][1]).get()
if column != 2:
self.parent.table_info.refresh_(self.choix.id)
if column == 2:
self.parent.table_out.refresh_(self.choix)
class InproductTableWidget(FTableWidget):
def __init__(self, parent, *args, **kwargs):
FTableWidget.__init__(self, parent=parent, *args, **kwargs)
self.parent = parent
self.hheaders = ["Quantité (carton)", "Nombre pièce", "Désignation"]
# self.setSelectionMode(QAbstractItemView.NoSelection)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.popup)
self.stretch_columns = [0, 1, 2]
self.align_map = {0: 'r', 1: 'r', 2: 'l'}
self.display_vheaders = False
# self.display_fixed = True
self.refresh_()
# self.isvalid = True
self.col_dest = 2
self.col_qtty = 0
def refresh_(self, choix=None):
if choix:
self.row = [1, choix.number_parts_box, choix.name]
if not [row for row in self.data if self.row[
self.col_dest] in row]:
self.set_data_for()
self.refresh()
self.refresh()
def set_data_for(self):
self._reset()
self.data.extend([self.row])
self.refresh_()
pw = self.width() / 4 - 20
self.setColumnWidth(0, pw)
self.setColumnWidth(1, pw)
self.setColumnWidth(2, (pw * 2))
def popup(self, pos):
if (len(self.data) - 1) < self.selectionModel().selection().indexes(
)[0].row():
return False
menu = QMenu()
quit_action = menu.addAction("Supprimer cette ligne")
action = menu.exec_(self.mapToGlobal(pos))
if action == quit_action:
try:
self.data.pop(self.selectionModel()
.selection().indexes()[0].row())
except IndexError:
pass
if self.data == []:
self._reset()
self.refresh()
def extend_rows(self):
nb_rows = self.rowCount()
self.setRowCount(nb_rows + 1)
bicon = QIcon.fromTheme(
'', QIcon(u"{img_media}{img}".format(img_media=Config.img_cmedia,
img='save.png')))
self.button = QPushButton(bicon, u"Enrgistrer la sortie")
self.button.released.connect(self.parent.save_report)
self.setCellWidget(nb_rows, self.col_dest, self.button)
def _item_for_data(self, row, column, data, context=None):
if column == 0:
self.line_edit = IntLineEdit(u"%s" % data)
self.line_edit.textChanged.connect(self.changed_value)
return self.line_edit
return super(InproductTableWidget, self)._item_for_data(
row, column, data, context)
def _update_data(self, row_num, new_data):
self.data[row_num] = (new_data[0], new_data[1], self.data[row_num][2])
def get_table_items(self):
""" """
list_order = []
for i in range(self.rowCount() - 1):
liste_item = []
row_data = self.data[i]
try:
liste_item.append(int(row_data[self.col_qtty]))
liste_item.append(str(row_data[self.col_dest]))
list_order.append(liste_item)
except:
liste_item.append("")
return list_order
def changed_value(self, refresh=False):
""" Calcule les Resultat """
current_store = self.parent.liste_store[
self.parent.box_store.currentIndex()]
self.button.setEnabled(True)
self.isvalid = True
for row_num in range(0, self.data.__len__()):
qtsaisi = is_int(self.cellWidget(row_num, self.col_qtty).text())
nb_parts_box = Product.filter(
name=self.item(row_num, self.col_dest).text()).get(
).number_parts_box * qtsaisi
self.setItem(row_num, 1, QTableWidgetItem(
"{}".format(nb_parts_box)))
self._update_data(row_num, [qtsaisi, nb_parts_box])
try:
last_report = Reports.filter(
store=current_store, product__name=str(
self.item(row_num, self.col_dest).text())).order_by(
Reports.date.desc()).get()
qtremaining = last_report.remaining
date_out = str(self.parent.date_out.text())
if last_report.date > date_on_or_end(date_out, on=False):
self.parent.date_out.setStyleSheet("font-size:15px;"
"color:red")
self.parent.date_out.setToolTip(
"Cette date est Inférieure à la date de la dernière rapport ({}).".format(last_report.date))
self.isvalid = False
self.button.setEnabled(False)
# return False
except Exception as e:
qtremaining = 0
viderreur_qtsaisi = ""
stylerreur = "background-color: rgb(255, 235, 235);" + \
"border: 3px double SeaGreen"
if qtsaisi == 0:
viderreur_qtsaisi = stylerreur
self.cellWidget(row_num, self.col_qtty).setToolTip(
u"obligatoire")
self.isvalid = False
self.button.setEnabled(False)
self.cellWidget(row_num, self.col_qtty).setStyleSheet(
viderreur_qtsaisi)
self.cellWidget(row_num, self.col_qtty).setToolTip("")
if qtremaining < qtsaisi:
self.cellWidget(row_num, self.col_qtty).setStyleSheet(
"font-size:20px; color: red")
self.cellWidget(
row_num, self.col_qtty).setToolTip(
u"{} est > {} la quantité restante.".format(
qtsaisi, qtremaining))
self.isvalid = False
# self.button.setEnabled(False)
| {
"content_hash": "b0bf801f1c33ce9c100286e9298ecf0e",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 116,
"avg_line_length": 37.498525073746315,
"alnum_prop": 0.5597073631214601,
"repo_name": "fadiga/mstock",
"id": "fa512fae417fc187b9df5f6ebaf23a9d2b25a921",
"size": "12787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/stock_output.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "NSIS",
"bytes": "3329"
},
{
"name": "Python",
"bytes": "146547"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class LegendgroupValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="legendgroup", parent_name="waterfall", **kwargs):
super(LegendgroupValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
| {
"content_hash": "b653945f9dd6b8825193e0607298f92e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 85,
"avg_line_length": 37.45454545454545,
"alnum_prop": 0.6359223300970874,
"repo_name": "plotly/plotly.py",
"id": "8cfac6d85569a99eb7a78bfcbe2d62bc49501cd3",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/waterfall/_legendgroup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Tests the import_data.py script.
Typical usage:
python3 test_import.py
"""
import os
import sys
import unittest
import pandas as pd
# Allows the following module imports to work when running as a script
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from us_bea.states_gdp import import_data
from us_bea.states_gdp import import_industry_data_and_gen_mcf
# _MODULE_DIR is the path to where this test is running from.
_MODULE_DIR = os.path.dirname(__file__)
_TEST_DATA_DIR = os.path.join(_MODULE_DIR, "test_csvs")
class USStateQuarterlyGDPImportTest(unittest.TestCase):
def test_date_converter(self):
"""Tests the date converter function used to process raw data."""
date_conv_fn = import_data.StateGDPDataLoader.date_to_obs_date
self.assertEqual(date_conv_fn("2005:Q1"), "2005-03")
self.assertEqual(date_conv_fn("2005:Q2"), "2005-06")
self.assertEqual(date_conv_fn("2005:Q3"), "2005-09")
self.assertEqual(date_conv_fn("2005:Q4"), "2005-12")
self.assertEqual(date_conv_fn("1999:Q1"), "1999-03")
self.assertEqual(date_conv_fn("2020:Q2"), "2020-06")
def test_geoid_converter(self):
"""Tests the geoid converter function used to process raw data."""
geoid_conv_fn = import_data.StateGDPDataLoader.convert_geoid
self.assertEqual(geoid_conv_fn(' "1000"'), "geoId/10")
self.assertEqual(geoid_conv_fn("1000"), "geoId/10")
self.assertEqual(geoid_conv_fn("10"), "geoId/10")
self.assertEqual(geoid_conv_fn("10 "), "geoId/10")
self.assertEqual(geoid_conv_fn('10""""""'), "geoId/10")
self.assertEqual(geoid_conv_fn("25100"), "geoId/25")
self.assertEqual(geoid_conv_fn(' "760000"'), "geoId/76")
self.assertEqual(geoid_conv_fn('123""""""'), "geoId/12")
def test_data_processing_tiny(self):
"""Tests end-to-end data cleaning on a tiny example."""
raw_df = pd.read_csv(os.path.join(_TEST_DATA_DIR, "test_tiny_raw.csv"),
index_col=0)
clean_df = pd.read_csv(os.path.join(_TEST_DATA_DIR,
"test_tiny_cleaned.csv"),
index_col=0)
loader = import_data.StateGDPDataLoader()
loader.process_data(raw_df)
pd.testing.assert_frame_equal(clean_df, loader.clean_df)
def test_data_processing_small(self):
"""Tests end-to-end data cleaning on a small example."""
raw_df = pd.read_csv(os.path.join(_TEST_DATA_DIR, "test_small_raw.csv"),
index_col=0)
clean_df = pd.read_csv(os.path.join(_TEST_DATA_DIR,
"test_small_cleaned.csv"),
index_col=0)
loader = import_data.StateGDPDataLoader()
loader.process_data(raw_df)
pd.testing.assert_frame_equal(clean_df, loader.clean_df)
class USStateQuarterlyPerIndustryImportTest(unittest.TestCase):
def test_data_processing_tiny(self):
"""Tests end-to-end data cleaning on a tiny example."""
raw_df = pd.read_csv(os.path.join(_TEST_DATA_DIR,
"test_industry_tiny_raw.csv"),
index_col=0)
clean_df = pd.read_csv(os.path.join(_TEST_DATA_DIR,
"test_industry_tiny_cleaned.csv"),
index_col=0)
loader = import_industry_data_and_gen_mcf.StateGDPIndustryDataLoader()
loader.process_data(raw_df)
pd.testing.assert_frame_equal(clean_df, loader.clean_df)
def test_value_converter(self):
"""Tests value converter function that cleans out empty datapoints."""
val_conv_fn = (import_industry_data_and_gen_mcf.
StateGDPIndustryDataLoader.value_converter)
self.assertEqual(val_conv_fn("(D)"), -1)
self.assertEqual(val_conv_fn("(E)"), -1)
self.assertEqual(val_conv_fn("356785)"), -1)
self.assertEqual(val_conv_fn("35678.735"), 35678.735)
self.assertEqual(val_conv_fn(5), 5)
self.assertEqual(val_conv_fn(35678.735), 35678.735)
self.assertEqual(val_conv_fn(""), -1)
def test_industry_class(self):
"""Tests industry class converter function that cleans out empty
datapoints.
"""
ind_conv_fn = (import_industry_data_and_gen_mcf.
StateGDPIndustryDataLoader.convert_industry_class)
prefix = "dcs:USStateQuarterlyIndustryGDP_NAICS_"
self.assertEqual(ind_conv_fn("35"), prefix + "35")
self.assertEqual(ind_conv_fn("987"), prefix + "987")
self.assertEqual(ind_conv_fn("35-37"), prefix + "35_37")
self.assertEqual(ind_conv_fn("35-37,40"), prefix + "35_37&40")
self.assertEqual(ind_conv_fn("13-97,2,45-78"), prefix + "13_97&2&45_78")
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "4a6533350a659433be5e9922d3f3464d",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 80,
"avg_line_length": 42.19327731092437,
"alnum_prop": 0.6004779924317865,
"repo_name": "datacommonsorg/data",
"id": "621a3396d6065c77e31b83cc9b797d47a0b3f9e3",
"size": "5596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/us_bea/states_gdp/import_data_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78"
},
{
"name": "Go",
"bytes": "51446"
},
{
"name": "HTML",
"bytes": "32842253"
},
{
"name": "JavaScript",
"bytes": "458"
},
{
"name": "Jupyter Notebook",
"bytes": "5088443"
},
{
"name": "Python",
"bytes": "3723204"
},
{
"name": "R",
"bytes": "28607"
},
{
"name": "Shell",
"bytes": "25468"
},
{
"name": "TypeScript",
"bytes": "13472"
}
],
"symlink_target": ""
} |
from twisted.internet import defer
from twisted.trial import unittest
from twisted.internet import reactor
from mock import Mock
from cyclone import template
class TestTemplates(unittest.TestCase):
def test_simple_var(self):
t = template.Template(r"My name is: {{ name }}")
self.assertEqual(t.generate(name="Alice"), "My name is: Alice")
self.assertEqual(t.generate(name="Bob"), "My name is: Bob")
def test_blocks(self):
loader = template.DictLoader({
"base.html": r"value: {% block value %}original{% end %}",
"ext1.html": r'{% extends "base.html" %}{% block value %}new1{% end %}',
"ext2.html": r'{% extends "base.html" %}{% block value %}{% super %}, new2{% end %}',
"ext3.html": r'{% extends "ext2.html" %}{% block value %}{% super %}, new3{% end %}',
"ext2_1.html": r'{% extends "base.html" %}{% block value %}{% super %}, a={{a}}{% end %}',
"ext3_1.html": r'{% extends "ext2_1.html" %}{% block value %}{% super %}, a={{a}}{% end %}',
"ext3_2.html": r'{% extends "ext2_1.html" %}{% block value %}{% super %}, a={{a}}:b={{b}}{% end %}',
"ext3_3.html": r'{% extends "ext2_1.html" %}{% block value %}{% super %}, b={{b}}{% end %}',
})
self.assertEqual(loader.load("base.html").generate(), "value: original")
self.assertEqual(loader.load("ext1.html").generate(), "value: new1")
self.assertEqual(loader.load("ext2.html").generate(), "value: original, new2")
self.assertEqual(loader.load("ext3.html").generate(), "value: original, new2, new3")
self.assertEqual(loader.load("ext2_1.html").generate(a=-5), "value: original, a=-5")
self.assertEqual(loader.load("ext2_1.html").generate(a=42), "value: original, a=42")
self.assertEqual(loader.load("ext3_1.html").generate(a=-5), "value: original, a=-5, a=-5")
self.assertEqual(loader.load("ext3_1.html").generate(a=42), "value: original, a=42, a=42")
self.assertEqual(loader.load("ext3_2.html").generate(a=42, b=11), "value: original, a=42, a=42:b=11")
self.assertEqual(loader.load("ext3_3.html").generate(a=42, b=7), "value: original, a=42, b=7")
loader = template.DictLoader({
"base.html": r"{% block v1 %}1{% end %} {% block v2 %}2{% end %}",
"ext1.html": r'{% extends "base.html" %}{% block v1 %}Hello{% end %}{% block v2 %}World{% end %}',
"ext2.html": r'{% extends "base.html" %}{% block v1 %}{% super %}+10{% end %}{% block v2 %}= 9 + {% super %}{% end %}',
})
self.assertEqual(loader.load("ext1.html").generate(), "Hello World")
self.assertEqual(loader.load("ext2.html").generate(), "1+10 = 9 + 2")
def test_if(self):
t = template.Template(r"{% if a == 1 %}One{% elif a < 0 %}Negative{% elif isinstance(a, basestring) %}String{% else %}Unknown{% end %}")
self.assertEqual(t.generate(a=1), "One")
self.assertEqual(t.generate(a=-1), "Negative")
self.assertEqual(t.generate(a=-1.67), "Negative")
self.assertEqual(t.generate(a=-100), "Negative")
self.assertEqual(t.generate(a=42), "Unknown")
self.assertEqual(t.generate(a=42.5), "Unknown")
self.assertEqual(t.generate(a="meow"), "String")
def test_comment(self):
self.assertEqual(
template.Template(r"{% comment blah! %}42").generate(),
"42"
)
def test_set(self):
self.assertEqual(
template.Template(r"{% set x=42 %}{{ val + x }}").generate(val=-42),
"0"
)
self.assertEqual(
template.Template(r"{% set x=val2 %}{{ val + x }}").generate(val=1, val2=10),
"11"
)
def test_loops(self):
self.assertEqual(
template.Template(r"{% for x in [1,2,3,4] %}{{ x }}:{% end %}").generate(),
"1:2:3:4:"
)
self.assertEqual(
template.Template(r"{% set x=0 %}{% while x < 10 %}{{x}};{% set x += 2 %}{% end %}").generate(),
"0;2;4;6;8;"
)
def test_autoescape(self):
t = template.Template(r"<{{x}}>")
self.assertEqual(
t.generate(x="<"),
"<<>"
)
self.assertEqual(
t.generate(x=">"),
"<>>"
)
t2 = template.Template(r"{% autoescape None %}<{{x}}>")
self.assertEqual(
t2.generate(x="<"),
"<<>"
)
self.assertEqual(
t2.generate(x=">"),
"<>>"
)
@defer.inlineCallbacks
def test_deferreds(self):
def _mkDeferred(rv, delay=None):
d = defer.Deferred()
if delay is None:
d.callback(rv)
else:
reactor.callLater(delay, d.callback, rv)
return d
        # Test that the template immediately resolves deferreds when possible
t = template.Template(r"-) {{x}} <-> {{y(63)}} :!")
self.assertEqual(
t.generate(x=_mkDeferred(42), y=_mkDeferred),
"-) 42 <-> 63 :!"
)
# Test delayed execution
d = t.generate(
x=_mkDeferred("hello", 0.1),
y=lambda val: _mkDeferred(val-60, 0.5)
)
self.assertTrue(isinstance(d, defer.Deferred), d)
txt = yield d
self.assertEqual(
txt,
"-) hello <-> 3 :!"
        )
| {
"content_hash": "06d62450ad5a1374c4c5cc2eee918064",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 138,
"avg_line_length": 35.61363636363637,
"alnum_prop": 0.6064667092108063,
"repo_name": "dpnova/cyclone",
"id": "b5dfe80ff2bec4f28437238f9b14fa20db259186",
"size": "5283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyclone/tests/test_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7348"
},
{
"name": "HTML",
"bytes": "50352"
},
{
"name": "JavaScript",
"bytes": "6002"
},
{
"name": "Makefile",
"bytes": "642"
},
{
"name": "Nginx",
"bytes": "529"
},
{
"name": "Python",
"bytes": "591752"
},
{
"name": "Shell",
"bytes": "9852"
}
],
"symlink_target": ""
} |
from nose.tools import assert_almost_equal, assert_equal, assert_raises, assert_true, assert_less_equal, assert_greater_equal, assert_greater
from stationary.utils.bomze import bomze_matrices
def test_bomze_matrices():
"""
Check that the data file with the Bomze classification matrices is present
and loads the correct number of matrices.
"""
matrices = list(bomze_matrices())
assert_equal(len(matrices), 49)
| {
"content_hash": "b35a18c456a97b414073140f7a93b000",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 141,
"avg_line_length": 36.416666666666664,
"alnum_prop": 0.7414187643020596,
"repo_name": "marcharper/stationary",
"id": "c69f47cf14f10d560699a93d3eb53a892c4ce2ed",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_bomze.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2641"
},
{
"name": "Python",
"bytes": "85484"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioneer
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='pyretools',
version='0.1.1',
#cmdclass=versioneer.get_cmdclass(),
description="Tools that manipulate Regular Expression",
long_description=readme + '\n\n' + history,
author="Chia-Jung, Yang",
author_email='jeroyang@gmail.com',
url='https://github.com/jeroyang/retools',
packages=[
'retools',
],
package_dir={'retools':
'retools'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='retools',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
| {
"content_hash": "7e28b8cf8cc2d2444ca3b09eeceefb1c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 63,
"avg_line_length": 27.37037037037037,
"alnum_prop": 0.6244925575101489,
"repo_name": "jeroyang/retools",
"id": "2463bec1c4fbb3a2bc83d1b763a381d7f26af971",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1696"
},
{
"name": "Python",
"bytes": "83511"
}
],
"symlink_target": ""
} |
from rest_framework import viewsets, mixins, permissions
from rest_framework.filters import OrderingFilter, SearchFilter, DjangoFilterBackend
from rest_framework.permissions import DjangoModelPermissions, DjangoObjectPermissions
from rest_framework.settings import api_settings
from rest_framework.response import Response
class BaseViewSets(object):
list_serializer_class = False
retrieve_serializer_class = False
to_filter_backends = False
@property
def filter_backends(self):
if hasattr(self, 'to_filter_backends'):
if self.to_filter_backends:
return api_settings.DEFAULT_FILTER_BACKENDS + self.to_filter_backends
return api_settings.DEFAULT_FILTER_BACKENDS
def get_serializer_class(self):
if hasattr(self, 'list_serializer_class') or hasattr(self, 'serializer_class_list'):
if self.list_serializer_class:
if self.request.method == 'PUT':
return self.serializer_class
if self.request.method == 'POST':
return self.serializer_class
return self.list_serializer_class
return self.serializer_class
def list(self, request, *args, **kwargs):
list = super(BaseViewSets, self).list(request, *args, **kwargs)
return list
class KernelViewSets(BaseViewSets, viewsets.ModelViewSet):
def retrieve(self, *args, **kwargs):
instance = self.get_object()
if hasattr(self, 'retrieve_serializer_class'):
if hasattr(self, 'serializer_class_retrieve'):
serializer = self.serializer_class_retrieve(instance)
else:
serializer = self.get_serializer(instance)
else:
serializer = self.get_serializer(instance)
return Response(serializer.data)
class KernelReadOnlyViewSets(BaseViewSets, viewsets.ReadOnlyModelViewSet):
pass
class AnyViewSet(KernelViewSets):
"""
    A simple serializer management viewset intended for simple models.
"""
permission_classes = [permissions.AllowAny]
filter_backends = [SearchFilter, OrderingFilter]
class AnyViewReadOnlySet(viewsets.ReadOnlyModelViewSet):
"""
    A read-only serializer management viewset intended for simple models.
"""
permission_classes = (permissions.AllowAny,)
filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]
class ReadOnlyListModelViewSet(viewsets.ReadOnlyModelViewSet):
"""
    A viewset that enforces access permissions and data-filtering backends for the API.
"""
permission_classes = (DjangoModelPermissions, DjangoObjectPermissions)
filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]
class ModelViewSet(viewsets.ModelViewSet):
"""
    A simple serializer management viewset intended for simple models.
"""
permission_classes = (DjangoModelPermissions, DjangoObjectPermissions)
filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter]
| {
"content_hash": "0896b5043fd1d0260de175ca7200db24",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 106,
"avg_line_length": 36.30952380952381,
"alnum_prop": 0.7072131147540983,
"repo_name": "pycodi/django-kernel",
"id": "c6b85919ecaa9151c3ac558d60b1462563278d3e",
"size": "3324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kernel/rest/viewsets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "427"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "Python",
"bytes": "74590"
}
],
"symlink_target": ""
} |
"""
banopata - Banking Nordea Parse Transactions
"""
from common import *
import re
if __name__ == '__main__':
PathToProperties = config["PathToProperties"]
PathToTransactions = config["PathToTransactions"]
errors = []
for line in open(PathToTransactions, encoding='utf-8').readlines():
fields = line.rstrip().split('\t')
if len(fields) > 5 and all([fields[:2], fields[3], fields[5]]) and '.' in fields[0]:
datebook, typetrns, text, datevald, accout, accoin = fields
d, M, Y = map(int, datebook.split('.'))
accoin = safe_cast(accoin.replace(',', '.').replace(' ', ''), float, .0)
navn = re.findall(r"[\w']+", text.upper())
navn = ' '.join((navn[0], navn[-1])) if len(navn) > 0 else ''
keysearch = max(similar(navn, key) for key in unitlookup.keys())
molike = keysearch[1] # most likely key
if molike in unitlookup and molike == navn and datebook == datevald:
unit = unitlookup[molike]
if not unit in data:
data[unit] = {}
if not "payments" in data[unit]:
data[unit]["payments"] = {}
if not Y in data[unit]["payments"]:
data[unit]["payments"][Y] = {}
if not M in data[unit]["payments"][Y]:
data[unit]["payments"][Y][M] = {}
data[unit]["payments"][Y][M][d] = accoin
else:
errors.append(str((fields, keysearch)))
savedata()
print('#' * 50, 'ERRORS', '#' * 50)
print('\n'.join(errors))
| {
"content_hash": "810dd0fab9cae50431e29e2ab42d2bde",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 92,
"avg_line_length": 39.53658536585366,
"alnum_prop": 0.5181986428130784,
"repo_name": "wittrup/crap",
"id": "15e6123bf9d0817a68f5333f9b1c8f4bfc8f4e2b",
"size": "1904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skattmestring/banopata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "129479"
},
{
"name": "Python",
"bytes": "168586"
}
],
"symlink_target": ""
} |
import pkg_resources
def version_string():
"""
Return the current version number of MapProxy.
"""
try:
return pkg_resources.working_set.by_key['mapproxy'].version
except KeyError:
return 'unknown_version'
__version__ = version = version_string()
if __name__ == '__main__':
    print __version__
| {
"content_hash": "975818e567562090b948e545efc2dfa1",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 22.2,
"alnum_prop": 0.6216216216216216,
"repo_name": "Anderson0026/mapproxy",
"id": "fdf834faaf64a3dfa07b70dafe8db8f9ccad6306",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapproxy/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12401"
},
{
"name": "Python",
"bytes": "1477825"
},
{
"name": "Shell",
"bytes": "3087"
}
],
"symlink_target": ""
} |
"""Metrics."""
from tensorflow_recommenders.metrics.factorized_top_k import Factorized
from tensorflow_recommenders.metrics.factorized_top_k import FactorizedTopK
| {
"content_hash": "edcb2ff30cf996d3d3aeb43a223502db",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 75,
"avg_line_length": 41,
"alnum_prop": 0.8353658536585366,
"repo_name": "tensorflow/recommenders",
"id": "51f3ee37af4d02079aced472cc9c44064e0e5348",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_recommenders/metrics/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "232265"
},
{
"name": "Shell",
"bytes": "2138"
}
],
"symlink_target": ""
} |
import sublime
import sublime_plugin
from vex import ex_location
import ex_commands
def compute_flags(view, term):
flags = 0 # case sensitive
search_mode = view.settings().get('vintage_search_mode')
if search_mode == 'smart_case':
if term.lower() == term:
flags = sublime.IGNORECASE
elif search_mode == 'case_insensitive':
flags = sublime.IGNORECASE
return flags
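# Worked example (illustrative, not part of the original plugin): with
# 'vintage_search_mode' set to 'smart_case', compute_flags(view, "foo") returns
# sublime.IGNORECASE because the term is all lowercase, while
# compute_flags(view, "Foo") returns 0 and the search stays case sensitive.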
class SearchImpl(object):
last_term = ""
def __init__(self, view, cmd, remember=True, start_sel=None):
self.start_sel = start_sel
self.remember = remember
if not cmd:
return
self.view = view
self.reversed = cmd.startswith("?")
if not cmd.startswith(("?", "/")):
cmd = "/" + cmd
if len(cmd) == 1 and SearchImpl.last_term:
cmd += SearchImpl.last_term
elif not cmd:
return
self.cmd = cmd[1:]
self.flags = compute_flags(self.view, self.cmd)
def search(self):
if not getattr(self, "cmd", None):
return
if self.remember:
SearchImpl.last_term = self.cmd
sel = self.start_sel[0]
next_match = None
if self.reversed:
current_line = self.view.line(self.view.sel()[0])
left_side = sublime.Region(current_line.begin(),
self.view.sel()[0].begin())
if ex_location.search_in_range(self.view, self.cmd,
left_side.begin(),
left_side.end(),
self.flags):
next_match = ex_location.find_last_match(self.view,
self.cmd,
left_side.begin(),
left_side.end(),
self.flags)
else:
line_nr = ex_location.reverse_search(self.view, self.cmd,
end=current_line.begin() - 1,
flags=self.flags)
if line_nr:
pt = self.view.text_point(line_nr - 1, 0)
line = self.view.full_line(pt)
if line.begin() != current_line.begin():
next_match = ex_location.find_last_match(self.view,
self.cmd,
line.begin(),
line.end(),
self.flags)
else:
next_match = self.view.find(self.cmd, sel.end(), self.flags)
# handle search restart
if not next_match:
if self.reversed:
sublime.status_message("VintageEx: search hit TOP, continuing at BOTTOM")
line_nr = ex_location.reverse_search(self.view, self.cmd, flags=self.flags)
if line_nr:
pt = self.view.text_point(line_nr - 1, 0)
line = self.view.full_line(pt)
next_match = ex_location.find_last_match(self.view,
self.cmd,
line.begin(),
line.end(),
self.flags)
else:
sublime.status_message("VintageEx: search hit BOTTOM, continuing at TOP")
next_match = self.view.find(self.cmd, 0, sel.end())
# handle result
if next_match:
self.view.sel().clear()
if not self.remember:
self.view.add_regions("vi_search", [next_match], "search.vi",
sublime.DRAW_OUTLINED)
else:
self.view.sel().add(next_match)
self.view.show(next_match)
else:
sublime.status_message("VintageEx: Pattern not found:" + self.cmd)
class ViRepeatSearchBackward(sublime_plugin.TextCommand):
def run(self, edit):
if ex_commands.VintageExState.search_buffer_type == 'pattern_search':
SearchImpl(self.view, "?" + SearchImpl.last_term,
start_sel=self.view.sel()).search()
elif ex_commands.VintageExState.search_buffer_type == 'find_under':
self.view.window().run_command("find_prev", {"select_text": False})
class ViRepeatSearchForward(sublime_plugin.TextCommand):
def run(self, edit):
if ex_commands.VintageExState.search_buffer_type == 'pattern_search':
SearchImpl(self.view, SearchImpl.last_term,
start_sel=self.view.sel()).search()
elif ex_commands.VintageExState.search_buffer_type == 'find_under':
self.view.window().run_command("find_next", {"select_text": False})
class ViFindUnder(sublime_plugin.TextCommand):
def run(self, edit, forward=True):
ex_commands.VintageExState.search_buffer_type = 'find_under'
if forward:
self.view.window().run_command('find_under', {'select_text': False})
else:
self.view.window().run_command('find_under_prev', {'select_text': False})
class ViSearch(sublime_plugin.TextCommand):
def run(self, edit, initial_text=""):
self.original_sel = list(self.view.sel())
self.view.window().show_input_panel("", initial_text,
self.on_done,
self.on_change,
self.on_cancel)
def on_done(self, s):
self._restore_sel()
try:
SearchImpl(self.view, s, start_sel=self.original_sel).search()
ex_commands.VintageExState.search_buffer_type = 'pattern_search'
except RuntimeError, e:
if 'parsing' in str(e):
print "VintageEx: Regex parsing error. Incomplete pattern: %s" % s
else:
raise e
self.original_sel = None
self._restore_sel()
def on_change(self, s):
if s in ("/", "?"):
return
self._restore_sel()
try:
SearchImpl(self.view, s, remember=False,
start_sel=self.original_sel).search()
except RuntimeError, e:
if 'parsing' in str(e):
print "VintageEx: Regex parsing error. Expected error."
else:
raise e
def on_cancel(self):
self._restore_sel()
self.original_sel = None
def _restore_sel(self):
self.view.erase_regions("vi_search")
if not self.original_sel:
return
self.view.sel().clear()
for s in self.original_sel:
self.view.sel().add(s)
self.view.show(self.view.sel()[0])
| {
"content_hash": "ccfde8b481bdf1485815e8bf2a6c62b1",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 91,
"avg_line_length": 41.97142857142857,
"alnum_prop": 0.46807351940095304,
"repo_name": "SublimeText/VintageEx",
"id": "a1f9e4c372bd9942284be34c6bf1ddd6601a35ff",
"size": "7553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex_search_cmd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "883"
},
{
"name": "Python",
"bytes": "156697"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
class League(models.Model):
owner = models.ForeignKey(User)
name = models.CharField(max_length=256)
is_public = models.BooleanField(default=False)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Membership(models.Model):
league = models.ForeignKey(League)
summoner_id = models.BigIntegerField('Summoner ID')
nickname = models.CharField(max_length=32, default=None)
def __str__(self):
if self.nickname is None:
            return str(self.summoner_id)
else:
            return '%s (%s)' % (self.summoner_id, self.nickname)
| {
"content_hash": "73024d0f11c5c8c5ae7deecda7fdcb90",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 64,
"avg_line_length": 30.4,
"alnum_prop": 0.6776315789473685,
"repo_name": "dylanseago/LeagueOfLadders",
"id": "2f931e55e1287823762e26f06638edec629ead6d",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leagueofladders/apps/myleague/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "486"
},
{
"name": "Python",
"bytes": "26880"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('saltstack', '0003_unique_spl'),
]
operations = [
migrations.RemoveField(
model_name='saltstackserviceprojectlink',
name='error_message',
),
migrations.RemoveField(
model_name='saltstackserviceprojectlink',
name='state',
),
]
| {
"content_hash": "cb06df333812ed0e7c6bd214a4b24b68",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 53,
"avg_line_length": 22.523809523809526,
"alnum_prop": 0.5940803382663847,
"repo_name": "opennode/nodeconductor-saltstack",
"id": "2622d156ef52907ce6cd463d24252e47a7ee9484",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/nodeconductor_saltstack/saltstack/migrations/0004_remove_spl_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201917"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Resume, ContactInfo, Aboutme, OpenSourceProject, ContributedProject
class ResumeAdmin(admin.ModelAdmin):
list_display = ('company', 'position', 'entry_time', 'time_of_separation')
admin.site.register(Resume, ResumeAdmin)
admin.site.register(ContactInfo)
admin.site.register(Aboutme)
admin.site.register(OpenSourceProject)
admin.site.register(ContributedProject)
| {
"content_hash": "bfbba45f2a31cc3983d2b6af344a7e05",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 87,
"avg_line_length": 35.166666666666664,
"alnum_prop": 0.8056872037914692,
"repo_name": "zhengxiaowai/Athena",
"id": "cf37a6aa65aff5787091d739f568555f7a562f92",
"size": "422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "personal_info/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7748"
},
{
"name": "Makefile",
"bytes": "965"
},
{
"name": "Python",
"bytes": "94747"
}
],
"symlink_target": ""
} |
import modelingcounter
import os, sys
import SonicScrewdriver as utils
import csv
sourcedir = "/Volumes/TARDIS/work/US_NOVELS_1923-1950/"
filelist = os.listdir(sourcedir)
fileset = set([x for x in filelist if x.endswith(".txt")])
filelist = list(fileset)
metafile = os.path.join(sourcedir, "US_NOVELS_1923-1950_META.txt")
datedict = dict()
dateset = set()
with open(metafile, newline='', encoding = 'utf-8') as f:
reader = csv.reader(f)
for fields in reader:
idcode = fields[0]
date = int(fields[8])
datedict[idcode] = date
dateset.add(date)
verbose = True
targetwords = {'crown', 'crowns', 'guinea', 'guineas', 'nickel', 'sovereign', 'sovereigns', 'pound', 'pounds', 'quid'}
contexts = []
for filename in filelist:
htid = utils.pairtreelabel(filename.replace('.txt', ''))
if htid not in datedict:
print(htid)
continue
else:
date = datedict[htid]
filepath = os.path.join(sourcedir, filename)
with open(filepath, encoding = 'utf-8') as f:
filelines = f.readlines()
pagelist = [filelines]
# The wordcounter module expects a list of pages, each of which is a list of lines.
# Ebooks have no pages -- at least as I currently receive them -- so we treat it
# all as one giant page.
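    # Illustrative shape (assumption, not from the original script): two real pages
    # would be [["line one\n", "line two\n"], ["line three\n"]]; here the whole
    # ebook becomes a single such "page".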
tokenstream = modelingcounter.makestream(pagelist)
newcontexts = modelingcounter.extract_context(tokenstream, targetwords)
for alist in newcontexts:
contexts.append((htid, date, alist))
outfile = "/Volumes/TARDIS/work/moneycontext/contexts.tsv"
with open(outfile, mode='a', encoding='utf-8') as f:
for context in contexts:
htid, date, alist = context
outline = " ".join(alist)
f.write(htid + '\t' + str(date) + '\t' + outline + '\n')
| {
"content_hash": "1374660111a9dc4066e359800d1fe81b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 118,
"avg_line_length": 24.589041095890412,
"alnum_prop": 0.6512534818941504,
"repo_name": "tedunderwood/GenreProject",
"id": "21c870a1c9d744837ac0ad46e028954e3e4d6676",
"size": "1893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/piketty/make_20csnippets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "187389"
},
{
"name": "Python",
"bytes": "645172"
},
{
"name": "R",
"bytes": "34870"
}
],
"symlink_target": ""
} |
from . import _dbg
from . import gs, sh, about
from .margo_common import NS
from os.path import basename, splitext
import os
import re
import sublime
actions = NS(**{k: {'Name': k} for k in (
'QueryCompletions',
'QueryCmdCompletions',
'QueryTooltips',
'QueryIssues',
'QueryUserCmds',
'QueryTestCmds',
'ViewActivated',
'ViewModified',
'ViewPosChanged',
'ViewFmt',
'ViewPreSave',
'ViewSaved',
'ViewLoaded',
'RunCmd',
)})
client_actions = NS(**{k: k for k in (
'Activate',
'Restart',
'Shutdown',
'CmdOutput',
'DisplayIssues',
)})
class MgView(sublime.View):
def __init__(self, *, mg, view):
self.mg = mg
self.is_9o = False
self.is_file = False
self.is_widget = False
self.sync(view=view)
def sync(self, *, view):
if view is None:
return
_pf=_dbg.pf(dot=self.id)
self.id = view.id()
self.view = view
self.name = view_name(view)
self.is_file = self.id in self.mg.file_ids
self.is_widget = not self.is_file
def __eq__(self, v):
return self.view == v
def __hash__(self):
return self.id
def __repr__(self):
return repr(vars(self))
def name(self):
return view_name(self.view)
class Config(object):
def __init__(self, m):
efl = m.get('EnabledForLangs')
if m and (not isinstance(efl, list) or len(efl) == 0):
print('MARGO BUG: EnabledForLangs is invalid.\nIt must be a non-empty list, not `%s: %s`\nconfig data: %s' % (type(efl), efl, m))
self.override_settings = m.get('OverrideSettings') or {}
self.enabled_for_langs = efl or ['*']
self.inhibit_explicit_completions = m.get('InhibitExplicitCompletions') is True
self.inhibit_word_completions = m.get('InhibitWordCompletions') is True
self.auto_complete_opts = 0
if self.inhibit_word_completions:
self.auto_complete_opts |= sublime.INHIBIT_WORD_COMPLETIONS
if self.inhibit_explicit_completions:
self.auto_complete_opts |= sublime.INHIBIT_EXPLICIT_COMPLETIONS
def __repr__(self):
return repr(self.__dict__)
class State(object):
def __init__(self, v={}):
self.config = Config(v.get('Config') or {})
self.errors = v.get('Errors') or []
self.status = v.get('Status') or []
self.view = ResView(v=v.get('View') or {})
self.completions = [Completion(c) for c in (v.get('Completions') or [])]
self.tooltips = [Tooltip(t) for t in (v.get('Tooltips') or [])]
self.issues = [Issue(l) for l in (v.get('Issues') or [])]
self.user_cmds = [UserCmd(c) for c in (v.get('UserCmds') or [])]
self.hud = HUD(v=v.get('HUD') or {})
self.client_actions = []
for ca in (v.get('ClientActions') or []):
CA = client_action_creators.get(ca.get('Name') or '') or ClientAction
self.client_actions.append(CA(v=ca))
def __repr__(self):
return repr(self.__dict__)
class ClientAction(object):
def __init__(self, v={}):
self.action_name = v.get('Name') or ''
self.action_data = v.get('Data') or {}
def __repr__(self):
return repr(vars(self))
class ClientAction_Output(ClientAction):
def __init__(self, v):
super().__init__(v=v)
ad = self.action_data
self.fd = ad.get('Fd') or ''
self.output = ad.get('Output') or ''
self.close = ad.get('Close') or False
def __repr__(self):
return repr(vars(self))
class ClientAction_Activate(ClientAction):
def __init__(self, v):
super().__init__(v=v)
ad = self.action_data
self.path = ad.get('Path') or ''
self.name = ad.get('Name') or ''
self.row = ad.get('Row') or 0
self.col = ad.get('Col') or 0
def __repr__(self):
return repr(vars(self))
client_action_creators = {
client_actions.CmdOutput: ClientAction_Output,
client_actions.Activate: ClientAction_Activate,
}
class Completion(object):
def __init__(self, v):
self.query = v.get('Query') or ''
self.title = v.get('Title') or ''
self.src = v.get('Src') or ''
self.tag = v.get('Tag') or ''
def entry(self):
return (
'%s\t%s %s' % (self.query, self.title, self.tag),
self.src,
)
def __repr__(self):
return repr(self.__dict__)
class Tooltip(object):
def __init__(self, v):
self.content = v.get('Content') or ''
def __repr__(self):
return repr(self.__dict__)
class PathName(object):
def __init__(self, *, path, name):
self.path = path or ''
self.name = name or ''
def match(self, p):
if self.path and self.path == p.path:
return True
if self.name and self.name == p.name:
return True
return False
def __repr__(self):
return repr(vars(self))
class ViewPathName(PathName):
def __init__(self, view):
super().__init__(
path = view_path(view),
name = view_name(view),
)
class Issue(PathName):
def __init__(self, v):
super().__init__(
path = v.get('Path') or '',
name = v.get('Name') or '',
)
self.hash = v.get('Hash') or ''
self.row = v.get('Row') or 0
self.col = v.get('Col') or 0
self.end = v.get('End') or 0
self.tag = v.get('Tag') or ''
self.label = v.get('Label') or ''
self.message = v.get('Message') or ''
def __repr__(self):
return repr(self.__dict__)
def relpath(self, dir):
if not self.path:
return self.name
if not dir:
return self.path
return os.path.relpath(self.path, dir)
def basename(self):
if not self.path:
return self.name
return os.path.basename(self.path)
class ResView(object):
def __init__(self, v={}):
self.name = v.get('Name') or ''
self.src = v.get('Src') or ''
if isinstance(self.src, bytes):
self.src = self.src.decode('utf-8')
class UserCmd(object):
def __init__(self, v={}):
self.title = v.get('Title') or ''
self.desc = v.get('Desc') or ''
self.name = v.get('Name') or ''
self.args = v.get('Args') or []
self.dir = v.get('Dir') or ''
self.prompts = v.get('Prompts') or []
class HUD(object):
def __init__(self, v={}):
self.articles = v.get('Articles') or []
# in testing, we should be able to push 50MiB+ files constantly without noticing a performance problem,
# but keep this number low (realistic source file sizes) at least until we optimize things
MAX_VIEW_SIZE = 8 << 20
# TODO: only send the content when it actually changes
# TODO: do chunked copying i.e. copy e.g. 1MiB at a time
# Past testing revealed that ST will choke under Python memory pressure if we attempt to
# copy large files: the buffer has to be converted into UTF-* (which can cost up to 4x the
# size) to produce the string it gives us, and we then have to re-encode that string into
# bytes before sending it.
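# A rough sketch (not part of this module) of what the chunked-copy TODO above could look
# like; the helper name and the 1MiB chunk size are assumptions used only for illustration.
def _view_src_chunked(view, chunk_size=1 << 20):
	# encode each chunk to utf-8 immediately so only one chunk-sized str is alive at a time
	out = bytearray()
	size = view.size()
	for start in range(0, size, chunk_size):
		end = min(start + chunk_size, size)
		out.extend(view.substr(sublime.Region(start, end)).encode('utf-8'))
	return bytes(out)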
def make_props(view=None, wd=''):
props = {
'Editor': _editor_props(view),
'Env': sh.env(),
'View': _view_props(view, wd=wd),
}
return props
def _editor_props(view):
sett = gs.setting('margo') or {}
if view is not None:
sett.update(view.settings().get('margo') or {})
return {
'Name': 'sublime',
'Version': sublime.version(),
'Client': {
'Name': 'gosublime',
'Tag': about.TAG,
},
'Settings': sett,
}
def view_is_9o(view):
return view is not None and view.settings().get('9o')
def _view_props(view, wd=''):
was_9o = view_is_9o(view)
if was_9o:
view = gs.active_view()
else:
view = gs.active_view(view=view)
if view is None:
return {}
pos = gs.sel(view).begin()
scope, lang, fn, props = _view_header(view, pos)
wd = wd or gs.getwd() or gs.basedir_or_cwd(fn)
src = _view_src(view, lang)
props.update({
'Wd': wd,
'Pos': pos,
'Dirty': view.is_dirty(),
'Src': src,
})
return props
_sanitize_view_name_pat = re.compile(r'[^-~,.@\w]')
def view_name(view, ext='', lang=''):
if view is None:
return '_._'
nm = basename(view.file_name() or view.name() or '_')
nm, nm_ext = splitext(nm)
if not ext:
ext = _view_ext(view, lang=lang) or nm_ext or '._'
nm = 'view@%s,%s%s' % (_view_id(view), nm, ext)
nm = _sanitize_view_name_pat.sub('', nm)
return nm
def view_path(view):
if view is None:
return ''
return view.file_name() or ''
def _view_ext(view, lang=''):
if view is None:
return ''
if not lang:
_, lang = _view_scope_lang(view, 0)
return '.%s' % ((view.name() or view_path(view)).split('.')[-1] or lang)
def _view_header(view, pos):
scope, lang = _view_scope_lang(view, pos)
path = view_path(view)
ext = _view_ext(view, lang=lang)
return scope, lang, path, {
'Path': path,
'Name': view_name(view, ext=ext, lang=lang),
'Hash': _view_hash(view),
'Lang': lang,
'Scope': scope,
}
def _view_id(view):
if view is None:
return ''
return str(view.id())
def _view_hash(view):
if view is None:
return ''
return 'id=%s,change=%d' % (_view_id(view), view.change_count())
_lang_by_basename = {
'go.mod': 'go.mod',
'go.sum': 'go.sum',
}
_scope_lang_pat = re.compile(r'(?:source\.\w+|source|text)[.]([^\s.]+)')
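# e.g. a scope like 'source.go' yields the lang 'go'; 'text.plain' yields 'plain',
# which is then filtered out by the blacklist in _view_scope_lang below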
def _view_scope_lang(view, pos):
if view is None:
return ('', '')
_pf=_dbg.pf()
scope = view.scope_name(pos).strip().lower()
if view_is_9o(view):
return (scope, 'cmd-prompt')
nm = basename(view_path(view))
lb = _lang_by_basename.get(nm)
if lb:
return (scope, lb)
l = _scope_lang_pat.findall(scope)
if not l:
return (scope, '')
blacklist = (
'plain',
'find-in-files',
)
lang = l[-1]
if lang in blacklist:
return (scope, '')
return (scope, lang)
def _view_src(view, lang):
if view is None:
return ''
if not lang:
return ''
if not view.is_dirty():
return ''
if view.is_loading():
return ''
if view.size() > MAX_VIEW_SIZE:
return ''
return gs.view_src(view)
| {
"content_hash": "f980c9224a070974555b17e420945785",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 132,
"avg_line_length": 22.808823529411764,
"alnum_prop": 0.6230388996346443,
"repo_name": "DisposaBoy/GoSublime",
"id": "e225c5b246d41e4b2d7da0628c74bfeb6627c812",
"size": "9306",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "gosubl/margo_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "786261"
},
{
"name": "Python",
"bytes": "383358"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import time
import datetime
import boto3
from botocore.exceptions import ClientError
import sure # noqa
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs
import functools
import nose
def expected_failure(test):
@functools.wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
except Exception as err:
raise nose.SkipTest
return inner
DEFAULT_REGION = 'eu-central-1'
def _get_clients():
return boto3.client('ec2', region_name=DEFAULT_REGION), \
boto3.client('iam', region_name=DEFAULT_REGION), \
boto3.client('ecs', region_name=DEFAULT_REGION), \
boto3.client('logs', region_name=DEFAULT_REGION), \
boto3.client('batch', region_name=DEFAULT_REGION)
def _setup(ec2_client, iam_client):
"""
Do prerequisite setup
:return: VPC ID, Subnet ID, Security group ID, IAM Role ARN
:rtype: tuple
"""
resp = ec2_client.create_vpc(CidrBlock='172.30.0.0/24')
vpc_id = resp['Vpc']['VpcId']
resp = ec2_client.create_subnet(
AvailabilityZone='eu-central-1a',
CidrBlock='172.30.0.0/25',
VpcId=vpc_id
)
subnet_id = resp['Subnet']['SubnetId']
resp = ec2_client.create_security_group(
Description='test_sg_desc',
GroupName='test_sg',
VpcId=vpc_id
)
sg_id = resp['GroupId']
resp = iam_client.create_role(
RoleName='TestRole',
AssumeRolePolicyDocument='some_policy'
)
iam_arn = resp['Role']['Arn']
return vpc_id, subnet_id, sg_id, iam_arn
# Yes, yes it talks to all the things
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_managed_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='MANAGED',
state='ENABLED',
computeResources={
'type': 'EC2',
'minvCpus': 5,
'maxvCpus': 10,
'desiredvCpus': 5,
'instanceTypes': [
't2.small',
't2.medium'
],
'imageId': 'some_image_id',
'subnets': [
subnet_id,
],
'securityGroupIds': [
sg_id,
],
'ec2KeyPair': 'string',
'instanceRole': iam_arn,
'tags': {
'string': 'string'
},
'bidPercentage': 123,
'spotIamFleetRole': 'string'
},
serviceRole=iam_arn
)
resp.should.contain('computeEnvironmentArn')
resp['computeEnvironmentName'].should.equal(compute_name)
# Given a t2.medium is 2 vcpu and t2.small is 1, therefore 2 mediums and 1 small should be created
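    # (t2.medium = 2 vCPUs, t2.small = 1 vCPU; desiredvCpus=5 is met by 2 mediums + 1 small = 3 instances)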
resp = ec2_client.describe_instances()
resp.should.contain('Reservations')
len(resp['Reservations']).should.equal(3)
# Should have created 1 ECS cluster
resp = ecs_client.list_clusters()
resp.should.contain('clusterArns')
len(resp['clusterArns']).should.equal(1)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_unmanaged_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
resp.should.contain('computeEnvironmentArn')
resp['computeEnvironmentName'].should.equal(compute_name)
    # It's unmanaged, so no instances should be created
resp = ec2_client.describe_instances()
resp.should.contain('Reservations')
len(resp['Reservations']).should.equal(0)
# Should have created 1 ECS cluster
resp = ecs_client.list_clusters()
resp.should.contain('clusterArns')
len(resp['clusterArns']).should.equal(1)
# TODO create 1000s of tests to test complex option combinations of create environment
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_describe_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(1)
resp['computeEnvironments'][0]['computeEnvironmentName'].should.equal(compute_name)
# Test filtering
resp = batch_client.describe_compute_environments(
computeEnvironments=['test1']
)
len(resp['computeEnvironments']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_unmanaged_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
batch_client.delete_compute_environment(
computeEnvironment=compute_name,
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(0)
resp = ecs_client.list_clusters()
len(resp.get('clusterArns', [])).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_managed_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='MANAGED',
state='ENABLED',
computeResources={
'type': 'EC2',
'minvCpus': 5,
'maxvCpus': 10,
'desiredvCpus': 5,
'instanceTypes': [
't2.small',
't2.medium'
],
'imageId': 'some_image_id',
'subnets': [
subnet_id,
],
'securityGroupIds': [
sg_id,
],
'ec2KeyPair': 'string',
'instanceRole': iam_arn,
'tags': {
'string': 'string'
},
'bidPercentage': 123,
'spotIamFleetRole': 'string'
},
serviceRole=iam_arn
)
batch_client.delete_compute_environment(
computeEnvironment=compute_name,
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(0)
resp = ec2_client.describe_instances()
resp.should.contain('Reservations')
len(resp['Reservations']).should.equal(3)
for reservation in resp['Reservations']:
reservation['Instances'][0]['State']['Name'].should.equal('terminated')
resp = ecs_client.list_clusters()
len(resp.get('clusterArns', [])).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_update_unmanaged_compute_environment_state():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
batch_client.update_compute_environment(
computeEnvironment=compute_name,
state='DISABLED'
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(1)
resp['computeEnvironments'][0]['state'].should.equal('DISABLED')
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_job_queue():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
resp.should.contain('jobQueueArn')
resp.should.contain('jobQueueName')
queue_arn = resp['jobQueueArn']
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['jobQueueArn'].should.equal(queue_arn)
resp = batch_client.describe_job_queues(jobQueues=['test_invalid_queue'])
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(0)
# Create job queue which already exists
try:
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')
# Create job queue with incorrect state
try:
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue2',
state='JUNK',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')
# Create job queue with no compute env
try:
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue3',
state='JUNK',
priority=123,
computeEnvironmentOrder=[
]
)
except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_job_queue_bad_arn():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
try:
batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn + 'LALALA'
},
]
)
except ClientError as err:
err.response['Error']['Code'].should.equal('ClientException')
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_update_job_queue():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
batch_client.update_job_queue(
jobQueue=queue_arn,
priority=5
)
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['priority'].should.equal(5)
batch_client.update_job_queue(
jobQueue='test_job_queue',
priority=5
)
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['priority'].should.equal(5)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_job_queue():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
batch_client.delete_job_queue(
jobQueue=queue_arn
)
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_register_task_definition():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
resp.should.contain('jobDefinitionArn')
resp.should.contain('jobDefinitionName')
resp.should.contain('revision')
assert resp['jobDefinitionArn'].endswith('{0}:{1}'.format(resp['jobDefinitionName'], resp['revision']))
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_reregister_task_definition():
# Reregistering task with the same name bumps the revision number
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
resp1 = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
resp1.should.contain('jobDefinitionArn')
resp1.should.contain('jobDefinitionName')
resp1.should.contain('revision')
assert resp1['jobDefinitionArn'].endswith('{0}:{1}'.format(resp1['jobDefinitionName'], resp1['revision']))
resp1['revision'].should.equal(1)
resp2 = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 68,
'command': ['sleep', '10']
}
)
resp2['revision'].should.equal(2)
resp2['jobDefinitionArn'].should_not.equal(resp1['jobDefinitionArn'])
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_task_definition():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
batch_client.deregister_job_definition(jobDefinition=resp['jobDefinitionArn'])
resp = batch_client.describe_job_definitions()
len(resp['jobDefinitions']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_describe_task_definition():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 64,
'command': ['sleep', '10']
}
)
batch_client.register_job_definition(
jobDefinitionName='test1',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 64,
'command': ['sleep', '10']
}
)
resp = batch_client.describe_job_definitions(
jobDefinitionName='sleep10'
)
len(resp['jobDefinitions']).should.equal(2)
resp = batch_client.describe_job_definitions()
len(resp['jobDefinitions']).should.equal(3)
resp = batch_client.describe_job_definitions(
jobDefinitions=['sleep10', 'test1']
)
len(resp['jobDefinitions']).should.equal(3)
# SLOW TESTS
@expected_failure
@mock_logs
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_submit_job():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
job_def_arn = resp['jobDefinitionArn']
resp = batch_client.submit_job(
jobName='test1',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id = resp['jobId']
future = datetime.datetime.now() + datetime.timedelta(seconds=30)
while datetime.datetime.now() < future:
resp = batch_client.describe_jobs(jobs=[job_id])
print("{0}:{1} {2}".format(resp['jobs'][0]['jobName'], resp['jobs'][0]['jobId'], resp['jobs'][0]['status']))
if resp['jobs'][0]['status'] == 'FAILED':
raise RuntimeError('Batch job failed')
if resp['jobs'][0]['status'] == 'SUCCEEDED':
break
time.sleep(0.5)
else:
raise RuntimeError('Batch job timed out')
resp = logs_client.describe_log_streams(logGroupName='/aws/batch/job')
len(resp['logStreams']).should.equal(1)
ls_name = resp['logStreams'][0]['logStreamName']
resp = logs_client.get_log_events(logGroupName='/aws/batch/job', logStreamName=ls_name)
len(resp['events']).should.be.greater_than(5)
@expected_failure
@mock_logs
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_list_jobs():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
job_def_arn = resp['jobDefinitionArn']
resp = batch_client.submit_job(
jobName='test1',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id1 = resp['jobId']
resp = batch_client.submit_job(
jobName='test2',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id2 = resp['jobId']
future = datetime.datetime.now() + datetime.timedelta(seconds=30)
resp_finished_jobs = batch_client.list_jobs(
jobQueue=queue_arn,
jobStatus='SUCCEEDED'
)
# Wait only as long as it takes to run the jobs
while datetime.datetime.now() < future:
resp = batch_client.describe_jobs(jobs=[job_id1, job_id2])
any_failed_jobs = any([job['status'] == 'FAILED' for job in resp['jobs']])
succeeded_jobs = all([job['status'] == 'SUCCEEDED' for job in resp['jobs']])
if any_failed_jobs:
raise RuntimeError('A Batch job failed')
if succeeded_jobs:
break
time.sleep(0.5)
else:
raise RuntimeError('Batch jobs timed out')
resp_finished_jobs2 = batch_client.list_jobs(
jobQueue=queue_arn,
jobStatus='SUCCEEDED'
)
len(resp_finished_jobs['jobSummaryList']).should.equal(0)
len(resp_finished_jobs2['jobSummaryList']).should.equal(2)
@expected_failure
@mock_logs
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_terminate_job():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
job_def_arn = resp['jobDefinitionArn']
resp = batch_client.submit_job(
jobName='test1',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id = resp['jobId']
time.sleep(2)
batch_client.terminate_job(jobId=job_id, reason='test_terminate')
time.sleep(1)
resp = batch_client.describe_jobs(jobs=[job_id])
resp['jobs'][0]['jobName'].should.equal('test1')
resp['jobs'][0]['status'].should.equal('FAILED')
resp['jobs'][0]['statusReason'].should.equal('test_terminate')
| {
"content_hash": "53d241ee22e2e2b9f6d38bbc89c7b0f1",
"timestamp": "",
"source": "github",
"line_count": 868,
"max_line_length": 116,
"avg_line_length": 28.28917050691244,
"alnum_prop": 0.5971492567705152,
"repo_name": "whummer/moto",
"id": "310ac0b48bff7848bde6fd4efa60347d30072167",
"size": "24555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_batch/test_batch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1148"
},
{
"name": "Python",
"bytes": "6015085"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
"""
This script sets the contents of spark-defaults.conf and
gpuDiscoveryScriptStub.sh for the purpose of integration
testing. It does so based on the arg inputs, spark-base.conf
which is static, and spark-custom.conf which is dynamically
changed by tests.
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--num_workers',
help='Number of workers to be set in the spark conf'
)
parser.add_argument(
'--num_gpus_per_worker',
help='Number of gpus on each worker to be set in the spark conf'
)
args = parser.parse_args()
num_workers = int(args.num_workers)
num_gpus_per_worker = str(args.num_gpus_per_worker)
conf = {}
with open('tests/integration/spark_conf/spark-base.conf', 'r') as f:
lines = f.readlines()
for i in range(len(lines)):
l = lines[i].strip()
if l:
k, v = l.split(None, 1)
conf[k] = v
with open('tests/integration/spark_conf/spark-custom.conf', 'r') as f:
lines = f.readlines()
for i in range(len(lines)):
l = lines[i].strip()
if l:
k, v = l.split(None, 1)
conf[k] = v
with open('tests/integration/spark_conf/spark-defaults.conf', 'w') as f:
f.writelines(
['{} {}\n'.format(k, v) for k, v in conf.items()]
)
with open('tests/integration/spark_conf/gpuDiscoveryScriptStub.sh', 'w') as f:
    # 'w' truncates any existing stub, so there is no need to read its old contents first
gpus = '","'.join(str(e) for e in range(int(num_gpus_per_worker)))
cmd = "echo '{\"name\": \"gpu\", \"addresses\":[\"" + gpus + "\"]}'"
f.writelines([
'#!/usr/bin/env bash\n',
'\n',
cmd,
])
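# For example (illustrative), running with --num_gpus_per_worker=2 leaves the stub containing:
#   #!/usr/bin/env bash
#
#   echo '{"name": "gpu", "addresses":["0","1"]}'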
| {
"content_hash": "e45223ad8d4df45f0745614deb480bc4",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 29.19298245614035,
"alnum_prop": 0.5997596153846154,
"repo_name": "tensorflow/ecosystem",
"id": "51ca6898c00c5473a6db1fc8028957f37f2b2467",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spark/spark-tensorflow-distributor/tests/integration/set_spark_conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2382"
},
{
"name": "Java",
"bytes": "32184"
},
{
"name": "Jinja",
"bytes": "10609"
},
{
"name": "Python",
"bytes": "51860"
},
{
"name": "Scala",
"bytes": "132999"
},
{
"name": "Shell",
"bytes": "2098"
}
],
"symlink_target": ""
} |
__author__ = 'tivvit'
import webapp2
from backend.model.game import Game
class generateLeaderboardHandler(webapp2.RedirectHandler):
def get(self):
g = Game()
g.generateLeaderboard()
app = webapp2.WSGIApplication([
('/generateLeaderboard', generateLeaderboardHandler)
], debug=True)
| {
"content_hash": "76a6a7532418f81a1567eec07fda16f2",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 58,
"avg_line_length": 22.142857142857142,
"alnum_prop": 0.7161290322580646,
"repo_name": "gugcz/devfest-rpg",
"id": "3441c20fff744962ac091c754ce1333014637b5d",
"size": "310",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1311"
},
{
"name": "Dart",
"bytes": "5393"
},
{
"name": "HTML",
"bytes": "5371"
},
{
"name": "Java",
"bytes": "20873"
},
{
"name": "JavaScript",
"bytes": "5104"
},
{
"name": "Python",
"bytes": "31033"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
} |
import os
from collections import Counter
from copy import copy
import numpy as np
import pytest
from surprise import Dataset, Reader
from surprise.model_selection import (
KFold,
LeaveOneOut,
PredefinedKFold,
RepeatedKFold,
ShuffleSplit,
train_test_split,
)
from surprise.model_selection.split import get_cv
np.random.seed(1) # This is bad
def test_KFold(toy_data):
# Test n_folds parameter
kf = KFold(n_splits=5)
assert len(list(kf.split(toy_data))) == 5
with pytest.raises(ValueError):
kf = KFold(n_splits=10)
next(kf.split(toy_data)) # Too big (greater than number of ratings)
with pytest.raises(ValueError):
kf = KFold(n_splits=1)
next(kf.split(toy_data)) # Too low (must be >= 2)
# Make sure data has not been shuffled. If not shuffled, the users in the
# testsets are 0, 1, 2... 4 (in that order).
kf = KFold(n_splits=5, shuffle=False)
users = [int(testset[0][0][-1]) for (_, testset) in kf.split(toy_data)]
assert users == list(range(5))
# Make sure that when called two times without shuffling, folds are the
# same.
kf = KFold(n_splits=5, shuffle=False)
testsets_a = [testset for (_, testset) in kf.split(toy_data)]
testsets_b = [testset for (_, testset) in kf.split(toy_data)]
assert testsets_a == testsets_b
# test once again with another KFold instance
kf = KFold(n_splits=5, shuffle=False)
testsets_a = [testset for (_, testset) in kf.split(toy_data)]
assert testsets_a == testsets_b
# We'll now shuffle b and check that folds are different.
# (this is conditioned by seed setting at the beginning of file)
kf = KFold(n_splits=5, random_state=None, shuffle=True)
testsets_b = [testset for (_, testset) in kf.split(toy_data)]
assert testsets_a != testsets_b
# test once again: two calls to kf.split make different splits when
# random_state=None
testsets_a = [testset for (_, testset) in kf.split(toy_data)]
assert testsets_a != testsets_b
    # Make sure that folds are the same when the same KFold instance is used
    # with shuffle=True but random_state is set to some value
kf = KFold(n_splits=5, random_state=1, shuffle=True)
testsets_a = [testset for (_, testset) in kf.split(toy_data)]
testsets_b = [testset for (_, testset) in kf.split(toy_data)]
assert testsets_a == testsets_b
# Make sure raw ratings are not shuffled by KFold
old_raw_ratings = copy(toy_data.raw_ratings)
kf = KFold(n_splits=5, shuffle=True)
next(kf.split(toy_data))
assert old_raw_ratings == toy_data.raw_ratings
def test_ShuffleSplit(toy_data):
with pytest.raises(ValueError):
ss = ShuffleSplit(n_splits=0)
with pytest.raises(ValueError):
ss = ShuffleSplit(test_size=10)
next(ss.split(toy_data))
with pytest.raises(ValueError):
ss = ShuffleSplit(train_size=10)
next(ss.split(toy_data))
with pytest.raises(ValueError):
ss = ShuffleSplit(test_size=3, train_size=3)
next(ss.split(toy_data))
with pytest.raises(ValueError):
ss = ShuffleSplit(test_size=3, train_size=0)
next(ss.split(toy_data))
with pytest.raises(ValueError):
ss = ShuffleSplit(test_size=0, train_size=3)
next(ss.split(toy_data))
# No need to cover the entire dataset
ss = ShuffleSplit(test_size=1, train_size=1)
next(ss.split(toy_data))
# test test_size to int and train_size to None (complement)
ss = ShuffleSplit(test_size=1)
assert all(len(testset) == 1 for (_, testset) in ss.split(toy_data))
assert all(trainset.n_ratings == 4 for (trainset, _) in ss.split(toy_data))
# test test_size to float and train_size to None (complement)
ss = ShuffleSplit(test_size=0.2) # 20% of 5 = 1
assert all(len(testset) == 1 for (_, testset) in ss.split(toy_data))
assert all(trainset.n_ratings == 4 for (trainset, _) in ss.split(toy_data))
# test test_size to int and train_size to int
ss = ShuffleSplit(test_size=2, train_size=2)
assert all(len(testset) == 2 for (_, testset) in ss.split(toy_data))
assert all(trainset.n_ratings == 2 for (trainset, _) in ss.split(toy_data))
# test test_size to None (complement) and train_size to int
ss = ShuffleSplit(test_size=None, train_size=2)
assert all(len(testset) == 3 for (_, testset) in ss.split(toy_data))
assert all(trainset.n_ratings == 2 for (trainset, _) in ss.split(toy_data))
# test test_size to None (complement) and train_size to float
ss = ShuffleSplit(test_size=None, train_size=0.2)
assert all(len(testset) == 4 for (_, testset) in ss.split(toy_data))
assert all(trainset.n_ratings == 1 for (trainset, _) in ss.split(toy_data))
# test default parameters: 5 splits, test_size = .2, train_size = None
ss = ShuffleSplit()
assert len(list(ss.split(toy_data))) == 5
assert all(len(testset) == 1 for (_, testset) in ss.split(toy_data))
assert all(trainset.n_ratings == 4 for (trainset, _) in ss.split(toy_data))
# Test random_state parameter
# If random_state is None, you get different split each time (conditioned
# by rng of course)
ss = ShuffleSplit(random_state=None)
testsets_a = [testset for (_, testset) in ss.split(toy_data)]
testsets_b = [testset for (_, testset) in ss.split(toy_data)]
assert testsets_a != testsets_b
    # Repeated calls to split when random_state is set lead to the same folds
ss = ShuffleSplit(random_state=1)
testsets_a = [testset for (_, testset) in ss.split(toy_data)]
testsets_b = [testset for (_, testset) in ss.split(toy_data)]
assert testsets_a == testsets_b
# Test shuffle parameter, if False then splits are the same regardless of
# random_state.
ss = ShuffleSplit(random_state=1, shuffle=False)
testsets_a = [testset for (_, testset) in ss.split(toy_data)]
testsets_b = [testset for (_, testset) in ss.split(toy_data)]
assert testsets_a == testsets_b
def test_train_test_split(toy_data):
# test test_size to int and train_size to None (complement)
trainset, testset = train_test_split(toy_data, test_size=2, train_size=None)
assert len(testset) == 2
assert trainset.n_ratings == 3
# test test_size to float and train_size to None (complement)
trainset, testset = train_test_split(toy_data, test_size=0.2, train_size=None)
assert len(testset) == 1
assert trainset.n_ratings == 4
# test test_size to int and train_size to int
trainset, testset = train_test_split(toy_data, test_size=2, train_size=3)
assert len(testset) == 2
assert trainset.n_ratings == 3
# test test_size to None (complement) and train_size to int
trainset, testset = train_test_split(toy_data, test_size=None, train_size=2)
assert len(testset) == 3
assert trainset.n_ratings == 2
# test test_size to None (complement) and train_size to float
trainset, testset = train_test_split(toy_data, test_size=None, train_size=0.2)
assert len(testset) == 4
assert trainset.n_ratings == 1
# Test random_state parameter
# If random_state is None, you get different split each time (conditioned
# by rng of course)
testsets = set()
# TODO: all other tests should use similar logic instead of just 1 single try
for _ in range(30):
_, testset = train_test_split(toy_data, random_state=None)
testsets.add(tuple(testset))
assert len(testsets) > 1
    # Repeated calls to split when random_state is set lead to the same folds
_, testset_a = train_test_split(toy_data, random_state=1)
_, testset_b = train_test_split(toy_data, random_state=1)
assert testset_a == testset_b
# Test shuffle parameter, if False then splits are the same regardless of
# random_state.
_, testset_a = train_test_split(toy_data, random_state=1, shuffle=None)
_, testset_b = train_test_split(toy_data, random_state=1, shuffle=None)
assert testset_a == testset_b
def test_RepeatedCV(toy_data):
# test n_splits and n_repeats parameters
rkf = RepeatedKFold(n_splits=3, n_repeats=2)
assert len(list(rkf.split(toy_data))) == 3 * 2
rkf = RepeatedKFold(n_splits=3, n_repeats=4)
assert len(list(rkf.split(toy_data))) == 3 * 4
rkf = RepeatedKFold(n_splits=4, n_repeats=3)
assert len(list(rkf.split(toy_data))) == 4 * 3
    # Make sure folds are different between the 2 repetitions (even if
    # random_state is set, random_state controls the whole sequence)
rkf = RepeatedKFold(n_splits=3, n_repeats=2, random_state=3)
testsets = list(testset for (_, testset) in rkf.split(toy_data))
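    # with n_splits=3 and n_repeats=2, testsets[0:3] are the first repetition and testsets[3:6] the second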
for i in range(3):
assert testsets[i] != testsets[i + 3]
    # Make sure folds are the same when the same cv iterator is called on the
    # same data (if random_state is set)
rkf = RepeatedKFold(n_splits=3, n_repeats=2, random_state=3)
testsets_a = list(testset for (_, testset) in rkf.split(toy_data))
testsets_b = list(testset for (_, testset) in rkf.split(toy_data))
assert testsets_a == testsets_b
# Make sure folds are different when random_state is None
rkf = RepeatedKFold(n_splits=3, n_repeats=2, random_state=None)
testsets_a = list(testset for (_, testset) in rkf.split(toy_data))
testsets_b = list(testset for (_, testset) in rkf.split(toy_data))
assert testsets_a != testsets_b
def test_LeaveOneOut(toy_data):
loo = LeaveOneOut()
with pytest.raises(ValueError):
next(loo.split(toy_data)) # each user only has 1 item so trainsets fail
reader = Reader("ml-100k")
data_path = os.path.dirname(os.path.realpath(__file__)) + "/u1_ml100k_test"
data = Dataset.load_from_file(file_path=data_path, reader=reader)
# Test random_state parameter
# If random_state is None, you get different split each time (conditioned
# by rng of course)
loo = LeaveOneOut(random_state=None)
testsets_a = [testset for (_, testset) in loo.split(data)]
testsets_b = [testset for (_, testset) in loo.split(data)]
assert testsets_a != testsets_b
    # Repeated calls to split when random_state is set lead to the same folds
loo = LeaveOneOut(random_state=1)
testsets_a = [testset for (_, testset) in loo.split(data)]
testsets_b = [testset for (_, testset) in loo.split(data)]
assert testsets_a == testsets_b
# Make sure only one rating per user is present in the testset
loo = LeaveOneOut()
for _, testset in loo.split(data):
cnt = Counter([uid for (uid, _, _) in testset])
assert all(val == 1 for val in cnt.values())
# test the min_n_ratings parameter
loo = LeaveOneOut(min_n_ratings=5)
for trainset, _ in loo.split(data):
assert all(len(ratings) >= 5 for ratings in trainset.ur.values())
loo = LeaveOneOut(min_n_ratings=10)
for trainset, _ in loo.split(data):
assert all(len(ratings) >= 10 for ratings in trainset.ur.values())
loo = LeaveOneOut(min_n_ratings=10000) # too high
with pytest.raises(ValueError):
next(loo.split(data))
def test_PredefinedKFold():
reader = Reader(
line_format="user item rating", sep=" ", skip_lines=3, rating_scale=(1, 5)
)
current_dir = os.path.dirname(os.path.realpath(__file__))
folds_files = [(current_dir + "/custom_train", current_dir + "/custom_test")]
data = Dataset.load_from_folds(folds_files=folds_files, reader=reader)
# Make sure rating files are read correctly
pkf = PredefinedKFold()
trainset, testset = next(pkf.split(data))
assert trainset.n_ratings == 6
assert len(testset) == 3
def test_get_cv():
get_cv(None)
get_cv(4)
get_cv(KFold())
with pytest.raises(ValueError):
get_cv(23.2)
with pytest.raises(ValueError):
get_cv("bad")
| {
"content_hash": "f3cf09f5b5c879abcb3d9d5d6daa359b",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 82,
"avg_line_length": 37.907051282051285,
"alnum_prop": 0.6618753699162933,
"repo_name": "NicolasHug/Surprise",
"id": "1ca2502ed9a6e439c7dd51ebe6d259514c6a5a89",
"size": "11827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_split.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "58580"
},
{
"name": "Python",
"bytes": "198805"
},
{
"name": "Shell",
"bytes": "559"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import boto
import boto.s3.connection
import boto.s3.key
import boto.s3.bucket
import functools
import itertools
import os.path
import posixpath
from six import iteritems, itervalues
import sys
import s3pub.progress
def _upload(bucket, local_path, remote_path, md5, pbar):
    '''
    Upload a file to S3.
    Whether an upload is needed (etag mismatch or missing remote key) is
    decided in `_todos`; this helper always uploads the given file.
    '''
pbar.change_file(local_path)
# begin upload
boto.s3.key.Key(bucket, remote_path).set_contents_from_filename(
local_path,
policy='public-read',
cb=functools.partial(_xfer_status, pbar),
md5=md5,
)
def _xfer_status(pbar, done, _):
pbar.increment(done)
def _remote_path(dest, local_path, src_root):
'''
Return the key corresponding to a local path.
'os.path' is dynamically mapped to the relevant path module at runtime.
The 'ntpath' version of 'relpath' returns absolute paths when its second
argument is omitted; we avoid this by using 'posixpath' and converting
directory separators, since all s3 paths use forward slashes.
'''
dest = dest.rstrip('/')
local_path = local_path.replace('\\', '/')
src_root = src_root.replace('\\', '/')
return (dest and dest + '/' or '') + \
posixpath.relpath(local_path, src_root)
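# Illustrative only (not part of the original module):
#   _remote_path('site', 'build\\css\\main.css', 'build')  -> 'site/css/main.css'
#   _remote_path('', '/tmp/out/index.html', '/tmp/out')    -> 'index.html'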
def _todos(bucket, prefix, paths, check_removed=True):
'''
Return information about upcoming uploads and deletions.
Returns a tuple: (upload, delete)
'upload' is a dictionary of info about files that need to be uploaded.
It is keyed on local paths, and maps to a tuple:
((hex_md5, base64_md5, filesize), remote_path)
'delete' is a list of S3 keys that should be removed. If 'check_removed'
is False, this list will always be empty.
'''
# map rpath -> lpath; we use this to compare md5s for existing keys
rpath_map = dict((i[1], i[0]) for i in paths)
# Iterate through the BucketListResultSet only once; we'll add elements to
# two containers and will return them at the end.
up = {}
delete = []
# Create a set of keys in S3 for comparison later
s3_keys = set()
# add entries for keys that have different contents
for key in bucket.list(prefix):
# Since we're already iterating through the result set, we'll save
# key names.
s3_keys.add(key.name)
if check_removed and key.name not in rpath_map:
# this key doesn't exist locally, schedule deletion
delete.append(key.name)
continue
# file exists in both; compare md5s
lpath = rpath_map[key.name]
with open(lpath, 'rb') as fp:
md5 = boto.s3.key.compute_md5(fp)
if key.etag.strip('"') != md5[0].strip('"'):
up[lpath] = (md5, key.name)
# schedule uploads for new keys
for rpath in set(i[1] for i in paths) - s3_keys:
lpath = rpath_map[rpath]
with open(lpath, 'rb') as fp:
md5 = boto.s3.key.compute_md5(fp)
up[lpath] = (md5, rpath)
return up, delete
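# Illustrative shape of the return value (values are made up):
#   up     == {'/src/a.html': (('<hex md5>', '<b64 md5>', 1234), 'prefix/a.html')}
#   delete == ['prefix/stale.html']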
def _split_dest(dest):
'''
Split apart the bucket name and key prefix for uploads.
'''
dest = dest.strip()
if not dest:
raise ValueError(u'invalid value: {}'.format(dest))
idx = dest.find(u'/')
if idx == -1:
return (dest, u'')
return (dest[:idx], dest[idx+1:])
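# Examples (illustrative): _split_dest('my-bucket/site/assets') -> ('my-bucket', 'site/assets')
#                          _split_dest('my-bucket')             -> ('my-bucket', u'')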
def _get_index_doc(bucket):
'''
Return the configured index document name for the bucket.
'''
try:
conf = bucket.get_website_configuration()
except boto.exception.S3ResponseError:
return
return conf['WebsiteConfiguration']['IndexDocument']['Suffix']
def do_upload(src, dst, delete, creds):
'''
Upload and delete files as necessary to synchronize S3.
Return a list of remote keys modified.
'''
conn = boto.s3.connection.S3Connection(**creds.as_dict())
# split bucket name from key prefix
bucket_name, prefix = _split_dest(dst)
bucket = conn.get_bucket(bucket_name)
# paths is a list of tuples: (local, remote)
paths = []
for root, _, files in os.walk(src):
for filename in files:
lpath = os.path.join(root, filename)
paths.append((lpath, _remote_path(prefix, lpath, src)))
to_upload, to_delete = _todos(bucket, prefix, paths, delete)
if not to_upload and not to_delete:
return []
inval_paths = []
if to_upload:
# do upload
pbar = s3pub.progress.UploadProgressBar(
dict((lpath, info[2]) for lpath, (info, _) in iteritems(to_upload)))
for lpath, (md5, rpath) in iteritems(to_upload):
_upload(bucket, lpath, rpath, md5, pbar)
inval_paths.append(rpath)
pbar.finish()
indexname = _get_index_doc(bucket)
if indexname:
inval_paths.extend(
itertools.chain.from_iterable(
# add index paths with and without trailing slash
[os.path.dirname(rpath), os.path.dirname(rpath) + '/'] for
_, rpath in itervalues(to_upload)
if os.path.basename(rpath) == indexname
)
)
if delete and to_delete:
# do deletion
mdr = bucket.delete_keys(to_delete)
if mdr.errors:
sys.stderr.write(
'ERROR: problems were encountered trying to remove the '
'following objects.\n')
for e in mdr.errors:
sys.stderr.write(u' {} - {} - {}\n'.format(
e.key, e.code, e.message))
raise Exception('Errors reported by S3')
inval_paths.extend(to_delete)
return inval_paths
| {
"content_hash": "e9f1816e2f808aabd8fe62b5ef9254d1",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 80,
"avg_line_length": 31.983425414364643,
"alnum_prop": 0.607877008118846,
"repo_name": "marmida/s3pub",
"id": "51367aa00c1d5717e7affd02c6a10fc258ddbcc5",
"size": "5789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "s3pub/upload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "510"
},
{
"name": "Python",
"bytes": "32089"
}
],
"symlink_target": ""
} |
import os
import pytest
import json
import pyethereum.processblock as processblock
import pyethereum.blocks as blocks
import pyethereum.transactions as transactions
import pyethereum.rlp as rlp
import pyethereum.trie as trie
import pyethereum.miner as miner
import pyethereum.utils as utils
from pyethereum.db import DB as DB
from pyethereum.config import get_default_config
from tests.utils import set_db
import logging
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
logger = logging.getLogger()
set_db()
pblogger = processblock.pblogger
# customize VM log output to your needs
# hint: use 'py.test' with the '-s' option to dump logs to the console
pblogger.log_pre_state = True # dump storage at account before execution
pblogger.log_post_state = True # dump storage at account after execution
pblogger.log_block = False # dump block after TX was applied
pblogger.log_memory = False # dump memory before each op
pblogger.log_op = True # log op, gas, stack before each op
pblogger.log_json = False # generate machine readable output
@pytest.fixture(scope="module")
def genesis_fixture():
"""
Read genesis block from fixtures.
"""
genesis_fixture = None
with open('fixtures/genesishashestest.json', 'r') as f:
genesis_fixture = json.load(f)
    assert genesis_fixture is not None, "Could not read genesishashestest.json from fixtures. Make sure you did 'git submodule init'!"
# FIXME: assert that link is uptodate
for k in ('genesis_rlp_hex', 'genesis_state_root', 'genesis_hash', 'initial_alloc'):
assert k in genesis_fixture
assert utils.sha3(genesis_fixture['genesis_rlp_hex'].decode('hex')).encode('hex') ==\
genesis_fixture['genesis_hash']
return genesis_fixture
@pytest.fixture(scope="module")
def accounts():
k = utils.sha3('cow')
v = utils.privtoaddr(k)
k2 = utils.sha3('horse')
v2 = utils.privtoaddr(k2)
return k, v, k2, v2
@pytest.fixture(scope="module")
def mkgenesis(initial_alloc={}):
return blocks.genesis(initial_alloc)
@pytest.fixture(scope="module")
def mkquickgenesis(initial_alloc={}):
"set INITIAL_DIFFICULTY to a value that is quickly minable"
return blocks.genesis(initial_alloc, difficulty=2 ** 16)
def mine_next_block(parent, uncles=[], coinbase=None, transactions=[]):
# advance one block
coinbase = coinbase or parent.coinbase
m = miner.Miner(parent, uncles=uncles, coinbase=coinbase)
for tx in transactions:
m.add_transaction(tx)
blk = m.mine(steps=1000 ** 2)
assert blk is not False, "Mining failed. Use mkquickgenesis!"
return blk
@pytest.fixture(scope="module")
def get_transaction(gasprice=0, nonce=0):
k, v, k2, v2 = accounts()
tx = transactions.Transaction(
nonce, gasprice, startgas=10000,
to=v2, value=utils.denoms.finney * 10, data='').sign(k)
return tx
@pytest.fixture(scope="module")
def get_chainmanager(genesis=None):
import pyethereum.chainmanager as chainmanager
cm = chainmanager.ChainManager()
cm.configure(config=get_default_config(), genesis=genesis)
return cm
def db_store(blk):
utils.db_put(blk.hash, blk.serialize())
assert blocks.get_block(blk.hash) == blk
def test_db():
set_db()
db = DB(utils.get_db_path())
a, b = DB(utils.get_db_path()), DB(utils.get_db_path())
assert a == b
assert a.uncommitted == b.uncommitted
a.put('a', 'b')
    assert b.get('a') == 'b'
assert a.uncommitted == b.uncommitted
a.commit()
assert a.uncommitted == b.uncommitted
assert 'test' not in db
set_db()
assert a != DB(utils.get_db_path())
def test_transfer():
k, v, k2, v2 = accounts()
blk = blocks.genesis({v: utils.denoms.ether * 1})
b_v = blk.get_balance(v)
b_v2 = blk.get_balance(v2)
value = 42
success = blk.transfer_value(v, v2, value)
assert success
assert blk.get_balance(v) == b_v - value
assert blk.get_balance(v2) == b_v2 + value
def test_failing_transfer():
k, v, k2, v2 = accounts()
blk = blocks.genesis({v: utils.denoms.ether * 1})
b_v = blk.get_balance(v)
b_v2 = blk.get_balance(v2)
value = utils.denoms.ether * 2
# should fail
success = blk.transfer_value(v, v2, value)
assert not success
assert blk.get_balance(v) == b_v
assert blk.get_balance(v2) == b_v2
def test_transient_block():
blk = blocks.genesis()
tb_blk = blocks.TransientBlock(blk.serialize())
assert blk.hash == tb_blk.hash
assert blk.number == tb_blk.number
def test_genesis():
k, v, k2, v2 = accounts()
set_db()
blk = blocks.genesis({v: utils.denoms.ether * 1})
sr = blk.state_root
db = DB(utils.get_db_path())
assert blk.state.db.db == db.db
db.put(blk.hash, blk.serialize())
blk.state.db.commit()
assert sr in db
db.commit()
assert sr in db
blk2 = blocks.genesis({v: utils.denoms.ether * 1})
blk3 = blocks.genesis()
assert blk == blk2
assert blk != blk3
set_db()
blk2 = blocks.genesis({v: utils.denoms.ether * 1})
blk3 = blocks.genesis()
assert blk == blk2
assert blk != blk3
def test_genesis_db():
k, v, k2, v2 = accounts()
set_db()
blk = blocks.genesis({v: utils.denoms.ether * 1})
db_store(blk)
blk2 = blocks.genesis({v: utils.denoms.ether * 1})
blk3 = blocks.genesis()
assert blk == blk2
assert blk != blk3
set_db()
blk2 = blocks.genesis({v: utils.denoms.ether * 1})
blk3 = blocks.genesis()
assert blk == blk2
assert blk != blk3
@pytest.mark.xfail
def test_genesis_state_root(genesis_fixture):
# https://ethereum.etherpad.mozilla.org/12
set_db()
genesis = blocks.genesis()
for k, v in blocks.GENESIS_INITIAL_ALLOC.items():
assert genesis.get_balance(k) == v
assert genesis.state_root.encode(
'hex') == genesis_fixture['genesis_state_root']
@pytest.mark.xfail
def test_genesis_hash(genesis_fixture):
set_db()
genesis = blocks.genesis()
"""
YP: https://raw.githubusercontent.com/ethereum/latexpaper/master/Paper.tex
0256 , SHA3RLP(), 0160 , stateRoot, 0256 , 2**22 , 0, 0, 1000000, 0, 0, (),
SHA3(42), (), ()
Where 0256 refers to the parent and state and transaction root hashes,
a 256-bit hash which is all zeroes;
0160 refers to the coinbase address,
a 160-bit hash which is all zeroes;
2**22 refers to the difficulty;
0 refers to the timestamp (the Unix epoch);
() refers to the extradata and the sequences of both uncles and
transactions, all empty.
SHA3(42) refers to the SHA3 hash of a byte array of length one whose first
and only byte is of value 42.
SHA3RLP() values refer to the hashes of the transaction and uncle lists
in RLP
both empty.
The proof-of-concept series include a development premine, making the state
root hash some value stateRoot. The latest documentation should be
consulted for the value of the state root.
"""
h256 = '\00' * 32
sr = genesis_fixture['genesis_state_root'].decode('hex')
genesis_block_defaults = [
["prevhash", "bin", h256], # h256()
["uncles_hash", "bin", utils.sha3(rlp.encode([]))], # sha3EmptyList
["coinbase", "addr", "0" * 40], # h160()
["state_root", "trie_root", sr], # stateRoot
["tx_list_root", "trie_root", trie.BLANK_ROOT], # h256()
["difficulty", "int", 2 ** 17], # c_genesisDifficulty
["number", "int", 0], # 0
["min_gas_price", "int", 0], # 0
["gas_limit", "int", 10 ** 6], # 10**6 for genesis
["gas_used", "int", 0], # 0
["timestamp", "int", 0], # 0
["extra_data", "bin", ""], # ""
["nonce", "bin", utils.sha3(chr(42))], # sha3(bytes(1, 42));
]
cpp_genesis_block = rlp.decode(
genesis_fixture['genesis_rlp_hex'].decode('hex'))
cpp_genesis_header = cpp_genesis_block[0]
for i, (name, typ, genesis_default) in enumerate(genesis_block_defaults):
assert utils.decoders[typ](cpp_genesis_header[i]) == genesis_default
assert getattr(genesis, name) == genesis_default
assert genesis.hex_hash() == genesis_fixture['genesis_hash']
assert genesis.hex_hash() == utils.sha3(
genesis_fixture['genesis_rlp_hex'].decode('hex')
).encode('hex')
def test_mine_block():
k, v, k2, v2 = accounts()
set_db()
blk = mkquickgenesis({v: utils.denoms.ether * 1})
db_store(blk)
blk2 = mine_next_block(blk, coinbase=v)
db_store(blk2)
assert blk2.get_balance(v) == blocks.BLOCK_REWARD + blk.get_balance(v)
assert blk.state.db.db == blk2.state.db.db
assert blk2.get_parent() == blk
def test_mine_block_with_transaction_included():
k, v, k2, v2 = accounts()
# mine two blocks
set_db()
a_blk = mkquickgenesis({v: utils.denoms.ether * 1})
db_store(a_blk)
tx = get_transaction()
a_blk2 = mine_next_block(a_blk, transactions=[tx])
assert tx in a_blk2.get_transactions()
def test_block_serialization_with_transaction_empty_genesis():
k, v, k2, v2 = accounts()
set_db()
a_blk = mkquickgenesis({})
db_store(a_blk)
tx = get_transaction(gasprice=10) # must fail, as there is no balance
a_blk2 = mine_next_block(a_blk, transactions=[tx])
assert tx not in a_blk2.get_transactions()
def test_mine_block_with_transaction_balances():
k, v, k2, v2 = accounts()
set_db()
blk = mkquickgenesis({v: utils.denoms.ether * 1})
db_store(blk)
tx = get_transaction()
blk2 = mine_next_block(blk, coinbase=v, transactions=[tx])
assert tx in blk2.get_transactions()
db_store(blk2)
assert tx in blk2.get_transactions()
assert blocks.get_block(blk2.hash) == blk2
assert tx.gasprice == 0
assert blk2.get_balance(
v) == blocks.BLOCK_REWARD + blk.get_balance(v) - tx.value
assert blk.state.db.db == blk2.state.db.db
assert blk2.get_parent() == blk
assert tx in blk2.get_transactions()
assert tx not in blk.get_transactions()
def test_block_serialization_same_db():
k, v, k2, v2 = accounts()
set_db()
blk = mkquickgenesis({v: utils.denoms.ether * 1})
assert blk.hex_hash() == \
blocks.Block.deserialize(blk.serialize()).hex_hash()
db_store(blk)
blk2 = mine_next_block(blk)
assert blk.hex_hash() == \
blocks.Block.deserialize(blk.serialize()).hex_hash()
assert blk2.hex_hash() == \
blocks.Block.deserialize(blk2.serialize()).hex_hash()
def test_block_serialization_other_db():
k, v, k2, v2 = accounts()
# mine two blocks
set_db()
a_blk = mkquickgenesis()
db_store(a_blk)
a_blk2 = mine_next_block(a_blk)
db_store(a_blk2)
# receive in other db
set_db()
b_blk = mkquickgenesis()
assert b_blk == a_blk
db_store(b_blk)
b_blk2 = b_blk.deserialize(a_blk2.serialize())
assert a_blk2.hex_hash() == b_blk2.hex_hash()
db_store(b_blk2)
assert a_blk2.hex_hash() == b_blk2.hex_hash()
def test_block_serialization_with_transaction_other_db():
hx = lambda x: x.encode('hex')
k, v, k2, v2 = accounts()
# mine two blocks
set_db()
a_blk = mkquickgenesis({v: utils.denoms.ether * 1})
db_store(a_blk)
tx = get_transaction()
logger.debug('a: state_root before tx %r', hx(a_blk.state_root))
logger.debug('a: state:\n%s', utils.dump_state(a_blk.state))
a_blk2 = mine_next_block(a_blk, transactions=[tx])
logger.debug('a: state_root after tx %r', hx(a_blk2.state_root))
logger.debug('a: state:\n%s', utils.dump_state(a_blk2.state))
assert tx in a_blk2.get_transactions()
db_store(a_blk2)
assert tx in a_blk2.get_transactions()
logger.debug('preparing receiving chain ---------------------')
# receive in other db
set_db()
b_blk = mkquickgenesis({v: utils.denoms.ether * 1})
db_store(b_blk)
assert b_blk.number == 0
assert b_blk == a_blk
logger.debug('b: state_root before tx %r', hx(b_blk.state_root))
logger.debug('starting deserialization of remote block w/ tx')
b_blk2 = b_blk.deserialize(a_blk2.serialize()) # BOOM
logger.debug('b: state_root after %r', hx(b_blk2.state_root))
assert a_blk2.hex_hash() == b_blk2.hex_hash()
assert tx in b_blk2.get_transactions()
db_store(b_blk2)
assert a_blk2.hex_hash() == b_blk2.hex_hash()
assert tx in b_blk2.get_transactions()
def test_transaction():
k, v, k2, v2 = accounts()
set_db()
blk = mkquickgenesis({v: utils.denoms.ether * 1})
db_store(blk)
blk = mine_next_block(blk)
tx = get_transaction()
assert tx not in blk.get_transactions()
success, res = processblock.apply_transaction(blk, tx)
assert tx in blk.get_transactions()
assert blk.get_balance(v) == utils.denoms.finney * 990
assert blk.get_balance(v2) == utils.denoms.finney * 10
def test_transaction_serialization():
k, v, k2, v2 = accounts()
tx = get_transaction()
assert tx in set([tx])
assert tx.hex_hash() == \
transactions.Transaction.deserialize(tx.serialize()).hex_hash()
assert tx.hex_hash() == \
transactions.Transaction.hex_deserialize(tx.hex_serialize()).hex_hash()
assert tx in set([tx])
def test_mine_block_with_transaction():
k, v, k2, v2 = accounts()
set_db()
blk = mkquickgenesis({v: utils.denoms.ether * 1})
db_store(blk)
tx = get_transaction()
blk = mine_next_block(blk, transactions=[tx])
assert tx in blk.get_transactions()
assert blk.get_balance(v) == utils.denoms.finney * 990
assert blk.get_balance(v2) == utils.denoms.finney * 10
def test_invalid_transaction():
k, v, k2, v2 = accounts()
set_db()
blk = mkquickgenesis({v2: utils.denoms.ether * 1})
db_store(blk)
tx = get_transaction()
blk = mine_next_block(blk, transactions=[tx])
assert blk.get_balance(v) == 0
assert blk.get_balance(v2) == utils.denoms.ether * 1
assert tx not in blk.get_transactions()
def test_add_side_chain():
""""
Local: L0, L1, L2
add
Remote: R0, R1
"""
k, v, k2, v2 = accounts()
# Remote: mine one block
set_db()
R0 = mkquickgenesis({v: utils.denoms.ether * 1})
db_store(R0)
tx0 = get_transaction(nonce=0)
R1 = mine_next_block(R0, transactions=[tx0])
db_store(R1)
assert tx0 in R1.get_transactions()
# Local: mine two blocks
set_db()
L0 = mkquickgenesis({v: utils.denoms.ether * 1})
cm = get_chainmanager(genesis=L0)
tx0 = get_transaction(nonce=0)
L1 = mine_next_block(L0, transactions=[tx0])
cm.add_block(L1)
tx1 = get_transaction(nonce=1)
L2 = mine_next_block(L1, transactions=[tx1])
cm.add_block(L2)
# receive serialized remote blocks, newest first
transient_blocks = [blocks.TransientBlock(R0.serialize()),
blocks.TransientBlock(R1.serialize())]
cm.receive_chain(transient_blocks=transient_blocks)
assert L2.hash in cm
def test_add_longer_side_chain():
""""
Local: L0, L1, L2
Remote: R0, R1, R2, R3
"""
k, v, k2, v2 = accounts()
# Remote: mine one block
set_db()
blk = mkquickgenesis({v: utils.denoms.ether * 1})
db_store(blk)
remote_blocks = [blk]
for i in range(3):
tx = get_transaction(nonce=i)
blk = mine_next_block(remote_blocks[-1], transactions=[tx])
db_store(blk)
remote_blocks.append(blk)
# Local: mine two blocks
set_db()
L0 = mkquickgenesis({v: utils.denoms.ether * 1})
cm = get_chainmanager(genesis=L0)
tx0 = get_transaction(nonce=0)
L1 = mine_next_block(L0, transactions=[tx0])
cm.add_block(L1)
tx1 = get_transaction(nonce=1)
L2 = mine_next_block(L1, transactions=[tx1])
cm.add_block(L2)
# receive serialized remote blocks, newest first
transient_blocks = [blocks.TransientBlock(b.serialize()) for b in remote_blocks]
cm.receive_chain(transient_blocks=transient_blocks)
assert cm.head == remote_blocks[-1]
def test_reward_uncles():
"""
B0 B1 B2
B0 Uncle
We raise the block's coinbase account by Rb, the block reward,
and also add uncle and nephew rewards
"""
k, v, k2, v2 = accounts()
set_db()
blk0 = mkquickgenesis()
local_coinbase = '1' * 40
uncle_coinbase = '2' * 40
cm = get_chainmanager(genesis=blk0)
blk1 = mine_next_block(blk0, coinbase=local_coinbase)
cm.add_block(blk1)
assert blk1.get_balance(local_coinbase) == 1 * blocks.BLOCK_REWARD
uncle = mine_next_block(blk0, coinbase=uncle_coinbase)
cm.add_block(uncle)
assert uncle.hash in cm
assert cm.head.get_balance(local_coinbase) == 1 * blocks.BLOCK_REWARD
assert cm.head.get_balance(uncle_coinbase) == 0
# next block should reward uncles
blk2 = mine_next_block(blk1, uncles=[uncle], coinbase=local_coinbase)
cm.add_block(blk2)
assert blk2.get_parent().prevhash == uncle.prevhash
assert blk2 == cm.head
assert cm.head.get_balance(local_coinbase) == \
2 * blocks.BLOCK_REWARD + blocks.NEPHEW_REWARD
assert cm.head.get_balance(uncle_coinbase) == blocks.UNCLE_REWARD
# TODO ##########################################
#
# test for remote block with invalid transaction
# test for multiple transactions from same address received
# in arbitrary order mined in the same block
| {
"content_hash": "c566036f165652487a1a0f381b9fb3c0",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 132,
"avg_line_length": 32.267657992565056,
"alnum_prop": 0.6400345622119815,
"repo_name": "jnnk/pyethereum",
"id": "cb94a706a0a2e8ee1c6381a9f02864a2b49b4a5d",
"size": "17360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_chain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "460127"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^achievs/', include('achievs.urls')),
# Example:
# (r'^scoreboard/', include('scoreboard.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "39ab16b311935176fbc74aa6fe8bd86f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 71,
"avg_line_length": 32.8125,
"alnum_prop": 0.6914285714285714,
"repo_name": "eawerbaneth/Scoreboard",
"id": "84d036f25d2f3cc85158bb825484f78beeec8e20",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8003"
}
],
"symlink_target": ""
} |
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "SOSD" not in os.environ:
os.environ["SOSD"] = buildDir + '/src/sosd' + EXEEXT
if "SOSCLI" not in os.environ:
os.environ["SOSCLI"] = buildDir + '/src/sos-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print "Win tests currently disabled by default. Use -win option to enable"
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
sys.exit(0)
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py', # NOTE: needs sos_hash to pass
'blockchain.py',
'disablewallet.py',
'sendheaders.py', # NOTE: needs sos_hash to pass
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs sos_hash to pass
'invalidtxrequest.py', # NOTE: needs sos_hash to pass
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs sos_hash to pass
'bip68-sequence.py',
'bipdersig-p2p.py', # NOTE: needs sos_hash to pass
'bipdersig.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
# 'pruning.py', # Prune mode is incompatible with -txindex.
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs sos_hash to pass
'mempool_packages.py',
'maxuploadtarget.py',
# 'replace-by-fee.py', # RBF is disabled in Sos Core
]
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| {
"content_hash": "0a55185dd9d6b5a3bc5056e60a541680",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 163,
"avg_line_length": 31.38888888888889,
"alnum_prop": 0.6206489675516225,
"repo_name": "totcoindev/totcoin",
"id": "cf894e37b049b47c882f4dbe94eabae9b4e1cd05",
"size": "8690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/pull-tester/rpc-tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1315035"
},
{
"name": "C++",
"bytes": "5282202"
},
{
"name": "CSS",
"bytes": "124299"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "141707"
},
{
"name": "Makefile",
"bytes": "96871"
},
{
"name": "Objective-C",
"bytes": "4930"
},
{
"name": "Objective-C++",
"bytes": "7222"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "706097"
},
{
"name": "QMake",
"bytes": "2054"
},
{
"name": "Roff",
"bytes": "3649"
},
{
"name": "Shell",
"bytes": "35569"
}
],
"symlink_target": ""
} |
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.asset_v1p1beta1.types import asset_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-asset",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AssetServiceTransport(abc.ABC):
"""Abstract transport class for AssetService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "cloudasset.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
        # Don't apply audience if the credentials file was passed from the user.
if hasattr(credentials, "with_gdch_audience"):
credentials = credentials.with_gdch_audience(
api_audience if api_audience else host
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.search_all_resources: gapic_v1.method.wrap_method(
self.search_all_resources,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=15.0,
),
default_timeout=15.0,
client_info=client_info,
),
self.search_all_iam_policies: gapic_v1.method.wrap_method(
self.search_all_iam_policies,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=15.0,
),
default_timeout=15.0,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def search_all_resources(
self,
) -> Callable[
[asset_service.SearchAllResourcesRequest],
Union[
asset_service.SearchAllResourcesResponse,
Awaitable[asset_service.SearchAllResourcesResponse],
],
]:
raise NotImplementedError()
@property
def search_all_iam_policies(
self,
) -> Callable[
[asset_service.SearchAllIamPoliciesRequest],
Union[
asset_service.SearchAllIamPoliciesResponse,
Awaitable[asset_service.SearchAllIamPoliciesResponse],
],
]:
raise NotImplementedError()
@property
def kind(self) -> str:
raise NotImplementedError()
__all__ = ("AssetServiceTransport",)
| {
"content_hash": "eef632d71e2b61e72d7fd9758988f2c5",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 101,
"avg_line_length": 37.58918918918919,
"alnum_prop": 0.5927523727351165,
"repo_name": "googleapis/python-asset",
"id": "66abbb1a915cf53090aca6338515dc01ea6bcdac",
"size": "7554",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/asset_v1p1beta1/services/asset_service/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1590384"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
} |
import random
import threading
import time
from org.apache.helix.agent.SystemUtil import SystemUtil
from org.apache.helix.agent.SystemUtil import ProcessStateCode
from org.apache.helix.util.logger import get_logger
class ProcessMonitorThread(threading.Thread):
"""
Java modifiers:
private final static
Type:
Logger
"""
logger = get_logger(__name__)
"""
Java modifiers:
private final static
Type:
int
"""
MONITOR_PERIOD_BASE = 1000
"""
Parameters:
String pid
"""
    def __init__(self, pid):
        threading.Thread.__init__(self)  # initialise the Thread base class so start() works
        self._pid = pid
def run(self):
"""
Returns void
@Override
"""
try:
# ProcessStateCode
processState = SystemUtil.getProcessState(self._pid)
while processState != None:
if processState == ProcessStateCode.Z:
self.logger.error("process: " + self._pid + " is in zombie state")
break
time.sleep((random.randint(0,ProcessMonitorThread.MONITOR_PERIOD_BASE) + ProcessMonitorThread.MONITOR_PERIOD_BASE)/1000.0)
processState = SystemUtil.getProcessState(self._pid)
except Exception, e:
ProcessMonitorThread.logger.error("fail to monitor process: " + self._pid, e)
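# Minimal usage sketch (the pid is passed as a string, matching SystemUtil's
# interface; the thread exits once the process disappears or turns zombie):
#
#     monitor = ProcessMonitorThread('12345')
#     monitor.start()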
| {
"content_hash": "0919505eda96822221e45c456512216d",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 138,
"avg_line_length": 22.915254237288135,
"alnum_prop": 0.591715976331361,
"repo_name": "davzhang/helix-python-binding",
"id": "a12665479746c093cf849392bd5878c9bd80a182",
"size": "1580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "org/apache/helix/agent/ProcessMonitorThread.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "33558"
},
{
"name": "Python",
"bytes": "760674"
}
],
"symlink_target": ""
} |
"""Base stuff for XAL resources."""
class Resource(object):
"""Base class for XAL resources."""
def __init__(self):
"""Constructor."""
#: Execution context which the resource belongs to.
self.xal_session = None
#: List of internal methods that provides diagnosis information.
self.xal_diagnosis_methods = ['exists']
def exists(self):
"""Return True if the resource exists in current execution context."""
raise NotImplementedError()
def diagnosis(self, items):
"""Return a mapping containing diagnosis about the resource.
Diagnosis are not supposed to alter the resource.
"""
diagnosis = {}
for name in self.xal_diagnosis_methods:
method = getattr(self, name)
diagnosis[name] = method()
return diagnosis
def test(self, items):
"""Run tests on the resource.
Tests may temporarily use or alter the resource with some mock/fake
actions: write data to a file, connect to a database...
"""
raise NotImplementedError()
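# Minimal subclass sketch (a hypothetical file resource) showing how `exists`
# feeds into `diagnosis` via `xal_diagnosis_methods`:
#
#     import os
#
#     class FileResource(Resource):
#         def __init__(self, path):
#             super(FileResource, self).__init__()
#             self.path = path
#
#         def exists(self):
#             return os.path.isfile(self.path)
#
#     FileResource('/etc/hostname').diagnosis(None)  # -> {'exists': True or False}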
| {
"content_hash": "01eb1412997c1f81811561ca3c7334de",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 30.027027027027028,
"alnum_prop": 0.6174617461746175,
"repo_name": "benoitbryon/xal",
"id": "c08367503e108a85c9eb87874ddf5757ecd2b005",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xal/resource.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1418"
},
{
"name": "Python",
"bytes": "63885"
},
{
"name": "Shell",
"bytes": "562"
}
],
"symlink_target": ""
} |
import enum
import logging
from typing import Iterable, Union, Set, Tuple
from collections import defaultdict, OrderedDict, deque
import networkx
from phasm.alignments import (LocalAlignment, AlignmentType, MergedReads,
OrientedDNASegment, OrientedRead)
from phasm.bubbles import find_superbubbles, superbubble_nodes
from phasm.typing import AlignmentsT, Node, Path
logger = logging.getLogger(__name__)
class AssemblyError(Exception):
pass
class NodeState(enum.IntEnum):
VACANT = 0
IN_PLAY = 1
ELIMINATED = 2
class AssemblyGraph(networkx.DiGraph):
adjlist_dict_factory = OrderedDict
def __init__(self, data: dict=None, **kwargs):
super().__init__(data, **kwargs)
self.sequence_src = None
def subgraph(self, nbunch):
g = super().subgraph(nbunch)
g.sequence_src = self.sequence_src
return g
@property
def edge_len(self):
return self.graph.get('edge_len', 'weight')
@property
def overlap_len(self):
return self.graph.get('overlap_len', 'overlap_len')
def sort_adjacency_lists(self, reverse: bool=False, weight: str='weight'):
for v in self:
sorted_iter = sorted(self.adj[v].items(),
key=lambda e: e[1][weight])
if reverse:
sorted_iter = reversed(sorted_iter)
self.adj[v] = OrderedDict(sorted_iter)
def get_sequence(self, read: OrientedDNASegment) -> bytes:
"""Get the sequence for an oriented read. Mostly a convenience function
which passes the request to `SequenceSource`."""
if not self.sequence_src:
raise ValueError("No valid sequence source provided. Cannot get "
"DNA sequence of a read.")
return self.sequence_src.get_sequence(read)
def sequence_for_path(self, path: Path, edge_len: str='weight',
include_last: bool=True) -> bytes:
"""Get the actual DNA sequence for a path through the assembly graph.
Requires `self.sequence_src` to be set."""
if not self.sequence_src:
raise ValueError("No valid sequence source provided. Cannot spell "
"DNA sequence of a path.")
last = None
sequence_parts = []
for u, v, data in path:
sequence = self.sequence_src.get_sequence(u)
sequence_parts.append(sequence[:data[edge_len]])
last = v
if include_last and last:
sequence_parts.append(self.sequence_src.get_sequence(last))
return b"".join(sequence_parts)
def path_length(self, path: Path, edge_len: str='weight',
include_last: bool=True):
path = list(path)
edge_len_sum = sum(d[edge_len] for u, v, d in path)
if include_last:
edge_len_sum += len(path[-1][1])
return edge_len_sum
def node_path_edges(self, nodes: Iterable[Node],
data: Union[bool, str]=None) -> Path:
"""A generator that yields the edges for a path between the given
nodes. If an edge does not exists in the graph an exception is raised.
Example::
>>> g = AssemblyGraph()
>>> g.add_edges_from([(1, 2), (2, 3), (3, 4)])
            >>> path = g.node_path_edges([1, 2, 3])
            >>> list(path)
[(1, 2), (2, 3)]
"""
if len(nodes) < 2:
raise ValueError("Not enough nodes to generate a path from")
node_iter = iter(nodes)
# Automatically raises StopIteration at the end of the iterator
node_from = next(node_iter)
while True:
node_to = next(node_iter)
if not self.has_edge(node_from, node_to):
raise ValueError("Given nodes do not form a path, edge {} "
"not exist".format((node_from, node_to)))
if data:
if data is True:
yield (node_from, node_to, self[node_from][node_to])
else:
yield (node_from, node_to, self[node_from][node_to][data])
else:
yield (node_from, node_to)
node_from = node_to
def build_assembly_graph(la_iter: Iterable[LocalAlignment],
edge_len: str='weight',
overlap_len: str='overlap_len') -> AssemblyGraph:
g = AssemblyGraph(edge_len=edge_len, overlap_len=overlap_len)
for la in la_iter:
la_type = la.classify()
a_node, b_node = la.get_oriented_reads()
a_rev, b_rev = a_node.reverse(), b_node.reverse()
if la_type == AlignmentType.OVERLAP_AB:
g.add_edge(a_node, b_node, {
edge_len: la.arange[0] - la.brange[0],
overlap_len: la.get_overlap_length()
})
g.add_edge(b_rev, a_rev, {
edge_len: ((len(la.b) - la.brange[1]) -
(len(la.a) - la.arange[1])),
overlap_len: la.get_overlap_length()
})
logger.debug('Added edge (%s, %s) with weight %d',
a_node, b_node, g[a_node][b_node][edge_len])
logger.debug('Added edge (%s, %s) with weight %d',
b_rev, a_rev, g[b_rev][a_rev][edge_len])
elif la_type == AlignmentType.OVERLAP_BA:
g.add_edge(b_node, a_node, {
edge_len: la.brange[0] - la.arange[0],
overlap_len: la.get_overlap_length()
})
g.add_edge(a_rev, b_rev, {
edge_len: ((len(la.a) - la.arange[1]) -
(len(la.b) - la.brange[1])),
overlap_len: la.get_overlap_length()
})
logger.debug('Added edge (%s, %s) with weight %d',
b_node, a_node, g[b_node][a_node][edge_len])
logger.debug('Added edge (%s, %s) with weight %d',
a_rev, b_rev, g[a_rev][b_rev][edge_len])
return g
def remove_transitive_edges(g: AssemblyGraph, length_fuzz: int=1000):
"""This function implements the transitive edge reduction algorithm
described by Myers (2005) [MYERS2005]_.
It removes edges that are "redundant" in the sense that if there is an
edge between node `x` and node `y`, an edge between node `y` and node `z`,
    and an edge between node `x` and node `z`, the edge between `x` and `z` can
be removed.
As a visualisation::
          _________
         /         \
        x---y---z        =>        x---y---z
.. [MYERS2005] Myers, E. W. (2005). The fragment assembly string graph.
Bioinformatics, 21(SUPPL. 2), 79–85.
http://doi.org/10.1093/bioinformatics/bti1114
"""
edges_to_remove = []
node_state = defaultdict(lambda: NodeState.VACANT)
logger.info("Transitive edge removal for graph with %d nodes and %d edges",
networkx.number_of_nodes(g),
networkx.number_of_edges(g))
# Ensure that when we iterate over a node's neighbours, we obtain the
# "shortest" edge first.
g.sort_adjacency_lists(weight=g.edge_len)
logger.debug("Sorted adjacency lists")
num_nodes = networkx.number_of_nodes(g)
for i, v in enumerate(g):
v_neighbours = g[v]
logger.debug("Processing node %s with %d successors (%d/%d)",
v, g.out_degree(v), i, num_nodes)
if not v_neighbours:
continue
# Get the last item in the sorted adjacency list, which is the longest
# edge
longest_edge = next(reversed(v_neighbours.values()))[g.edge_len]
longest_edge += length_fuzz
for w in v_neighbours:
node_state[w] = NodeState.IN_PLAY
for w in v_neighbours:
if node_state[w] == NodeState.IN_PLAY:
w_neighbours = g[w]
for x in w_neighbours:
if node_state[x] == NodeState.IN_PLAY:
total_edge_len = (g[v][w][g.edge_len] +
g[w][x][g.edge_len])
if total_edge_len <= longest_edge:
node_state[x] = NodeState.ELIMINATED
logger.debug("Node %s marked eliminated", x)
for w in v_neighbours:
w_neighbours = g[w]
first = True
for x in w_neighbours:
if node_state[x] == NodeState.IN_PLAY:
if first:
# The first neighbour is the smallest (due to sorting)
node_state[x] = NodeState.ELIMINATED
logger.debug("Node %s marked eliminated (smallest)", x)
if g[w][x][g.edge_len] < length_fuzz:
node_state[x] = NodeState.ELIMINATED
logger.debug("Node %s marked eliminated", x)
first = False
for w in v_neighbours:
if node_state[w] == NodeState.ELIMINATED:
edges_to_remove.append((v, w))
logger.debug("Marked edge (%s, %s) for removal", v, w)
node_state[w] = NodeState.VACANT
return edges_to_remove
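# Usage sketch for the reduction above (toy string nodes; real graphs hold
# OrientedRead/MergedReads instances). The function only marks edges, so the
# caller removes them afterwards:
#
#     >>> g = AssemblyGraph(edge_len='weight', overlap_len='overlap_len')
#     >>> g.add_edge('x+', 'y+', {'weight': 100, 'overlap_len': 900})
#     >>> g.add_edge('y+', 'z+', {'weight': 150, 'overlap_len': 850})
#     >>> g.add_edge('x+', 'z+', {'weight': 250, 'overlap_len': 750})
#     >>> g.remove_edges_from(remove_transitive_edges(g, length_fuzz=10))
#     >>> g.has_edge('x+', 'z+')
#     False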
def remove_outgoing_tips(g: AssemblyGraph, max_tip_len: int=5,
max_tip_len_bases: int=5000):
"""Remove short outgoing tips from the assembly graph.
    This function removes short "tips": paths which start at a junction or at a
    node without incoming edges, end in a node without any outgoing edges, and
    have a length shorter than `max_tip_len`.
Example::
                      -> vt1 -> vt2
                     /
        v0 -> v1 -> v2 -> v3 -> v4 -> v5 -- .. -> vn
The edges (v2, vt1), (vt1, vt2) will be removed.
Afterwards, it is recommended to delete isolated nodes: nodes without
incoming or outgoing edges.
"""
# Clean short tips in outgoing direction
tips = [n for n in g if g.out_degree(n) == 0]
num_tip_edges = 0
for tip in tips:
is_tip = True
if g.in_degree(tip) != 1:
continue
path = [tip]
curr_node = tip
prev = g.predecessors(tip)[0]
while g.in_degree(curr_node) == 1 and g.out_degree(curr_node) <= 1:
path.insert(0, prev)
curr_node = prev
prev = (g.predecessors(curr_node)[0] if g.in_degree(curr_node) > 0
else None)
logger.debug("Current path: %s", path)
if len(path) > max_tip_len+1:
is_tip = False
break
if g.path_length(g.node_path_edges(path, data=True), g.edge_len,
include_last=False) > max_tip_len_bases:
is_tip = False
break
if is_tip:
# Path is a tip, remove it
logger.debug("Removing tip: %s", path)
num_tip_edges += len(path)-1
g.remove_edges_from(g.node_path_edges(path))
return num_tip_edges
def remove_incoming_tips(g: AssemblyGraph, max_tip_len: int=5,
max_tip_len_bases: int=5000):
"""Remove short incoming tips from the assembly graph.
    This function removes short "tips": paths which start at a node without
    incoming edges, end where they merge back into a longer path (at a
    junction), and have a length shorter than `max_tip_len`.
Example::
        vt1 -> vt2 ---------\
                             V
        v0 -> v1 -> v2 -> v3 -> v4 -> v5 -- .. -> vn
The edges (vt1, vt2), (vt2, v4) will be removed.
Afterwards, it is recommended to delete isolated nodes: nodes without
incoming or outgoing edges.
"""
# Clean short tips in outgoing direction
tips = [n for n in g if g.in_degree(n) == 0]
num_tip_edges = 0
for tip in tips:
is_tip = True
if g.out_degree(tip) != 1:
continue
path = [tip]
curr_node = tip
neighbour = g.neighbors(tip)[0]
while g.out_degree(curr_node) == 1 and g.in_degree(curr_node) <= 1:
path.append(neighbour)
curr_node = neighbour
neighbour = (g.neighbors(curr_node)[0] if
g.out_degree(curr_node) > 0 else None)
if len(path) > max_tip_len+1:
is_tip = False
break
if g.path_length(g.node_path_edges(path, data=True), g.edge_len,
include_last=False) > max_tip_len_bases:
is_tip = False
break
if is_tip:
# Path is a tip, remove it
logger.debug("Removing tip: %s", path)
num_tip_edges += len(path)-1
g.remove_edges_from(g.node_path_edges(path))
return num_tip_edges
def remove_tips(g: AssemblyGraph, max_tip_len: int=3,
max_tip_len_bases: int=5000):
"""Remove both small incoming and outgoing tips.
.. seealso:: remove_incoming_tips, remove_outgoing_tips
"""
num_incoming_tips = remove_incoming_tips(g, max_tip_len, max_tip_len_bases)
num_outgoing_tips = remove_outgoing_tips(g, max_tip_len, max_tip_len_bases)
return num_incoming_tips, num_outgoing_tips
def remove_short_overlaps(g: AssemblyGraph, drop_ratio: float,
sort: bool=False):
if sort:
g.sort_adjacency_lists(weight=g.edge_len)
junction_nodes = (n for n in g.nodes_iter() if g.out_degree(n) > 1)
for v in junction_nodes:
v_neighbours = g[v]
max_ovl = max(w[g.overlap_len] for w in v_neighbours.values())
_, shortest_edge_target, shortest_edge_ovl = next(
g.edges_iter(v, data=g.overlap_len))
if max_ovl != shortest_edge_ovl:
continue
threshold = int(round(max_ovl * drop_ratio))
# Longest edges first (higher chance of short overlap)
for w, data in reversed(v_neighbours.items()):
if w == shortest_edge_target:
# Don't remove the shortest edge
break
if data[g.overlap_len] < threshold:
# Remove this edge
yield (v, w)
else:
break
def make_symmetric(g: AssemblyGraph):
"""This function makes the graph symmetric by removing edges for which
their reverse complement edge has disappeared. Or in other words, if (u, v)
is an edge, and (v*, u*) is not an edge, then (u, v) is removed. Here u*
means the reverse complement of u.
.. note:: This function does not work if unambiguous paths have been
merged.
"""
edges_to_remove = [e for e in g.edges_iter()
if not g.has_edge(e[1].reverse(), e[0].reverse())]
g.remove_edges_from(edges_to_remove)
return len(edges_to_remove)
def clean_graph(g: AssemblyGraph):
"""Delete nodes that do not have any edges."""
# Remove nodes without any edges
isolated_nodes = [n for n in g if g.degree(n) == 0]
g.remove_nodes_from(isolated_nodes)
return len(isolated_nodes)
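# A typical graph-cleaning pipeline strings the helpers above together; the
# exact order shown here is an assumption, not something this module enforces:
#
#     >>> g.remove_edges_from(remove_transitive_edges(g))
#     >>> remove_tips(g)
#     >>> make_symmetric(g)
#     >>> clean_graph(g)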
def merge_unambiguous_paths(g: AssemblyGraph):
"""Merge unambiguous (non-branching) paths to a single node.
If the graph is symmetric, then it tries to give the same new node ID to
paths that belong together (in the sense of original and its reverse
complement)."""
start_points = [
n for n in g.nodes_iter() if
(
(g.in_degree(n) == 1 and g.out_degree(g.predecessors(n)[0]) > 1) or
(g.in_degree(n) == 0 or g.in_degree(n) > 1)
) and g.out_degree(n) == 1
]
num_merged_nodes = 0
counter = 0
for start in start_points:
if g.out_degree(start) != 1:
continue
nodes_to_merge = [start]
curr_node = start
neighbour = g.neighbors(start)[0]
while (neighbour and g.out_degree(curr_node) == 1 and
g.in_degree(neighbour) == 1):
nodes_to_merge.append(neighbour)
curr_node = neighbour
neighbour = (g.neighbors(curr_node)[0] if
g.out_degree(neighbour) > 0 else None)
if len(nodes_to_merge) == 1:
continue
logger.debug("Found unambiguous path: %s",
", ".join("({}, in: {}, out: {})".format(
n.id, g.in_degree(n), g.out_degree(n)) for n in
nodes_to_merge))
# If the graph is symmetric, try to find an ID for its merged reverse
# complement
new_id = "merged{}".format(counter)
counter += 1
# Create the new node and copy the required edges
prefix_lengths = [l for u, v, l in
g.node_path_edges(nodes_to_merge, g.edge_len)]
new_unmatched_prefix = sum(prefix_lengths)
new_length = new_unmatched_prefix + len(nodes_to_merge[-1])
new_node = MergedReads(new_id, new_length, "+", nodes_to_merge,
prefix_lengths)
g.add_node(new_node)
g.node[new_node]['merged_reads'] = ", ".join(
str(n) for n in nodes_to_merge)
# Incoming edges
logger.debug("In degree of first node: %d",
g.in_degree(nodes_to_merge[0]))
for u, v, data in g.in_edges_iter(nodes_to_merge[0], data=True):
g.add_edge(u, new_node, dict(**data))
# Outgoing edges
logger.debug("Out degree of last node: %d",
g.out_degree(nodes_to_merge[-1]))
for u, v, data in g.out_edges_iter(nodes_to_merge[-1], data=True):
# The length of the unmatched prefix probably has changed by
# merging nodes, so we need to adjust the `edge_len` attribute of
# all outgoing edges.
new_data = dict(**data)
new_data[g.edge_len] += new_unmatched_prefix
g.add_edge(new_node, v, new_data)
# Remove merged nodes
logger.debug("Removing nodes: %s", [str(n) for n in nodes_to_merge])
g.remove_nodes_from(nodes_to_merge)
logger.debug("New node in-degree: %d, out-degree: %d.",
g.in_degree(new_node), g.out_degree(new_node))
num_merged_nodes += len(nodes_to_merge)
return num_merged_nodes
def _get_aligning_reads(alignments: AlignmentsT, read: OrientedRead):
if read in alignments:
for b_read in alignments[read].keys():
yield b_read
def average_coverage_path(g: AssemblyGraph, alignments: AlignmentsT,
path: Iterable[Node],
include_last: bool=True) -> float:
"""Calculate the average coverage along a given path through the assembly
graph.
Average coverage is calculated as follows:
1. Determine the length :math:`l` of the given path
2. Determine all reads aligning to the given path
3. Calculate the total sum :math:`s` of read lengths
Coverage: :math:`s / l`
"""
path_length = 0
aligning_reads = set() # type: Set[OrientedRead]
last = None
for u, v, l in g.node_path_edges(path, g.edge_len):
path_length += l
if isinstance(u, MergedReads):
for read in u.reads:
aligning_reads.update(_get_aligning_reads(alignments, read))
else:
aligning_reads.update(_get_aligning_reads(alignments, u))
last = v
if include_last and last:
path_length += len(last)
if isinstance(last, MergedReads):
for read in last.reads:
aligning_reads.update(_get_aligning_reads(alignments, read))
else:
aligning_reads.update(_get_aligning_reads(alignments, last))
read_length_sum = sum(len(r) for r in aligning_reads)
return read_length_sum / path_length
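# Worked example of the formula above (illustrative numbers): a 10 kb path
# with three aligning reads of 8 kb, 9 kb and 7 kb gives
# (8000 + 9000 + 7000) / 10000 = 2.4x average coverage.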
def build_bubblechains(g: AssemblyGraph,
min_nodes: int=1) -> Iterable[AssemblyGraph]:
# Build dictionary which maps the bubble source to the bubble sink
logger.info("Searching for non-nested superbubbles in the assembly "
"graph...")
bubbles = {b[0]: b[1] for b in find_superbubbles(g, report_nested=False)}
bubble_entrances = set(bubbles.keys())
bubble_exits = set(bubbles.values())
logger.debug("Found superbubbles: %s", bubbles)
logger.info("Graph has %d superbubbles", len(bubbles))
# Obtain start nodes
# Priority is given as follows:
# 1. Bubble entrances without incoming edges
# 2. Bubble entrances for which holds that it's not also an exit of an
# other bubble
# 3. Bubble entrances for which the entrance and corresponding exit have
# not been visited yet
start_points = [
n for n in bubble_entrances if g.in_degree(n) == 0]
start_points.extend((n for n in bubble_entrances if n not in bubble_exits))
# We'll check later if this bubble has been visited already
start_points.extend(bubble_entrances)
logger.info("Number of start points : %d", len(start_points))
visited = set()
for start in start_points:
subgraph_nodes = set()
logger.debug("New start point %s", start)
if start not in bubble_entrances:
raise AssemblyError("Unexpected start point: {}, this is not a "
"bubble entrance.".format(start))
num_bubbles = 0
while start in bubble_entrances:
bubble_exit = bubbles[start]
if start in visited and bubble_exit in visited:
logger.debug("<%s, %s> already visited, stopping.",
start, bubble_exit)
break
bubble_nodes = superbubble_nodes(g, start, bubble_exit)
subgraph_nodes.update(bubble_nodes)
visited.update(bubble_nodes)
start = bubble_exit
num_bubbles += 1
if num_bubbles > 0:
logger.info("Built bubblechain of %d bubbles", num_bubbles)
if len(subgraph_nodes) >= min_nodes:
yield networkx.subgraph(g, subgraph_nodes)
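# Sketch of how bubble chains are typically consumed together with
# identify_contigs() defined below (the flow is an assumption of this sketch):
#
#     >>> chains = list(build_bubblechains(g, min_nodes=2))
#     >>> covered = set()
#     >>> for chain in chains:
#     ...     covered.update(chain.nodes_iter())
#     >>> contigs = list(identify_contigs(g, covered, min_contig_len=5000))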
def identify_contigs(g: AssemblyGraph, exclude_nodes: Set[Node],
min_contig_len: int=5000
) -> Iterable[Tuple[bytes, Path]]:
"""Identify linear non-branching path that would represent a contig if
you would spell the corresponding DNA sequence. This function only yields
the paths, it's up to the user to obtain the actual sequence.
Using the parameter `exclude_nodes` you can specify which nodes are already
included in a bubble chain, and therefore need to be ignored."""
edge_queue = deque(
e for e in g.edges_iter() if
(g.in_degree(e[0]) == 0 or g.in_degree(e[0]) > 1) and
(e[0] not in exclude_nodes or e[1] not in exclude_nodes)
)
visited_edges = set()
while edge_queue:
e = edge_queue.popleft()
u, v = e
if e in visited_edges:
continue
visited_edges.add(e)
# Build a non-branching path
path = [u, v]
outgoing_edges = g.out_edges(v)
while len(outgoing_edges) == 1:
oe = outgoing_edges[0]
if oe in visited_edges:
break
if g.in_degree(oe[0]) > 1:
break
path.append(oe[1])
visited_edges.add(oe)
# Collect outgoing edges of next node
outgoing_edges = g.out_edges(oe[1])
path_len = g.path_length(g.node_path_edges(path, data=True))
if path_len >= min_contig_len:
yield path
# Check if we're at a branch point, and if so add other edges to the
# queue.
outgoing_edges = g.out_edges(path[-1])
if len(outgoing_edges) > 1:
for oe in outgoing_edges:
if oe[0] in exclude_nodes and oe[1] in exclude_nodes:
continue
if oe not in visited_edges:
edge_queue.append(oe)
# Check singleton nodes, could be long linear path merged to a single node.
singleton_nodes = (n for n in g.nodes_iter() if g.in_degree(n) == 0 and
g.out_degree(n) == 0)
for n in singleton_nodes:
if len(n) >= min_contig_len:
yield [n]
def remove_diamond_tips(g: AssemblyGraph) -> int:
end_nodes = [n for n in g.nodes_iter() if g.out_degree(n) == 0 and
g.in_degree(n) == 2]
num_diamond_tips = 0
for node in end_nodes:
pred_out_degree1 = None
pred_out_degree_gt1 = None
for pred in g.predecessors_iter(node):
if g.out_degree(pred) == 1 and g.in_degree(pred) == 1:
pred_out_degree1 = pred
if g.out_degree(pred) > 1:
pred_out_degree_gt1 = pred
if pred_out_degree1 and pred_out_degree_gt1:
# Diamond found
g.remove_node(node)
g.remove_node(pred_out_degree1)
num_diamond_tips += 1
return num_diamond_tips
| {
"content_hash": "068ec0340fc43526d32f6aa248262698",
"timestamp": "",
"source": "github",
"line_count": 743,
"max_line_length": 79,
"avg_line_length": 33.85868102288021,
"alnum_prop": 0.5556306395834162,
"repo_name": "AbeelLab/phasm",
"id": "54cbad1581206d0314f62a580ae068e3cd4a2058",
"size": "25159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phasm/assembly_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6583"
},
{
"name": "Python",
"bytes": "170386"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import with_statement
from contextlib import contextmanager
import subprocess
import os
import sys
import time
import imp
import urllib2
import copy
import fnmatch
import re
apt = None
rpm = None
try:
import apt
except:
pass
if apt is None:
try:
import rpm
except:
pass
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
helperlib = imp.load_source('helperlib', '../helperlib.py')
LG = nxDSCLog.DSCLog
# [ClassVersion("1.0.0"),FriendlyName("nxPackage"),SupportsInventory()]
# class MSFT_nxPackageResource : OMI_BaseResource
# {
# [write,ValueMap{"Present", "Absent"},Values{"Present", "Absent"}] string Ensure;
# [write,ValueMap{"Yum", "Apt", "Zypper"},Values{"Yum", "Apt", "Zypper"}] string PackageManager;
# [Key,InventoryFilter] string Name;
# [write] string FilePath;
# [write] Boolean PackageGroup;
# [write] string Arguments;
# [write] uint32 ReturnCode;
# [read] string PackageDescription;
# [read] string Publisher;
# [read] string InstalledOn;
# [read] uint32 Size;
# [read] string Version;
# [read] boolean Installed;
# [read] string Architecture;
# };
cache_file_dir = '/var/opt/microsoft/dsc/cache/nxPackage'
global show_mof
show_mof = False
def init_vars(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
if Ensure is not None and Ensure != '':
Ensure = Ensure.encode('ascii', 'ignore').lower()
else:
Ensure = 'present'
if PackageManager is not None:
PackageManager = PackageManager.encode('ascii', 'ignore').lower()
else:
PackageManager = ''
if Name is not None:
Name = Name.encode('ascii', 'ignore')
else:
Name = ''
if FilePath is not None:
FilePath = FilePath.encode('ascii', 'ignore')
else:
FilePath = ''
if PackageGroup is None:
PackageGroup = False
if Arguments is not None:
Arguments = Arguments.encode('ascii', 'ignore')
else:
Arguments = ''
if ReturnCode is None:
ReturnCode = 0
return Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode
def Set_Marshall(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
return [-1]
(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode) = init_vars(
Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode)
retval = Set(Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
sys.stdin.flush()
sys.stderr.flush()
sys.stdout.flush()
return retval
def Test_Marshall(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
return [-1]
(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode) = init_vars(
Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode)
retval = Test(Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
sys.stdin.flush()
sys.stderr.flush()
sys.stdout.flush()
return retval
def Get_Marshall(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
arg_names = list(locals().keys())
(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode) = init_vars(
Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode)
retval = 0
retval, PackageManager, PackageDescription, Publisher, InstalledOn, Size, Version, Installed, Architecture = Get(
Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode)
sys.stdin.flush()
sys.stderr.flush()
sys.stdout.flush()
Ensure = protocol.MI_String(Ensure)
PackageManager = protocol.MI_String(PackageManager)
Name = protocol.MI_String(Name)
FilePath = protocol.MI_String(FilePath)
PackageGroup = protocol.MI_Boolean(PackageGroup)
Arguments = protocol.MI_String(Arguments)
ReturnCode = protocol.MI_Uint32(ReturnCode)
PackageDescription = protocol.MI_String(PackageDescription)
Publisher = protocol.MI_String(Publisher)
InstalledOn = protocol.MI_String(InstalledOn)
Architecture = protocol.MI_String(Architecture)
Size = protocol.MI_Uint32(int(Size))
Version = protocol.MI_String(Version)
Installed = protocol.MI_Boolean(Installed)
arg_names.append('PackageDescription')
arg_names.append('Publisher')
arg_names.append('InstalledOn')
arg_names.append('Size')
arg_names.append('Version')
arg_names.append('Installed')
arg_names.append('Architecture')
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return retval, retd
def Inventory_Marshall(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
retval = 0
sys.stdin.flush()
sys.stderr.flush()
sys.stdout.flush()
(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode) = init_vars(
Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode)
retval, pkgs = GetAll(Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
for p in pkgs:
p['Ensure'] = protocol.MI_String('present')
p['PackageManager'] = protocol.MI_String(PackageManager)
p['Name'] = protocol.MI_String(p['Name'])
p['FilePath'] = protocol.MI_String('')
p['PackageGroup'] = protocol.MI_Boolean(False)
p['Arguments'] = protocol.MI_String(Arguments)
p['ReturnCode'] = protocol.MI_Uint32(0)
p['PackageDescription'] = protocol.MI_String(p['PackageDescription'])
p['Publisher'] = protocol.MI_String(p['Publisher'])
p['InstalledOn'] = protocol.MI_String(p['InstalledOn'])
p['Architecture'] = protocol.MI_String(p['Architecture'])
p['Size'] = protocol.MI_Uint32(int(p['Size']))
p['Version'] = protocol.MI_String(p['Version'])
p['Installed'] = protocol.MI_Boolean(True)
Inventory = protocol.MI_InstanceA(pkgs)
retd = {}
retd["__Inventory"] = Inventory
return retval, retd
#
# Begin user defined DSC functions
#
def GetPackageSystem():
ret = None
for b in ('dpkg', 'rpm'):
code, out = RunGetOutput('which ' + b, False, False)
if code is 0:
ret = b
break
return ret
def GetPackageManager():
ret = None
# choose default - almost surely one will match.
for b in ('apt-get', 'zypper', 'yum'):
code, out = RunGetOutput('which ' + b, False, False)
if code is 0:
ret = b
if ret == 'apt-get':
ret = 'apt'
break
return ret
def ParseArguments(a):
program_arg = ''
cmd_arg = ''
if len(a) > 1:
if '|' in a:
program_arg, cmd_arg = a.split('|')
else:
program_arg = a
return program_arg, cmd_arg
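# Illustrative behaviour (made-up values): ParseArguments('-q|--nodeps')
# returns ('-q', '--nodeps'); without a '|' separator the whole string becomes
# the first element and the second element stays ''.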
class Params:
def __init__(self, Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
if not ("present" in Ensure or "absent" in Ensure):
print(
'ERROR: Param Ensure must be "Present" or "Absent".', file=sys.stdout)
LG().Log(
'ERROR', 'ERROR: Param Ensure must be "Present" or "Absent".')
raise Exception('BadParameter')
if len(PackageManager) > 0:
if not ("*" in PackageManager or "yum" in PackageManager or "apt" in PackageManager or "zypper" in PackageManager):
print(
'ERROR: Param PackageManager values are "Yum", "Apt", or "Zypper".', file=sys.stdout)
LG().Log(
'ERROR', 'ERROR: Param PackageManager values are "Yum", "Apt", or "Zypper".')
raise Exception('BadParameter')
if PackageManager == "*":
PackageManager = GetPackageManager()
if len(Name) < 1 and len(FilePath) < 1:
print(
'ERROR: Param Name or FilePath must be set.', file=sys.stdout)
LG().Log('ERROR', 'ERROR: Param Name or FilePath must be set.')
raise Exception('BadParameter')
if len(Name) > 0 and len(FilePath) > 0:
print('Ignoring Name because FilePath is set.', file=sys.stdout)
LG().Log('DEBUG', 'Ignoring Name because FilePath is set.')
print('PackageGroup value is ' + repr(PackageGroup), file=sys.stdout)
LG().Log('DEBUG', 'PackageGroup value is ' + repr(PackageGroup))
print('PackageGroup type is ' +
repr(type(PackageGroup)), file=sys.stdout)
LG().Log('DEBUG', 'PackageGroup type is ' + repr(type(PackageGroup)))
if not (True is PackageGroup or False is PackageGroup):
print(
'ERROR: Param PackageGroup must be true or false.', file=sys.stdout)
LG().Log(
'ERROR', 'ERROR: Param PackageGroup must be true or false.')
raise Exception('BadParameter')
self.Ensure = Ensure
self.PackageManager = PackageManager
self.Name = Name
self.FilePath = FilePath
self.PackageGroup = PackageGroup
self.Arguments, self.CommandArguments = ParseArguments(Arguments)
self.ReturnCode = ReturnCode
self.PackageDescription = ''
self.Publisher = ''
self.InstalledOn = ''
self.Size = 0
self.Version = ''
self.Installed = ''
self.Architecture = ''
self.PackageSystem = ''
self.PackageSystem = GetPackageSystem()
if len(self.PackageManager) < 1:
self.PackageManager = GetPackageManager()
if len(self.PackageManager) < 1 or len(self.PackageSystem) < 1:
print(
"ERROR: Unable to locate any of 'zypper', 'yum', 'apt-get', 'rpm' or 'dpkg' .", file=sys.stdout)
LG().Log(
'ERROR', "ERROR: Unable to locate any of 'zypper', 'yum', 'apt-get', 'rpm' or 'dpkg' .")
raise Exception('BadParameter')
self.LocalPath = ''
self.record_delimiter = '@@'
self.field_delimiter = '#@#'
self.cmds = {}
self.cmds['dpkg'] = {}
self.cmds['rpm'] = {}
self.cmds['apt'] = {}
self.cmds['yum'] = {}
self.cmds['zypper'] = {}
self.cmds['dpkg'][
'present'] = 'DEBIAN_FRONTEND=noninteractive dpkg % -i '
self.cmds['dpkg'][
'absent'] = 'DEBIAN_FRONTEND=noninteractive dpkg % -r '
self.cmds['dpkg'][
'stat'] = "dpkg-query -W -f='${{Description}}{0}${{Maintainer}}{0}'Unknown'{0}${{Installed-Size}}{0}${{Version}}{0}${{Status}}{0}${{Architecture}}\n' ".format(self.field_delimiter)
self.cmds['dpkg'][
'stat_all'] = "dpkg-query -W -f='${{Package}}{0}${{Description}}{0}${{Maintainer}}{0}'Unknown'{0}${{Installed-Size}}{0}${{Version}}{0}${{Status}}{0}${{Architecture}}\n{1}' ".format(self.field_delimiter, self.record_delimiter)
self.cmds['dpkg']['stat_group'] = None
self.cmds['rpm']['present'] = 'rpm % -i '
self.cmds['rpm']['absent'] = 'rpm % -e '
self.cmds['rpm'][
'stat'] = 'rpm -q --queryformat "%{{SUMMARY}}{0}%{{PACKAGER}}{0}%{{INSTALLTIME}}{0}%{{SIZE}}{0}%{{EPOCH}}:%{{VERSION}}-%{{RELEASE}}{0}installed{0}%{{ARCH}}\n" '.format(self.field_delimiter)
self.cmds['rpm'][
'stat_all'] = 'rpm -qa --queryformat "%{{NAME}}{0}%{{SUMMARY}}{0}%{{PACKAGER}}{0}%{{INSTALLTIME}}{0}%{{SIZE}}{0}%{{EPOCH}}:%{{VERSION}}-%{{RELEASE}}{0}installed{0}%{{ARCH}}\n{1}" | sed "s/(none)/0/g" '.format(self.field_delimiter, self.record_delimiter)
self.cmds['rpm']['stat_group'] = None
self.cmds['apt'][
'present'] = 'DEBIAN_FRONTEND=noninteractive apt-get % install ^ --allow-unauthenticated --yes '
self.cmds['apt'][
'absent'] = 'DEBIAN_FRONTEND=noninteractive apt-get % remove ^ --allow-unauthenticated --yes '
self.cmds['apt']['stat'] = self.cmds['dpkg']['stat']
self.cmds['apt']['stat_all'] = self.cmds['dpkg']['stat_all']
self.cmds['apt']['stat_group'] = None
self.cmds['yum']['present'] = 'yum -y % install ^ '
self.cmds['yum']['absent'] = 'yum -y % remove ^ '
self.cmds['yum']['grouppresent'] = 'yum -y % groupinstall ^ '
self.cmds['yum']['groupabsent'] = 'yum -y % groupremove ^ '
self.cmds['yum'][
'stat_group'] = 'yum grouplist ' # the group mode is implemented when using YUM only.
self.cmds['yum']['stat'] = self.cmds['rpm']['stat']
self.cmds['yum']['stat_all'] = self.cmds['rpm']['stat_all']
self.cmds['zypper']['present'] = 'zypper --non-interactive % install ^'
self.cmds['zypper']['absent'] = self.cmds['rpm']['absent']
self.cmds['zypper']['stat'] = self.cmds['rpm']['stat']
self.cmds['zypper']['stat_all'] = self.cmds['rpm']['stat_all']
self.cmds['zypper']['stat_group'] = None
if self.PackageGroup is True:
if self.cmds[self.PackageManager]['stat_group'] is None:
print('ERROR. PackageGroup is not valid for ' +
self.PackageManager, file=sys.stdout)
LG().Log(
'ERROR', 'ERROR. PackageGroup is not valid for ' + self.PackageManager)
raise Exception('BadParameter')
if len(self.FilePath) > 0:
print(
'ERROR. PackageGroup cannot be True if FilePath is set.', file=sys.stdout)
LG().Log(
'ERROR', 'ERROR. PackageGroup cannot be True if FilePath is set.')
raise Exception('BadParameter')
def SetShowMof(a):
global show_mof
show_mof = a
def ShowMof(op, Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
if not show_mof:
return
mof = '\n'
mof += op + ' nxPackage MyPackage \n'
mof += '{\n'
mof += ' Name = "' + Name + '"\n'
mof += ' Ensure = "' + Ensure + '"\n'
mof += ' PackageManager = "' + PackageManager + '"\n'
mof += ' FilePath = "' + FilePath + '"\n'
mof += ' PackageGroup = "' + str(PackageGroup) + '"\n'
mof += ' Arguments = "' + Arguments + '"\n'
mof += ' ReturnCode = ' + str(ReturnCode) + '\n'
mof += '}\n'
f = open('./test_mofs.log', 'a')
print(mof, file=f)
f.close()
def IsPackageInstalled(p):
out = ''
if p is None:
return False, out
    if len(p.FilePath) > 0 and '://' in p.FilePath: # it's a remote file - try to get the name from the cache
if ReadCacheInfo(p) is False:
return False, out
elif len(p.FilePath) > 0 and os.path.exists(p.FilePath) is True: # FilePath
if apt is not None and os.path.splitext(p.FilePath)[-1] == '.deb':
from apt.debfile import DebPackage
pkg = DebPackage(p.FilePath)
p.Name = pkg.pkgname
elif rpm is not None and os.path.splitext(p.FilePath)[-1] == '.rpm':
with open(p.FilePath, 'r') as F:
ts = rpm.TransactionSet()
ts.setVSFlags(-1)
try:
pkg = ts.hdrFromFdno(F.fileno())
except rpm.error, e:
print(repr(e))
LG().Log('ERROR', repr(e))
pkg = None
if pkg is None:
return False, out
p.Name = pkg.dsOfHeader().N()
if len(p.Name) < 1:
return False, out
if p.PackageGroup is True:
if p.cmds[p.PackageManager]['stat_group'] is not None:
cmd = p.cmds[p.PackageManager]['stat_group'] + '"' + p.Name + '"'
else:
print('ERROR. PackageGroup is not valid for ' +
p.PackageManager, file=sys.stdout)
LG().Log(
'ERROR', 'ERROR. PackageGroup is not valid for ' + p.PackageManager)
return False, out
else:
cmd = 'LANG=en_US.UTF8 ' + p.cmds[p.PackageManager]['stat'] + p.Name
code, out = RunGetOutput(cmd, False)
if p.PackageGroup is True: # implemented for YUM only.
if 'Installed' in out:
return True, out
else:
return False, out
# regular packages
print('check installed:' + out)
LG().Log('INFO', 'check installed:' + out)
    if code == 0:
        if 'deinstall' in out or 'not-installed' in out:
            code = 1
    if code != int(p.ReturnCode):
return False, out
return True, out
def ParseInfo(p, info):
p.PackageDescription = ''
p.Publisher = ''
p.InstalledOn = ''
p.Size = '0'
p.Version = ''
p.Installed = False
p.Architecture = ''
if len(info) > 1:
f = re.split(p.field_delimiter, info)
        if len(f) == 7:
p.PackageDescription = f[0]
p.Publisher = f[1]
p.InstalledOn = f[2]
if not p.InstalledOn.isalnum():
p.InstalledOn = time.gmtime(int(p.InstalledOn))
if len(f[3]) > 0:
p.Size = f[3]
p.Version = f[4]
p.Installed = ('install' in f[5])
p.Architecture = f[6]
        if len(f) != 7:
print(
'ERROR in ParseInfo. Output was ' + info, file=sys.stdout)
LG().Log(
'ERROR', 'ERROR in ParseInfo. Output was ' + info)
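# ParseAllInfo parses the output of the 'stat_all' command templates defined in
# Params.__init__: one record per package, records separated by
# p.record_delimiter ('@@') and fields separated by p.field_delimiter ('#@#'),
# in the order Name, Description, Publisher, InstalledOn, Size, Version,
# Status, Architecture.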
def ParseAllInfo(info, p):
pkg_list = []
d = {}
if len(info) < 1 or p.record_delimiter not in info:
return pkg_list
for pkg in re.split(p.record_delimiter, info):
d['Name'] = ''
d['PackageDescription'] = ''
d['Publisher'] = ''
d['InstalledOn'] = ''
d['Size'] = '0'
d['Version'] = ''
d['Installed'] = False
d['Architecture'] = ''
if len(pkg) > 1:
pkg = pkg.strip()
f = re.split(p.field_delimiter, pkg)
            if len(f) == 8:
d['Name'] = f[0]
if len(p.Name) and not fnmatch.fnmatch(d['Name'], p.Name):
continue
d['PackageDescription'] = f[1]
d['Publisher'] = f[2]
d['InstalledOn'] = f[3]
if not d['InstalledOn'].isalnum():
d['InstalledOn'] = time.gmtime(int(d['InstalledOn']))
if len(f[4]) > 0:
d['Size'] = f[4]
d['Version'] = f[5]
d['Installed'] = ('install' in f[6])
d['Architecture'] = f[7]
            if len(f) != 8:
print(
'ERROR in ParseAllInfo. Output was ' + info, file=sys.stdout)
LG().Log(
'ERROR', 'ERROR in ParseAllInfo. Output was ' + info)
return pkg_list
pkg_list.append(copy.deepcopy(d))
return pkg_list
def DoEnableDisable(p):
# if the path is set, use the path and self.PackageSystem
cmd = ""
if len(p.FilePath) > 1 and 'present' in p.Ensure: # don't use the path unless installing
if '://' in p.FilePath and p.LocalPath == '': # its a remote file
            ret = GetRemoteFile(p)
            if ret != 0:
p.LocalPath = ""
raise Exception(
'Unable to retrieve remote resource ' + p.FilePath + ' Error is ' + str(ret))
else:
p.FilePath = p.LocalPath
if not os.path.isfile(p.FilePath):
print('ERROR. File ' + p.FilePath +
' not found.', file=sys.stdout)
LG().Log('ERROR', 'ERROR. File ' + p.FilePath + ' not found.')
return False, ""
cmd = p.cmds[p.PackageSystem][p.Ensure] + ' ' + p.FilePath
cmd = cmd.replace('%', p.Arguments)
elif p.PackageGroup is True:
        if ('group' + p.Ensure) in p.cmds[p.PackageManager]:
cmd = p.cmds[p.PackageManager][
'group' + p.Ensure] + '"' + p.Name + '"'
else:
print('Error: Group mode not implemented for ' +
p.PackageManager, file=sys.stdout)
LG().Log(
'ERROR', 'Error: Group mode not implemented for ' + p.PackageManager)
return False, 'Error: Group mode not implemented for ' + p.PackageManager
else:
cmd = 'LANG=en_US.UTF8 ' + \
p.cmds[p.PackageManager][p.Ensure] + ' ' + p.Name
cmd = cmd.replace('%', p.Arguments)
cmd = cmd.replace('^', p.CommandArguments)
code, out = RunGetOutput(cmd, False)
if len(p.LocalPath) > 1: # create cache entry and remove the tmp file
WriteCacheInfo(p)
RemoveFile(p.LocalPath)
if p.PackageManager == 'yum' and ('No Match for argument: ' + p.Name in out or 'Nothing to do' in out): # yum returns 0 on unknown package
return False, out
if p.PackageManager == 'zypper' and "package '" + p.Name + "' not found" in out: # zypper returns 0 on unknown package
return False, out
    if code != int(p.ReturnCode):
return False, out
return True, out
def WriteCacheInfo(p):
if not os.path.isdir(cache_file_dir):
if MakeDirs(cache_file_dir) is not None:
return False
if len(p.LocalPath) < 1:
return False
if apt is not None and os.path.splitext(p.LocalPath)[-1] == '.deb':
from apt.debfile import DebPackage
try:
pkg = DebPackage(p.LocalPath)
except:
print("Exception opening file " + p.LocalPath, file=sys.stderr)
LG().Log('ERROR', "Exception opening file " + p.LocalPath)
return False
p.Name = pkg.pkgname
elif rpm is not None and os.path.splitext(p.LocalPath)[-1] == '.rpm':
with opened_w_error(p.LocalPath, 'r') as (F, error):
if error:
print(
"Exception opening file " + p.LocalPath, file=sys.stderr)
LG().Log('ERROR', "Exception opening file " + p.LocalPath)
return False
ts = rpm.TransactionSet()
ts.setVSFlags(-1)
try:
pkg = ts.hdrFromFdno(F.fileno())
except rpm.error, e:
print(repr(e))
LG().Log('ERROR', repr(e))
pkg = None
if pkg is None:
return False
p.Name = pkg.dsOfHeader().N()
if len(p.Name) < 1:
return False
cache_file_path = cache_file_dir + '/' + os.path.basename(p.LocalPath)
with opened_w_error(cache_file_path, 'w+') as (F, error):
if error:
print("Exception creating cache file " + cache_file_path + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception creating cache file " + cache_file_path +
" Error Code: " + str(error.errno) + " Error: " + error.message + error.strerror)
return False
F.write(p.Name + '\n')
F.close()
return True
def ReadCacheInfo(p):
cache_file_path = cache_file_dir + '/' + os.path.basename(p.FilePath)
if not os.path.isfile(cache_file_path):
return False
with opened_w_error(cache_file_path, 'r') as (F, error):
if error:
print("Exception opening cache file " + cache_file_path + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception creating cache file " + cache_file_path +
" Error Code: " + str(error.errno) + " Error: " + error.message + error.strerror)
return False
t = F.read()
F.close()
if len(t) < 2:
return False
p.Name = t.strip()
return True
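# Return convention for Set/Test/Get/GetAll below: [0] indicates success (or
# that the desired state already holds) and [-1] indicates failure; Get and
# GetAll additionally return the retrieved package properties in the list.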
def Set(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
ShowMof('SET', Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
try:
p = Params(Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
except Exception, e:
print('ERROR - Unable to initialize nxPackageProvider. ' +
e.message, file=sys.stdout)
LG().Log(
'ERROR', 'ERROR - Unable to initialize nxPackageProvider. ' + e.message)
return [-1]
installed, out = IsPackageInstalled(p)
if (installed and Ensure == 'present') or (not installed and Ensure == 'absent'): # Nothing to do
return [0]
result, out = DoEnableDisable(p)
if result is False:
op = ''
if Ensure == 'present':
op = 'Install'
else:
op = 'Un-install'
print('Failed to ' + op + ' ' + p.Name +
' output for command was: ' + out)
LG().Log('ERROR', 'Failed to ' + op + ' ' +
p.Name + ' output for command was: ' + out)
return [-1]
return [0]
def Test(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
ShowMof('TEST', Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
try:
p = Params(Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
except Exception, e:
print('ERROR - Unable to initialize nxPackageProvider. ' +
e.message, file=sys.stdout)
LG().Log(
'ERROR', 'ERROR - Unable to initialize nxPackageProvider. ' + e.message)
return [-1]
installed, out = IsPackageInstalled(p)
if (installed and Ensure == 'present') or (not installed and Ensure == 'absent'):
return [0]
return [-1]
def Get(Ensure, PackageManager, Name, FilePath, PackageGroup, Arguments, ReturnCode):
retval = -1
installed = False
ShowMof('GET', Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
try:
p = Params(Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
except Exception, e:
print('ERROR - Unable to initialize nxPackageProvider. ' +
e.message, file=sys.stdout)
LG().Log(
'ERROR', 'ERROR - Unable to initialize nxPackageProvider. ' + e.message)
return [retval, p.PackageDescription, p.Publisher, p.InstalledOn, p.Size, p.Version, installed]
installed, out = IsPackageInstalled(p)
out = out.replace('(none)','0') # for rpm EPOCH.
ParseInfo(p, out)
return [0, p.PackageManager, p.PackageDescription, p.Publisher, p.InstalledOn, p.Size, p.Version, installed, p.Architecture]
def GetAll(Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode):
pkgs = None
try:
p = Params(Ensure, PackageManager, Name,
FilePath, PackageGroup, Arguments, ReturnCode)
except Exception, e:
print('ERROR - Unable to initialize nxPackageProvider. ' +
e.message, file=sys.stdout)
LG().Log(
'ERROR', 'ERROR - Unable to initialize nxPackageProvider. ' + e.message)
return [-1, ]
cmd = 'LANG=en_US.UTF8 ' + p.cmds[p.PackageManager]['stat_all']
code, out = RunGetOutput(cmd, False)
pkgs = ParseAllInfo(out, p)
return [0, pkgs]
@contextmanager
def opened_w_error(filename, mode="r"):
"""
This context ensures the file is closed.
"""
try:
f = open(filename, mode)
except IOError, err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
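# Typical use of opened_w_error (this is the pattern used in ReadFile below):
#     with opened_w_error(path, 'r') as (F, error):
#         if error:
#             ...log and handle the IOError...
#         else:
#             data = F.read()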
def RunGetOutput(cmd, no_output, chk_err=True):
"""
Wrapper for subprocess.check_output.
    Execute 'cmd'. Returns the return code and STDOUT, trapping expected exceptions.
    Reports exceptions to Error if the chk_err parameter is True.
    Kills inactive subprocesses and their children if a 6 second interval is exceeded.
"""
def check_output(no_output, *popenargs, **kwargs):
"""
Backport from subprocess module from python 2.7
"""
if 'stdout' in kwargs:
raise ValueError(
'stdout argument not allowed, it will be overridden.')
if no_output:
out_file = None
else:
out_file = subprocess.PIPE
process = subprocess.Popen(stdout=out_file, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
# Exception classes used by this module.
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
def noop():
pass
try:
output = check_output(no_output, cmd, stderr=subprocess.STDOUT, shell=True)
except CalledProcessError, e:
if chk_err:
print("CalledProcessError. Error Code is " +
str(e.returncode), file=sys.stdout)
print(
"CalledProcessError. Command string was " + e.cmd, file=sys.stdout)
print("CalledProcessError. Command result was " + (e.output[:-1]).decode(
'utf8', 'ignore').encode("ascii", "ignore"), file=sys.stdout)
if no_output:
return e.returncode, None
else:
return e.returncode, e.output.decode('utf8', 'ignore').encode('ascii', 'ignore')
if no_output:
return 0, None
else:
return 0, output.decode('utf8', 'ignore').encode('ascii', 'ignore')
def GetTimeFromString(s):
    if s is None or len(s) == 0:
return None
fmt = []
fmt.append('%a, %d %b %Y %H:%M:%S %Z')
st = None
for f in fmt:
try:
st = time.strptime(s, f)
except ValueError:
continue
return st
def RemoveFile(path):
error = None
try:
os.remove(path)
except OSError, error:
print("Exception removing file" + path + " Error Code: " + str(error.errno)
+ " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception removing file" + path + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror)
except IOError, error:
print("Exception removing file" + path + " Error Code: " + str(error.errno)
+ " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception removing file" + path + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror)
return error
def LStatFile(path):
"""
LStat the file. Do not follow the symlink.
"""
d = None
error = None
try:
d = os.lstat(path)
except OSError, error:
print("Exception lstating file " + path + " Error Code: " + str(error.errno)
+ " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception lstating file " + path + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror)
except IOError, error:
print("Exception lstating file " + path + " Error Code: " + str(error.errno)
+ " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception lstating file " + path + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror)
return d
def MakeDirs(path):
error = None
try:
os.makedirs(path)
except OSError, error:
print("Exception making dir" + path + " Error Code: " + str(error.errno)
+ " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception making dir" + path + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror)
except IOError, error:
print("Exception making dir" + path + " Error Code: " + str(error.errno)
+ " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception making dir" + path + " Error Code: " +
str(error.errno) + " Error: " + error.message + error.strerror)
return error
def SetProxyFromConf():
"""
Check for PROXY definition in dsc.conf.
All we must do is set the appropriate value in the environment.
HTTP_PROXY
HTTPS_PROXY
"""
    path = helperlib.CONFIG_SYSCONFDIR + '/' + helperlib.CONFIG_SYSCONFDIR_DSC + '/dsc.conf'
    txt, error = ReadFile(path)
    if error:
return
for l in txt.splitlines():
if l.startswith('PROXY'):
info = l.split('=')[1].strip()
if 'https' in info:
os.environ['HTTPS_PROXY'] = info
if 'http:' in info:
os.environ['HTTP_PROXY'] = info
return
def ReadFile(path):
"""
Safely attempt to read a file,
ensuring file is always closed at exit.
Return the data and the exception object.
The data is None if an error occurred.
The error is None if the data was read.
Log results to stderr.
"""
d = None
error = None
with opened_w_error(path, 'r') as (F, error):
if error:
print("Exception opening file " + path + " Error Code: " + str(error.errno) + " Error: " + error.message + error.strerror, file=sys.stderr)
LG().Log('ERROR', "Exception opening file " + path + " Error Code: " + str(error.errno) + " Error: " + error.message + error.strerror)
else:
d = F.read()
return d, error
def GetRemoteFile(p):
SetProxyFromConf()
req = urllib2.Request(p.FilePath)
try:
resp = urllib2.urlopen(req)
except urllib2.URLError, e:
print(repr(e))
LG().Log('ERROR', repr(e))
return 1
p.LocalPath = '/tmp/' + os.path.basename(p.FilePath)
h = resp.info()
lm = h.getheader('last-modified')
lm_mtime = GetTimeFromString(lm)
dst_mtime = None
dst_st = None
data = None
if os.path.exists(p.LocalPath):
dst_st = LStatFile(p.LocalPath)
if dst_st is not None:
dst_mtime = time.gmtime(dst_st.st_mtime)
    if lm_mtime is not None and dst_mtime is not None and dst_mtime >= lm_mtime:
        # the cached copy is at least as new as the remote file; keep it and
        # skip the download (re-opening it in 'wb+' mode would truncate it)
        return 0
    data = b'keep going'  # non-empty sentinel so the download loop below runs
    with open(p.LocalPath, 'wb+') as F:
try:
while data:
data = resp.read(1048576)
if data is not None and len(data) > 0:
F.write(data)
except Exception, e:
F.close()
os.unlink(p.LocalPath)
print(repr(e))
LG().Log('ERROR', repr(e))
return 1
return 0
| {
"content_hash": "9674f037c7d4ff11f7e225c8a1a22d75",
"timestamp": "",
"source": "github",
"line_count": 915,
"max_line_length": 265,
"avg_line_length": 39.05245901639344,
"alnum_prop": 0.5663952089105309,
"repo_name": "MSFTOSSMgmt/WPSDSCLinux",
"id": "47c6c84caa870ec6c2d9ef7e794f825b8427e38a",
"size": "35934",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Providers/Scripts/2.6x-2.7x/Scripts/nxPackage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5870322"
},
{
"name": "C#",
"bytes": "98943"
},
{
"name": "C++",
"bytes": "670183"
},
{
"name": "CMake",
"bytes": "13826"
},
{
"name": "HTML",
"bytes": "166861"
},
{
"name": "Makefile",
"bytes": "164013"
},
{
"name": "Objective-C",
"bytes": "61644"
},
{
"name": "PowerShell",
"bytes": "40239"
},
{
"name": "Python",
"bytes": "1858427"
},
{
"name": "Shell",
"bytes": "8136"
},
{
"name": "SourcePawn",
"bytes": "60242"
},
{
"name": "Yacc",
"bytes": "35814"
}
],
"symlink_target": ""
} |
"""Heavy lifting geometry for IDF surfaces."""
from collections import MutableSequence
from itertools import product
from math import atan2, pi
from typing import Any, List, Optional, Tuple, Union # noqa
from eppy.geometry.surface import area
from eppy.idf_msequence import Idf_MSequence # noqa
import numpy as np
from shapely import wkt
from shapely.geometry.polygon import Polygon as SPoly
from shapely.geometry.polygon import orient
from six.moves import zip
from .clippers import Clipper2D, Clipper3D
from .segments import Segment
from .transformations import align_face, invert_align_face
from .vectors import Vector2D, Vector3D
from ..utilities import almostequal
class Polygon(Clipper2D, MutableSequence):
"""Base class for 2D and 3D polygons."""
@property
def n_dims(self):
pass
@property
def vector_class(self):
pass
@property
def normal_vector(self):
pass
def __init__(self, vertices):
# type: (Any) -> None
super(Polygon, self).__init__()
self.vertices = [self.vector_class(*v) for v in vertices]
self.as_2d = Polygon2D
def __repr__(self):
# type: () -> str
class_name = type(self).__name__
return "{}({!r})".format(class_name, self.vertices)
def __len__(self):
# type: () -> int
return len(self.vertices)
def __delitem__(self, key):
del self.vertices[key]
def __getitem__(self, key):
# type: (Union[int, slice]) -> Any
return self.vertices[key]
def __setitem__(self, key, value):
self.vertices[key] = value
def __add__(self, other): # type: (Polygon) -> Union[None, Polygon]
if len(self) == len(other) and hasattr(other[0], "__len__"):
# add together two equal polygons
vertices = [v1 + v2 for v1, v2 in zip(self, other)]
elif len(self[0]) == len(other):
# translate by a vector
vertices = [v + other for v in self]
else:
raise ValueError("Incompatible objects: %s + %s" % (self, other))
return self.__class__(vertices)
def __sub__(self, other):
if len(self) == len(other) and hasattr(other[0], "__len__"):
# subtract two equal polygons
vertices = [v1 - v2 for v1, v2 in zip(self, other)]
elif len(self[0]) == len(other):
# translate by a vector
vertices = [v - other for v in self]
else:
raise ValueError("Incompatible objects: %s + %s" % (self, other))
return self.__class__(vertices)
def insert(self, key, value):
self.vertices.insert(key, value)
@property
def area(self):
# type: () -> np.float64
return area(self)
@property
def bounding_box(self):
# type: () -> Polygon
aligned = align_face(self)
top_left = Vector3D(min(aligned.xs), max(aligned.ys), max(aligned.zs))
bottom_left = Vector3D(min(aligned.xs), min(aligned.ys), min(aligned.zs))
bottom_right = Vector3D(max(aligned.xs), min(aligned.ys), min(aligned.zs))
top_right = Vector3D(max(aligned.xs), max(aligned.ys), max(aligned.zs))
bbox = Polygon3D([top_left, bottom_left, bottom_right, top_right])
return invert_align_face(self, bbox)
def buffer(self, distance=None, join_style=2):
# type: (Optional[float], Optional[int]) -> Polygon2D
"""Returns a representation of all points within a given distance of the polygon.
:param join_style: The styles of joins between offset segments: 1 (round), 2 (mitre), and 3 (bevel).
"""
s_poly = SPoly(self.vertices)
core = orient(s_poly.buffer(distance=distance, join_style=join_style), sign=1.0)
return Polygon2D(core.boundary.coords)
@property
def centroid(self):
# type: () -> Vector2D
"""The centroid of a polygon."""
return self.vector_class(
sum(self.xs) / len(self), sum(self.ys) / len(self), sum(self.zs) / len(self)
)
@property
def edges(self):
# type: () -> List[Segment]
"""A list of edges represented as Segment objects."""
vertices = self.vertices
edges = [
Segment(vertices[i], vertices[(i + 1) % len(self)])
for i in range(len(self))
]
return edges
def invert_orientation(self):
# type: () -> Polygon
"""Reverse the order of the vertices.
This can be used to create a matching surface, e.g. the other side of a wall.
:returns: A polygon.
"""
return self.__class__(reversed(self.vertices))
@property
def is_convex(self):
        return is_convex_polygon(self.vertices_list)
@property
def points_matrix(self):
# type: () -> np.ndarray
"""Matrix representing the points in a polygon.
Format::
            [[x1, y1, z1]
             [x2, y2, z2]
             ...
             [xn, yn, zn]]  # rows are vertices; there is no z column for a 2D polygon
"""
points = np.zeros((len(self.vertices), self.n_dims))
for i, v in enumerate(self.vertices):
points[i, :] = v.as_array(dims=self.n_dims)
return points
@property
def vertices_list(self):
# type: () -> List[Tuple[float, float, Optional[float]]]
"""A list of the vertices in the format required by pyclipper.
:returns: A list of tuples like [(x1, y1), (x2, y2),... (xn, yn)].
"""
return [pt.as_tuple(dims=self.n_dims) for pt in self.vertices]
@property
def xs(self):
# type: () -> List[float]
return [pt.x for pt in self.vertices]
@property
def ys(self):
# type: () -> List[float]
return [pt.y for pt in self.vertices]
@property
def zs(self):
pass
class Polygon2D(Polygon):
"""Two-dimensional polygon."""
n_dims = 2
vector_class = Vector2D
def __eq__(self, other):
if self.__dict__ == other.__dict__: # try the simple case first
return True
else: # also cover same shape in different rotation
if self.difference(other):
return False
if almostequal(self.normal_vector, other.normal_vector):
return True
return False
@property
def normal_vector(self):
# type: () -> Vector3D
as_3d = Polygon3D((v.x, v.y, 0) for v in self)
return as_3d.normal_vector
def project_to_3D(self, example3d):
# type: (Polygon3D) -> Polygon3D
"""Project the 2D polygon rotated into 3D space.
This is used to return a previously rotated 3D polygon back to its original orientation, or to to put polygons
generated from pyclipper into the desired orientation.
:param example3D: A 3D polygon in the desired plane.
:returns: A 3D polygon.
"""
points = self.points_matrix
proj_axis = example3d.projection_axis
a = example3d.distance
v = example3d.normal_vector
projected_points = project_to_3D(points, proj_axis, a, v)
return Polygon3D(projected_points)
@property
def zs(self):
# type: () -> List[float]
return [0.0] * len(self.vertices)
class Polygon3D(Clipper3D, Polygon):
"""Three-dimensional polygon."""
n_dims = 3
vector_class = Vector3D
def __eq__(self, other):
# check they're in the same plane
if not almostequal(self.normal_vector, other.normal_vector):
return False
if not almostequal(self.distance, other.distance):
return False
# if they are in the same plane, check they completely overlap in 2D
return self.project_to_2D() == other.project_to_2D()
@property
def zs(self):
# type: () -> List[float]
return [pt.z for pt in self.vertices]
@property
def normal_vector(self):
"""Unit normal vector perpendicular to the polygon in the outward direction.
We use Newell's Method since the cross-product of two edge vectors is not valid for concave polygons.
https://www.opengl.org/wiki/Calculating_a_Surface_Normal#Newell.27s_Method
"""
n = [0.0, 0.0, 0.0]
for i, v_curr in enumerate(self.vertices):
v_next = self.vertices[(i + 1) % len(self.vertices)]
n[0] += (v_curr.y - v_next.y) * (v_curr.z + v_next.z)
n[1] += (v_curr.z - v_next.z) * (v_curr.x + v_next.x)
n[2] += (v_curr.x - v_next.x) * (v_curr.y + v_next.y)
return Vector3D(*n).normalize()
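        # Note: the direction of this normal follows the vertex winding order,
        # which is what is_clockwise() below relies on via a dot product test.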
@property
def distance(self):
# type: () -> np.float64
"""Distance from the origin to the polygon.
Where v[0] * x + v[1] * y + v[2] * z = a is the equation of the plane containing the polygon (and where v
is the polygon normal vector).
:returns: The distance from the origin to the polygon.
"""
v = self.normal_vector
pt = self.points_matrix[0] # arbitrary point in the polygon
d = np.dot(v, pt)
return d
@property
def projection_axis(self):
# type: () -> int
"""An axis which will not lead to a degenerate surface.
:returns: The axis index.
"""
proj_axis = max(range(3), key=lambda i: abs(self.normal_vector[i]))
return proj_axis
@property
def is_horizontal(self):
# type: () -> bool
"""Check if polygon is in the xy plane.
:returns: True if the polygon is in the xy plane, else False.
"""
return bool(np.array(self.zs).std() < 1e-8)
def is_clockwise(self, viewpoint):
# type: (Vector3D) -> np.bool_
"""Check if vertices are ordered clockwise
This function checks the vertices as seen from the viewpoint.
:param viewpoint: A point from which to view the polygon.
:returns: True if vertices are ordered clockwise when observed from the given viewpoint.
"""
arbitrary_pt = self.vertices[0]
v = arbitrary_pt - viewpoint
n = self.normal_vector
sign = np.dot(v, n)
return sign > 0
def is_coplanar(self, other):
# type: (Polygon3D) -> bool
"""Check if polygon is in the same plane as another polygon.
This includes the same plane but opposite orientation.
:param other: Another polygon.
:returns: True if the two polygons are coplanar, else False.
"""
n1 = self.normal_vector
n2 = other.normal_vector
d1 = self.distance
d2 = other.distance
if almostequal(n1, n2) and almostequal(d1, d2):
return True
elif almostequal(n1, -n2) and almostequal(d1, -d2):
return True
else:
return False
def outside_point(self, entry_direction="counterclockwise"):
# type: (str) -> Vector3D
"""Return a point outside the zone to which the surface belongs.
The point will be outside the zone, respecting the global geometry rules
for vertex entry direction.
:param entry_direction: Either "clockwise" or "counterclockwise", as seen from outside the space.
:returns: A point vector.
"""
entry_direction = entry_direction.lower()
if entry_direction == "clockwise":
inside = self.vertices[0] - self.normal_vector
elif entry_direction == "counterclockwise":
inside = self.vertices[0] + self.normal_vector
else:
raise ValueError("invalid value for entry_direction '%s'" % entry_direction)
return inside
def order_points(self, starting_position):
# type: (str) -> Polygon3D
"""Reorder the vertices based on a starting position rule.
:param starting_position: The string that defines vertex starting position in EnergyPlus.
:returns: The reordered polygon.
"""
if starting_position == "upperleftcorner":
bbox_corner = self.bounding_box[0]
elif starting_position == "lowerleftcorner":
bbox_corner = self.bounding_box[1]
elif starting_position == "lowerrightcorner":
bbox_corner = self.bounding_box[2]
elif starting_position == "upperrightcorner":
bbox_corner = self.bounding_box[3]
else:
raise ValueError("%s is not a valid starting position" % starting_position)
start_index = self.index(bbox_corner.closest(self))
new_vertices = [self[(start_index + i) % len(self)] for i in range(len(self))]
return Polygon3D(new_vertices)
def project_to_2D(self):
# type: () -> Polygon2D
"""Project the 3D polygon into 2D space.
This is so that we can perform operations on it using pyclipper library.
Project onto either the xy, yz, or xz plane. (We choose the one that
avoids degenerate configurations, which is the purpose of proj_axis.)
:returns: A 2D polygon.
"""
points = self.points_matrix
projected_points = project_to_2D(points, self.projection_axis)
return Polygon2D([pt[:2] for pt in projected_points])
def normalize_coords(self, ggr):
"""Order points, respecting the global geometry rules
:param ggr: EnergyPlus GlobalGeometryRules object.
:returns: The normalized polygon.
"""
try:
entry_direction = ggr.Vertex_Entry_Direction
except AttributeError:
entry_direction = "counterclockwise"
outside_point = self.outside_point(entry_direction)
return normalize_coords(self, outside_point, ggr)
def from_wkt(self, wkt_poly):
# type: (str) -> Polygon3D
"""Convert a wkt representation of a polygon to GeomEppy.
This also accounts for the possible presence of inner rings by linking them to the outer ring.
:param wkt_poly: A text representation of a polygon in well known text (wkt) format.
:returns: A polygon.
"""
poly = wkt.loads(wkt_poly)
exterior = Polygon3D(poly.exterior.coords)
if poly.interiors:
# make the exterior into a geomeppy poly
for inner_ring in poly.interiors:
# make the interior into a geomeppy poly
interior = Polygon3D(inner_ring.coords)
# find the nearest points on the exterior and interior
links = list(product(interior, exterior))
links = sorted(links, key=lambda x: x[0].relative_distance(x[1]))
on_interior = links[0][0]
on_exterior = links[0][1]
# join them up
exterior = Polygon3D(
exterior[exterior.index(on_exterior) :]
+ exterior[: exterior.index(on_exterior) + 1]
)
interior = Polygon3D(
interior[interior.index(on_interior) :]
+ interior[: interior.index(on_interior) + 1]
)
exterior = Polygon3D(exterior[:] + interior[:])
return exterior
def break_polygons(poly, hole):
# type: (Polygon, Polygon) -> List[Polygon]
"""Break up a surface with a hole in it.
This produces two surfaces, neither of which have a hole in them.
:param poly: The surface with a hole in.
:param hole: The hole.
:returns: Two Polygon3D objects.
"""
# take the two closest points on the surface perimeter
links = list(product(poly, hole))
links = sorted(
links, key=lambda x: x[0].relative_distance(x[1])
) # fast distance check
first_on_poly = links[0][0]
last_on_poly = links[1][0]
first_on_hole = links[1][1]
last_on_hole = links[0][1]
new_poly = section(first_on_poly, last_on_poly, poly[:] + poly[:]) + section(
first_on_hole, last_on_hole, reversed(hole[:] + hole[:])
)
new_poly = Polygon3D(new_poly)
union = hole.union(new_poly)[0]
new_poly2 = poly.difference(union)[0]
if not almostequal(new_poly.normal_vector, poly.normal_vector):
new_poly = new_poly.invert_orientation()
if not almostequal(new_poly2.normal_vector, poly.normal_vector):
new_poly2 = new_poly2.invert_orientation()
return [new_poly, new_poly2]
def section(first, last, coords):
section_on_hole = []
for item in coords:
if item == first:
section_on_hole.append(item)
elif section_on_hole:
section_on_hole.append(item)
if item == last:
break
return section_on_hole
def project(pt, proj_axis):
# type: (np.ndarray, int) -> Any
"""Project point pt onto either the xy, yz, or xz plane
We choose the one that avoids degenerate configurations, which is the
purpose of proj_axis.
See http://stackoverflow.com/a/39008641/1706564
"""
return tuple(c for i, c in enumerate(pt) if i != proj_axis)
def project_inv(
pt, proj_axis, a, v
): # type: (np.ndarray, int, np.float64, Vector3D) -> Any
"""Returns the vector w in the surface's plane such that project(w) equals x.
See http://stackoverflow.com/a/39008641/1706564
:param pt: A two-dimensional point.
:param proj_axis: The axis to project into.
:param a: Distance to the origin for the plane to project into.
:param v: Normal vector of the plane to project into.
:returns: The transformed point.
"""
w = list(pt)
w[proj_axis:proj_axis] = [0.0]
c = a
for i in range(3):
c -= w[i] * v[i]
c /= v[proj_axis]
w[proj_axis] = c
return tuple(w)
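# Note: for any point w lying on the plane {x : dot(v, x) == a}, project_inv
# undoes project, i.e. project_inv(project(w, proj_axis), proj_axis, a, v)
# recovers w (up to floating point error).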
def project_to_2D(vertices, proj_axis):
# type: (np.ndarray, int) -> List[Tuple[np.float64, np.float64]]
"""Project a 3D polygon into 2D space.
:param vertices: The three-dimensional vertices of the polygon.
:param proj_axis: The axis to project into.
:returns: The transformed vertices.
"""
points = [project(x, proj_axis) for x in vertices]
return points
def project_to_3D(
vertices, proj_axis, a, v
): # type: (np.ndarray, int, np.float64, Vector3D) -> List[Tuple[np.float64, np.float64, np.float64]]
"""Project a 2D polygon into 3D space.
:param vertices: The two-dimensional vertices of the polygon.
:param proj_axis: The axis to project into.
:param a: Distance to the origin for the plane to project into.
:param v: Normal vector of the plane to project into.
:returns: The transformed vertices.
"""
return [project_inv(pt, proj_axis, a, v) for pt in vertices]
def normalize_coords(
poly, outside_pt, ggr=None
): # type: (Polygon3D, Vector3D, Union[List, None, Idf_MSequence]) -> Polygon3D
"""Put coordinates into the correct format for EnergyPlus dependent on Global Geometry Rules (GGR).
:param poly: Polygon with new coordinates, but not yet checked for compliance with GGR.
:param outside_pt: An outside point of the new polygon.
:param ggr: EnergyPlus GlobalGeometryRules object.
:returns: The normalized polygon.
"""
# check and set entry direction
poly = set_entry_direction(poly, outside_pt, ggr)
# check and set starting position
poly = set_starting_position(poly, ggr)
return poly
def set_entry_direction(poly, outside_pt, ggr=None):
"""Check and set entry direction for a polygon.
:param poly: A polygon.
:param outside_pt: A point beyond the outside face of the polygon.
:param ggr: EnergyPlus global geometry rules
:return: A polygon with the vertices correctly oriented.
"""
if not ggr:
entry_direction = "counterclockwise" # EnergyPlus default
else:
entry_direction = ggr.Vertex_Entry_Direction.lower()
if entry_direction == "counterclockwise":
if poly.is_clockwise(outside_pt):
poly = poly.invert_orientation()
elif entry_direction == "clockwise":
if not poly.is_clockwise(outside_pt):
poly = poly.invert_orientation()
return poly
def set_starting_position(poly, ggr=None):
"""Check and set starting position."""
if not ggr:
starting_position = "upperleftcorner" # EnergyPlus default
else:
starting_position = ggr.Starting_Vertex_Position.lower()
poly = poly.order_points(starting_position)
return poly
def intersect(poly1, poly2):
# type: (Polygon, Polygon) -> List[Polygon]
"""Calculate the polygons to represent the intersection of two polygons.
:param poly1: The first polygon.
:param poly2: The second polygon.
:returns: A list of unique polygons.
"""
polys = [] # type: List[Polygon]
polys.extend(poly1.intersect(poly2))
polys.extend(poly2.intersect(poly1))
if is_hole(poly1, poly2):
polys.extend(break_polygons(poly1, poly2))
elif is_hole(poly2, poly1):
polys.extend(break_polygons(poly2, poly1))
else:
polys.extend(poly1.difference(poly2))
polys.extend(poly2.difference(poly1))
return polys
def is_hole(surface, possible_hole):
# type: (Polygon, Polygon) -> bool
"""Identify if an intersection is a hole in the surface.
    Check whether the intersection touches an edge of the surface. If it doesn't then it represents a hole, and this needs
further processing into valid EnergyPlus surfaces.
:param surface: The first surface.
:param possible_hole: The intersection into the surface.
:returns: True if the possible hole is a hole in the surface.
"""
if surface.area < possible_hole.area:
return False
collinear_edges = (
edges[0]._is_collinear(edges[1])
for edges in product(surface.edges, possible_hole.edges)
)
return not any(collinear_edges)
def bounding_box(polygons):
"""The bounding box which encompasses all of the polygons in the x,y plane.
:param polygons: A list of polygons.
:return: A 2D polygon.
"""
top_left = (
min(min(c[0] for c in f.coords) for f in polygons),
max(max(c[1] for c in f.coords) for f in polygons),
)
bottom_left = (
min(min(c[0] for c in f.coords) for f in polygons),
min(min(c[1] for c in f.coords) for f in polygons),
)
bottom_right = (
max(max(c[0] for c in f.coords) for f in polygons),
min(min(c[1] for c in f.coords) for f in polygons),
)
top_right = (
max(max(c[0] for c in f.coords) for f in polygons),
max(max(c[1] for c in f.coords) for f in polygons),
)
return Polygon2D([top_left, bottom_left, bottom_right, top_right])
def is_convex_polygon(polygon): # noqa
"""Return True if the polynomial defined by the sequence of 2D
points is 'strictly convex': points are valid, side lengths non-
zero, interior angles are strictly between zero and a straight
angle, and the polygon does not intersect itself.
See: https://stackoverflow.com/a/45372025/1706564
:: NOTES:
1. Algorithm: the signed changes of the direction angles
from one side to the next side must be all positive or
all negative, and their sum must equal plus-or-minus
one full turn (2 pi radians). Also check for too few,
invalid, or repeated points.
2. No check is explicitly done for zero internal angles
(180 degree direction-change angle) as this is covered
in other ways, including the `n < 3` check.
"""
two_pi = 2 * pi
try: # needed for any bad points or direction changes
# Check for too few points
if len(polygon) < 3:
return False
# Get starting information
old_x, old_y = polygon[-2]
new_x, new_y = polygon[-1]
new_direction = atan2(new_y - old_y, new_x - old_x)
angle_sum = 0.0
# Check each point (the side ending there, its angle) and accum. angles
for ndx, newpoint in enumerate(polygon):
# Update point coordinates and side directions, check side length
old_x, old_y, old_direction = new_x, new_y, new_direction
new_x, new_y = newpoint
new_direction = atan2(new_y - old_y, new_x - old_x)
if old_x == new_x and old_y == new_y:
return False # repeated consecutive points
# Calculate & check the normalized direction-change angle
angle = new_direction - old_direction
if angle <= -pi:
angle += two_pi # make it in half-open interval (-Pi, Pi]
elif angle > pi:
angle -= two_pi
if ndx == 0: # if first time through loop, initialize orientation
if angle == 0.0:
return False
orientation = 1.0 if angle > 0.0 else -1.0
else: # if other time through loop, check orientation is stable
if orientation * angle <= 0.0: # not both pos. or both neg.
return False
# Accumulate the direction-change angle
angle_sum += angle
# Check that the total number of full turns is plus-or-minus 1
return abs(round(angle_sum / two_pi)) == 1
except (ArithmeticError, TypeError, ValueError):
return False # any exception means not a proper convex polygon
| {
"content_hash": "fcecee1c2ff5dcf37c626b49ecd0f95e",
"timestamp": "",
"source": "github",
"line_count": 750,
"max_line_length": 118,
"avg_line_length": 33.910666666666664,
"alnum_prop": 0.605630480084929,
"repo_name": "jamiebull1/geomeppy",
"id": "ce51e4673f426941bc30d583bb24cd3b35dd30bd",
"size": "25433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geomeppy/geom/polygons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "381"
},
{
"name": "Python",
"bytes": "206395"
}
],
"symlink_target": ""
} |
import pandas as pd
import pandas.util.testing as tm
from pandas import MultiIndex
def check_level_names(index, names):
assert [level.name for level in index.levels] == list(names)
def test_slice_keep_name():
x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')],
names=['x', 'y'])
assert x[1:].names == x.names
def test_index_name_retained():
# GH9857
result = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]})
result = result.set_index('z')
result.loc[10] = [9, 10]
df_expected = pd.DataFrame({'x': [1, 2, 6, 9],
'y': [2, 2, 8, 10],
'z': [-5, 0, 5, 10]})
df_expected = df_expected.set_index('z')
tm.assert_frame_equal(result, df_expected)
def test_changing_names(idx):
# names should be applied to levels
level_names = [level.name for level in idx.levels]
check_level_names(idx, idx.names)
view = idx.view()
copy = idx.copy()
shallow_copy = idx._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in idx.names]
idx.names = new_names
check_level_names(idx, new_names)
# but not on copies
check_level_names(view, level_names)
check_level_names(copy, level_names)
check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
check_level_names(idx, new_names)
def test_take_preserve_name(idx):
taken = idx.take([3, 0, 1])
assert taken.names == idx.names
def test_copy_names():
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(idx, index_names):
# names are assigned in setup
names = index_names
level_names = [level.name for level in idx.levels]
assert names == level_names
# setting bad names on existing
index = idx
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = idx.levels
major_labels, minor_labels = idx.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
| {
"content_hash": "b11b1b15f45b7368482fcbc577d1330c",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 72,
"avg_line_length": 33.57017543859649,
"alnum_prop": 0.5798275411549516,
"repo_name": "pratapvardhan/pandas",
"id": "a9fbb55679173e6bc28a15bc1c5838e9c9acfe6a",
"size": "3853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/indexes/multi/test_names.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3847"
},
{
"name": "C",
"bytes": "432930"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "563"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "13598412"
},
{
"name": "Shell",
"bytes": "25368"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0003_auto_20180309_2251'),
]
operations = [
migrations.DeleteModel(
name='ProjectApplication',
),
migrations.AlterField(
model_name='application',
name='group_choice',
field=models.CharField(choices=[('DEVOPS', 'DEVOPS'), ('PR', 'PR'), ('LABOPS', 'LABOPS'), ('ER', 'ER')], default=('DEVOPS', 'DEVOPS'), max_length=255, verbose_name='Ønsket gruppe'),
),
]
| {
"content_hash": "71486f2522f16172a62d972668a16c22",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 193,
"avg_line_length": 29.476190476190474,
"alnum_prop": 0.5864297253634895,
"repo_name": "hackerspace-ntnu/website",
"id": "25b7009b283ec72be8e5d79ff07de88429ca800c",
"size": "694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/migrations/0004_auto_20180731_1358.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16771"
},
{
"name": "HTML",
"bytes": "235369"
},
{
"name": "JavaScript",
"bytes": "43249"
},
{
"name": "Python",
"bytes": "323186"
}
],
"symlink_target": ""
} |
'''
Stuff for driving MS office applications from Python using COM
Currently just Excel but Word will come soon.
'''
from __future__ import print_function
from win32com.client import Dispatch
from types import *
from string import uppercase
class Excel:
'''
Wrapper for MS Excel derived from that in Python Programming on Win32
'''
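    # Minimal usage sketch (the __main__ block at the bottom of this module
    # exercises the full API):
    #     xls = Excel()              # open a new workbook
    #     xls.setCell('Hi', 2, 2)    # write to row 2, column 2
    #     print(xls.getCell(2, 2))   # read it back
    #     xls.visible()              # show the Excel window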
def __init__(self,filename=None):
'''
Open a new Excel spreadsheet optionally associated with a file
'''
self.xlApp = Dispatch("Excel.Application")
if filename:
self.filename = filename
self.xlBook = self.xlApp.Workbooks.Open(filename)
else:
self.xlBook = self.xlApp.Workbooks.Add()
self.filename = ''
self.open = 1
def save(self, newfilename=None):
'''
Save the workbook either to the default file, another file,
or let Excel query the user where to save it.
'''
if newfilename:
self.filename = newfilename
self.xlBook.SaveAs(newfilename)
else:
self.xlBook.Save()
def close(self):
self.xlBook.Close(SaveChanges=0)
del self.xlApp
self.open = 0
def getCell(self, row, col, sheet=1):
'''
Returns the value in cell (row,col) or None if it is blank.
'''
xlSheet = self.xlBook.Worksheets(sheet)
return xlSheet.Cells(row,col).Value
def getCellFormula(self, row, col, sheet=1):
'''
Returns the formula in cell (row,col) or the value if
there is no formula. If there is no value nor formula,
None is returned.
'''
xlSheet = self.xlBook.Worksheets(sheet)
result = xlSheet.Cells(row,col).Formula
if result == '': # A blank field seems to return a blank string
result = None
return result
def setCell(self, value, row, col, sheet=1):
'''
Sets the value in cell (row,col).
'''
xlSheet = self.xlBook.Worksheets(sheet)
xlSheet.Cells(row,col).Value = value
def getRange(self, row1, col1, row2=None, col2=None, sheet=1):
'''
Returns the data in the given range as a 2d array (i.e., as
a tuple of tuples). If the bottom corner is not specified
or is incompletely specified, assume a dimension of 1.
'''
if not row2:
row2 = row1
if not col2:
col2 = col1
xlSheet = self.xlBook.Worksheets(sheet)
cell1 = xlSheet.Cells(row1,col1)
cell2 = xlSheet.Cells(row2,col2)
return xlSheet.Range(cell1,cell2).Value
def matrixDimensions(self, data):
'''
        Determine the dimension of the matrix data which can be a
        scalar, vector, or 2-D matrix. Allows for string data, or for
        matrices in which the first row or column are string labels
for series ... so look at the last row to determine the length
of a row (= number of columns). If the data is a vector then
it is taken as a row-vector in order to be consistent with how
the default extension happens when assigning a simple list or
vector into a rectangular range in Excel.
'''
last = None
n = m = 1
try:
n = len(data)
last = data[-1]
except TypeError:
n = m = 1 # We have a scalar
if last:
if type(last) == StringType:
m = n # Row-vector of strings
n = 1
else:
try:
m = len(last)
except TypeError:
m = n # Row-vector of scalars
n = 1
return (n,m)
def setRange(self, data, row1=1, col1=1, row2=None, col2=None, sheet=1):
'''
Set the range of cells to the given data.
If both corners of the range are specified, the corresponding
piece of data is copied into the range. If data is too small,
then it is mystically extended to fill the range. E.g., you
can fill a range from a scalar or a vector. Vectors are treated
as row-vectors when filling a rectangular region.
        Optionally, you may specify only the top-left corner of the range in
        row1, col1 and omit row2/col2 - the other corner is figured
        out from the dimension of the data. This can always be overridden by
specifying the full range coordinates.
If no coordinates are given, the data is put into the top left
of the spreadsheet.
Returns the range that was set.
'''
(n,m) = self.matrixDimensions(data)
if not row2:
row2 = row1 + n - 1
if not col2:
col2 = col1 + m - 1
xlSheet = self.xlBook.Worksheets(sheet)
cell1 = xlSheet.Cells(row1,col1)
cell2 = xlSheet.Cells(row2,col2)
xlSheet.Range(cell1,cell2).Value = data
return (row1, col1, row2, col2)
def getContiguousRange(self, row1, col1, sheet=1):
'''
Returns data in the range which forms a contiguous
block with top-left corner in cell (row1,col1).
Starting from the specified cell, scan down/across
the first column/row and identify the range bordered
by blank cells. Blanks within the region will be
set to None.
'''
xlSheet = self.xlBook.Worksheets(sheet)
row2 = row1
while xlSheet.Cells(row2+1,col1).Value not in [None,'']:
row2 = row2 + 1
col2 = col1
while xlSheet.Cells(row1,col2+1).Value not in [None,'']:
col2 = col2 + 1
return self.getRange(row1, col1, row2, col2, sheet=sheet)
def selectRange(self, row1, col1, row2=None, col2=None, sheet=1):
'''
Select the range of cells on the specified sheet. It also
has to select that sheet as the active worksheet.
'''
if not row2:
row2 = row1
if not col2:
col2 = col1
xlSheet = self.xlBook.Worksheets(sheet)
xlSheet.Select()
cell1 = xlSheet.Cells(row1,col1)
cell2 = xlSheet.Cells(row2,col2)
xlSheet.Range(cell1,cell2).Select()
def chartRange(self, row1, col1, row2, col2, sheet=1,
**keys):
'''
Chart the data in the specified range. Additional options
are processed by chartSelectedRange.
'''
self.selectRange(row1, col1, row2, col2, sheet=sheet)
keys['sheet'] = sheet
apply(self.chartSelectedRange, (), keys)
def chartSelectedRange(self,
title=None, xlabel=None, ylabel=None,
plotby='columns',
charttype='xy',
sheet=1,
xmin=None, xmax=None,
ymin=None, ymax=None,
xlog=0, ylog=0):
'''
The interface to Excel charts. Just a few of the capabilities
are exposed here.
[The first of a set of options is the default]
plotby = 'columns' ... data series run down columns
. = 'rows' ... across rows
charttype = 'xy' ... XY scatter plot with lines and points.
. First series is X. Others are y1, y2, etc.
. = 'surface' ... Surface plot of a scalar function of
. two variables. Data should be a grid of the function.
. = 'contour' or 'colorcontour' ... Contour plot of a scalar
. function of two variables. Data should be a grid of
. values.
xmin and xmax = min/max values of the x or category axis
. It defaults to autoscale by Excel. This only applies to
        .  XY plots (since the surface/contour plots do not use
. values for the category axes ... they use string labels)
ymin and ymax = min/max values of the y or value axis
. It defaults to auto by Excel. Applies to all charts.
xlog = 0 ... use a linear for x or category axis.
. = 1 ... use a log (values must be positive)
. This only applies to XY plots.
ylog = 0 ... use a linear for the value or Y axis
. = 1 ... use a log .
. Applies to all charts
If the first element of each data series is a string, it is
used to label the series. If this string is representable as
a numerical value you must precede it with a single quote to
force Excel to treat it as a string. Note that you must use
strings. If you use numbers it will be interpreted as data
and incorporated into the plot. For the 2-D plots (xy,
surface, contour) you can border the actual data on left and
on the top with strings to label axes.
'''
charttypes = {'xy':74, 'surface':83, 'colorcontour':85, 'contour':56}
try:
charttype = charttypes[charttype]
except KeyError:
print('Excel.chartSelectedRange: Unknown charttype', charttype, ' defaulting to XY')
charttype = charttypes['xy']
# Make the chart and set how the data will be interpreted
        # Taking a reference to the active chart does not seem to work???
self.xlApp.Charts.Add()
self.xlApp.ActiveChart.ChartType = charttype
xlRows=1
xlColumns=2
if plotby == 'rows':
self.xlApp.ActiveChart.PlotBy = xlRows
elif plotby == 'columns':
self.xlApp.ActiveChart.PlotBy = xlColumns
else:
print('Excel.chartSelectedRange: Unknown plotby', charttype, ' defaulting to columns')
self.xlApp.ActiveChart.PlotBy = xlColumns
# Set the title and axis labels
if title:
self.xlApp.ActiveChart.HasTitle = 1
self.xlApp.ActiveChart.ChartTitle.Characters.Text = title
xlCategory=1
xlValue=2
#xlSeries=3
xlPrimary=1
#xlSecondary=2
if xlabel:
self.xlApp.ActiveChart.Axes(xlCategory,xlPrimary).HasTitle = 1
self.xlApp.ActiveChart.Axes(xlCategory,xlPrimary).AxisTitle.Characters.Text = xlabel
if ylabel:
self.xlApp.ActiveChart.Axes(xlValue,xlPrimary).HasTitle = 1
self.xlApp.ActiveChart.Axes(xlValue,xlPrimary).AxisTitle.Characters.Text = ylabel
# Set the axis scale and log options
xlLinear = 0xffffefdc
xlLogarithmic=0xffffefdb
if ymin != None:
self.xlApp.ActiveChart.Axes(xlValue).MinimumScale = ymin
if ymax != None:
self.xlApp.ActiveChart.Axes(xlValue).MaximumScale = ymax
if ylog:
self.xlApp.ActiveChart.Axes(xlValue).ScaleType = xlLogarithmic
if charttype == charttypes['xy']:
if xmin != None:
self.xlApp.ActiveChart.Axes(xlCategory).MinimumScale = xmin
if xmax != None:
self.xlApp.ActiveChart.Axes(xlCategory).MaximumScale = xmax
if xlog:
self.xlApp.ActiveChart.Axes(xlCategory).ScaleType = xlLogarithmic
# A legend is kinda useful
self.xlApp.ActiveChart.HasLegend = 1
def chartData(self, data, row1=1, col1=1, sheet=1, **keys):
'''
Simplest interface for creating a chart. Data is a matrix
of data. Paste it into a sheet and plot it. All arguments
except the data can be defaulted. Optional arguments are passed
to the actual charting function.
'''
(n,m) = self.matrixDimensions(data)
row2 = row1 + n - 1
col2 = col1 + m - 1
self.setRange(data, row1, col1, row2, col2, sheet=sheet)
keys['sheet'] = sheet
apply(self.chartRange, (row1, col1, row2, col2), keys)
def a1(self, row, col, absrow=0, abscol=0):
'''
Return a string that may be used to address the cell in
a formula. The row and/or column address may be made absolute
by setting absrow/col to true values.
Internally we are addressing cells in the spreadsheet using
integers (row,col), which is what Excel calls R1C1 style
references. But, unless the user has turned-on R1C1 style
addressing (unlikely!) this will not work in formulae
so we must translate to the usual addressing style, called A1,
which uses letters for the columns and numbers for the rows,
writing the column index first.
E.g., A1 = R1C1 = (1,1), and B3 = R3C2 = (3,2).
Absolute addresses are preceded with a $ symbol.
'''
ar = ac = ''
if absrow: ar = '$'
if abscol: ac = '$'
if col < 1 or col > 256:
            raise ValueError('column index must be in [1,256]')
(c1,c2) = divmod(col-1,26)
if c1:
            # c1 counts completed alphabets, so shift to a 0-based letter index
            c = uppercase[c1 - 1] + uppercase[c2]
else:
c = uppercase[c2]
r = str(row)
return ac + c + ar + r
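        # e.g. a1(3, 2) -> 'B3', a1(3, 2, absrow=1, abscol=1) -> '$B$3',
        #      a1(1, 28) -> 'AB1'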
def visible(self):
'''
Make the spreadsheet visible.
'''
self.xlApp.Visible = 1
def invisible(self):
'''
Make the spreadsheet invisible.
'''
self.xlApp.Visible = 0
def isvisible(self):
'''
Returns true if the spreadsheet is visible.
'''
return self.xlApp.Visible
def __del__(self):
'''
        Destructor ... may be unnecessary but it cannot hurt.
'''
if self.open:
self.close()
if __name__ == "__main__":
from math import *
import time
# Make a worksheet and test set/getCell
xls = Excel()
print(' Setting cell(2,2) to "Hi"')
xls.setCell("Hi", 2, 2)
print(xls.getCell(2,2))
print(' Setting cell(1,2) to "(1,2)"')
xls.setCell("(1,2)", 1, 2)
print(' Setting cell(2,1) to "(1,2)"')
xls.setCell("(2,1)", 2, 1)
xls.visible()
# Test setting a range to a scalar and getting contiguous range
print(' Setting 9,1,12,2 to 0')
xls.setRange(0,9,1,12,2)
print(' Getting same contiguous range back ... expecting matrix(4,2)=0')
value = xls.getContiguousRange(9,1)
print(value)
# Test setting/getting a range from/to a matrix
n = 3
m = 5
x = [0]*n
for i in range(n):
x[i] = [0]*m
for j in range(m):
x[i][j] = i + j
print(' Setting range (3:,4:) to ')
print(x)
xls.setRange(x,3,4) # Auto determination of the bottom corner
print(' Got back from same range ',3,3,3+n-1,4+m-1)
y = xls.getRange(3,4,3+n-1,4+m-1)
print(y)
# Add names for the series that will eventually become the chart
names = []
for i in range(m):
names.append("y%d" % i)
xls.setRange(names,2,4)
# Test selecting a range
print(' Selecting range ', 3,3,3+n-1,4+m-1)
xls.selectRange(3,4,3+n-1,4+m-1)
# Test general matrix
xls.setRange([[1,2],[5,6],["hi","bye"]],1,10,3,11)
# Test making an x-y plot (changes the range selection)
xls.chartRange(2,4,3+n-1,4+m-1,
title='THIS IS THE TITLE',
xlabel='XXXXX',
ylabel='YYYYY')
# Test making an x-y plot just from the data ... use a
# second sheet and the simple chart interface
print(' Creating chart of sin(x) and cos(x) using second sheet')
n = 20
m = 3
h = 2*pi/(n-1)
data = list(range(n+1))
data[0] = ['x', 'sin', 'cos']
for i in range(n):
x = i*h
data[i+1] = (x,sin(x),cos(x))
xls.chartData(data,sheet=2)
# Use a formula to add up the values in one data column.
# Use absolute references for the rows but not the columns, to test
# reuse of the same formula in the adjacent column.
formula = '=sum('+xls.a1(2,2,absrow=1)+':'+xls.a1(21,2,absrow=1)+')'
print(' The formula is ', formula)
xls.setCell('Total',23,1,sheet=2)
xls.setCell(formula,23,2,sheet=2)
xls.setCell(formula,23,3,sheet=2)
# Getting the cell contents back will get the value not the formula
print(' The formula from the sheet is ', xls.getCellFormula(23,2,sheet=2))
print(' The value of the formula (sum of sin) is ', xls.getCell(23,2,sheet=2))
print(' The formula from where there is only the value "Total" is', xls.getCellFormula(23,1,sheet=2))
print(' The formula from where there is nothing ', xls.getCellFormula(23,4,sheet=2))
print(' The value from where there is nothing ', xls.getCell(23,4,sheet=2))
# Make a surface plot by creating a 2-D grid bordered on the
# left and top with strings to indicate the values. Note the
# use of a single quote before the value in the labels in
# order to force Excel to treat them as strings.
print(' Create surface chart of exp(-0.1*r*r)*cos(1.3*r)')
n = 10
h = 2*pi/(n-1)
data = list(range(n+1))
data[0] = list(range(n+1))
data[0][0] = ' '
for i in range(n):
x = i*h-pi
data[i+1] = list(range(n+1))
data[0][i+1] = data[i+1][0] = ("'%5.2f" % x)
for j in range(n):
y = j*h-pi
r = sqrt(x*x+y*y)
data[i+1][j+1] = exp(-0.1*r*r)*cos(1.3*r)
# Specify (row1,col1) to avoid overwriting the previous data
# Also, specify axis ranges to make the animation smoother.
xls.chartData(data,1,5,sheet=2,charttype='surface',
ymin=-1,ymax=1)
# Animate the chart by periodically updating the data range.
nloop = 60
for loop in range(1,nloop+1):
phase = loop*2*pi/(nloop-1)
for i in range(n):
x = i*h-pi
for j in range(n):
y = j*h-pi
r = sqrt(x*x+y*y)
data[i+1][j+1] = exp(-0.1*r*r)*cos(1.3*r+phase)
time.sleep(0.5)
xls.setRange(data,1,5,sheet=2)
# Finally make a chart with all options set
print(' Creating chart of Wisdom vs. Age with all options set, using a third sheet')
n = 81
data = list(range(n+1))
data[0] = ['Age', 'Wisdom']
for i in range(n):
data[i+1] = [i+1, 1.0 + 100.0*exp(-((i-40)**2)/400.0)]
xls.chartData(data,1,1,sheet=3,plotby='columns',charttype='xy',
xmin=1,xmax=80,ymin=1,ymax=100,ylog=1,
title='Wisdom vs. Age', xlabel='Age/years',
ylabel='Wisdom')
| {
"content_hash": "2a0be48218c40e9578b2754e3d8dbcd4",
"timestamp": "",
"source": "github",
"line_count": 494,
"max_line_length": 105,
"avg_line_length": 38.31578947368421,
"alnum_prop": 0.5574281487743026,
"repo_name": "rangsimanketkaew/NWChem",
"id": "f736b3c641da105cd867f21cb3c29970c91b52bd",
"size": "18928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/python/office.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "67582"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.agent import agent
from lib.core.common import arrayizeValue
from lib.core.common import Backend
from lib.core.common import filterPairValues
from lib.core.common import getLimitRange
from lib.core.common import isInferenceAvailable
from lib.core.common import isNoneValue
from lib.core.common import isNumPosStrValue
from lib.core.common import isTechniqueAvailable
from lib.core.common import readInput
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import safeStringFormat
from lib.core.common import unArrayizeValue
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.data import queries
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import DBMS
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import CURRENT_DB
from lib.core.settings import METADB_SUFFIX
from lib.request import inject
from lib.techniques.brute.use import columnExists
from lib.techniques.brute.use import tableExists
class Search:
"""
This class defines search functionalities for plugins.
"""
def __init__(self):
pass
def searchDb(self):
foundDbs = []
rootQuery = queries[Backend.getIdentifiedDbms()].search_db
dbList = conf.db.split(",")
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
dbCond = rootQuery.inband.condition2
else:
dbCond = rootQuery.inband.condition
dbConsider, dbCondParam = self.likeOrExact("database")
for db in dbList:
values = []
db = safeSQLIdentificatorNaming(db)
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
db = db.upper()
infoMsg = "searching database"
if dbConsider == "1":
infoMsg += "s LIKE"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(db)
logger.info(infoMsg)
if conf.excludeSysDbs:
exclDbsQuery = "".join(" AND '%s' != %s" % (unsafeSQLIdentificatorNaming(db), dbCond) for db in self.excludeDbsList)
infoMsg = "skipping system database%s '%s'" % ("s" if len(self.excludeDbsList) > 1 else "", ", ".join(db for db in self.excludeDbsList))
logger.info(infoMsg)
else:
exclDbsQuery = ""
dbQuery = "%s%s" % (dbCond, dbCondParam)
dbQuery = dbQuery % unsafeSQLIdentificatorNaming(db)
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
query = rootQuery.inband.query2
else:
query = rootQuery.inband.query
query = query % (dbQuery + exclDbsQuery)
values = inject.getValue(query, blind=False, time=False)
if not isNoneValue(values):
values = arrayizeValue(values)
for value in values:
value = safeSQLIdentificatorNaming(value)
foundDbs.append(value)
if not values and isInferenceAvailable() and not conf.direct:
infoMsg = "fetching number of database"
if dbConsider == "1":
infoMsg += "s LIKE"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(db)
logger.info(infoMsg)
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
query = rootQuery.blind.count2
else:
query = rootQuery.blind.count
query = query % (dbQuery + exclDbsQuery)
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no database"
if dbConsider == "1":
warnMsg += "s LIKE"
warnMsg += " '%s' found" % unsafeSQLIdentificatorNaming(db)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
query = rootQuery.blind.query2
else:
query = rootQuery.blind.query
query = query % (dbQuery + exclDbsQuery)
query = agent.limitQuery(index, query, dbCond)
value = unArrayizeValue(inject.getValue(query, union=False, error=False))
value = safeSQLIdentificatorNaming(value)
foundDbs.append(value)
conf.dumper.lister("found databases", foundDbs)
def searchTable(self):
bruteForce = False
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
errMsg = "information_schema not available, "
errMsg += "back-end DBMS is MySQL < 5.0"
bruteForce = True
if bruteForce:
message = "do you want to use common table existence check? %s" % ("[Y/n/q]" if Backend.getIdentifiedDbms() in (DBMS.ACCESS,) else "[y/N/q]")
test = readInput(message, default="Y" if "Y" in message else "N")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
regex = "|".join(conf.tbl.split(","))
return tableExists(paths.COMMON_TABLES, regex)
foundTbls = {}
tblList = conf.tbl.split(",")
rootQuery = queries[Backend.getIdentifiedDbms()].search_table
tblCond = rootQuery.inband.condition
dbCond = rootQuery.inband.condition2
tblConsider, tblCondParam = self.likeOrExact("table")
for tbl in tblList:
values = []
tbl = safeSQLIdentificatorNaming(tbl, True)
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2, DBMS.FIREBIRD):
tbl = tbl.upper()
infoMsg = "searching table"
if tblConsider == "1":
infoMsg += "s LIKE"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(tbl)
if dbCond and conf.db and conf.db != CURRENT_DB:
_ = conf.db.split(",")
whereDbsQuery = " AND (" + " OR ".join("%s = '%s'" % (dbCond, unsafeSQLIdentificatorNaming(db)) for db in _) + ")"
infoMsg += " for database%s '%s'" % ("s" if len(_) > 1 else "", ", ".join(db for db in _))
elif conf.excludeSysDbs:
whereDbsQuery = "".join(" AND '%s' != %s" % (unsafeSQLIdentificatorNaming(db), dbCond) for db in self.excludeDbsList)
infoMsg2 = "skipping system database%s '%s'" % ("s" if len(self.excludeDbsList) > 1 else "", ", ".join(db for db in self.excludeDbsList))
logger.info(infoMsg2)
else:
whereDbsQuery = ""
logger.info(infoMsg)
tblQuery = "%s%s" % (tblCond, tblCondParam)
tblQuery = tblQuery % unsafeSQLIdentificatorNaming(tbl)
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
query = rootQuery.inband.query
query = query % (tblQuery + whereDbsQuery)
values = inject.getValue(query, blind=False, time=False)
if values and Backend.getIdentifiedDbms() in (DBMS.SQLITE, DBMS.FIREBIRD):
newValues = []
if isinstance(values, basestring):
values = [values]
for value in values:
dbName = "SQLite" if Backend.isDbms(DBMS.SQLITE) else "Firebird"
newValues.append(["%s%s" % (dbName, METADB_SUFFIX), value])
values = newValues
for foundDb, foundTbl in filterPairValues(values):
foundDb = safeSQLIdentificatorNaming(foundDb)
foundTbl = safeSQLIdentificatorNaming(foundTbl, True)
if foundDb is None or foundTbl is None:
continue
if foundDb in foundTbls:
foundTbls[foundDb].append(foundTbl)
else:
foundTbls[foundDb] = [foundTbl]
if not values and isInferenceAvailable() and not conf.direct:
if Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.FIREBIRD):
if len(whereDbsQuery) == 0:
infoMsg = "fetching number of databases with table"
if tblConsider == "1":
infoMsg += "s LIKE"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(tbl)
logger.info(infoMsg)
query = rootQuery.blind.count
query = query % (tblQuery + whereDbsQuery)
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no databases have table"
if tblConsider == "1":
warnMsg += "s LIKE"
warnMsg += " '%s'" % unsafeSQLIdentificatorNaming(tbl)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query
query = query % (tblQuery + whereDbsQuery)
query = agent.limitQuery(index, query)
foundDb = unArrayizeValue(inject.getValue(query, union=False, error=False))
foundDb = safeSQLIdentificatorNaming(foundDb)
if foundDb not in foundTbls:
foundTbls[foundDb] = []
if tblConsider == "2":
foundTbls[foundDb].append(tbl)
if tblConsider == "2":
continue
else:
for db in conf.db.split(",") if conf.db else (self.getCurrentDb(),):
db = safeSQLIdentificatorNaming(db)
if db not in foundTbls:
foundTbls[db] = []
else:
dbName = "SQLite" if Backend.isDbms(DBMS.SQLITE) else "Firebird"
foundTbls["%s%s" % (dbName, METADB_SUFFIX)] = []
for db in foundTbls.keys():
db = safeSQLIdentificatorNaming(db)
infoMsg = "fetching number of table"
if tblConsider == "1":
infoMsg += "s LIKE"
infoMsg += " '%s' in database '%s'" % (unsafeSQLIdentificatorNaming(tbl), unsafeSQLIdentificatorNaming(db))
logger.info(infoMsg)
query = rootQuery.blind.count2
if Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.FIREBIRD):
query = query % unsafeSQLIdentificatorNaming(db)
query += " AND %s" % tblQuery
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no table"
if tblConsider == "1":
warnMsg += "s LIKE"
warnMsg += " '%s' " % unsafeSQLIdentificatorNaming(tbl)
warnMsg += "in database '%s'" % unsafeSQLIdentificatorNaming(db)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query2
if query.endswith("'%s')"):
query = query[:-1] + " AND %s)" % tblQuery
else:
query += " AND %s" % tblQuery
if Backend.isDbms(DBMS.FIREBIRD):
query = safeStringFormat(query, index)
if Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.FIREBIRD):
query = safeStringFormat(query, unsafeSQLIdentificatorNaming(db))
if not Backend.isDbms(DBMS.FIREBIRD):
query = agent.limitQuery(index, query)
foundTbl = unArrayizeValue(inject.getValue(query, union=False, error=False))
if not isNoneValue(foundTbl):
kb.hintValue = foundTbl
foundTbl = safeSQLIdentificatorNaming(foundTbl, True)
foundTbls[db].append(foundTbl)
for db in foundTbls.keys():
if isNoneValue(foundTbls[db]):
del foundTbls[db]
if not foundTbls:
warnMsg = "no databases contain any of the provided tables"
logger.warn(warnMsg)
return
conf.dumper.dbTables(foundTbls)
self.dumpFoundTables(foundTbls)
def searchColumn(self):
bruteForce = False
if Backend.isDbms(DBMS.MYSQL) and not kb.data.has_information_schema:
errMsg = "information_schema not available, "
errMsg += "back-end DBMS is MySQL < 5.0"
bruteForce = True
if bruteForce:
message = "do you want to use common column existence check? %s" % ("[Y/n/q]" if Backend.getIdentifiedDbms() in (DBMS.ACCESS,) else "[y/N/q]")
test = readInput(message, default="Y" if "Y" in message else "N")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
else:
regex = '|'.join(conf.col.split(','))
conf.dumper.dbTableColumns(columnExists(paths.COMMON_COLUMNS, regex))
message = "do you want to dump entries? [Y/n] "
output = readInput(message, default="Y")
if output and output[0] not in ("n", "N"):
self.dumpAll()
return
rootQuery = queries[Backend.getIdentifiedDbms()].search_column
foundCols = {}
dbs = {}
whereDbsQuery = ""
whereTblsQuery = ""
infoMsgTbl = ""
infoMsgDb = ""
colList = conf.col.split(",")
if conf.excludeCol:
colList = [_ for _ in colList if _ not in conf.excludeCol.split(',')]
origTbl = conf.tbl
origDb = conf.db
colCond = rootQuery.inband.condition
dbCond = rootQuery.inband.condition2
tblCond = rootQuery.inband.condition3
colConsider, colCondParam = self.likeOrExact("column")
for column in colList:
values = []
column = safeSQLIdentificatorNaming(column)
conf.db = origDb
conf.tbl = origTbl
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
column = column.upper()
infoMsg = "searching column"
if colConsider == "1":
infoMsg += "s LIKE"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(column)
foundCols[column] = {}
if conf.tbl:
_ = conf.tbl.split(",")
whereTblsQuery = " AND (" + " OR ".join("%s = '%s'" % (tblCond, unsafeSQLIdentificatorNaming(tbl)) for tbl in _) + ")"
infoMsgTbl = " for table%s '%s'" % ("s" if len(_) > 1 else "", ", ".join(unsafeSQLIdentificatorNaming(tbl) for tbl in _))
if conf.db and conf.db != CURRENT_DB:
_ = conf.db.split(",")
whereDbsQuery = " AND (" + " OR ".join("%s = '%s'" % (dbCond, unsafeSQLIdentificatorNaming(db)) for db in _) + ")"
infoMsgDb = " in database%s '%s'" % ("s" if len(_) > 1 else "", ", ".join(unsafeSQLIdentificatorNaming(db) for db in _))
elif conf.excludeSysDbs:
whereDbsQuery = "".join(" AND %s != '%s'" % (dbCond, unsafeSQLIdentificatorNaming(db)) for db in self.excludeDbsList)
infoMsg2 = "skipping system database%s '%s'" % ("s" if len(self.excludeDbsList) > 1 else "", ", ".join(unsafeSQLIdentificatorNaming(db) for db in self.excludeDbsList))
logger.info(infoMsg2)
else:
infoMsgDb = " across all databases"
logger.info("%s%s%s" % (infoMsg, infoMsgTbl, infoMsgDb))
colQuery = "%s%s" % (colCond, colCondParam)
colQuery = colQuery % unsafeSQLIdentificatorNaming(column)
if any(isTechniqueAvailable(_) for _ in (PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.QUERY)) or conf.direct:
if not all((conf.db, conf.tbl)):
# Enumerate the tables containing the provided column when the
# database(s) and/or table(s) are not provided
query = rootQuery.inband.query
query = query % (colQuery + whereDbsQuery + whereTblsQuery)
values = inject.getValue(query, blind=False, time=False)
else:
# Assume provided databases' tables contain the
# column(s) provided
values = []
for db in conf.db.split(","):
for tbl in conf.tbl.split(","):
values.append([safeSQLIdentificatorNaming(db), safeSQLIdentificatorNaming(tbl, True)])
for db, tbl in filterPairValues(values):
db = safeSQLIdentificatorNaming(db)
tbls = tbl.split(",") if not isNoneValue(tbl) else []
for tbl in tbls:
tbl = safeSQLIdentificatorNaming(tbl, True)
if db is None or tbl is None:
continue
conf.db = db
conf.tbl = tbl
conf.col = column
self.getColumns(onlyColNames=True, colTuple=(colConsider, colCondParam), bruteForce=False)
if db in kb.data.cachedColumns and tbl in kb.data.cachedColumns[db]:
if db not in dbs:
dbs[db] = {}
if tbl not in dbs[db]:
dbs[db][tbl] = {}
dbs[db][tbl].update(kb.data.cachedColumns[db][tbl])
if db in foundCols[column]:
foundCols[column][db].append(tbl)
else:
foundCols[column][db] = [tbl]
kb.data.cachedColumns = {}
if not values and isInferenceAvailable() and not conf.direct:
if not conf.db:
infoMsg = "fetching number of databases with tables containing column"
if colConsider == "1":
infoMsg += "s LIKE"
infoMsg += " '%s'" % unsafeSQLIdentificatorNaming(column)
logger.info("%s%s%s" % (infoMsg, infoMsgTbl, infoMsgDb))
query = rootQuery.blind.count
query = query % (colQuery + whereDbsQuery + whereTblsQuery)
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no databases have tables containing column"
if colConsider == "1":
warnMsg += "s LIKE"
warnMsg += " '%s'" % unsafeSQLIdentificatorNaming(column)
logger.warn("%s%s" % (warnMsg, infoMsgTbl))
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query
query = query % (colQuery + whereDbsQuery + whereTblsQuery)
query = agent.limitQuery(index, query)
db = unArrayizeValue(inject.getValue(query, union=False, error=False))
db = safeSQLIdentificatorNaming(db)
if db not in dbs:
dbs[db] = {}
if db not in foundCols[column]:
foundCols[column][db] = []
else:
for db in conf.db.split(",") if conf.db else (self.getCurrentDb(),):
db = safeSQLIdentificatorNaming(db)
if db not in foundCols[column]:
foundCols[column][db] = []
origDb = conf.db
origTbl = conf.tbl
for column, dbData in foundCols.items():
colQuery = "%s%s" % (colCond, colCondParam)
colQuery = colQuery % unsafeSQLIdentificatorNaming(column)
for db in dbData:
conf.db = origDb
conf.tbl = origTbl
infoMsg = "fetching number of tables containing column"
if colConsider == "1":
infoMsg += "s LIKE"
infoMsg += " '%s' in database '%s'" % (unsafeSQLIdentificatorNaming(column), unsafeSQLIdentificatorNaming(db))
logger.info(infoMsg)
query = rootQuery.blind.count2
query = query % unsafeSQLIdentificatorNaming(db)
query += " AND %s" % colQuery
query += whereTblsQuery
count = inject.getValue(query, union=False, error=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
warnMsg = "no tables contain column"
if colConsider == "1":
warnMsg += "s LIKE"
warnMsg += " '%s' " % unsafeSQLIdentificatorNaming(column)
warnMsg += "in database '%s'" % unsafeSQLIdentificatorNaming(db)
logger.warn(warnMsg)
continue
indexRange = getLimitRange(count)
for index in indexRange:
query = rootQuery.blind.query2
if query.endswith("'%s')"):
query = query[:-1] + " AND %s)" % (colQuery + whereTblsQuery)
else:
query += " AND %s" % (colQuery + whereTblsQuery)
query = safeStringFormat(query, unsafeSQLIdentificatorNaming(db))
query = agent.limitQuery(index, query)
tbl = unArrayizeValue(inject.getValue(query, union=False, error=False))
kb.hintValue = tbl
tbl = safeSQLIdentificatorNaming(tbl, True)
conf.db = db
conf.tbl = tbl
conf.col = column
self.getColumns(onlyColNames=True, colTuple=(colConsider, colCondParam), bruteForce=False)
if db in kb.data.cachedColumns and tbl in kb.data.cachedColumns[db]:
if db not in dbs:
dbs[db] = {}
if tbl not in dbs[db]:
dbs[db][tbl] = {}
dbs[db][tbl].update(kb.data.cachedColumns[db][tbl])
kb.data.cachedColumns = {}
if db in foundCols[column]:
foundCols[column][db].append(tbl)
else:
foundCols[column][db] = [tbl]
if dbs:
conf.dumper.dbColumns(foundCols, colConsider, dbs)
self.dumpFoundColumn(dbs, foundCols, colConsider)
else:
warnMsg = "no databases have tables containing any of the "
warnMsg += "provided columns"
logger.warn(warnMsg)
def search(self):
if Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
for item in ('db', 'tbl', 'col'):
if getattr(conf, item, None):
setattr(conf, item, getattr(conf, item).upper())
if conf.col:
self.searchColumn()
elif conf.tbl:
self.searchTable()
elif conf.db:
self.searchDb()
else:
errMsg = "missing parameter, provide -D, -T or -C along "
errMsg += "with --search"
raise SqlmapMissingMandatoryOptionException(errMsg)
| {
"content_hash": "7b3e044a7fca497278d79883697089b7",
"timestamp": "",
"source": "github",
"line_count": 604,
"max_line_length": 183,
"avg_line_length": 43.50993377483444,
"alnum_prop": 0.5126331811263318,
"repo_name": "glaudsonml/kurgan-ai",
"id": "8686f9877acd552100ffee7afe8730d058cd9c94",
"size": "26303",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/sqlmap/plugins/generic/search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "122729"
},
{
"name": "HTML",
"bytes": "48894"
},
{
"name": "JavaScript",
"bytes": "1589671"
},
{
"name": "PHP",
"bytes": "72064"
},
{
"name": "Python",
"bytes": "211839"
},
{
"name": "Shell",
"bytes": "5722"
}
],
"symlink_target": ""
} |
"""Example showing the joining and splitting of tuple streams."""
from pycascading.helpers import *
@udf_map(produces=['ucase_lhs2', 'rhs2'])
def upper_case(tuple):
"""Return the upper case of the 'lhs2' column, and the 'rhs2' column"""
return [tuple.get('lhs2').upper(), tuple.get('rhs2')]
def main():
flow = Flow()
lhs = flow.source(Hfs(TextDelimited(Fields(['col1', 'col2']), ' ',
[Integer, String]),
'pycascading_data/lhs.txt'))
rhs = flow.source(Hfs(TextDelimited(Fields(['col1', 'col2']), ' ',
[Integer, String]),
'pycascading_data/rhs.txt'))
output1 = flow.tsv_sink('pycascading_data/out1')
output2 = flow.tsv_sink('pycascading_data/out2')
# Join on the first columns ('col1' for both) of lhs and rhs inputs
# We need to use declared_fields since the field names
# of the two pipes overlap
p = (lhs & rhs) | inner_join(['col1', 'col1'],
declared_fields=['lhs1', 'lhs2', 'rhs1', 'rhs2'])
# Save the 2nd and 4th columns of p to output1
p | retain('lhs2', 'rhs2') | output1
# Join on the upper-cased first column of p and the 2nd column of rhs,
# and save the output to output2
((p | upper_case) & (rhs | retain('col2'))) | \
inner_join(['ucase_lhs2', 'col2']) | output2
flow.run(num_reducers=2)
| {
"content_hash": "03e8e1ccc4f8a04412d726c31ae76c8c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 82,
"avg_line_length": 38.578947368421055,
"alnum_prop": 0.5702592087312415,
"repo_name": "twitter/pycascading",
"id": "e08550d8602f81a7cd096146f1720ac79e90ea3e",
"size": "2043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/joins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "68837"
},
{
"name": "Python",
"bytes": "76261"
},
{
"name": "Shell",
"bytes": "12992"
}
],
"symlink_target": ""
} |
import core
PLUGINVERSION = 2
# Always name this variable `plugin`
# If you don't, the module loader will fail to load the plugin!
plugin = core.Plugin()
@plugin.command(command="/hi",
description="Says 'Hi, %username%'",
inline_supported=True,
hidden=False)
def hi(bot, update, user, args):
return core.message(text="Hi, @%s" % user.username)
| {
"content_hash": "0cf56d48a2f5b729d89801c635cc5194",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 58,
"avg_line_length": 35.81818181818182,
"alnum_prop": 0.631979695431472,
"repo_name": "ProtoxiDe22/Octeon",
"id": "8f7cbcc9ecd2de306328dd0eef1d50f5e6094aa6",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/python-telegram-bot",
"path": "examples/Command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28666"
}
],
"symlink_target": ""
} |
import pytest
import salt.config
import salt.loader
import salt.client
from utils.path import PathHelper
# import watchdog
# import time
_config = '/etc/salt/minion'
_opts = salt.config.minion_config(_config)
_opts['pillar_raise_on_missing'] = True
_grains = salt.loader.grains(_opts)
_opts['grains'] = _grains
_utils = salt.loader.utils(_opts)
_salt = salt.loader.minion_mods(_opts, utils=_utils)
@pytest.fixture
def __opts__():
return _opts
@pytest.fixture
def __grains__():
return _grains
@pytest.fixture
def __utils__():
return _utils
@pytest.fixture
def __salt__():
return _salt
@pytest.fixture
def __envs__():
return ['base', 'dev', 'stg', 'prd']
@pytest.fixture
def path_helper():
return PathHelper
# from watchdog.observers import Observer
# from watchdog.events import PatternMatchingEventHandler
# class MyEventHandler(PatternMatchingEventHandler):
# def __init__(self, ignore_patterns):
# super(MyEventHandler, self).__init__(ignore_patterns=ignore_patterns)
# self.files = []
# def on_any_event(self, event):
# self.files.append(event.src_path)
# @pytest.fixture
# def filesystem_watch(env, state):
# pass
# event_handler = MyEventHandler(ignore_patterns=["/var/cache/*", "/var/log/*", "/tmp/*"])
# observer = Observer()
# observer.schedule(event_handler, "/", recursive=True)
# observer.start()
# yield
# time.sleep(2)
# observer.stop()
# observer.join()
# print event_handler.files
# assert False
| {
"content_hash": "997ac6ea121e5ae250c695f36a0e10f7",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 94,
"avg_line_length": 22.441176470588236,
"alnum_prop": 0.6651376146788991,
"repo_name": "yagnik/saltstack-template",
"id": "417e8c09f7e650833842c45df928436ccad5bbe5",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2461"
},
{
"name": "Makefile",
"bytes": "614"
},
{
"name": "Python",
"bytes": "19892"
},
{
"name": "SaltStack",
"bytes": "1476"
}
],
"symlink_target": ""
} |
from tastypie.test import ResourceTestCase
from django.utils.unittest import skip
class UsersResourceTest(ResourceTestCase):
fixtures = [
'users_api_users.json'
]
DETAIL_URL = '/users/%s/'
LIST_URL = '/users/'
USER_ID = 1
USERNAME = 'admin'
PASSWORD = 'admin'
ADMIN_RESPONSE = {
u'username': u'admin',
u'resourceUri': u'/users/1/',
u'firstName': u'',
u'lastName': u'',
u'isSuperuser': True,
u'isStaff': True,
u'email': u'admin@admin.com',
u'isActive': True
}
@property
def detail_uri(self):
return self.DETAIL_URL % self.USER_ID
@property
def list_uri(self):
return self.LIST_URL
def login(self):
self.api_client.client.login(username=self.USERNAME,
password=self.PASSWORD)
def logout(self):
self.api_client.client.logout()
###########################################################################
# NOT ALLOWED
###########################################################################
def test_delete_list_not_allowed(self):
self.login()
response = self.api_client.delete(self.list_uri)
self.assertHttpMethodNotAllowed(response)
def test_put_list_not_allowed(self):
self.login()
response = self.api_client.put(self.list_uri)
self.assertHttpMethodNotAllowed(response)
def test_post_detail_not_allowed(self):
self.login()
data = {
'username': 'newusers',
'password': 'newpass'
}
response = self.api_client.post(self.detail_uri, data=data)
self.assertHttpMethodNotAllowed(response)
###########################################################################
# UNAUTHENTICATED
###########################################################################
def test_get_list_unauthenticated(self):
response = self.api_client.get(self.list_uri)
self.assertHttpUnauthorized(response)
def test_get_detail_unauthenticated(self):
response = self.api_client.get(self.detail_uri)
self.assertHttpUnauthorized(response)
def test_post_list_unauthenticated(self):
data = {
'username': 'newusers',
'password': 'newpass'
}
response = self.api_client.post(self.list_uri, data=data)
self.assertHttpUnauthorized(response)
def test_put_detail_unauthenticated(self):
data = {
'username': 'newusers',
'password': 'newpass'
}
response = self.api_client.put(self.detail_uri, data=data)
self.assertHttpUnauthorized(response)
def test_delete_detail_unauthenticated(self):
response = self.api_client.delete(self.detail_uri)
self.assertHttpUnauthorized(response)
###########################################################################
# UNAUTHORIZED
###########################################################################
def test_post_list_unauthorized(self):
self.USERNAME = 'user1'
self.PASSWORD = 'user1'
self.login()
data = {
'username': 'newusers',
'password': 'newpass'
}
response = self.api_client.post(self.list_uri, data=data)
self.assertHttpUnauthorized(response)
def test_put_detail_unauthorized(self):
self.USERNAME = 'user1'
self.PASSWORD = 'user1'
self.login()
data = {
'username': 'newusers',
'password': 'newpass'
}
response = self.api_client.put(self.detail_uri, data=data)
self.assertHttpUnauthorized(response)
def test_delete_detail_unauthorized(self):
self.USERNAME = 'user1'
self.PASSWORD = 'user1'
self.login()
response = self.api_client.delete(self.detail_uri)
self.assertHttpUnauthorized(response)
###########################################################################
# GET
###########################################################################
def test_get_list_superuser(self):
self.login()
response = self.api_client.get(self.list_uri)
self.assertHttpOK(response)
self.assertValidJSONResponse(response)
res = self.deserialize(response)
self.assertEqual(res['meta']['totalCount'], 2)
self.assertDictContainsSubset(self.ADMIN_RESPONSE, res['objects'][0])
def test_get_detail_superuser(self):
self.login()
response = self.api_client.get(self.detail_uri)
self.assertHttpOK(response)
self.assertValidJSONResponse(response)
res = self.deserialize(response)
self.assertDictContainsSubset(self.ADMIN_RESPONSE, res)
def test_get_list_normal_user(self):
self.USERNAME = 'user1'
self.PASSWORD = 'user1'
self.login()
response = self.api_client.get(self.list_uri)
self.assertHttpOK(response)
res = self.deserialize(response)
self.assertEqual(res['meta']['totalCount'], 2)
def test_get_detail_normal_user(self):
self.USERNAME = 'user1'
self.PASSWORD = 'user1'
self.login()
response = self.api_client.get(self.detail_uri)
self.assertHttpOK(response)
res = self.deserialize(response)
self.assertDictContainsSubset(self.ADMIN_RESPONSE, res)
###########################################################################
# POST
###########################################################################
def test_post_list(self):
self.login()
data = {
'username': 'user2',
'password': 'user2',
'email': 'user2@admin.com'
}
response = self.api_client.post(self.list_uri, data=data)
self.assertHttpCreated(response)
res = self.deserialize(response)
self.assertNotIn('password', res)
resource_uri = res['resourceUri']
response = self.api_client.get(resource_uri)
self.assertDictContainsSubset({
u'username': u'user2',
u'resourceUri': resource_uri,
u'firstName': u'',
u'lastName': u'',
u'isSuperuser': False,
u'isStaff': False,
u'email': u'user2@admin.com',
u'isActive': True
}, self.deserialize(response))
def test_post_list_invalid(self):
self.login()
data = {}
response = self.api_client.post(self.list_uri, data=data)
self.assertHttpBadRequest(response)
data = {'username': 'user2'}
response = self.api_client.post(self.list_uri, data=data)
self.assertHttpBadRequest(response)
data = {'password': 'user2'}
response = self.api_client.post(self.list_uri, data=data)
self.assertHttpBadRequest(response)
data = {
'username': 'user2',
'password': 'user2',
'email': 'INVALID_EMAIL'
}
response = self.api_client.post(self.list_uri, data=data)
self.assertHttpBadRequest(response)
###########################################################################
# PUT
###########################################################################
def test_put_detail(self):
self.login()
data = {
'firstName': 'Admin',
'lastName': 'Admin Last'
}
response = self.api_client.put(self.detail_uri, data=data)
self.assertHttpOK(response)
self.assertDictContainsSubset(data, self.deserialize(response))
# Another user
data = {
'firstName': 'User 1',
'lastName': '1 User'
}
self.USER_ID = 2
response = self.api_client.put(self.detail_uri, data=data)
self.assertHttpOK(response)
self.assertDictContainsSubset(data, self.deserialize(response))
def test_put_detail_superuser_change_password(self):
self.login()
self.USER_ID = 2
data = {
'password': 'new password'
}
response = self.api_client.put(self.detail_uri, data=data)
self.assertHttpOK(response)
self.logout()
self.USERNAME = 'user1'
self.PASSWORD = data['password']
self.login()
response = self.api_client.get(self.list_uri)
self.assertHttpOK(response)
@skip('Requires a different authorization setup')
def test_put_detail_owner_change_password(self):
self.USERNAME = 'user1'
self.PASSWORD = 'user1'
self.USER_ID = 2
self.login()
data = {
'password': 'new password'
}
response = self.api_client.put(self.detail_uri, data=data)
self.assertHttpOK(response)
self.logout()
# Try to use old password, and fail!
self.login()
response = self.api_client.get(self.list_uri)
self.assertHttpUnauthorized(response)
def test_put_detail_invalid(self):
self.login()
data = {
'email': 'INVALID_EMAIL'
}
response = self.api_client.put(self.detail_uri, data=data)
self.assertHttpBadRequest(response)
def test_put_detail_not_found(self):
self.login()
data = {
'email': 'INVALID_EMAIL'
}
self.USER_ID = 100
response = self.api_client.put(self.detail_uri, data=data)
self.assertHttpBadRequest(response)
###########################################################################
# DELETE
###########################################################################
def test_delete_detail(self):
self.login()
self.USER_ID = 2
response = self.api_client.delete(self.detail_uri)
self.assertHttpAccepted(response)
def test_delete_detail_not_found(self):
self.login()
self.USER_ID = 200
response = self.api_client.delete(self.detail_uri)
self.assertHttpNotFound(response)
| {
"content_hash": "ea9a3b66cf55beebb4280a2df2e29310",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 79,
"avg_line_length": 30.080118694362017,
"alnum_prop": 0.5241195620005918,
"repo_name": "mohabusama/django-users-api",
"id": "51317e709ce8c69b66451a223cb30221b23cb6b1",
"size": "10137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users_api/tests/users.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66466"
}
],
"symlink_target": ""
} |
"""
WSGI config for mong project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
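# A minimal sketch of such a wrapper, left commented out like the example
# above; the HeaderMiddleware name and header value are illustrative only:
# class HeaderMiddleware(object):
#     def __init__(self, wrapped):
#         self.wrapped = wrapped
#     def __call__(self, environ, start_response):
#         def _start_response(status, headers, exc_info=None):
#             headers.append(('X-Example', 'wsgi-middleware'))
#             return start_response(status, headers, exc_info)
#         return self.wrapped(environ, _start_response)
# application = HeaderMiddleware(application)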
| {
"content_hash": "05ecc7e66b554401055b334038309ead",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 40.32142857142857,
"alnum_prop": 0.7989371124889283,
"repo_name": "tarvitz/djtp",
"id": "8eefff5e8b752613156ce043e16e9fe4ed2948a5",
"size": "1129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "86715"
},
{
"name": "HTML",
"bytes": "12887"
},
{
"name": "JavaScript",
"bytes": "13426"
},
{
"name": "Python",
"bytes": "99550"
},
{
"name": "Shell",
"bytes": "555"
}
],
"symlink_target": ""
} |
import random
from sys import maxsize
mentionStrings = [
'Also wirklich mal, {usr}!',
'{usr}, was fällt dir ein?!',
'Falls du es nicht mitbekommen hast, {usr}...',
'Ey, {usr}!',
'Sag doch auch mal was, {usr}!'
]
notTalkingStrings = [
'Mich interessiert nicht, was du mir sagst, rede mit anderen!',
'Kümmere dich um deinen Kram, {usr}!',
'Hey {usr}, warum sollte ich mit mir selbst reden?!'
]
selfMentionStrings = [
'Sag mal {usr}, brauchst du eigentlich viel Aufmerksamkeit?',
'{usr}, warum musst du dich immer selbst hervorheben?',
'Du willst Aufmerksamkeit? Hier: {usr}',
'Ich muss schon sagen, {usr}, du brauchst ziemlich viel Aufmerksamkeit...',
'Willst du, dass auch mal jemand mit dir schreibt und du erwähnst dich \
deshalb selbst, {usr}?',
'{usr}, bitte. Ich brauche dich nicht erinnern, dass du mit DIR redest...'
]
def buildmessage(username):
random.seed()
randstring = mentionStrings[
random.randint(0, maxsize) % len(mentionStrings)]
message = randstring.format(usr=username)
return message
def noBotMessage(user):
random.seed()
randstring = notTalkingStrings[
random.randint(0, maxsize) % len(notTalkingStrings)]
message = randstring.format(usr=user.name)
return message
def selfMention(user):
random.seed()
randstring = selfMentionStrings[
random.randint(0, maxsize) % len(selfMentionStrings)]
message = randstring.format(usr=user.name)
return message
| {
"content_hash": "e26effd41ca931d6fddbdefe05e76e7a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 30.18,
"alnum_prop": 0.6719681908548708,
"repo_name": "h4llow3En/IAmTalkingToYouBot",
"id": "210d821b014476688b12bb87dfecd18d6405a21f",
"size": "1512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reprimandUser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6789"
}
],
"symlink_target": ""
} |
import xml.etree.ElementTree as ET
from decimal import Decimal
import sys
import os
# Vendor Libraries
import yaml
from jinja2 import Template
def build(iconset_id='fa', include_ids=[]):
tree = ET.parse('res/fontawesome-webfont.svg')
root = tree.getroot()
namespaces = {'svg': 'http://www.w3.org/2000/svg'}
glyph_map = {}
for glyph in root.findall('.//svg:glyph', namespaces):
glyph_map[hex(ord(glyph.get('unicode')))[2:]] = glyph
iconsdf = open('res/icons.yml', 'r')
iconsd = yaml.safe_load(iconsdf)
iconsdf.close()
icond_map = {}
for ii in iconsd['icons']:
icond_map[ii['unicode']] = ii
default_width = 1792
glyphs = []
for key, idef in icond_map.items():
if key in glyph_map and (len(include_ids) == 0 or idef['id'] in include_ids):
glyph = idef
glyphel = glyph_map[key]
glyph['d'] = glyphel.get('d')
glyph['horiz-adv-x'] = glyphel.get('horiz-adv-x')
scale = 1
x_margin = 0
if glyph['horiz-adv-x']:
custom_width = int(glyph['horiz-adv-x'])
if custom_width < default_width:
x_margin = (default_width - custom_width) / 2
if custom_width > default_width:
scale = (Decimal(default_width) / Decimal(custom_width)).quantize(Decimal('0.0001'))
glyph['x_margin'] = x_margin
glyph['scale'] = str(scale)
glyphs.append(glyph)
context = {
'iconset_id': iconset_id,
'glyphs': glyphs,
'default_width': default_width,
}
if not os.path.exists('build'):
os.makedirs('build')
etemplatef = open('res/element-template.html', 'r')
etemplate = Template(etemplatef.read())
etemplatef.close()
econtent = etemplate.render(**context)
efile = open('build/%s-icons.html' % iconset_id, 'w')
efile.write(econtent)
efile.close()
demotemplatef = open('res/demo-template.html', 'r')
demotemplate = Template(demotemplatef.read())
demotemplatef.close()
democontent = demotemplate.render(**context)
dfile = open('build/demo.html', 'w')
dfile.write(democontent)
dfile.close()
if __name__ == "__main__":
usage = "./makefaicons.py <iconset-id> [icon_id1] [icon_id2] ..."
try:
build(iconset_id=sys.argv[1], include_ids=sys.argv[2:])
except IndexError:
print(usage)
| {
"content_hash": "3f4006dda7fd7176d033fbe66fadad57",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 110,
"avg_line_length": 28.227272727272727,
"alnum_prop": 0.572866344605475,
"repo_name": "philya/font-awesome-polymer-icons-generator",
"id": "4b8a61d7946f3f16d1a6c49f7799e21c4add1f00",
"size": "2526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makefaicons.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2526"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.template import Library
from ..admin import SuperInlineModelAdmin
register = Library()
@register.simple_tag(takes_context=True)
def get_sub_inline_formsets(context, inline, original, index, is_template):
if not isinstance(inline, SuperInlineModelAdmin):
return ()
request = context['request']
formsets, inline_instances = inline._create_formsets(
request, obj=original, change=original is not None, index=index,
is_template=is_template)
return inline.get_inline_formsets(request, formsets, inline_instances,
obj=original)
| {
"content_hash": "cf32fcf53fcf85d0964637d5deff971e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 75,
"avg_line_length": 34.578947368421055,
"alnum_prop": 0.6971080669710806,
"repo_name": "BertrandBordage/django-super-inlines",
"id": "54cc41ac5b2414461808ff20fc289898ad47b8e1",
"size": "674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "super_inlines/templatetags/super_inlines.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "38738"
},
{
"name": "JavaScript",
"bytes": "14549"
},
{
"name": "Python",
"bytes": "6505"
}
],
"symlink_target": ""
} |