| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
print('y1')
def main(filename):
raw_data = load_data(filename)
# TODO - Clean data
print(raw_data[0])
return raw_data
def load_data(filename):
with open(filename, 'r') as file:
raw = file.read()
    raw_data = raw.split('\n')  # one frame per line
return raw_data
if __name__ == "__main__":
print(sys.argv)
args = sys.argv[1:]
    filename = os.path.join('sonic_pi_face', 'data', args[0])
main(filename)
|
JustinShenk/sonic-face
|
analyze.py
|
Python
|
mit
| 514
|
import pandas as pd
# TODO:
# Load up the dataset, setting correct header labels.
df = pd.read_csv('Datasets/census.data', names=['education', 'age', 'capital-gain', 'race', 'capital-loss',
'hours-per-week', 'sex', 'classification'])
# TODO:
# Use basic pandas commands to look through the dataset... get a
# feel for it before proceeding! Do the data-types of each column
# reflect the values you see when you look through the data using
# a text editor / spreadsheet program? If you see 'object' where
# you expect to see 'int32' / 'float64', that is a good indicator
# that there is probably a string or missing value in a column.
# use `your_data_frame['your_column'].unique()` to see the unique
# values of each column and identify the rogue values. If these
# should be represented as nans, you can convert them using
# na_values when loading the dataframe.
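# A minimal inspection sketch of the checks described above (illustrative
# prints; safe to remove once the rogue values have been identified):
print(df.dtypes)
print(df['capital-gain'].unique())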
df['capital-gain'] = pd.to_numeric(df['capital-gain'], errors='coerce')
df = df.apply(lambda x: pd.to_numeric(x, errors='ignore'))  # assign back; apply does not modify in place
# TODO:
# Look through your data and identify any potential categorical
# features. Ensure you properly encode any ordinal and nominal
# types using the methods discussed in the chapter.
#
# Be careful! Some features can be represented as either categorical
# or continuous (numerical). Think to yourself, does it generally
# make more sense to have a numeric type or a series of categories
# for these somewhat ambiguous features?
ordered_education = df.education.unique()
ordered_classification = df.classification.unique()
# astype('category', categories=..., ordered=True) was removed in newer
# pandas; build an explicit CategoricalDtype instead.
edu_dtype = pd.CategoricalDtype(categories=ordered_education, ordered=True)
cls_dtype = pd.CategoricalDtype(categories=ordered_classification, ordered=True)
df.education = df.education.astype(edu_dtype).cat.codes
df.classification = df.classification.astype(cls_dtype).cat.codes
df = pd.get_dummies(df, columns=['race', 'sex'])
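# get_dummies replaces each listed column with one indicator column per
# category (e.g. 'sex' -> 'sex_Male', 'sex_Female'; the exact names depend
# on the values present in the data).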
# TODO:
# Print out your dataframe
print(df)
|
Wittlich/DAT210x-Python
|
Module2/assignment5.py
|
Python
|
mit
| 1,879
|
#!/usr/bin/env python
#-------------------------------------------------------------------
# The MIT License
#
# Copyright (c) 2009 Patrick Mueller
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#-------------------------------------------------------------------
import os
import sys
lib_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../lib"))
if lib_path not in sys.path: sys.path.insert(0, lib_path)
import unittest
from nitro_pie import *
from test_utils import *
#-------------------------------------------------------------------
class Test(unittest.TestCase):
#---------------------------------------------------------------
def setUp(self):
self.ctx = JSGlobalContextRef.create()
def tearDown(self):
self.ctx.release()
#---------------------------------------------------------------
def test_get_simple_properties(self):
ctx = self.ctx
result = ctx.eval("({a: 1, b: '2', c: true, d: null, e: undefined})").asJSObjectRef(ctx)
self.assertTrue(result.isObject(ctx))
prop_names = result.getPropertyNames(ctx)
self.assertTrue("a" in prop_names)
self.assertTrue("b" in prop_names)
self.assertTrue("c" in prop_names)
self.assertTrue("d" in prop_names)
self.assertTrue("e" in prop_names)
self.assertEquals(1, result.getProperty(ctx, "a").toNumber(ctx))
self.assertEquals('2', result.getProperty(ctx, "b").toString(ctx))
self.assertEquals(True, result.getProperty(ctx, "c").toBoolean(ctx))
self.assertTrue(result.getProperty(ctx, "d").isNull(ctx))
self.assertTrue(result.getProperty(ctx, "e").isUndefined(ctx))
#---------------------------------------------------------------
def test_complex_property(self):
ctx = self.ctx
result = ctx.eval("({a: {b: 2}})").asJSObjectRef(ctx)
self.assertTrue(result.isObject(ctx))
prop_names = result.getPropertyNames(ctx)
self.assertTrue("a" in prop_names)
result_inner = result.getProperty(ctx, "a").asJSObjectRef(ctx)
self.assertTrue(result_inner.isObject(ctx))
prop_names = result_inner.getPropertyNames(ctx)
self.assertTrue("b" in prop_names)
self.assertEquals(2, result_inner.getProperty(ctx, "b").toNumber(ctx))
#---------------------------------------------------------------
def test_delete_property(self):
ctx = self.ctx
o = ctx.eval("({a: 1, b: 2, c: 3})").asJSObjectRef(ctx)
self.assertTrue(o.isObject(ctx))
for prop in "a b c".split():
self.assertTrue(o.hasProperty(ctx, prop))
self.assertTrue(o.deleteProperty(ctx, "b"))
for prop in "a c".split():
self.assertTrue(o.hasProperty(ctx, prop))
self.assertFalse(o.hasProperty(ctx, "b"))
log("delete property again")
self.assertTrue(o.deleteProperty(ctx, "b"))
#---------------------------------------------------------------
def test_set_property(self):
ctx = self.ctx
o = ctx.eval("({})").asJSObjectRef(ctx)
self.assertTrue(o.isObject(ctx))
o.setProperty(ctx, "a", ctx.makeNumber(1))
o.setProperty(ctx, "b", ctx.makeNumber(3.3))
o.setProperty(ctx, "c", ctx.makeBoolean(True))
o.setProperty(ctx, "d", ctx.makeBoolean(False))
o.setProperty(ctx, "e", ctx.makeNull())
o.setProperty(ctx, "f", ctx.makeUndefined())
for prop in "a b".split():
self.assertTrue(o.hasProperty(ctx, prop))
self.assertEquals(1, o.getProperty(ctx, "a").toNumber(ctx))
self.assertEquals(3.3, o.getProperty(ctx, "b").toNumber(ctx))
self.assertEquals(True, o.getProperty(ctx, "c").toBoolean(ctx))
self.assertEquals(False, o.getProperty(ctx, "d").toBoolean(ctx))
self.assertTrue(o.getProperty(ctx, "e").isNull(ctx))
self.assertTrue(o.getProperty(ctx, "f").isUndefined(ctx))
#---------------------------------------------------------------
def test_set_property_compound(self):
ctx = self.ctx
p1 = ctx.eval("[1,2]")
p2 = ctx.eval("({a:11, b:22})")
o = ctx.eval("({})").asJSObjectRef(ctx)
o.setProperty(ctx, "x", p1)
o.setProperty(ctx, "y", p2)
for prop in "x y".split():
self.assertTrue(o.hasProperty(ctx, prop))
t1 = o.getProperty(ctx, "x").asJSObjectRef(ctx)
t2 = o.getProperty(ctx, "y").asJSObjectRef(ctx)
self.assertEquals(2, t1.getProperty(ctx, "length").toNumber(ctx))
self.assertEquals(1, t1.getPropertyAtIndex(ctx, 0).toNumber(ctx))
self.assertEquals(2, t1.getPropertyAtIndex(ctx, 1).toNumber(ctx))
self.assertEquals(11, t2.getProperty(ctx, "a").toNumber(ctx))
self.assertEquals(22, t2.getProperty(ctx, "b").toNumber(ctx))
#---------------------------------------------------------------
def test_get_array_element(self):
ctx = self.ctx
o = ctx.eval("[11,22,33]").asJSObjectRef(ctx)
self.assertTrue(o.isObject(ctx))
self.assertEquals(11, o.getPropertyAtIndex(ctx, 0).toNumber(ctx))
self.assertEquals(22, o.getPropertyAtIndex(ctx, 1).toNumber(ctx))
self.assertEquals(33, o.getPropertyAtIndex(ctx, 2).toNumber(ctx))
#---------------------------------------------------------------
def test_set_array_element(self):
ctx = self.ctx
o = ctx.eval("[]").asJSObjectRef(ctx)
self.assertTrue(o.isObject(ctx))
o.setPropertyAtIndex(ctx, 0, ctx.makeNumber(1))
o.setPropertyAtIndex(ctx, 1, ctx.makeNumber(3.3))
o.setPropertyAtIndex(ctx, 2, ctx.makeBoolean(True))
o.setPropertyAtIndex(ctx, 3, ctx.makeBoolean(False))
o.setPropertyAtIndex(ctx, 4, ctx.makeNull())
o.setPropertyAtIndex(ctx, 5, ctx.makeUndefined())
self.assertEquals(1, o.getPropertyAtIndex(ctx, 0).toNumber(ctx))
self.assertEquals(3.3, o.getPropertyAtIndex(ctx, 1).toNumber(ctx))
self.assertEquals(True, o.getPropertyAtIndex(ctx, 2).toBoolean(ctx))
self.assertEquals(False, o.getPropertyAtIndex(ctx, 3).toBoolean(ctx))
self.assertTrue(o.getPropertyAtIndex(ctx, 4).isNull(ctx))
self.assertTrue(o.getPropertyAtIndex(ctx, 5).isUndefined(ctx))
#---------------------------------------------------------------
def test_attribute_read_only(self):
ctx = self.ctx
o = ctx.eval("({})").asJSObjectRef(ctx)
o.setProperty(ctx, "x", ctx.makeNumber( 111), JSObjectRef.kJSPropertyAttributeReadOnly)
self.assertEquals(111, o.getProperty(ctx, "x").toNumber(ctx))
o.setProperty(ctx, "x", ctx.makeNumber( 222))
self.assertEquals(111, o.getProperty(ctx, "x").toNumber(ctx))
        self.assertTrue("x" in o.getPropertyNames(ctx))
o.deleteProperty(ctx, "x")
self.assertTrue("x" not in o.getPropertyNames(ctx))
#---------------------------------------------------------------
def test_attribute_dont_enum(self):
ctx = self.ctx
o = ctx.eval("({})").asJSObjectRef(ctx)
o.setProperty(ctx, "x", ctx.makeNumber( 111), JSObjectRef.kJSPropertyAttributeDontEnum)
self.assertEquals(111, o.getProperty(ctx, "x").toNumber(ctx))
o.setProperty(ctx, "x", ctx.makeNumber( 222))
self.assertEquals(222, o.getProperty(ctx, "x").toNumber(ctx))
self.assertTrue("x" not in o.getPropertyNames(ctx))
o.deleteProperty(ctx, "x")
self.assertTrue("x" not in o.getPropertyNames(ctx))
#---------------------------------------------------------------
def test_attribute_dont_delete(self):
ctx = self.ctx
o = ctx.eval("({})").asJSObjectRef(ctx)
o.setProperty(ctx, "x", ctx.makeNumber( 111), JSObjectRef.kJSPropertyAttributeDontDelete)
self.assertEquals(111, o.getProperty(ctx, "x").toNumber(ctx))
o.setProperty(ctx, "x", ctx.makeNumber( 222))
self.assertEquals(222, o.getProperty(ctx, "x").toNumber(ctx))
self.assertTrue("x" in o.getPropertyNames(ctx))
o.deleteProperty(ctx, "x")
self.assertTrue("x" in o.getPropertyNames(ctx))
#-------------------------------------------------------------------
if __name__ == '__main__':
    NitroLogging(False)
    logging(False)
unittest.main()
|
pmuellr/nitro_pie
|
test/test_properties.py
|
Python
|
mit
| 9,821
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <grabner@cadit.com>
# date: 2013/10/02
# copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved.
#------------------------------------------------------------------------------
from gettext import gettext
#------------------------------------------------------------------------------
def _(message, *args, **kw):
if args or kw:
return gettext(message).format(*args, **kw)
return gettext(message)
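# Example usage (illustrative): arguments are applied via str.format after
# translation, e.g. _('hello, {}!', 'world') -> 'hello, world!' and
# _('{count} items', count=3) -> '3 items'.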
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
|
cadithealth/pyramid_describe
|
pyramid_describe/i18n.py
|
Python
|
mit
| 733
|
# VirusShare.py
# download VirusShare hashes and search for specified hashes
# Author: Sent1ent
# Version: 1.0
# Date: February 22nd, 2016
import argparse
import os
import sys
import time
from urllib.request import urlopen
def downloader(directory, iteration):
# Downloads given URL
url = 'https://virusshare.com/hashfiles/VirusShare_%05d.md5' % iteration
    exists = os.path.isfile(os.path.join(directory, "VirusShare_%05d.md5" % iteration))
if not exists:
print(" Downloading {0} into {1}...".format(url, directory))
file_path = os.path.join(directory, os.path.basename(url))
contents = urlopen(url)
file_output = open(file_path,'wb')
file_output.write(contents.read())
file_output.close()
else: print("Skipping " + directory + ("\VirusShare_%05d.md5" % iteration))
time.sleep(1)
def find_missing(directory, latest):
# find all files, parse files for end number, remove any files from 'to_find'
to_find = list(range(0,latest+1))
for i in os.listdir(directory):
to_find.remove(int(''.join(c for c in i if c.isdigit())[:5]))
return to_find
def parse_amount(amount):
to_find = []
try:
if ',' in amount:
            # if a comma separation (e.g. 10,11,12) is specified
temp = amount.split(',')
for i in temp:
to_find.append(int(i))
return to_find
elif '-' in amount:
# if a range (e.g. 10-20) is specified
temp = amount.split('-')
for i in range(int(temp[0]),int(temp[1]) + 1):
to_find.append(i)
return to_find
else:
# if a single number (e.g. 123) is specified
to_find.append(int(amount))
return to_find
except ValueError:
print(" ERROR: incorrect value given for update range.")
        sys.exit(1)
def update(directory, amount, latest):
try:
l = int(latest)
except ValueError:
print(" ERROR: incorrect value given for latest hash release.")
        sys.exit(1)
if amount == "all":
# Downloads all md5 files
        for i in range(0, l + 1):  # include the latest release, matching find_missing()
downloader(directory,i)
elif amount == "missing":
# Finds all md5 files not in a directory
to_find = find_missing(directory, l)
for i in to_find:
downloader(directory,i)
else:
# Parses amount...
to_find = parse_amount(amount)
for i in to_find:
downloader(directory,i)
def search(directory, term):
counter = 1
for file_to_search in os.listdir(directory):
full_file_path = os.path.join(directory, file_to_search)
if os.path.isfile(full_file_path):
with open(full_file_path) as f:
for line in f:
if term in line:
print('FOUND|{0}|{1}|{2}'.format(term,file_to_search, counter))
return
counter += 1
counter = 1
print(' |{0}|{1}|{2}'.format(term,"None ", -1))
def main():
parser = argparse.ArgumentParser(description='tool to download VirusShare hash files and search them for specified hashes')
parser.add_argument('-s','--search', help='hash to search for in local repository (hint: specify any number of hashes)', nargs="+")
parser.add_argument('-u','--update', help='updates local hash containing files (--update all/missing/10,11,12/0-20)')
parser.add_argument('-l','--latest', help='sets latest VirusShare file released (default: 389)', default='389')
parser.add_argument('-d','--directory', help='sets working directory (default: VirusShare_Hashes)', default='VirusShare_Hashes')
args = parser.parse_args()
directory = args.directory
latest = args.latest
if not os.path.exists(directory):
os.makedirs(directory)
if args.update is not None:
update(directory, args.update, latest)
if args.search is not None:
print(" | Hash | File | Line")
for t in args.search:
search(directory, t)
if args.search is None and args.update is None:
parser.print_help()
if __name__ == "__main__":
main()
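# Example invocations (illustrative):
#   python VirusShare-Search.py --update all
#   python VirusShare-Search.py --update 0-20 --directory hashes
#   python VirusShare-Search.py --search d41d8cd98f00b204e9800998ecf8427e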
|
AdamGreenhill/VirusShare-Search
|
VirusShare-Search.py
|
Python
|
mit
| 3,716
|
# Proximal
import sys
sys.path.append('../../')
from proximal.utils.utils import *
from proximal.utils.convergence_log import *
from proximal.utils.metrics import *
from proximal.halide.halide import *
from proximal.lin_ops import *
from proximal.prox_fns import *
from proximal.algorithms import *
import cvxpy as cvx
import numpy as np
from scipy import ndimage
import argparse
from functools import reduce  # builtin in Python 2; imported from functools in Python 3
import matplotlib.pyplot as plt
from PIL import Image
import cv2
# Load image
img = Image.open('./data/angela.jpg') # opens the file using Pillow - it's not an array yet
x = np.asfortranarray(im2nparray(img))
x = np.mean(x, axis=2)
x = np.maximum(x, 0.0)
# Kernel
K = Image.open('./data/kernel_snake.png') # opens the file using Pillow - it's not an array yet
K = np.mean(np.asfortranarray(im2nparray(K)), axis=2)
K = np.maximum(cv2.resize(K, (15, 15), interpolation=cv2.INTER_LINEAR), 0)
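# Normalize the kernel to unit sum so the blur preserves overall brightness.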
K /= np.sum(K)
# Generate observation
sigma_noise = 0.01
b = ndimage.convolve(x, K, mode='wrap') + sigma_noise * np.random.randn(*x.shape)
# Display data
plt.ion()
plt.figure()
imgplot = plt.imshow(x, interpolation="nearest", clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Original Image')
plt.show()
plt.figure()
imgplot = plt.imshow(K / np.amax(K), interpolation="nearest", clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('K')
plt.show()
plt.figure()
imgplot = plt.imshow(b, interpolation="nearest", clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Observation')
plt.show()
# Setup problem
lambda_tv = 1.0
lambda_data = 500.0
I = x.copy()
#psnrval = psnr_metric( I, pad = (10,10), decimals = 2 )
psnrval = None
x = Variable(I.size)
shaped_x = reshape(x, I.shape)
# Modify with equilibration.
np.random.seed(1)
op1 = grad(shaped_x, dims=2)
op2 = conv(K, shaped_x)
wrand1 = np.random.lognormal(0, 1, size=op1.shape)
wrand2 = np.random.lognormal(0, 1, size=op2.shape)
# wrand = np.ones(op2.shape)
op1 = mul_elemwise(wrand1, op1)
b = wrand2 * b
op2 = mul_elemwise(wrand2, op2)
stacked_ops = vstack([op1, op2])
equil_iters = 100
d, e = equil(CompGraph(stacked_ops), equil_iters, 1e-1, 5)
op1_d = np.reshape(d[:op1.size], op1.shape) # /wrand1
op2_d = np.reshape(d[op1.size:], op2.shape) # /wrand2
new_x = mul_elemwise(e, x)
shaped_x = reshape(new_x, I.shape)
op1 = grad(shaped_x, dims=2)
op2 = conv(K, shaped_x)
op1 = mul_elemwise(wrand1, op1)
op2 = mul_elemwise(wrand2, op2)
orig_fns = [norm1(op1, alpha=lambda_tv), sum_squares(op2, b=b, alpha=lambda_data)]
op1 = mul_elemwise(op1_d, op1)
op2 = mul_elemwise(op2_d, op2)
stacked_ops = vstack([op1, op2])
L = est_CompGraph_norm(CompGraph(stacked_ops))
print "||K||_2 = ", L
# Quadratic or non-quadratic splitting
print('Splitting quadratics')
# print np.linalg.norm(new_x.weight)
# op1_d /= np.sqrt(L)
# op2_d /= np.sqrt(L)
# e /= np.sqrt(L)
# print np.linalg.norm(new_x.weight)
nonquad_fns = [weighted_norm1(1 / op1_d, op1, alpha=lambda_tv)] # Anisotropic
# nonquad_fns = [group_norm1( grad(x, dims = 2), [2], alpha = lambda_tv )] #Isotropic
quad_funcs = [weighted_sum_squares(1 / op2_d, op2, b=op2_d * b, alpha=lambda_data)]
# print 'No splitting'
# #nonquad_fns = [sum_squares(conv(K, x), b=b, alpha = 400), norm1( grad(x, dims = 2), alpha = lambda_tv ) ] #Anisotropic
# nonquad_fns = [sum_squares(conv(K, x), b=b, alpha = 400), group_norm1( grad(x, dims = 2), [2] ) ] #Isotropic
# quad_funcs = []
# In 100 - equil iters.
# 0: 39595062.8522
# 10: 8708627.07193
# 25: 1972630.38285
# 50: 551021.415309
# 75: 385803.229338
# 0/perfect: 85337.5131483
# In 200 - equil iters.
# 0: 34864879.4644
# 100: 75537.9407767
# 0/perfect: 87258.0455795
# Prox functions are the union
prox_fns = nonquad_fns + quad_funcs
method = 'pc'
verbose = 1
diag = False
convlog = ConvergenceLog()
tic()
if method == 'pc':
options = cg_options(tol=1e-5, num_iters=100, verbose=True)
#options = lsqr_options(atol=1e-5, btol=1e-5, num_iters=100, verbose=False)
pc(prox_fns, quad_funcs=[], tau=1 / L, sigma=1 / L, theta=1, max_iters=1000 - equil_iters,
eps_rel=1e-5, eps_abs=1e-5, lin_solver="cg", lin_solver_options=options,
try_split=False, try_diagonalize=diag,
metric=psnrval, verbose=verbose, convlog=None)
elif method == 'lin-admm':
options = cg_options(tol=1e-5, num_iters=100, verbose=True)
lin_admm(prox_fns, quad_funcs=quad_funcs, lmb=0.1, max_iters=300,
eps_abs=1e-4, eps_rel=1e-4, lin_solver="cg", lin_solver_options=options,
try_diagonalize=diag, metric=psnrval, verbose=verbose)
elif method == 'admm':
options = cg_options(tol=1e-5, num_iters=100, verbose=True)
admm(prox_fns, quad_funcs=[], rho=10, max_iters=300,
eps_abs=1e-4, eps_rel=1e-4, lin_solver="cg", lin_solver_options=options,
try_diagonalize=diag, metric=psnrval, verbose=verbose)
elif method == 'hqs':
    # Need high accuracy when quadratics are not split
options = cg_options(tol=1e-5, num_iters=100, verbose=True)
hqs(prox_fns, lin_solver="cg", lin_solver_options=options,
eps_rel=1e-6, max_iters=10, max_inner_iters=10, x0=b,
try_diagonalize=diag, metric=psnrval, verbose=verbose)
print(convlog.objective_val)
print(reduce(lambda x, y: x + y, [fn.value for fn in orig_fns]))
print('Running took: {0:.1f}s'.format(toc() / 1000.0))
plt.figure()
imgplot = plt.imshow(shaped_x.value, interpolation="nearest", clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.colorbar()
plt.title('Result')
plt.show()
# Wait until done
input("Press Enter to continue...")  # raw_input in Python 2
|
timmeinhardt/ProxImaL
|
proximal/examples/test_precond.py
|
Python
|
mit
| 5,494
|
#!/usr/bin/env python
import time
from fluidsynth import fluidsynth
settings = fluidsynth.FluidSettings()
settings.quality = "low"
synth = fluidsynth.FluidSynth(settings)
synth.load_soundfont("double.sf2")
driver = fluidsynth.FluidAudioDriver(settings, synth)
scale = (60, 62, 64, 65, 67, 69, 71, 72)
for i in scale:
synth.noteon(0, i, 127)
time.sleep(0.5)
synth.noteoff(0, i)
for i in reversed(scale):
synth.noteon(0, i, 127)
time.sleep(0.5)
synth.noteoff(0, i)
|
MostAwesomeDude/pyfluidsynth
|
demos/scale.py
|
Python
|
mit
| 495
|
#!/usr/bin/env python
#
# NSC_INSTCAL_SEXDAOPHOT.PY -- Run SExtractor and DAOPHOT on an exposure
#
from __future__ import print_function
__authors__ = 'David Nidever <dnidever@noao.edu>'
__version__ = '20180819' # yyyymmdd
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, Column
import time
import shutil
import re
import subprocess
import glob
import logging
import socket
#from scipy.signal import convolve2d
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
import astropy.stats
import struct
from utils import *
from phot import *
# Ignore these warnings; they come from a harmless numpy binary-compatibility issue
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# Get NSC directories
def getnscdirs(version=None):
# Host
hostname = socket.gethostname()
host = hostname.split('.')[0]
# Version
verdir = ""
if version is not None:
verdir = version if version.endswith('/') else version+"/"
    # on thing/hulk use
    if host in ("thing", "hulk"):
        basedir = "/dl1/users/dnidever/nsc/instcal/"+verdir
        tmproot = "/d0/dnidever/nsc/instcal/"+verdir+"tmp/"
    # on gp05-gp09 use
    elif host in ("gp09", "gp08", "gp07", "gp06", "gp05"):
        basedir = "/net/dl1/users/dnidever/nsc/instcal/"+verdir
        tmproot = "/data0/dnidever/nsc/instcal/"+verdir+"tmp/"
    else:
        raise ValueError("Unknown host '"+host+"': no NSC directories defined")
    return basedir,tmproot
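# e.g. getnscdirs("v3") on thing/hulk returns
# ("/dl1/users/dnidever/nsc/instcal/v3/", "/d0/dnidever/nsc/instcal/v3/tmp/")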
# Class to represent an exposure to process
class Exposure:
# Initialize Exposure object
def __init__(self,fluxfile,wtfile,maskfile,nscversion="t3a"):
# Check that the files exist
if os.path.exists(fluxfile) is False:
print(fluxfile+" NOT found")
return
if os.path.exists(wtfile) is False:
print(wtfile+" NOT found")
return
if os.path.exists(maskfile) is False:
print(maskfile+" NOT found")
return
# Setting up the object properties
self.origfluxfile = fluxfile
self.origwtfile = wtfile
self.origmaskfile = maskfile
self.fluxfile = None # working files in temp dir
self.wtfile = None # working files in temp dir
self.maskfile = None # working files in temp dir
base = os.path.basename(fluxfile)
base = os.path.splitext(os.path.splitext(base)[0])[0]
self.base = base
self.nscversion = nscversion
self.logfile = base+".log"
self.logger = None
self.origdir = None
self.wdir = None # the temporary working directory
self.outdir = None
self.chip = None
# Get instrument
head0 = fits.getheader(fluxfile,0)
if head0["DTINSTRU"] == 'mosaic3':
self.instrument = 'k4m'
elif head0["DTINSTRU"] == '90prime':
self.instrument = 'ksb'
elif head0["DTINSTRU"] == 'decam':
self.instrument = 'c4d'
else:
print("Cannot determine instrument type")
return
# Get number of extensions
hdulist = fits.open(fluxfile)
nhdu = len(hdulist)
hdulist.close()
self.nexten = nhdu
# Get night
dateobs = head0.get("DATE-OBS")
night = dateobs[0:4]+dateobs[5:7]+dateobs[8:10]
self.night = night
# Output directory
basedir,tmpdir = getnscdirs(nscversion)
self.outdir = basedir+self.instrument+"/"+self.night+"/"+self.base+"/"
# Setup
def setup(self):
basedir,tmproot = getnscdirs(self.nscversion)
# Prepare temporary directory
        tmpcntr = 1  # Python 3: no long-integer suffix
tmpdir = tmproot+self.base+"."+str(tmpcntr)
while (os.path.exists(tmpdir)):
tmpcntr = tmpcntr+1
tmpdir = tmproot+self.base+"."+str(tmpcntr)
if tmpcntr > 20:
print("Temporary Directory counter getting too high. Exiting")
sys.exit()
os.mkdir(tmpdir)
origdir = os.getcwd()
self.origdir = origdir
os.chdir(tmpdir)
self.wdir = tmpdir
# Set up logging to screen and logfile
logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
# file handler
fileHandler = logging.FileHandler(self.logfile)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
# console/screen handler
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.NOTSET)
self.logger = rootLogger
self.logger.info("Setting up in temporary directory "+tmpdir)
self.logger.info("Starting logfile at "+self.logfile)
# Copy over images from zeus1:/mss
fluxfile = "bigflux.fits.fz"
wtfile = "bigwt.fits.fz"
maskfile = "bigmask.fits.fz"
self.logger.info("Copying InstCal images from mass store archive")
shutil.copyfile(self.origfluxfile,tmpdir+"/"+os.path.basename(self.origfluxfile))
self.logger.info(" "+self.origfluxfile)
if (os.path.basename(self.origfluxfile) != fluxfile):
os.symlink(os.path.basename(self.origfluxfile),fluxfile)
shutil.copyfile(self.origwtfile,tmpdir+"/"+os.path.basename(self.origwtfile))
self.logger.info(" "+self.origwtfile)
if (os.path.basename(self.origwtfile) != wtfile):
os.symlink(os.path.basename(self.origwtfile),wtfile)
shutil.copyfile(self.origmaskfile,tmpdir+"/"+os.path.basename(self.origmaskfile))
self.logger.info(" "+self.origmaskfile)
if (os.path.basename(self.origmaskfile) != maskfile):
os.symlink(os.path.basename(self.origmaskfile),maskfile)
# Set local working filenames
self.fluxfile = fluxfile
self.wtfile = wtfile
self.maskfile = maskfile
# Make final output directory
if not os.path.exists(self.outdir):
os.makedirs(self.outdir) # will make multiple levels of directories if necessary
self.logger.info("Making output directory: "+self.outdir)
# Load chip
def loadchip(self,extension,fluxfile="flux.fits",wtfile="wt.fits",maskfile="mask.fits"):
# Load the data
self.logger.info(" Loading chip "+str(extension))
        # Check that the working files were set by "setup"
if (self.fluxfile is None) | (self.wtfile is None) | (self.maskfile is None):
self.logger.warning("Local working filenames not set. Make sure to run setup() first")
return
try:
flux,fhead = fits.getdata(self.fluxfile,extension,header=True)
fhead0 = fits.getheader(self.fluxfile,0) # add PDU info
fhead.extend(fhead0,unique=True)
wt,whead = fits.getdata(self.wtfile,extension,header=True)
mask,mhead = fits.getdata(self.maskfile,extension,header=True)
except:
self.logger.error("No extension "+str(extension))
return
# Write the data to the appropriate files
if os.path.exists(fluxfile):
os.remove(fluxfile)
fits.writeto(fluxfile,flux,header=fhead,output_verify='warn')
if os.path.exists(wtfile):
os.remove(wtfile)
fits.writeto(wtfile,wt,header=whead,output_verify='warn')
if os.path.exists(maskfile):
os.remove(maskfile)
fits.writeto(maskfile,mask,header=mhead,output_verify='warn')
# Create the chip object
self.chip = Chip(fluxfile,wtfile,maskfile,self.base)
self.chip.bigextension = extension
self.chip.nscversion = self.nscversion
self.chip.outdir = self.outdir
# Add logger information
self.chip.logger = self.logger
# Process all chips
def process(self):
self.logger.info("-------------------------------------------------")
self.logger.info("Processing ALL extension images")
self.logger.info("-------------------------------------------------")
# LOOP through the HDUs/chips
#----------------------------
        for i in range(1,self.nexten):  # xrange in Python 2
t0 = time.time()
self.logger.info(" ")
self.logger.info("=== Processing subimage "+str(i)+" ===")
# Load the chip
self.loadchip(i)
self.logger.info("CCDNUM = "+str(self.chip.ccdnum))
# Process it
self.chip.process()
# Clean up
self.chip.cleanup()
self.logger.info("dt = "+str(time.time()-t0)+" seconds")
# Teardown
def teardown(self):
# Delete files and temporary directory
self.logger.info("Deleting files and temporary directory.")
# Move the final log file
shutil.move(self.logfile,self.outdir+self.base+".log")
# Delete temporary files and directory
tmpfiles = glob.glob("*")
for f in tmpfiles: os.remove(f)
os.rmdir(self.wdir)
# CD back to original directory
os.chdir(self.origdir)
# RUN all steps to process this exposure
def run(self):
self.setup()
self.process()
self.teardown()
# Class to represent a single chip of an exposure
class Chip:
def __init__(self,fluxfile,wtfile,maskfile,bigbase):
self.fluxfile = fluxfile
self.wtfile = wtfile
self.maskfile = maskfile
self.bigbase = bigbase
self.bigextension = None
base = os.path.basename(fluxfile)
base = os.path.splitext(os.path.splitext(base)[0])[0]
self.dir = os.path.abspath(os.path.dirname(fluxfile))
self.base = base
self.meta = makemeta(header=fits.getheader(fluxfile,0))
self.sexfile = self.dir+"/"+self.base+"_sex.fits"
self.daofile = self.dir+"/"+self.base+"_dao.fits"
self.sexcatfile = None
self.sexcat = None
self.seeing = None
self.apcorr = None
# Internal hidden variables
self._rdnoise = None
self._gain = None
self._ccdnum = None
self._pixscale = None
self._saturate = None
self._wcs = None
self._exptime = None
self._instrument = None
self._plver = None
self._cpfwhm = None
self._daomaglim = None # set by daoaperphot()
self._sexmaglim = None # set by runsex()
# Logger
self.logger = None
def __repr__(self):
return "Chip object"
@property
def rdnoise(self):
# We have it already, just return it
if self._rdnoise is not None:
return self._rdnoise
# Can't get rdnoise, no header yet
if self.meta is None:
self.logger.warning("Cannot get RDNOISE, no header yet")
return None
# Check DECam style rdnoise
if "RDNOISEA" in self.meta.keys():
rdnoisea = self.meta["RDNOISEA"]
rdnoiseb = self.meta["RDNOISEB"]
rdnoise = (rdnoisea+rdnoiseb)*0.5
self._rdnoise = rdnoise
return self._rdnoise
# Get rdnoise from the header
for name in ['RDNOISE','READNOIS','ENOISE']:
            # We have this key, set _rdnoise and return
if name in self.meta.keys():
self._rdnoise = self.meta[name]
return self._rdnoise
self.logger.warning('No RDNOISE found')
return None
@property
def gain(self):
# We have it already, just return it
if self._gain is not None:
return self._gain
try:
gainmap = { 'c4d': lambda x: 0.5*(x.get('gaina')+x.get('gainb')),
'k4m': lambda x: x.get('gain'),
                    'ksb': lambda x: [1.3,1.5,1.4,1.4][self.ccdnum-1] } # bok gain in HDU0, use list here
gain = gainmap[self.instrument](self.meta)
except:
gainmap_avg = { 'c4d': 3.9845419, 'k4m': 1.8575, 'ksb': 1.4}
gain = gainmap_avg[self.instrument]
self._gain = gain
return self._gain
## Can't get gain, no header yet
#if self.meta is None:
# print("Cannot get GAIN, no header yet")
## Get rdnoise from the header
#for name in ['GAIN','EGAIN']:
# # We have this key, set _gain and return
# if self.meta.has_key(name):
# self._gain = self.meta[name]
# return self._gain
#print('No GAIN found')
#return None
@property
def ccdnum(self):
# We have it already, just return it
if self._ccdnum is not None:
return self._ccdnum
# Can't get ccdnum, no header yet
if self.meta is None:
self.logger.warning("Cannot get CCDNUM, no header yet")
return None
# Get ccdnum from the header
        # We have this key, set _ccdnum and return
if 'CCDNUM' in self.meta.keys():
self._ccdnum = self.meta['CCDNUM']
return self._ccdnum
self.logger.warning('No CCDNUM found')
return None
@property
def pixscale(self):
# We have it already, just return it
if self._pixscale is not None:
return self._pixscale
pixmap = { 'c4d': 0.27, 'k4m': 0.258, 'ksb': 0.45 }
try:
pixscale = pixmap[self.instrument]
self._pixscale = pixscale
return self._pixscale
except:
self._pixscale = np.max(np.abs(self.wcs.pixel_scale_matrix))
return self._pixscale
@property
def saturate(self):
# We have it already, just return it
if self._saturate is not None:
return self._saturate
# Can't get saturate, no header yet
if self.meta is None:
self.logger.warning("Cannot get SATURATE, no header yet")
return None
# Get saturate from the header
# We have this key, set _saturate and return
if 'SATURATE' in self.meta.keys():
self._saturate = self.meta['SATURATE']
return self._saturate
self.logger.warning('No SATURATE found')
return None
@property
def wcs(self):
# We have it already, just return it
if self._wcs is not None:
return self._wcs
# Can't get wcs, no header yet
if self.meta is None:
self.logger.warning("Cannot get WCS, no header yet")
return None
try:
self._wcs = WCS(self.meta)
return self._wcs
except:
self.logger.warning("Problem with WCS")
return None
@property
def exptime(self):
# We have it already, just return it
if self._exptime is not None:
return self._exptime
# Can't get exptime, no header yet
if self.meta is None:
self.logger.warning("Cannot get EXPTIME, no header yet")
return None
        # Get exptime from the header
        # We have this key, set _exptime and return
if 'EXPTIME' in self.meta.keys():
self._exptime = self.meta['EXPTIME']
return self._exptime
        self.logger.warning('No EXPTIME found')
return None
@property
def instrument(self):
# We have it already, just return it
if self._instrument is not None:
return self._instrument
# Can't get instrument, no header yet
if self.meta is None:
self.logger.warning("Cannot get INSTRUMENT, no header yet")
return None
# instrument, c4d, k4m or ksb
# DTINSTRU = 'mosaic3 '
# DTTELESC = 'kp4m '
# Bok 90Prime data has
if self.meta.get("DTINSTRU") == 'mosaic3':
self._instrument = 'k4m'
return self._instrument
elif self.meta.get("DTINSTRU") == '90prime':
self._instrument = 'ksb'
return self._instrument
else:
self._instrument = 'c4d'
return self._instrument
@property
def plver(self):
# We have it already, just return it
if self._plver is not None:
return self._plver
# Can't get plver, no header yet
if self.meta is None:
self.logger.warning("Cannot get PLVER, no header yet")
return None
plver = self.meta.get('PLVER')
        if plver is None:
            plver = 'V1.0'
self._plver = plver
return self._plver
@property
def cpfwhm(self):
# We have it already, just return it
if self._cpfwhm is not None:
return self._cpfwhm
# Can't get fwhm, no header yet
if self.meta is None:
self.logger.warning("Cannot get CPFWHM, no header yet")
return None
# FWHM values are ONLY in the extension headers
cpfwhm_map = { 'c4d': 1.5 if self.meta.get('FWHM') is None else self.meta.get('FWHM')*0.27,
'k4m': 1.5 if self.meta.get('SEEING1') is None else self.meta.get('SEEING1'),
'ksb': 1.5 if self.meta.get('SEEING1') is None else self.meta.get('SEEING1') }
cpfwhm = cpfwhm_map[self.instrument]
self._cpfwhm = cpfwhm
return self._cpfwhm
@property
def maglim(self):
# We have it already, just return it
if self._daomaglim is not None:
return self._daomaglim
if self._sexmaglim is not None:
return self._sexmaglim
self.logger.warning('Maglim not set yet')
return None
# Write SE catalog in DAO format
def sextodao(self,cat=None,outfile=None,format="coo"):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
if outfile is None: outfile=daobase+".coo"
if cat is None: cat=self.sexcat
sextodao(self.sexcat,self.meta,outfile=outfile,format=format,logger=self.logger)
# Run Source Extractor
#---------------------
def runsex(self,outfile=None):
basedir, tmpdir = getnscdirs(self.nscversion)
configdir = basedir+"config/"
sexcatfile = "flux_sex.cat.fits"
sexcat, maglim = runsex(self.fluxfile,self.wtfile,self.maskfile,self.meta,sexcatfile,configdir,logger=self.logger)
        self.sexcatfile = sexcatfile
self.sexcat = sexcat
self._sexmaglim = maglim
# Set the FWHM as well
fwhm = sexfwhm(sexcat,logger=self.logger)
self.meta['FWHM'] = fwhm
# Determine FWHM using SE catalog
#--------------------------------
def sexfwhm(self):
self.seeing = sexfwhm(self.sexcat)
return self.seeing
# Pick PSF candidates using SE catalog
#-------------------------------------
def sexpickpsf(self,nstars=100):
base = os.path.basename(self.sexfile)
base = os.path.splitext(os.path.splitext(base)[0])[0]
fwhm = self.sexfwhm() if self.seeing is None else self.seeing
psfcat = sexpickpsf(self.sexcat,fwhm,self.meta,base+".lst",nstars=nstars,logger=self.logger)
# Make DAOPHOT option files
#--------------------------
#def mkopt(self,**kwargs):
def mkopt(self):
base = os.path.basename(self.daofile)
base = os.path.splitext(os.path.splitext(base)[0])[0]
#mkopt(base,self.meta,logger=self.logger,**kwargs)
mkopt(base,self.meta,logger=self.logger)
# Make image ready for DAOPHOT
def mkdaoim(self):
mkdaoim(self.fluxfile,self.wtfile,self.maskfile,self.meta,self.daofile,logger=self.logger)
# DAOPHOT detection
#----------------------
def daofind(self):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
cat = daofind(self.daofile,outfile=daobase+".coo",logger=self.logger)
# DAOPHOT aperture photometry
#----------------------------
def daoaperphot(self):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
apcat, maglim = daoaperphot(self.daofile,daobase+".coo",outfile=daobase+".ap",logger=self.logger)
self._daomaglim = maglim
# Pick PSF stars using DAOPHOT
#-----------------------------
def daopickpsf(self,maglim=None,nstars=100):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
if maglim is None: maglim=self.maglim
psfcat = daopickpsf(self.daofile,daobase+".ap",maglim,daobase+".lst",nstars,logger=self.logger)
# Run DAOPHOT PSF
#-------------------
def daopsf(self,verbose=False):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
psfcat = daopsf(self.daofile,daobase+".lst",outfile=daobase+".psf",verbose=verbose,logger=self.logger)
# Subtract neighbors of PSF stars
#--------------------------------
def subpsfnei(self):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
psfcat = subpsfnei(self.daofile,daobase+".lst",daobase+".nei",daobase+"a.fits",logger=self.logger)
# Create DAOPHOT PSF
#-------------------
def createpsf(self,listfile=None,apfile=None,doiter=True,maxiter=5,minstars=6,subneighbors=True,verbose=False):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
createpsf(daobase+".fits",daobase+".ap",daobase+".lst",meta=self.meta,logger=self.logger)
# Run ALLSTAR
#-------------
def allstar(self,psffile=None,apfile=None,subfile=None):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
alscat = allstar(daobase+".fits",daobase+".psf",daobase+".ap",outfile=daobase+".als",meta=self.meta,logger=self.logger)
# Get aperture correction
#------------------------
def getapcor(self):
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
apcorr = apcor(daobase+"a.fits",daobase+".lst",daobase+".psf",self.meta,optfile=daobase+'.opt',alsoptfile=daobase+".als.opt",logger=self.logger)
self.apcorr = apcorr
self.meta['apcor'] = (apcorr,"Aperture correction in mags")
# Combine SE and DAOPHOT catalogs
#--------------------------------
def finalcat(self,outfile=None,both=True,sexdetect=True):
# both Only keep sources that have BOTH SE and ALLSTAR information
# sexdetect SE catalog was used for DAOPHOT detection list
self.logger.info("-- Creating final combined catalog --")
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
if outfile is None: outfile=self.base+".cat.fits"
# Check that we have the SE and ALS information
        if (self.sexcat is None) | (not os.path.exists(daobase+".als")):
self.logger.warning("SE catalog or ALS catalog NOT found")
return
# Load ALS catalog
als = Table(daoread(daobase+".als"))
nals = len(als)
# Apply aperture correction
if self.apcorr is None:
self.logger.error("No aperture correction available")
return
als['MAG'] -= self.apcorr
# Just add columns to the SE catalog
ncat = len(self.sexcat)
newcat = self.sexcat.copy()
alsnames = ['X','Y','MAG','ERR','SKY','ITER','CHI','SHARP']
newnames = ['XPSF','YPSF','MAGPSF','ERRPSF','SKY','ITER','CHI','SHARP','RAPSF','DECPSF']
newtypes = ['float64','float64','float','float','float','float','float','float','float64','float64']
nan = float('nan')
        newvals = [nan, nan, nan, nan, nan, nan, nan, nan, nan, nan]
# DAOPHOT detection list used, need ALS ID
if not sexdetect:
alsnames = ['ID']+alsnames
newnames = ['ALSID']+newnames
newtypes = ['int32']+newtypes
newvals = [-1]+newvals
newcols = []
for n,t,v in zip(newnames,newtypes,newvals):
col = Column(name=n,length=ncat,dtype=t)
col[:] = v
newcols.append(col)
newcat.add_columns(newcols)
# Match up with IDs if SE list used by DAOPHOT
if sexdetect:
mid, ind1, ind2 = np.intersect1d(newcat["NUMBER"],als["ID"],return_indices=True)
for id1,id2 in zip(newnames,alsnames):
newcat[id1][ind1] = als[id2][ind2]
# Only keep sources that have SE+ALLSTAR information
# trim out ones that don't have ALS
if (both is True) & (nals<ncat): newcat = newcat[ind1]
# Match up with coordinates, DAOPHOT detection list used
else:
print("Need to match up with coordinates")
# Only keep sources that have SE+ALLSTAR information
# trim out ones that don't have ALS
if (both is True) & (nals<ncat): newcat = newcat[ind1]
# Add RA, DEC
r,d = self.wcs.all_pix2world(newcat["XPSF"],newcat["YPSF"],1)
newcat['RAPSF'] = r
newcat['DECPSF'] = d
# Write to file
self.logger.info("Final catalog = "+outfile)
fits.writeto(outfile,None,self.meta,overwrite=True) # meta in PDU header
# append the table in extension 1
hdulist = fits.open(outfile)
hdu = fits.table_to_hdu(newcat)
hdulist.append(hdu)
hdulist.writeto(outfile,overwrite=True)
hdulist.close()
#newcat.write(outfile,overwrite=True)
#fits.append(outfile,0,self.meta) # meta is header of 2nd extension
# Process a single chip
#----------------------
def process(self):
self.runsex()
self.logger.info("-- Getting ready to run DAOPHOT --")
self.mkopt()
self.mkdaoim()
#self.daodetect()
# Create DAOPHOT-style coo file
# Need to use SE positions
self.sextodao(outfile="flux_dao.coo")
self.daoaperphot()
self.daopickpsf()
self.createpsf()
self.allstar()
self.getapcor()
self.finalcat()
# Do I need to rerun daoaperphot to get aperture
# photometry at the FINAL allstar positions??
# Is there a way to reduce the number of iterations needed to create the PSF?
# what do the ?, * mean anyway?
# maybe just remove the worse 10% of stars or something
# Put all of the daophot-running into separate function (maybe separate module)
# same for sextractor
# Maybe make my own xmatch function that does one-to-one matching
# Clean up the files
#--------------------
def cleanup(self):
self.logger.info("Copying final files to output directory "+self.outdir)
base = os.path.basename(self.fluxfile)
base = os.path.splitext(os.path.splitext(base)[0])[0]
daobase = os.path.basename(self.daofile)
daobase = os.path.splitext(os.path.splitext(daobase)[0])[0]
# Copy the files we want to keep
# final combined catalog, logs
outcatfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".fits"
if os.path.exists(outcatfile): os.remove(outcatfile)
shutil.copyfile("flux.cat.fits",outcatfile)
# Copy DAOPHOT opt files
outoptfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".opt"
if os.path.exists(outoptfile): os.remove(outoptfile)
shutil.copyfile(daobase+".opt",outoptfile)
outalsoptfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".als.opt"
if os.path.exists(outalsoptfile): os.remove(outalsoptfile)
shutil.copyfile(daobase+".als.opt",outalsoptfile)
# Copy DAOPHOT PSF star list
outlstfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".psf.lst"
if os.path.exists(outlstfile): os.remove(outlstfile)
shutil.copyfile(daobase+".lst",outlstfile)
# Copy DAOPHOT PSF file
outpsffile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".psf"
if os.path.exists(outpsffile): os.remove(outpsffile)
shutil.copyfile(daobase+".psf",outpsffile)
# Copy DAOPHOT .apers file??
# Copy SE config file
outconfigfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".sex.config"
if os.path.exists(outconfigfile): os.remove(outconfigfile)
shutil.copyfile("default.config",outconfigfile)
# Combine all the log files
logfiles = glob.glob(base+"*.log")
loglines = []
for logfil in logfiles:
loglines += ["==> "+logfil+" <==\n"]
f = open(logfil,'r')
lines = f.readlines()
f.close()
loglines += lines
loglines += ["\n"]
f = open(base+".logs","w")
f.writelines("".join(loglines))
f.close()
outlogfile = self.outdir+self.bigbase+"_"+str(self.ccdnum)+".logs"
if os.path.exists(outlogfile): os.remove(outlogfile)
shutil.copyfile(base+".logs",outlogfile)
# Delete temporary directory/files
self.logger.info(" Cleaning up")
files1 = glob.glob("flux*")
files2 = glob.glob("default*")
files = files1+files2+["flux.fits","wt.fits","mask.fits","daophot.opt","allstar.opt"]
for f in files:
if os.path.exists(f): os.remove(f)
# Main command-line program
if __name__ == "__main__":
# Version
verdir = ""
if len(sys.argv) > 4:
version = sys.argv[4]
verdir = version if version.endswith('/') else version+"/"
# Get NSC directories
basedir, tmpdir = getnscdirs(version)
# Make sure the directories exist
if not os.path.exists(basedir):
os.makedirs(basedir)
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
t0 = time.time()
print(sys.argv)
# Not enough inputs
n = len(sys.argv)
if n < 4:
print("Syntax - nsc_instcal_sexdaophot.py fluxfile wtfile maskfile version")
sys.exit()
# File names
fluxfile = sys.argv[1]
wtfile = sys.argv[2]
maskfile = sys.argv[3]
# Check that the files exist
if os.path.exists(fluxfile) is False:
print(fluxfile+" file NOT FOUND")
sys.exit()
if os.path.exists(wtfile) is False:
print(wtfile+" file NOT FOUND")
sys.exit()
if os.path.exists(maskfile) is False:
        print(maskfile+" file NOT FOUND")
sys.exit()
# Create the Exposure object
exp = Exposure(fluxfile,wtfile,maskfile,nscversion=version)
# Run
exp.run()
print("Total time = "+str(time.time()-t0)+" seconds")
|
dnidever/noaosourcecatalog
|
python/nsc_instcal_sexdaophot.py
|
Python
|
mit
| 31,293
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Redactor'
copyright = '2018, ttd'
author = 'ttd'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'press'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_favicon = '_static/favicon.png'
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {'**': ['util/sidetoc.html']}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinx_press_themedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sphinx_press_theme.tex', 'sphinx\\_press\\_theme Documentation',
'Eduardo Naufel Schettino', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sphinx_press_theme', 'sphinx_press_theme Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sphinx_press_theme', 'sphinx_press_theme Documentation',
author, 'sphinx_press_theme', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
|
testthedocs/redactor
|
docs/conf.py
|
Python
|
mit
| 4,942
|
import sys
import types
from minecraft_data.v1_8 import find_item_or_block, windows_list
from minecraft_data.v1_8 import windows as windows_by_id
from spock.mcdata import constants
from spock.utils import camel_case, snake_case
def make_slot_check(wanted):
"""
Creates and returns a function that takes a slot and checks
if it matches the wanted item.
:param wanted: function(Slot) or Slot or itemID or (itemID, metadata)
"""
if isinstance(wanted, types.FunctionType):
return wanted # just forward the slot check function
if isinstance(wanted, int):
item, meta = wanted, None
elif isinstance(wanted, Slot):
item, meta = wanted.item_id, wanted.damage
# TODO compare NBT
else: # wanted is list of (id, meta)
item, meta = wanted
return lambda slot: item == slot.item_id and meta in (None, slot.damage)
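# Illustrative checks (hypothetical item ids):
#   make_slot_check(35) matches any slot holding item id 35
#   make_slot_check((35, 6)) additionally requires damage/metadata 6
#   make_slot_check(lambda s: s.amount > 32) forwards the predicate unchanged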
# TODO move to mcdata.items
def apply_variation(item_dict, metadata):
if item_dict and metadata is not None and 'variations' in item_dict:
for variation in item_dict['variations']:
if variation['metadata'] == metadata:
# variants provide replacements for some fields
item_dict = item_dict.copy()
item_dict.update(variation)
return item_dict
# TODO no matching metadata was found, make it 0? None? leave blank?
return item_dict
def find_item_dict(item, metadata=None):
if metadata is None: # check for complex types
if isinstance(item, Slot):
item, metadata = item.item_id, item.damage
elif not isinstance(item, (int, str)):
# name_or_id is tuple of (item_id, metadata)
item, metadata = item
return apply_variation(find_item_or_block(item), metadata)
class Slot(object):
def __init__(self, window, slot_nr, id=constants.INV_ITEMID_EMPTY,
damage=0, amount=0, enchants=None):
self.window = window
self.slot_nr = slot_nr
self.item_id = id
self.damage = damage
self.amount = amount
self.nbt = enchants
def move_to_window(self, window, slot_nr):
self.window, self.slot_nr = window, slot_nr
@property
def item_dict(self):
# TODO cache find_item_dict?
return find_item_dict(self.item_id, self.damage) \
or {'name': 'unknown',
'id': self.item_id,
'metadata': self.damage,
'stackSize': 0,
}
@property
def max_amount(self):
return self.item_dict['stackSize']
@property
def name(self):
return self.item_dict['name']
@property
def is_empty(self):
# could also check self.item_id == constants.INV_ITEMID_EMPTY
return self.amount <= 0
def matches(self, other):
return make_slot_check(other)(self)
def stacks_with(self, other):
if self.item_id != other.item_id:
return False
if self.damage != other.damage:
return False
# raise NotImplementedError('Stacks might differ by NBT data: %s %s'
# % (self, other))
# if self.nbt != other.nbt: return False
# TODO implement stacking correctly (NBT data comparison)
return self.max_amount != 1
def get_dict(self):
""" Formats the slot for network packing. """
data = {'id': self.item_id}
if self.item_id != constants.INV_ITEMID_EMPTY:
data['damage'] = self.damage
data['amount'] = self.amount
if self.nbt is not None:
data['enchants'] = self.nbt
return data
def copy(self):
return Slot(self.window, self.slot_nr, self.item_id,
self.damage, self.amount, self.nbt)
def __bool__(self):
return not self.is_empty
def __repr__(self):
if self.is_empty:
return '<empty slot at %i in %s>' % (
self.slot_nr, self.window)
else:
attrs_with_name = {'name': self.name}
attrs_with_name.update(self.__dict__)
return '<Slot: %(amount)ix %(item_id)i:%(damage)i' \
' %(name)s at %(slot_nr)i in %(window)s>' \
% attrs_with_name
class SlotCursor(Slot):
def __init__(self, id=constants.INV_ITEMID_EMPTY, damage=0, amount=0,
enchants=None):
class CursorWindow(object): # TODO is there a cleaner way to do this?
window_id = constants.INV_WINID_CURSOR
def __repr__(self):
return 'CursorWindow()'
super(SlotCursor, self).__init__(
CursorWindow(), constants.INV_SLOT_NR_CURSOR,
id, damage, amount, enchants)
class BaseClick(object):
def get_packet(self, inv_plugin):
"""
Called by send_click() to prepare the sent packet.
Abstract method.
:param inv_plugin: inventory plugin instance
"""
raise NotImplementedError()
def apply(self, inv_plugin):
"""
Called by on_success().
Abstract method.
:param inv_plugin: inventory plugin instance
"""
raise NotImplementedError()
def on_success(self, inv_plugin, emit_set_slot):
"""
Called when the click was successful
and should be applied to the inventory.
:param inv_plugin: inventory plugin instance
:param emit_set_slot: function to signal a slot change,
should be InventoryPlugin().emit_set_slot
"""
self.dirty = set()
self.apply(inv_plugin)
for changed_slot in self.dirty:
emit_set_slot(changed_slot)
# helper methods, used by children
# all argument instances are modified in-place
def copy_slot_type(self, slot_from, slot_to):
slot_to.item_id, slot_to.damage = slot_from.item_id, slot_from.damage
slot_to.nbt = slot_from.nbt
self.mark_dirty(slot_to)
def swap_slots(self, slot_a, slot_b):
slot_a.item_id, slot_b.item_id = slot_b.item_id, slot_a.item_id
slot_a.damage, slot_b.damage = slot_b.damage, slot_a.damage
slot_a.amount, slot_b.amount = slot_b.amount, slot_a.amount
slot_a.nbt, slot_b.nbt = slot_b.nbt, slot_a.nbt
self.mark_dirty(slot_a)
self.mark_dirty(slot_b)
def transfer(self, from_slot, to_slot, max_amount):
transfer_amount = min(max_amount, from_slot.amount,
to_slot.max_amount - to_slot.amount)
if transfer_amount <= 0:
return
self.copy_slot_type(from_slot, to_slot)
to_slot.amount += transfer_amount
from_slot.amount -= transfer_amount
self.cleanup_if_empty(from_slot)
def cleanup_if_empty(self, slot):
if slot.is_empty:
empty_slot_at_same_position = Slot(slot.window, slot.slot_nr)
self.copy_slot_type(empty_slot_at_same_position, slot)
self.mark_dirty(slot)
def mark_dirty(self, slot):
self.dirty.add(slot)
class SingleClick(BaseClick):
def __init__(self, slot, button=constants.INV_BUTTON_LEFT):
self.slot = slot
self.button = button
if button not in (constants.INV_BUTTON_LEFT,
constants.INV_BUTTON_RIGHT):
raise NotImplementedError(
'Clicking with button %s not implemented' % button)
def get_packet(self, inv_plugin):
return {
'slot': self.slot.slot_nr,
'button': self.button,
'mode': 0,
'clicked_item': self.slot.get_dict(),
}
def apply(self, inv_plugin):
clicked = self.slot
cursor = inv_plugin.cursor_slot
if self.button == constants.INV_BUTTON_LEFT:
if clicked.stacks_with(cursor):
self.transfer(cursor, clicked, cursor.amount)
else:
self.swap_slots(cursor, clicked)
elif self.button == constants.INV_BUTTON_RIGHT:
if cursor.item_id == constants.INV_ITEMID_EMPTY:
# transfer half, round up
self.transfer(clicked, cursor, (clicked.amount + 1) // 2)
elif clicked.is_empty or clicked.stacks_with(cursor):
self.transfer(cursor, clicked, 1)
else: # slot items do not stack
self.swap_slots(cursor, clicked)
else:
raise NotImplementedError(
'Clicking with button %s not implemented' % self.button)
class DropClick(BaseClick):
def __init__(self, slot, drop_stack=False):
self.slot = slot
self.drop_stack = drop_stack
def get_packet(self, inv_plugin):
if self.slot == inv_plugin.active_slot:
slot_nr = constants.INV_OUTSIDE_WINDOW # drop cursor slot
elif inv_plugin.cursor_slot.item_id != constants.INV_ITEMID_EMPTY:
return None # can't drop while holding an item
else: # default case
slot_nr = self.slot.slot_nr
return {
'slot': slot_nr,
'button': 1 if self.drop_stack else 0,
'mode': 4,
'clicked_item': inv_plugin.cursor_slot.get_dict(),
}
def apply(self, inv_plugin):
if inv_plugin.cursor_slot.is_empty:
if self.drop_stack:
self.slot.amount = 0
else:
self.slot.amount -= 1
self.cleanup_if_empty(self.slot)
# else: cursor not empty, can't drop while holding an item
class BaseWindow(object):
""" Base class for all inventory types. """
# the arguments must have the same names as the keys in the packet dict
def __init__(self, window_id, title, slot_count,
inv_type=None, persistent_slots=None, eid=None):
assert not inv_type or inv_type == self.inv_type, \
'inv_type differs: %s instead of %s' % (inv_type, self.inv_type)
self.is_storage = slot_count > 0 # same after re-opening window
if not self.is_storage: # get number of temporary slots
window_dict = windows_by_id[inv_type]
if 'slots' in window_dict:
slot_count = max(slot['index'] + slot.get('size', 1)
for slot in window_dict['slots'])
self.window_id = window_id
self.title = title
self.eid = eid # used for horses
# window slots vary, but always end with main inventory and hotbar
# create own slots, ...
self.slots = [Slot(self, slot_nr) for slot_nr in range(slot_count)]
# ... append persistent slots (main inventory and hotbar)
if persistent_slots is None:
for slot_nr in range(constants.INV_SLOTS_PERSISTENT):
self.slots.append(Slot(self, slot_nr + slot_count))
else: # persistent slots have to be moved from other inventory
moved_slots = persistent_slots[-constants.INV_SLOTS_PERSISTENT:]
for slot_nr, moved_slot in enumerate(moved_slots):
moved_slot.move_to_window(self, slot_nr + slot_count)
self.slots.append(moved_slot)
# additional info dependent on inventory type,
# dynamically updated by server
self.properties = {}
def __repr__(self):
return '%s(window_id=%i, title=%s, slot_count=%i)' % (
self.__class__.__name__,
self.window_id, self.title, len(self.slots))
@property
def persistent_slots(self):
return self.slots[-constants.INV_SLOTS_PERSISTENT:]
@property
def inventory_slots(self):
return self.slots[
-constants.INV_SLOTS_PERSISTENT:-constants.INV_SLOTS_HOTBAR]
@property
def hotbar_slots(self):
return self.slots[-constants.INV_SLOTS_HOTBAR:]
@property
def window_slots(self):
"""
All slots except inventory and hotbar.
Useful for searching.
"""
return self.slots[:-constants.INV_SLOTS_PERSISTENT]
# Helpers for creating the window classes
def _make_window(window_dict):
"""
Creates a new class for that window and registers it at this module.
"""
window_dict = window_dict.copy()
cls_name = '%sWindow' % camel_case(str(window_dict['name']))
bases = (BaseWindow,)
attrs = {
'__module__': sys.modules[__name__],
'name': str(window_dict['name']),
'inv_type': str(window_dict['id']),
}
# creates function-local index and size variables
def make_slot_method(index, size=1):
if size == 1:
return lambda self: self.slots[index]
else:
return lambda self: self.slots[index:(index + size)]
for slots in window_dict.get('slots', []):
index = slots['index']
size = slots.get('size', 1)
attr_name = snake_case(str(slots['name']))
attr_name += '_slot' if size == 1 else '_slots'
slots_method = make_slot_method(index, size)
slots_method.__name__ = attr_name
attrs[attr_name] = property(slots_method)
for i, prop_name in enumerate(window_dict.get('properties', [])):
def make_prop_method(i):
return lambda self: self.properties[i]
prop_method = make_prop_method(i)
prop_name = snake_case(str(prop_name))
prop_method.__name__ = prop_name
attrs[prop_name] = property(prop_method)
cls = type(cls_name, bases, attrs)
assert not hasattr(sys.modules[__name__], cls_name), \
'Window "%s" already registered at %s' % (cls_name, __name__)
setattr(sys.modules[__name__], cls_name, cls)
return cls
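# Illustrative input shape (actual dicts come from minecraft_data): an entry
# like {'id': 'minecraft:chest', 'name': 'Chest', 'slots': [...]} would
# register a ChestWindow class here, with inv_type == 'minecraft:chest' and
# one generated property per named slot range.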
# look up a class by window type ID, e.g. when opening windows
inv_types = {}
def _create_windows():
for window in windows_list:
cls = _make_window(window)
inv_types[cls.inv_type] = cls
# Create all window classes from minecraft_data
_create_windows()
# get the PlayerWindow, which was just generated during runtime
_player_window = sys.modules[__name__].PlayerWindow
# override constructor of PlayerWindow
def _player_init(self, *args, **kwargs):
super(_player_window, self).__init__(
constants.INV_WINID_PLAYER, self.name, constants.INV_SLOTS_PLAYER,
*args, **kwargs)
setattr(_player_window, '__init__', _player_init)
|
MrSwiss/SpockBot
|
spock/mcdata/windows.py
|
Python
|
mit
| 14,390
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import PolicyClientConfiguration
from .operations import PolicyAssignmentsOperations, PolicyDefinitionsOperations, PolicySetDefinitionsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PolicyClient:
"""To manage and control access to your resources, you can define customized policies and assign them at a scope.
:ivar policy_assignments: PolicyAssignmentsOperations operations
:vartype policy_assignments:
azure.mgmt.resource.policy.v2018_03_01.aio.operations.PolicyAssignmentsOperations
:ivar policy_definitions: PolicyDefinitionsOperations operations
:vartype policy_definitions:
azure.mgmt.resource.policy.v2018_03_01.aio.operations.PolicyDefinitionsOperations
:ivar policy_set_definitions: PolicySetDefinitionsOperations operations
:vartype policy_set_definitions:
azure.mgmt.resource.policy.v2018_03_01.aio.operations.PolicySetDefinitionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = PolicyClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.policy_assignments = PolicyAssignmentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.policy_definitions = PolicyDefinitionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.policy_set_definitions = PolicySetDefinitionsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "PolicyClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
Azure/azure-sdk-for-python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2018_03_01/aio/_policy_client.py
|
Python
|
mit
| 4,595
|
# 04_thermomether_f.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
from Tkinter import * # tkinter provides the graphical user interface (GUI)
import RPi.GPIO as GPIO
import time, math
C = 0.38 # uF - Tweak this value around 0.33 to improve accuracy
R1 = 1000 # Ohms
B = 3800.0 # The thermistor constant - change this for a different thermistor
R0 = 1000.0 # The resistance of the thermistor at 25C - change for a different thermistor
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# This project uses a thermistor, a component whose resistance varies with the temperature.
# To measure its resistance, the code records the time it takes for a capacitor to fill
# when supplied by a current passing through the resistor. The lower the resistance the faster
# it fills up.
#
# You can think of a capacitor as a tank of electricity, and as it fills with charge, the voltage
# across it increases. We cannot measure that voltage directly, because the Raspberry Pi
# does not have an analog to digital convertor (ADC or analog input). However, we can time how long it
# takes for the capacitor to fill with charge to the extent that it gets above the 1.65V or so
# that counts as being a high digital input.
#
# For more information on this technique take a look at:
# learn.adafruit.com/basic-resistor-sensor-reading-on-raspberry-pi
# The code here is based on that in the Raspberry Pi Cookbook (Recipes 12.1 to 12.3)
# Pin a charges the capacitor through a fixed 1k resistor and the thermistor in series
# pin b discharges the capacitor through a fixed 1k resistor
a_pin = 18
b_pin = 23
# empty the capacitor ready to start filling it up
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.01)
# return the time taken for the voltage on the capacitor to count as a digital input HIGH
# that means around 1.65V
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
GPIO.output(a_pin, True)
t1 = time.time()
while not GPIO.input(b_pin):
pass
t2 = time.time()
return (t2 - t1) * 1000000 # microseconds
# Take an analog reading as the time taken to charge after first discharging the capacitor
def analog_read():
discharge()
t = charge_time()
discharge()
return t
# Convert the time taken to charge the capacitor into a value of resistance
# To reduce errors, do it lots of times and take the average.
def read_resistance():
n = 10
    total = 0
for i in range(0, n):
total = total + analog_read()
t = total / float(n)
T = t * 0.632 * 3.3
r = (T / C) - R1
return r
def read_temp_c():
R = read_resistance()
t0 = 273.15 # 0 deg C in K
t25 = t0 + 25.0 # 25 deg C in K
    # Simplified (B-parameter) Steinhart-Hart equation: 1/T = 1/T25 + (1/B)*ln(R/R0)
inv_T = 1/t25 + 1/B * math.log(R/R0)
T = (1/inv_T - t0)
return T
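# A quick sanity check might look like this (hedged sketch; assumes the
# thermistor circuit is wired to BCM pins 18 and 23 as configured above):
# print('%.1f degrees C' % read_temp_c())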
# group together all of the GUI code into a class called App
class App:
# this function gets called when the app is created
def __init__(self, master):
self.master = master
# A frame holds the various GUI controls
frame = Frame(master)
frame.pack()
label = Label(frame, text='Temp F', font=("Helvetica", 32))
label.grid(row=0)
self.reading_label = Label(frame, text='12.34', font=("Helvetica", 110))
self.reading_label.grid(row=1)
self.update_reading()
# Update the temperature reading
def update_reading(self):
temp_c = read_temp_c()
temp_f = temp_c * 9.0 / 5.0 + 32
reading_str = "{:.2f}".format(temp_f)
self.reading_label.configure(text=reading_str)
self.master.after(500, self.update_reading) # schedule yourself to be called after 0.5 seconds
# Set the GUI running, give the window a title, size and position
root = Tk()
root.wm_title('Thermometer')
app = App(root)
root.geometry("400x300+0+0")
try:
root.mainloop()
finally:
print("Cleaning up")
GPIO.cleanup()
|
simonmonk/pi_starter_kit
|
04_thermometer_f.py
|
Python
|
mit
| 4,117
|
from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
import *
from L500analysis.plotting.profiles.tools.select_profiles \
import nu_cut, prune_dict
from L500analysis.utils.constants import rbins
from derived_field_functions import *
color = matplotlib.cm.afmhot_r
aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35]
nu_threshold = {0:[1,1.7],1:[1.7,2.3],2:[2.3, 2.7]} # 1, 1.7, 2.3, 2.7
nu_threshold_key = 0
nu_label = r"%0.1f$\leq\nu_{500c}\leq$%0.1f"%(nu_threshold[nu_threshold_key][0],nu_threshold[nu_threshold_key][1])
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'
profiles_list = ['S_mw', 'r_mid',
'S_mw/S500c',
'R/R500c']
halo_properties_list=['r500c','M_total_500c','nu_500c']
Sratio=r"$\tilde{K}=K(R)/K_{500c}$"
fSz1=r"$\tilde{K}/\tilde{K}(z=1)$"
pa = PlotAxes(figname='Kmw_r500c_nu%01d'%nu_threshold_key,
axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]],
axes_labels=[Sratio,fSz1],
xlabel=r"$R/R_{500c}$",
ylog=[True,False],
xlim=(0.2,5),
ylims=[(0.1,10.1),(0.6,1.4)])
Smw={}
linestyles = ['-']
for aexp in aexps :
cldata = GetClusterData(aexp=aexp,db_name=db_name,
db_dir=db_dir,
profiles_list=profiles_list,
halo_properties_list=halo_properties_list)
nu_cut_hids = nu_cut(nu=cldata['nu_500c'], threshold=nu_threshold[nu_threshold_key])
pruned_profiles = prune_dict(d=cldata['S_mw/S500c'],k=nu_cut_hids)
Smw[aexp] = calculate_profiles_mean_variance(pruned_profiles)
pa.axes[Sratio].plot( rbins, Smw[aexp]['mean'],color=color(aexp),ls='-',
label="$z=%3.1f$" % aexp2redshift(aexp))
for aexp in aexps :
fractional_evolution = get_profiles_division_mean_variance(
mean_profile1=Smw[aexp]['mean'],
var_profile1=Smw[aexp]['var'],
mean_profile2=Smw[0.5]['mean'],
var_profile2=Smw[0.5]['var'],
)
pa.axes[fSz1].plot( rbins, fractional_evolution['mean'],
color=color(aexp),ls='-')
pa.axes[Sratio].tick_params(labelsize=12)
pa.axes[fSz1].tick_params(labelsize=12)
pa.axes[fSz1].set_yticks(arange(0.6,1.4,0.2))
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12
pa.set_legend(axes_label=Sratio,ncol=3,loc='best', frameon=False)
pa.color_legend_texts(axes_label=Sratio)
pa.axes[Sratio].annotate(nu_label, xy=(.3, 2.5), xytext=(.3, 6.),
)
pa.savefig()
|
cavestruz/L500analysis
|
plotting/profiles/K_evolution/plot_K_nu_binned_r500c.py
|
Python
|
mit
| 2,878
|
class Solution:
def hIndex(self, citations):
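        # Counting approach, O(n) time and space: at_least[i] will hold the
        # number of papers cited at least i times; the h-index is the largest
        # i with at_least[i] >= i. Citation counts above n are clamped to
        # n + 1, since the h-index can never exceed n.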
n = len(citations)
at_least = [0] * (n + 2)
for c in citations:
at_least[min(c, n + 1)] += 1
for i in xrange(n, -1, -1):
at_least[i] += at_least[i + 1]
for i in xrange(n, -1, -1):
if at_least[i] >= i:
return i
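# Worked example: Solution().hIndex([3, 0, 6, 1, 5]) returns 3, since three
# papers have at least 3 citations each.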
|
rahul-ramadas/leetcode
|
h-index/Solution.55533513.py
|
Python
|
mit
| 366
|
__version__ = '5.0.0b3'
|
ronekko/chainer
|
chainer/_version.py
|
Python
|
mit
| 24
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pastas.utils import timestep_weighted_resample, timestep_weighted_resample_fast
# make a daily series from monthly (mostly) values, without specifying the
# frequency of the original series
#series0 = pd.read_csv("data/tswr1.csv", index_col=0, parse_dates=True,
# squeeze=True)
index = pd.date_range('2000-1-1','2001-1-1',freq='MS')
series0 = pd.Series(np.random.rand(len(index)),index)
series = series0.resample('d').mean()
series = timestep_weighted_resample(series0, series.index)
series2 = timestep_weighted_resample_fast(series0, 'd')
plt.figure()
series0.plot(label='Monthly (mostly)')
series.plot(label='Daily')
series2.plot(label='Daily (fast)', linestyle='--')
plt.legend()
# make a precipitation-series at 0:00 from values at 9:00
#series0 = pd.read_csv("data/tswr2.csv", index_col=0, parse_dates=True,
# squeeze=True)
index = pd.date_range('2000-1-1 9:00','2000-1-10 9:00')
series0 = pd.Series(np.random.rand(len(index)),index)
series = timestep_weighted_resample(series0, series0.index.normalize())
series2 = timestep_weighted_resample_fast(series0, 'd')
plt.figure()
series0.plot(label='Original (9:00)')
series.plot(label='Resampled (0:00)')
series2.plot(label='Resampled (0:00, fast)', linestyle='--')
plt.legend()
|
gwtsa/gwtsa
|
examples/example_timestep_weighted_resample.py
|
Python
|
mit
| 1,352
|
import os
import sys
import inspect
import json
def attach_parameters(obj, params):
"""
    Attaches common parameters to the data provider
"""
obj.timestamp = params.get('timestamp')
obj.instrument = params.get('instrument')
obj.pricebar = params.get('pricebar')
if hasattr(obj, 'algorithm') and not getattr(obj, 'algorithm'):
obj.algorithm = params.get('algorithm')
def importall(projpath):
for path, dirs, files in os.walk(projpath):
pathparts = path.split('/')
if '.git' in pathparts or 'venv' in pathparts or 'virtualenv' in pathparts or 'site-packages' in pathparts:
continue
sys.path.insert(0, path)
for f in files:
if not f.endswith('.py'):
continue
try:
filename = f
modname = f.replace('.py', '')
mod = __import__(modname, globals(), locals(), [''])
except Exception:
continue
sys.path.pop(0)
def _get_subclass_info(parentcls):
ls = []
for cls in parentcls.__subclasses__():
ls.append({
'name': cls.__name__,
'methods': cls.methods_implemented()
})
return {
parentcls.__name__: ls
}
def get_implementation_info(projpath):
from actors import Calculator, Algorithm
if not projpath:
raise AttributeError('get_implementation_info requires the path of the project')
importall(projpath)
info = dict()
info.update(_get_subclass_info(Calculator))
info.update(_get_subclass_info(Algorithm))
return info
def get_user_implementation_class(mod, clsname, modulename='quantnode.actors'):
"""
Get the user's implementation of the class corresponding to clsname
"""
if not clsname:
return None
for attrname in dir(mod):
if '__' in attrname:
continue
attr = getattr(mod, attrname)
if not hasattr(attr, '__bases__'):
continue
for parent in attr.__bases__:
if getattr(parent, '__module__', '') == modulename and getattr(parent, '__name__', '') == clsname:
return attr
return None
def find_implementation(repopath, clsname, modulename='quantnode.actors'):
useralgo_cls = None
for path, dirs, files in os.walk(repopath):
pathparts = path.split('/')
if '.git' in pathparts or 'venv' in pathparts or 'virtualenv' in pathparts or 'site-packages' in pathparts:
continue
sys.path.insert(0, path)
for f in files:
if not f.endswith('.py'):
continue
try:
filename = f
modname = f.replace('.py', '')
mod = __import__(modname, globals(), locals(), [''])
useralgo_cls = get_user_implementation_class(mod, clsname, modulename = modulename)
if useralgo_cls:
break
except Exception, e:
continue
sys.path.pop(0)
if useralgo_cls:
break
return useralgo_cls
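# Hedged usage sketch (repo path and class name are hypothetical):
# algo_cls = find_implementation('/repos/my-strategy', 'Algorithm')
# if algo_cls:
#     algo = algo_cls()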
def get_error_info(err):
"""
    Extracts details (source lines, locals, line numbers) from the innermost
    frame of the active exception and returns them as a dict
"""
_, _, tb = sys.exc_info()
# user_error = False
lineno = None
codelines = ''
filename = ''
while tb.tb_next is not None:
tb = tb.tb_next
lineno = tb.tb_lineno
codelines = inspect.getsource(tb.tb_frame).replace('\r\n', '\n')
func_firstlineno = tb.tb_frame.f_code.co_firstlineno
filename = inspect.getsourcefile(tb.tb_frame).split('/')[-1][:50]
func_lineno = lineno - func_firstlineno
func_name = tb.tb_frame.f_code.co_name[:100]
flocals = {}
for key, value in tb.tb_frame.f_locals.items():
flocals[key] = str(value)
return {
'codelines': codelines,
'filename': filename,
'func_name': func_name,
'func_lineno': func_lineno,
'f_locals': json.dumps(flocals),
'message': err.message,
'lineno': lineno
}
|
quantnode/quantnode-client
|
quantnode/helpers.py
|
Python
|
mit
| 4,129
|
from .aggregates import ConditionalSum, ConditionalCount # noqa
|
anentropic/django-conditional-aggregates
|
djconnagg/__init__.py
|
Python
|
mit
| 65
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from contextlib import closing
import os
import re
from tempfile import NamedTemporaryFile
import requests
from beets import plugins
from beets import importer
from beets import ui
from beets import util
from beets import config
from beets.util.artresizer import ArtResizer
try:
import itunes
HAVE_ITUNES = True
except ImportError:
HAVE_ITUNES = False
IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg']
CONTENT_TYPES = ('image/jpeg', 'image/png')
DOWNLOAD_EXTENSION = '.jpg'
CANDIDATE_BAD = 0
CANDIDATE_EXACT = 1
CANDIDATE_DOWNSCALE = 2
def _logged_get(log, *args, **kwargs):
"""Like `requests.get`, but logs the effective URL to the specified
`log` at the `DEBUG` level.
Use the optional `message` parameter to specify what to log before
the URL. By default, the string is "getting URL".
Also sets the User-Agent header to indicate beets.
"""
# Use some arguments with the `send` call but most with the
# `Request` construction. This is a cheap, magic-filled way to
# emulate `requests.get` or, more pertinently,
# `requests.Session.request`.
req_kwargs = kwargs
send_kwargs = {}
for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'):
if arg in kwargs:
send_kwargs[arg] = req_kwargs.pop(arg)
# Our special logging message parameter.
if 'message' in kwargs:
message = kwargs.pop('message')
else:
message = 'getting URL'
req = requests.Request('GET', *args, **req_kwargs)
with requests.Session() as s:
s.headers = {'User-Agent': 'beets'}
prepped = s.prepare_request(req)
log.debug('{}: {}', message, prepped.url)
return s.send(prepped, **send_kwargs)
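# Hypothetical call: _logged_get(log, url, stream=True,
# message='downloading image') would log "downloading image: <effective URL>"
# and return the (streamed) response.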
class RequestMixin(object):
"""Adds a Requests wrapper to the class that uses the logger, which
must be named `self._log`.
"""
def request(self, *args, **kwargs):
"""Like `requests.get`, but uses the logger `self._log`.
See also `_logged_get`.
"""
return _logged_get(self._log, *args, **kwargs)
# ART SOURCES ################################################################
class ArtSource(RequestMixin):
def __init__(self, log, config):
self._log = log
self._config = config
def get(self, album):
raise NotImplementedError()
class CoverArtArchive(ArtSource):
"""Cover Art Archive"""
URL = 'http://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front'
def get(self, album):
"""Return the Cover Art Archive and Cover Art Archive release group URLs
using album MusicBrainz release ID and release group ID.
"""
if album.mb_albumid:
yield self.URL.format(mbid=album.mb_albumid)
if album.mb_releasegroupid:
yield self.GROUP_URL.format(mbid=album.mb_releasegroupid)
class Amazon(ArtSource):
URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
INDICES = (1, 2)
def get(self, album):
"""Generate URLs using Amazon ID (ASIN) string.
"""
if album.asin:
for index in self.INDICES:
yield self.URL % (album.asin, index)
class AlbumArtOrg(ArtSource):
"""AlbumArt.org scraper"""
URL = 'http://www.albumart.org/index_detail.php'
PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def get(self, album):
"""Return art URL from AlbumArt.org using album ASIN.
"""
if not album.asin:
return
# Get the page from albumart.org.
try:
resp = self.request(self.URL, params={'asin': album.asin})
self._log.debug(u'scraped art URL: {0}', resp.url)
except requests.RequestException:
self._log.debug(u'error scraping art page')
return
# Search the page for the image URL.
m = re.search(self.PAT, resp.text)
if m:
image_url = m.group(1)
yield image_url
else:
self._log.debug(u'no image found on page')
class GoogleImages(ArtSource):
URL = u'https://www.googleapis.com/customsearch/v1'
def get(self, album):
"""Return art URL from google custom search engine
        given an album title and artist.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ',' + album.album).encode('utf-8')
response = self.request(self.URL, params={
'key': self._config['google_key'].get(),
'cx': self._config['google_engine'].get(),
'q': search_string,
'searchType': 'image'
})
# Get results using JSON.
try:
data = response.json()
except ValueError:
self._log.debug(u'google: error loading response: {}'
.format(response.text))
return
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug(u'google fetchart error: {0}', reason)
return
if 'items' in data.keys():
for item in data['items']:
yield item['link']
class ITunesStore(ArtSource):
# Art from the iTunes Store.
def get(self, album):
"""Return art URL from iTunes Store given an album title.
"""
if not (album.albumartist and album.album):
return
search_string = (album.albumartist + ' ' + album.album).encode('utf-8')
try:
# Isolate bugs in the iTunes library while searching.
try:
results = itunes.search_album(search_string)
except Exception as exc:
self._log.debug('iTunes search failed: {0}', exc)
return
# Get the first match.
if results:
itunes_album = results[0]
else:
                self._log.debug('iTunes search for {0!r} got no results',
search_string)
return
if itunes_album.get_artwork()['100']:
small_url = itunes_album.get_artwork()['100']
big_url = small_url.replace('100x100', '1200x1200')
yield big_url
else:
self._log.debug(u'album has no artwork in iTunes Store')
except IndexError:
self._log.debug(u'album not found in iTunes Store')
class Wikipedia(ArtSource):
# Art from Wikipedia (queried through DBpedia)
DBPEDIA_URL = 'http://dbpedia.org/sparql'
WIKIPEDIA_URL = 'http://en.wikipedia.org/w/api.php'
SPARQL_QUERY = '''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dbpprop: <http://dbpedia.org/property/>
PREFIX owl: <http://dbpedia.org/ontology/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT DISTINCT ?pageId ?coverFilename WHERE {{
?subject owl:wikiPageID ?pageId .
?subject dbpprop:name ?name .
?subject rdfs:label ?label .
{{ ?subject dbpprop:artist ?artist }}
UNION
{{ ?subject owl:artist ?artist }}
{{ ?artist foaf:name "{artist}"@en }}
UNION
{{ ?artist dbpprop:name "{artist}"@en }}
?subject rdf:type <http://dbpedia.org/ontology/Album> .
?subject dbpprop:cover ?coverFilename .
FILTER ( regex(?name, "{album}", "i") )
}}
Limit 1'''
def get(self, album):
if not (album.albumartist and album.album):
return
# Find the name of the cover art filename on DBpedia
cover_filename, page_id = None, None
dbpedia_response = self.request(
self.DBPEDIA_URL,
params={
'format': 'application/sparql-results+json',
'timeout': 2500,
'query': self.SPARQL_QUERY.format(
artist=album.albumartist.title(), album=album.album)
},
headers={'content-type': 'application/json'},
)
try:
data = dbpedia_response.json()
results = data['results']['bindings']
if results:
cover_filename = 'File:' + results[0]['coverFilename']['value']
page_id = results[0]['pageId']['value']
else:
self._log.debug('wikipedia: album not found on dbpedia')
except (ValueError, KeyError, IndexError):
self._log.debug('wikipedia: error scraping dbpedia response: {}',
dbpedia_response.text)
# Ensure we have a filename before attempting to query wikipedia
if not (cover_filename and page_id):
return
# DBPedia sometimes provides an incomplete cover_filename, indicated
# by the filename having a space before the extension, e.g., 'foo .bar'
# An additional Wikipedia call can help to find the real filename.
# This may be removed once the DBPedia issue is resolved, see:
# https://github.com/dbpedia/extraction-framework/issues/396
if ' .' in cover_filename and \
'.' not in cover_filename.split(' .')[-1]:
self._log.debug(
'wikipedia: dbpedia provided incomplete cover_filename'
)
lpart, rpart = cover_filename.rsplit(' .', 1)
# Query all the images in the page
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'images',
'pageids': page_id,
},
headers={'content-type': 'application/json'},
)
# Try to see if one of the images on the pages matches our
        # incomplete cover_filename
try:
data = wikipedia_response.json()
results = data['query']['pages'][page_id]['images']
for result in results:
if re.match(re.escape(lpart) + r'.*?\.' + re.escape(rpart),
result['title']):
cover_filename = result['title']
break
except (ValueError, KeyError):
self._log.debug(
'wikipedia: failed to retrieve a cover_filename'
)
return
# Find the absolute url of the cover art on Wikipedia
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
'format': 'json',
'action': 'query',
'continue': '',
'prop': 'imageinfo',
'iiprop': 'url',
'titles': cover_filename.encode('utf-8'),
},
headers={'content-type': 'application/json'},
)
try:
data = wikipedia_response.json()
results = data['query']['pages']
for _, result in results.iteritems():
image_url = result['imageinfo'][0]['url']
yield image_url
except (ValueError, KeyError, IndexError):
self._log.debug('wikipedia: error scraping imageinfo')
return
class FileSystem(ArtSource):
"""Art from the filesystem"""
@staticmethod
def filename_priority(filename, cover_names):
"""Sort order for image names.
Return indexes of cover names found in the image filename. This
means that images with lower-numbered and more keywords will have
higher priority.
"""
return [idx for (idx, x) in enumerate(cover_names) if x in filename]
def get(self, path, cover_names, cautious):
"""Look for album art files in a specified directory.
"""
if not os.path.isdir(path):
return
# Find all files that look like images in the directory.
images = []
for fn in os.listdir(path):
for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith(b'.' + ext.encode('utf8')) and \
os.path.isfile(os.path.join(path, fn)):
images.append(fn)
# Look for "preferred" filenames.
images = sorted(images,
key=lambda x: self.filename_priority(x, cover_names))
cover_pat = br"(\b|_)({0})(\b|_)".format(b'|'.join(cover_names))
for fn in images:
if re.search(cover_pat, os.path.splitext(fn)[0], re.I):
self._log.debug(u'using well-named art file {0}',
util.displayable_path(fn))
return os.path.join(path, fn)
# Fall back to any image in the folder.
if images and not cautious:
self._log.debug(u'using fallback art file {0}',
util.displayable_path(images[0]))
return os.path.join(path, images[0])
# Try each source in turn.
SOURCES_ALL = [u'coverart', u'itunes', u'amazon', u'albumart',
u'wikipedia', u'google']
ART_SOURCES = {
u'coverart': CoverArtArchive,
u'itunes': ITunesStore,
u'albumart': AlbumArtOrg,
u'amazon': Amazon,
u'wikipedia': Wikipedia,
u'google': GoogleImages,
}
# PLUGIN LOGIC ###############################################################
class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
def __init__(self):
super(FetchArtPlugin, self).__init__()
self.config.add({
'auto': True,
'minwidth': 0,
'maxwidth': 0,
'enforce_ratio': False,
'remote_priority': False,
'cautious': False,
'cover_names': ['cover', 'front', 'art', 'album', 'folder'],
'sources': ['coverart', 'itunes', 'amazon', 'albumart'],
'google_key': None,
'google_engine': u'001442825323518660753:hrh5ch1gjzm',
})
self.config['google_key'].redact = True
# Holds paths to downloaded images between fetching them and
# placing them in the filesystem.
self.art_paths = {}
self.minwidth = self.config['minwidth'].get(int)
self.maxwidth = self.config['maxwidth'].get(int)
self.enforce_ratio = self.config['enforce_ratio'].get(bool)
if self.config['auto']:
# Enable two import hooks when fetching is enabled.
self.import_stages = [self.fetch_art]
self.register_listener('import_task_files', self.assign_art)
available_sources = list(SOURCES_ALL)
if not HAVE_ITUNES and u'itunes' in available_sources:
available_sources.remove(u'itunes')
if not self.config['google_key'].get() and \
u'google' in available_sources:
available_sources.remove(u'google')
sources_name = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
self.sources = [ART_SOURCES[s](self._log, self.config)
for s in sources_name]
self.fs_source = FileSystem(self._log, self.config)
# Asynchronous; after music is added to the library.
def fetch_art(self, session, task):
"""Find art for the album being imported."""
if task.is_album: # Only fetch art for full albums.
if task.album.artpath and os.path.isfile(task.album.artpath):
# Album already has art (probably a re-import); skip it.
return
if task.choice_flag == importer.action.ASIS:
# For as-is imports, don't search Web sources for art.
local = True
elif task.choice_flag == importer.action.APPLY:
# Search everywhere for art.
local = False
else:
# For any other choices (e.g., TRACKS), do nothing.
return
path = self.art_for_album(task.album, task.paths, local)
if path:
self.art_paths[task] = path
# Synchronous; after music files are put in place.
def assign_art(self, session, task):
"""Place the discovered art in the filesystem."""
if task in self.art_paths:
path = self.art_paths.pop(task)
album = task.album
src_removed = (config['import']['delete'].get(bool) or
config['import']['move'].get(bool))
album.set_art(path, not src_removed)
album.store()
if src_removed:
task.prune(path)
# Manual album art fetching.
def commands(self):
cmd = ui.Subcommand('fetchart', help='download album art')
cmd.parser.add_option('-f', '--force', dest='force',
action='store_true', default=False,
help='re-download art when already present')
def func(lib, opts, args):
self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force)
cmd.func = func
return [cmd]
# Utilities converted from functions to methods on logging overhaul
def _fetch_image(self, url):
"""Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None.
"""
try:
with closing(self.request(url, stream=True,
message='downloading image')) as resp:
if 'Content-Type' not in resp.headers \
or resp.headers['Content-Type'] not in CONTENT_TYPES:
self._log.debug(
'not a supported image: {}',
resp.headers.get('Content-Type') or 'no content type',
)
return None
# Generate a temporary file with the correct extension.
with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION,
delete=False) as fh:
for chunk in resp.iter_content(chunk_size=1024):
fh.write(chunk)
self._log.debug(u'downloaded art to: {0}',
util.displayable_path(fh.name))
return fh.name
except (IOError, requests.RequestException, TypeError) as exc:
# Handling TypeError works around a urllib3 bug:
# https://github.com/shazow/urllib3/issues/556
self._log.debug('error fetching art: {}', exc)
return None
def _is_valid_image_candidate(self, candidate):
"""Determine whether the given candidate artwork is valid based on
its dimensions (width and ratio).
Return `CANDIDATE_BAD` if the file is unusable.
Return `CANDIDATE_EXACT` if the file is usable as-is.
Return `CANDIDATE_DOWNSCALE` if the file must be resized.
"""
if not candidate:
return CANDIDATE_BAD
if not (self.enforce_ratio or self.minwidth or self.maxwidth):
return CANDIDATE_EXACT
# get_size returns None if no local imaging backend is available
size = ArtResizer.shared.get_size(candidate)
self._log.debug('image size: {}', size)
if not size:
self._log.warning(u'Could not get size of image (please see '
u'documentation for dependencies). '
u'The configuration options `minwidth` and '
u'`enforce_ratio` may be violated.')
return CANDIDATE_EXACT
# Check minimum size.
if self.minwidth and size[0] < self.minwidth:
self._log.debug('image too small ({} < {})',
size[0], self.minwidth)
return CANDIDATE_BAD
# Check aspect ratio.
if self.enforce_ratio and size[0] != size[1]:
self._log.debug('image is not square ({} != {})',
size[0], size[1])
return CANDIDATE_BAD
# Check maximum size.
if self.maxwidth and size[0] > self.maxwidth:
self._log.debug('image needs resizing ({} > {})',
size[0], self.maxwidth)
return CANDIDATE_DOWNSCALE
return CANDIDATE_EXACT
def art_for_album(self, album, paths, local_only=False):
"""Given an Album object, returns a path to downloaded art for the
album (or None if no art is found). If `maxwidth`, then images are
resized to this maximum pixel size. If `local_only`, then only local
image files from the filesystem are returned; no network requests
are made.
"""
out = None
check = None
# Local art.
cover_names = self.config['cover_names'].as_str_seq()
cover_names = map(util.bytestring_path, cover_names)
cautious = self.config['cautious'].get(bool)
if paths:
for path in paths:
candidate = self.fs_source.get(path, cover_names, cautious)
check = self._is_valid_image_candidate(candidate)
if check:
out = candidate
self._log.debug('found local image {}', out)
break
# Web art sources.
remote_priority = self.config['remote_priority'].get(bool)
if not local_only and (remote_priority or not out):
for url in self._source_urls(album):
if self.maxwidth:
url = ArtResizer.shared.proxy_url(self.maxwidth, url)
candidate = self._fetch_image(url)
check = self._is_valid_image_candidate(candidate)
if check:
out = candidate
self._log.debug('using remote image {}', out)
break
if self.maxwidth and out and check == CANDIDATE_DOWNSCALE:
out = ArtResizer.shared.resize(self.maxwidth, out)
return out
def batch_fetch_art(self, lib, albums, force):
"""Fetch album art for each of the albums. This implements the manual
fetchart CLI command.
"""
for album in albums:
if album.artpath and not force and os.path.isfile(album.artpath):
message = ui.colorize('text_highlight_minor', 'has album art')
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
# sources.
local_paths = None if force else [album.path]
path = self.art_for_album(album, local_paths)
if path:
album.set_art(path, False)
album.store()
message = ui.colorize('text_success', 'found album art')
else:
message = ui.colorize('text_error', 'no art found')
self._log.info(u'{0}: {1}', album, message)
def _source_urls(self, album):
"""Generate possible source URLs for an album's art. The URLs are
not guaranteed to work so they each need to be attempted in turn.
This allows the main `art_for_album` function to abort iteration
through this sequence early to avoid the cost of scraping when not
necessary.
"""
source_names = {v: k for k, v in ART_SOURCES.items()}
for source in self.sources:
self._log.debug(
'trying source {0} for album {1.albumartist} - {1.album}',
source_names[type(source)],
album,
)
urls = source.get(album)
for url in urls:
yield url
|
LordSputnik/beets
|
beetsplug/fetchart.py
|
Python
|
mit
| 25,041
|
import math
from abc import abstractmethod
from dataclasses import dataclass
from typing import (
Generic,
List,
Optional,
TypeVar,
Union,
overload,
Any,
Tuple,
Sequence,
)
import random
numpy_installed = False
try:
import numpy as np
numpy_installed = True
except ImportError:
pass
T = TypeVar("T")
@dataclass # type: ignore
class Prior(Generic[T]):
def __post_init__(self):
if numpy_installed:
self.np_rng = np.random
else:
self.rng: random.Random = random.Random()
@abstractmethod
def sample(self) -> T:
pass
def seed(self, seed: Optional[int]) -> None:
# Should this seed this individual prior?
if numpy_installed:
self.np_rng = np.random.RandomState(seed)
else:
self.rng = random.Random(seed)
@abstractmethod
def get_orion_space_string(self) -> str:
""" Gets the 'Orion-formatted space string' for this Prior object. """
@abstractmethod
def __contains__(self, v: Union[T, Any]) -> bool:
pass
@dataclass
class NormalPrior(Prior):
mu: float = 0.0
sigma: float = 1.0
discrete: bool = False
default: Optional[float] = None
shape: Union[int, Tuple[int, ...]] = None
def __post_init__(self):
super().__post_init__()
if self.shape:
if isinstance(self.default, (int, float)):
self.default = [self.default for _ in range(self.shape)]
def sample(self) -> Union[float, int]:
if self.shape:
assert isinstance(self.shape, int), "only support int shape for now."
if numpy_installed:
return self.np_rng.normal(self.mu, self.sigma, size=self.shape)
elif isinstance(self.shape, int):
_shape = self.shape
self.shape = None
values = [self.sample() for _ in range(_shape)]
self.shape = _shape
return values
else:
raise NotImplementedError(self.shape)
if numpy_installed:
value = self.np_rng.normal(self.mu, self.sigma)
else:
value = self.rng.normalvariate(self.mu, self.sigma)
if self.discrete:
return round(value)
return value
def get_orion_space_string(self) -> str:
raise NotImplementedError(
"TODO: Add this for the normal prior, didn't check how its done in "
"Orion yet."
)
def __contains__(self, v: Union[T, Any]) -> bool:
# TODO: For normal priors, I guess we only really check if the value is a float?
return isinstance(v, (int, float))
@dataclass
class UniformPrior(Prior):
min: float = 0.0
max: float = 1.0
discrete: bool = False
default: Optional[float] = None
shape: Union[int, Tuple[int, ...]] = None
def __post_init__(self):
super().__post_init__()
assert self.min <= self.max
if self.shape:
if isinstance(self.default, (int, float)):
self.default = [self.default for _ in range(self.shape)]
def sample(self) -> Union[float, int]:
        # TODO: add support for enums?
if self.shape:
assert isinstance(self.shape, int), "only support int shape for now."
if numpy_installed:
values = self.np_rng.uniform(self.min, self.max, size=self.shape)
if self.discrete:
values = np.round(values)
values = values.astype(int)
return values
elif isinstance(self.shape, int):
_shape = self.shape
self.shape = None
values = [self.sample() for _ in range(_shape)]
self.shape = _shape
return values
else:
raise NotImplementedError(self.shape)
if numpy_installed:
value = self.np_rng.uniform(self.min, self.max)
else:
value = self.rng.uniform(self.min, self.max)
if self.discrete:
return round(value)
return value
def get_orion_space_string(self) -> str:
string = f"uniform({self.min}, {self.max}"
if self.discrete:
string += ", discrete=True"
if self.default is not None:
string += f", default_value={self.default}"
if self.shape is not None:
string += f", shape={self.shape}"
string += ")"
return string
def __contains__(self, v: Union[T, Any]) -> bool:
# TODO: Include the max value here? or not?
return isinstance(v, (int, float)) and (self.min <= v < self.max)
@dataclass
class CategoricalPrior(Prior[T]):
choices: List[T]
probabilities: Optional[List[float]] = None
default_value: Optional[T] = None
def __post_init__(self):
super().__post_init__()
if isinstance(self.choices, dict):
choices = []
self.probabilities = []
for k, v in self.choices.items():
choices.append(k)
assert isinstance(v, (int, float)), "probs should be int or float"
self.probabilities.append(v)
@overload
def sample(self, n: int) -> List[T]:
...
@overload
def sample(self) -> T:
...
def sample(self, n: int = None) -> Union[T, List[T]]:
assert self.choices
# n = n or 1
# assert isinstance(n, int), n
choices: List = []
probabilities: List[float] = []
if isinstance(self.choices, dict):
for k, v in self.choices.items():
choices.append(k)
probabilities.append(v)
else:
choices = self.choices
probabilities = self.probabilities
if numpy_installed:
s = self.np_rng.choice(choices, size=n, p=probabilities)
samples = [
(s_i.item() if isinstance(s_i, np.ndarray) else s_i) for s_i in s
]
else:
samples = self.rng.choices(choices, weights=probabilities, k=n or 1)
return samples[0] if n in {None, 1} else samples
def get_orion_space_string(self) -> str:
string = "choices("
if self.probabilities:
prob_dict = dict(zip(self.choices, self.probabilities))
assert sum(self.probabilities) == 1, "probs should sum to 1."
# BUG: Seems like orion still samples entries, even if they have zero
# probability!
# TODO: Remove the entries that have zero prob?
prob_dict = {k: v for k, v in prob_dict.items() if v > 0}
string += str(prob_dict)
else:
string += str(self.choices)
if self.default_value is not None:
assert isinstance(self.default_value, (int, str, float))
default_value_str = str(self.default_value)
if isinstance(self.default_value, str):
default_value_str = f"'{self.default_value}'"
string += f", default_value={default_value_str}"
string += ")"
return string
def __contains__(self, v: Union[T, Any]) -> bool:
return v in self.choices
@dataclass
class LogUniformPrior(Prior):
min: float = 1e-3
max: float = 1e3
base: float = math.e
discrete: bool = False
default: Optional[float] = None
shape: Union[int, Tuple[int, ...]] = None
def __post_init__(self):
super().__post_init__()
if self.shape:
if isinstance(self.default, (int, float)):
self.default = [self.default for _ in range(self.shape)]
def sample(self) -> float:
# TODO: Might not be 100% numerically stable.
assert self.min > 0, "min of LogUniform can't be negative!"
assert self.min < self.max, "max should be greater than min!"
if self.shape:
            assert isinstance(self.shape, int), "only support int shape for now."
if numpy_installed:
log_vals = self.np_rng.uniform(
self.log_min, self.log_max, size=self.shape
)
values = np.power(self.base, log_vals)
if self.discrete:
values = np.round(values)
return values
elif isinstance(self.shape, int):
_shape = self.shape
self.shape = None
values = [self.sample() for _ in range(_shape)]
self.shape = _shape
return values
else:
raise NotImplementedError(self.shape)
if numpy_installed:
log_val = self.np_rng.uniform(self.log_min, self.log_max)
else:
log_val = self.rng.uniform(self.log_min, self.log_max)
value = math.pow(self.base, log_val)
if self.discrete:
return round(value)
return value
@property
def log_min(self) -> Union[int, float]:
if numpy_installed:
if self.base in {np.e, math.e}:
log_min = np.log(self.min)
else:
                log_min = np.log(self.min) / np.log(self.base)
else:
if self.base is math.e:
log_min = math.log(self.min)
else:
log_min = math.log(self.min, self.base)
assert isinstance(log_min, (int, float))
return log_min
@property
def log_max(self) -> Union[int, float]:
if numpy_installed:
if self.base in {math.e, np.e}:
log_max = np.log(self.max)
else:
log_max = np.log(self.max) / np.log(self.base)
else:
if self.base is math.e:
log_max = math.log(self.max)
else:
log_max = math.log(self.max, self.base)
assert isinstance(log_max, (int, float))
return log_max
def get_orion_space_string(self) -> str:
def format_power(value: float, log_value: float):
if isinstance(value, int) or value.is_integer():
return int(value)
elif isinstance(log_value, int) or log_value.is_integer():
log_value = int(log_value)
                if self.base == math.e:
return f"np.exp({int(log_value)})"
elif self.base == 10:
return f"{value:.2e}"
if math.log10(value).is_integer():
return f"{value:.0e}"
else:
return f"{value:g}"
min_str = format_power(self.min, self.log_min)
max_str = format_power(self.max, self.log_max)
string = f"loguniform({min_str}, {max_str}"
if self.discrete:
string += ", discrete=True"
if self.default is not None:
string += f", default_value={self.default}"
if self.shape is not None:
string += f", shape={self.shape}"
string += ")"
return string
def __contains__(self, v: Union[T, Any]) -> bool:
if self.shape:
assert isinstance(self.shape, int), "only support int shape for now."
mins: Sequence[float]
if isinstance(self.min, (int, float)):
mins = [self.min for _ in range(self.shape)]
else:
mins = self.min
maxes: Sequence[float]
if isinstance(self.max, (int, float)):
maxes = [self.max for _ in range(self.shape)]
else:
maxes = self.max
return all(
isinstance(v_i, (int, float)) and mins[i] <= v_i < maxes[i]
for i, v_i in enumerate(v)
)
return isinstance(v, (int, float)) and (self.min <= v < self.max)
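# Minimal usage sketch (values illustrative; assumes only the classes above):
# lr_prior = LogUniformPrior(min=1e-4, max=1e-1)
# lr = lr_prior.sample()                      # float in [1e-4, 1e-1)
# space = lr_prior.get_orion_space_string()   # e.g. "loguniform(1e-04, 1e-01)"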
|
lebrice/SimpleParsing
|
simple_parsing/helpers/hparams/priors.py
|
Python
|
mit
| 11,857
|
#!/usr/bin/python
"""MobWrite Nullifier
Copyright 2009 Google Inc.
http://code.google.com/p/google-mobwrite/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""This command-line program nullifies a document from a MobWrite server.
The MobWrite URL and the docname are provided on the command line.
"""
__author__ = "fraser@google.com (Neil Fraser)"
import mobwritelib
import sys
if __name__ == "__main__":
# Obtain the server URL and the docname from the command line argument.
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: %s <URL> <DOCNAME>" % sys.argv[0]
print >> sys.stderr, " E.g. %s http://mobwrite3.appspot.com/scripts/q.py demo_editor_text" % sys.argv[0]
print >> sys.stderr, " E.g. %s telnet://localhost:3017 demo_editor_text" % sys.argv[0]
sys.exit(2)
url = sys.argv[1]
docname = sys.argv[2]
mobwritelib.nullify(url, [docname])
|
mjpizz/node-mobwrite
|
ext/google-mobwrite/tools/nullify.py
|
Python
|
mit
| 1,360
|
"""Entry point for reading MGZ."""
# pylint: disable=invalid-name,no-name-in-module
from construct import (Struct, CString, Const, Int32ul, Embedded, Float32l, Terminated, If, Computed, this, Peek)
from mgz.util import MgzPrefixed, ZlibCompressed, Version, VersionAdapter, get_version
from mgz.header.ai import ai
from mgz.header.replay import replay
from mgz.header.map_info import map_info
from mgz.header.initial import initial
from mgz.header.achievements import achievements
from mgz.header.scenario import scenario
from mgz.header.lobby import lobby
from mgz.header.de import de
from mgz.header.hd import hd
compressed_header = Struct(
"game_version"/CString(encoding='latin1'),
"save_version"/VersionAdapter(Float32l),
"version"/Computed(lambda ctx: get_version(ctx.game_version, ctx.save_version, None)),
"hd"/If(lambda ctx: ctx.version == Version.HD and ctx.save_version > 12.34, hd),
"de"/If(lambda ctx: ctx.version == Version.DE, de),
ai,
replay,
map_info,
initial,
achievements,
scenario,
lobby,
Terminated
)
subheader = Struct(
"check"/Peek(Int32ul),
"chapter_address"/If(lambda ctx: ctx.check < 100000000, Int32ul),
Embedded(MgzPrefixed(lambda ctx: ctx._.header_length - 4 - (4 if ctx.check < 100000000 else 0), ZlibCompressed(compressed_header)))
)
"""Header is compressed"""
header = Struct(
"header_length"/Int32ul,
Embedded(subheader),
"log_version"/If(lambda ctx: ctx.save_version >= 11.76, Peek(Int32ul)),
"version"/Computed(lambda ctx: get_version(ctx.game_version, ctx.save_version, ctx.log_version))
)
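# Hedged usage sketch (the file name is an assumption, not part of this module):
# with open('recorded_game.mgz', 'rb') as f:
#     h = header.parse_stream(f)
#     print(h.version, h.save_version)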
|
happyleavesaoc/aoc-mgz
|
mgz/__init__.py
|
Python
|
mit
| 1,612
|
import new, sys
import galaxy.util
import parameters
from parameters import basic
from parameters import grouping
from elementtree.ElementTree import XML
class ToolTestBuilder( object ):
"""
Encapsulates information about a tool test, and allows creation of a
dynamic TestCase class (the unittest framework is very class oriented,
    doing dynamic tests in this way allows better integration)
"""
def __init__( self, tool, name ):
self.tool = tool
self.name = name
self.required_files = []
self.inputs = []
self.outputs = []
self.error = False
self.exception = None
def add_param( self, name, value, extra ):
try:
if name not in self.tool.inputs:
for input_name, input_value in self.tool.inputs.items():
if isinstance( input_value, grouping.Conditional ) or isinstance( input_value, grouping.Repeat ):
self.__expand_grouping_for_data_input(name, value, extra, input_name, input_value)
elif isinstance( self.tool.inputs[name], parameters.DataToolParameter ):
self.required_files.append( ( value, extra ) )
        except Exception: pass
self.inputs.append( ( name, value, extra ) )
def add_output( self, name, file ):
self.outputs.append( ( name, file ) )
def __expand_grouping_for_data_input( self, name, value, extra, grouping_name, grouping_value ):
# Currently handles grouping.Conditional and grouping.Repeat
if isinstance( grouping_value, grouping.Conditional ):
if name != grouping_value.test_param.name:
for case in grouping_value.cases:
for case_input_name, case_input_value in case.inputs.items():
if case_input_name == name and isinstance( case_input_value, basic.DataToolParameter ):
self.required_files.append( ( value, extra ) )
return True
elif isinstance( case_input_value, grouping.Conditional ):
self.__expand_grouping_for_data_input(name, value, extra, case_input_name, case_input_value)
elif isinstance( grouping_value, grouping.Repeat ):
# FIXME: grouping.Repeat can only handle 1 repeat param element since the param name
# is something like "input2" and the expanded page display is something like "queries_0|input2".
# The problem is that the only param name on the page is "input2", and adding more test input params
            # with the same name ( "input2" ) is not yet supported in our test code ( the last one added is the only
# one used ).
for input_name, input_value in grouping_value.inputs.items():
if input_name == name and isinstance( input_value, basic.DataToolParameter ):
self.required_files.append( ( value, extra ) )
return True
|
dbcls/dbcls-galaxy
|
lib/galaxy/tools/test.py
|
Python
|
mit
| 2,997
|
""" Illustris Simulation: Public Data Release.
lhalotree.py: File I/O related to the LHaloTree merger tree files. """
import numpy as np
import h5py
from groupcat import gcPath
from util import partTypeNum
def treePath(basePath,chunkNum=0):
""" Return absolute path to a LHaloTree HDF5 file (modify as needed). """
filePath = basePath + '/trees/treedata/' + 'trees_sf1_135.' + str(chunkNum) + '.hdf5'
return filePath
def treeOffsets(basePath, snapNum, id):
""" Handle offset loading for a LHaloTree merger tree cutout. """
# load groupcat chunk offsets from header of first file
with h5py.File(gcPath(basePath,snapNum),'r') as f:
groupFileOffsets = f['Header'].attrs['FileOffsets_Subhalo']
# calculate target groups file chunk which contains this id
groupFileOffsets = int(id) - groupFileOffsets
fileNum = np.max( np.where(groupFileOffsets >= 0) )
groupOffset = groupFileOffsets[fileNum]
with h5py.File(gcPath(basePath,snapNum,fileNum),'r') as f:
# load the merger tree offsets of this subgroup
TreeFile = f['Offsets']['Subhalo_LHaloTreeFile'][groupOffset]
TreeIndex = f['Offsets']['Subhalo_LHaloTreeIndex'][groupOffset]
TreeNum = f['Offsets']['Subhalo_LHaloTreeNum'][groupOffset]
return TreeFile,TreeIndex,TreeNum
def singleNodeFlat(conn, index, data_in, data_out, count, onlyMPB):
""" Recursive helper function: Add a single tree node. """
data_out[count] = data_in[index]
count += 1
count = recProgenitorFlat(conn,index,data_in,data_out,count,onlyMPB)
return count
def recProgenitorFlat(conn, start_index, data_in, data_out, count, onlyMPB):
""" Recursive helper function: Flatten out the unordered LHaloTree, one data field at a time. """
firstProg = conn["FirstProgenitor"][start_index]
if firstProg < 0:
return count
# depth-ordered traversal (down mpb)
count = singleNodeFlat(conn,firstProg,data_in,data_out,count,onlyMPB)
# explore breadth
if not onlyMPB:
nextProg = conn["NextProgenitor"][firstProg]
while nextProg >= 0:
count = singleNodeFlat(conn,nextProg,data_in,data_out,count,onlyMPB)
nextProg = conn["NextProgenitor"][nextProg]
firstProg = conn["FirstProgenitor"][firstProg]
return count
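# Traversal-order illustration (hypothetical tiny tree, not taken from the data):
# if node A's FirstProgenitor is B and B's NextProgenitor is C, singleNodeFlat(A)
# writes A, descends into B and all of B's own progenitors (depth-first), then
# walks the sibling chain to C, giving the flat order [A, B, ..., C, ...].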
def loadTree(basePath, snapNum, id, fields=None, onlyMPB=False):
""" Load portion of LHaloTree, for a given subhalo, re-arranging into a flat format. """
TreeFile,TreeIndex,TreeNum = treeOffsets(basePath, snapNum, id)
# config
gName = 'Tree' + str(TreeNum) # group name containing this subhalo
nRows = None # we do not know in advance the size of the tree
    # make sure fields is a list, not a single string
if isinstance(fields, basestring):
fields = [fields]
fTree = h5py.File(treePath(basePath,TreeFile),'r')
# if no fields requested, return everything
if not fields:
fields = fTree[gName].keys()
# verify existence of requested fields
for field in fields:
if field not in fTree[gName].keys():
raise Exception('Error: Requested field '+field+' not in tree.')
# load connectivity for this entire TreeX group
connFields = ['FirstProgenitor','NextProgenitor']
conn = {}
for field in connFields:
conn[field] = fTree[gName][field][:]
# determine sub-tree size with dummy walk
dummy = np.zeros( conn['FirstProgenitor'].shape, dtype='int32' )
nRows = singleNodeFlat(conn, TreeIndex, dummy, dummy, 0, onlyMPB)
result = {}
result['count'] = nRows
# walk through connectivity, one data field at a time
for field in fields:
# load field for entire tree? doing so is much faster than randomly accessing the disk
# during walk, assuming that the sub-tree is a large fraction of the full tree, and that
# the sub-tree is large in the absolute sense. the decision is heuristic, and can be
# modified (if you have the tree on a fast SSD, could disable the full load).
if nRows < 1000: # and float(nRows)/len(result['FirstProgenitor']) > 0.1
# do not load, walk with single disk reads
full_data = fTree[gName][field]
else:
# pre-load all, walk in-memory
full_data = fTree[gName][field][:]
# allocate the data array in the sub-tree
dtype = fTree[gName][field].dtype
shape = list(fTree[gName][field].shape)
shape[0] = nRows
data = np.zeros(shape, dtype=dtype)
# walk the tree, depth-first
count = singleNodeFlat(conn, TreeIndex, full_data, data, 0, onlyMPB)
# save field
result[field] = data
fTree.close()
# only a single field? then return the array instead of a single item dict
if len(fields) == 1:
return result[fields[0]]
return result
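# Minimal usage sketch (illustrative only: the basePath, snapshot number and
# subhalo id below are assumptions; adjust them to the local Illustris layout):
if __name__ == '__main__':
    basePath = 'Illustris-3/output'
    # load the main progenitor branch masses of subhalo 0 at snapshot 135
    mpb = loadTree(basePath, 135, 0, fields=['SubhaloMass'], onlyMPB=True)
    print(mpb)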
|
linan7788626/hackday_03112016
|
illustris_python/lhalotree.py
|
Python
|
mit
| 5,231
|
from db import DB
import config
from gensim.corpora import Dictionary
from multiprocessing import Process
from helper import chunkFun
import psutil
class UserProfile:
    def __init__(self, user_list=None):
        self.threshold_num = config.threshold_num
        self.lan = config.lan
        # avoid a shared mutable default argument; fall back to a fresh list
        self.user_list = user_list if user_list is not None else []
    def getUserList(self):
        """
        find the user list from mongodb: users who have at least
        'self.threshold_num' tweets in language 'self.lan'
        """
        if not self.user_list:
            db = DB()
            # note: the language key is hardcoded to 'en' here even though self.lan is configurable
            result = db.db[config.id_name_collection_name].find({'tweet_lan_stats.en':{'$gte':int(self.threshold_num)}}, {'_id':0,'id':1})
            self.user_list = [x['id'] for x in result]
            print(len(self.user_list))
            db.close()
def buildCorpProfile(self, user_list = []):
"""
        build the corp profile for the given users, or for all users who meet the requirement defined in config.py
"""
if not user_list:
if not self.user_list:
print('user list is empty! will run getUserList')
self.getUserList()
print('get the user list')
user_list = self.user_list
if not user_list:
print('user list is still empty!')
return
#self.doBuildCorpProfile(self.user_list)
# number of CPU cores in the system
cores = psutil.cpu_count(logical = False)
        # logical CPUs include the hyperthreads in each core
threads = psutil.cpu_count(logical = True)
        # just in case, we need to reserve about half of the threads for mongodb, as we are
        # running mongodb locally. A config option can be added in the future to support a remote mongodb server
if threads < 2:
threads = 2
# will make use of all the logical cpus
all_threads = threads
        # thread_n is imported from config.py; the user can specify how many threads to use
thread_n = int(config.thread_n)
# because mongodb is running locally, and it will create a thread for each connection,
# here we set the thread number to no larger than all_threads/2
if thread_n > ( int(all_threads/2) ) or thread_n <= 0:
thread_n = int(all_threads/2)
if thread_n >= len(user_list):
thread_n = len(user_list)
print('{} threads out of {} will be used'.format(thread_n, all_threads))
process_list = []
        # split the list of followers (userIDs) into chunks and start a process to deal with each chunk
for i in range(thread_n):
c = chunkFun(user_list, thread_n, i)
p = Process(target=self.doBuildCorpProfile, args=(c,i,))
p.start()
process_list.append(p)
for p in process_list:
p.join()
print('All Done!')
def doBuildCorpProfile(self, user_list, worker_id):
"""
        split each tweet text and build a corpus for the user from all of their clean tweet text
"""
count = 0
total = len(user_list)
db = DB()
for user_id in user_list:
result = db.db[config.tweet_collection_name].find({'user.id':user_id, 'tweet_lan':self.lan},{'_id':0,'tweet_clean':1})
profile_corpora = []
for doc in result:
profile_corpora += doc['tweet_clean'].split()
result = db.db[config.id_name_collection_name].update_one({'id':user_id},{'$set':{'corp_profile':profile_corpora}})
count += 1
print('process user: {}, {}/{} on worker {}'.format(user_id, count, total, worker_id))
db.close()
def getCorpProfile(self, user_list = []):
"""
return the corp profile for a given user list or the list of users who meet the requirement defined in config.py
"""
if not user_list:
if not self.user_list:
print('user list is empty! will run getUserList')
self.getUserList()
print('get the user list')
user_list = self.user_list
if not user_list:
print('user list is still empty!')
return []
else:
corp_profile = self.doGetCorpProfile(user_list)
return corp_profile
def doGetCorpProfile(self, user_list):
"""
        retrieve the corp profile from mongodb's 'id_name' collection
"""
user_corp_profile = {}
db = DB()
for user_id in user_list:
result = db.db[config.id_name_collection_name].find_one({'id':user_id},{'corp_profile':1, 'screen_name':1})
#user_corp_profile.append(result['corp_profile'])
user_corp_profile[result['screen_name']] = result['corp_profile']
db.close()
return user_corp_profile
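# Minimal usage sketch (illustrative only; assumes mongodb is running and
# config.py is populated as described in the docstrings above):
if __name__ == '__main__':
    up = UserProfile()
    up.buildCorpProfile()           # spawns worker processes, writes corp_profile to mongodb
    profiles = up.getCorpProfile()  # {screen_name: [token, ...]}
    print(len(profiles))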
|
weakties/infrastructure
|
component_twitter_networks/userProfile.py
|
Python
|
mit
| 4,903
|
#! /usr/bin/env python
import sys
import os
import urllib
import re
import pytest
import time
import hubcheck
from hubcheck.exceptions import HCException
from hubcheck.testcase import TestCase2
from hubcheck.shell import ContainerManager
from hubcheck.shell import ToolSession
pytestmark = [ pytest.mark.website,
pytest.mark.container,
pytest.mark.reboot,
pytest.mark.weekly,
pytest.mark.upgrade,
pytest.mark.prod_safe_upgrade,
pytest.mark.parampass,
]
TESTDATA = ''
TOOL_NAME = "hppt"
TOOL_REVISION = 2
INVOKE_APP_PATH = '/usr/bin/invoke_app'
TEST_PROGRAM_PARAMS_FNAME = "pp.out"
TEST_PROGRAM_NAME = 'printparams'
TEST_PROGRAM_SCRIPT = """#!/bin/sh
# \\
exec wish "$0" ${1+"$@"}
set params_file_out %(outfile)s
set fid [open $params_file_out w]
puts -nonewline $fid $argv
close $fid
label .text -text "Running..."
button .close -text "Quit" -command {exit 1}
pack .text -side top
pack .close -side top
""" % {'outfile':TEST_PROGRAM_PARAMS_FNAME}
class BadParameterError(HCException):
pass
class HarStatusError(HCException):
pass
class GroupMembershipError(HCException):
pass
class SessionInvokeError(HCException):
pass
def setup_tool(shell,tool_name,tool_revision,invoke_script,
test_program_name,test_program_script):
"""
install code to test parameter passing. the tool revision provided to
this function will be overwritten with a tool that accepts parameters
on the command line and prints out the parameter list to the file
TEST_PROGRAM_PARAMS_FNAME. users can examine the TEST_PROGRAM_PARAMS_FNAME
file to compare what was sent to invoke_app with the parameter list the
tool was executed with.
"""
# check that the user is in the apps group
groups,es = shell.execute("echo ${USER} | groups")
if "apps" not in groups.split():
# user not in the apps group, bail
username,es = shell.execute("echo ${USER}")
# raise RuntimeError("user %s not in apps group: %s" % (username,groups))
raise GroupMembershipError("user %s not in apps group: %s" % (username,groups))
# become the apps user
shell.send('sudo su - apps')
shell.start_bash_shell()
tool_revision_string = "r%s" % (tool_revision)
tool_path = "/apps/%s/%s" % (tool_name, tool_revision_string)
dev_path = "/apps/%s/dev" % (tool_name)
# setup the new tool's invoke script
# mv %(tool_path)s %(tmp_tool_path)s;
# tmp_tool_path = tool_path + ".old"
# """ % {'tool_path' : tool_path, 'tmp_tool_path' : tmp_tool_path}
script = """
rm -rf %(tool_path)s;
mkdir %(tool_path)s;
rm -f %(dev_path)s;
ln -s %(tool_path)s %(dev_path)s;
cd %(tool_path)s;
mkdir middleware bin;
""" % {'tool_path' : tool_path,
'dev_path' : dev_path}
commands = script.strip().split('\n')
shell.execute(commands)
# write the invoke script to disk
shell.write_file('middleware/invoke', invoke_script)
shell.execute('chmod 755 middleware/invoke')
# write the test program to disk
shell.write_file("bin/%s" % (test_program_name), test_program_script)
shell.execute("chmod 755 bin/%s" % (test_program_name))
# exit from apps user
shell.stop_bash_shell()
shell.send('exit')
def setup_datafiles(shell,params_info):
"""
write the datafiles to disk
build the parameters file
"""
parameters_text_items = []
for key,value in params_info.items():
shell.write_file(value['path'], value['text'])
parameters_text_items.append("%s:%s" % (value['type'],value['path']))
# generate the parameters file to feed into the url
parameters_text = '\n'.join(parameters_text_items)
return parameters_text
def launch_tool(https_authority,username,password,browser,catalog,utils,tool_name,
tool_revision,parameters_text,add_empty_params=False):
"""
launch the test/dev version of a tool to test parameter passing
we launch the test version so we don't have to publish a tool
just for testing parameter passing.
"""
# login to the hub
utils.account.login_as(username,password)
# go to the page to launch the tool
# with the parameters encoded in the url
GenericPage = catalog.load('GenericPage')
po = GenericPage(browser,catalog)
# quote() is for escaping the path portion of the url
# quote_plus() is for escaping the query portion of the url
# and handles escaping / to %2F, but turns spaces into +'s
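    # e.g. (illustrative): urllib.quote_plus('file(datafile1):/home/u/my file')
    #      returns 'file%28datafile1%29%3A%2Fhome%2Fu%2Fmy+file'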
encoded_parameters = urllib.quote_plus(parameters_text.encode('utf8'))
url = "%(https_authority)s/tools/%(toolname)s/invoke/%(toolrev)s"
if encoded_parameters != '' or add_empty_params:
url += "?params=%(params)s"
# hard code tool revision to test so we don't have to publish the tool
tool_revision = 'test'
url = url % {'https_authority' : https_authority,
'toolname' : tool_name,
'toolrev' : tool_revision,
'params' : encoded_parameters,}
# print "parameters_text = %s" % (parameters_text)
# print "url = %s" % (url)
# launch the tool
browser.proxy_client.new_har("page link")
po.goto_page(url)
har_entry = browser.page_load_details()
if browser.error_loading_page(har_entry):
# the tool session container may still start,
# even if the har status is not 200 level
# try to close it before raising the error
har_status = har_entry['response']['status']
# print har_status
raise HarStatusError("unexpected error while launching tool: %s" % (har_status))
# include a check for 'Bad Parameters' error box in html
# because the error page returns status of 200
for msg in po.get_errorbox_info():
if 'bad parameters' in msg.lower():
raise BadParameterError(
"found a 'Bad Parameters' error box in the html of" \
+ " %s while passing parameters: %s\n%s" \
% (url,parameters_text,msg))
# one last check for middleware errors that take the user back to
# the member's dashboard page. middleware can't send messages back
# to the web server, so we get the message "Failed to invoke session"
loc = 'css=dd.error'
if po.is_displayed(loc):
# there is an error message displayed
e = po.find_element(loc)
msg = e.text
raise SessionInvokeError(msg)
# get the session number from the tool session page
ToolSessionPage = catalog.load('ToolSessionPage')
po = ToolSessionPage(browser,catalog)
tool_session_number = po.get_session_number()
return tool_session_number
def retrieve_container_parameters(shell):
parameters_text = ''
# figure out what the TOOL_PARAMETERS environment variable is
# by looking at the command that started the tool session
# container.
container_cmd,es = shell.execute('ps aux | grep TOOL_PARAMETERS')
# container_cmd should at least contain the grep
# fish out the assignment
# if there is no assignment, return an empty string
matches = re.search('TOOL_PARAMETERS=([^\s]+)',container_cmd)
if matches:
tool_parameters_filename = matches.group(1)
parameters_text = shell.read_file(tool_parameters_filename)
return parameters_text
def retrieve_program_output(shell,params_out_fname):
fpath = '${SESSIONDIR}/%s' % (params_out_fname)
count = 0
# wait for the file to exist on disk for systems with slow nfs
# if the file never appears, error out in the read_file() method
while (count < 5) and shell.bash_test('-e %s' % (fpath)) is False:
time.sleep(5)
count = count + 1
parameters_out = shell.read_file(fpath)
return parameters_out
def shrink_space(data):
"""
perform newline normalization on data
"""
# remove leading and trailing spaces
data = data.strip()
# collapse multiple lines to one single line
data = re.sub("\n+","\n",data)
return data
def pass_parameters(apps_shell,reg_shell,invoke_script,params_info,
https_authority,reguser,regpass,browser,catalog,utils):
# as the apps user, setup a fake tool
setup_tool(apps_shell,TOOL_NAME,TOOL_REVISION,invoke_script,
TEST_PROGRAM_NAME,TEST_PROGRAM_SCRIPT)
    # as the registered user, set up the datafiles that are
    # referenced by parameter passing
parameters_text = setup_datafiles(reg_shell,params_info)
# as the registered user, launch the session, passing parameters
sessnum = launch_tool(https_authority,reguser,regpass,browser,catalog,utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
return (sessnum,parameters_text)
@pytest.mark.registereduser
@pytest.mark.appsuser
class TestParameterPassingInvokeApp(TestCase2):
def setup_method(self,method):
self.remove_files = []
# get user account info
self.reguser,self.regpass = self.testdata.find_account_for('registeredworkspace')
self.appsuser,self.appspass = self.testdata.find_account_for('appsworkspace')
hubname = self.testdata.find_url_for('https')
# setup a web browser
self.browser.get(self.https_authority)
# setup access to tool session containers
cm = ContainerManager()
self.reg_ws = cm.access(host=hubname,
username=self.reguser,
password=self.regpass)
self.apps_ws = cm.access(host=hubname,
username=self.appsuser,
password=self.appspass,
toolname=hubcheck.conf.settings.apps_workspace_toolname)
self.session = ToolSession(
host=hubname, username=self.reguser, password=self.regpass)
self.reg_ws.execute('cd $SESSIONDIR')
# get a list of session open before the test was run
        # in case the test fails unexpectedly, we can clean up
self.existing_sessions = self.session.get_open_session_detail()
self.close_sessions = []
def teardown_method(self,method):
# exit the workspace
# shut down the ssh connection
self.reg_ws.close()
self.apps_ws.close()
# see if we can find any sessions accidentally left open.
open_sessions = self.session.get_open_session_detail()
open_sessions_numbers = []
for row in open_sessions.values():
open_sessions_numbers.append(row['session_number'])
if re.search(TOOL_NAME,row['name']) is not None:
# we found an open session that matches the name of our test tool
# check if it was open before we started the test.
old_session = False
for existing_row in self.existing_sessions.values():
if existing_row['session_number'] == row['session_number']:
old_session = True
break
if old_session is False:
# we found a session that was not open when the test started
# but is open now. there is a small chance it was opened by
# someone else.
# check if it is already in the list of sessions to be closed,
# if not, add the session to the list
if row['session_number'] not in self.close_sessions:
self.close_sessions.append(row['session_number'])
# close the parampass tool's container
for session_number in self.close_sessions:
# check if the session is still open before closing it
if session_number in open_sessions_numbers:
self.session.stop(session_number)
del self.session
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_1_command_no_templates_no_parameters_file(self):
"""
launch a tool with an invoke script with one -C option that is not
templated. do not create a parameters file.
"""
invoke_script = """#!/bin/sh
%(invoke_app_path)s -C %(test_program)s
""" % {'invoke_app_path' : INVOKE_APP_PATH,
'test_program' : TEST_PROGRAM_NAME}
params_info = {
}
expected_parameters = ''
sessnum,parameters_text = pass_parameters(self.apps_ws, self.reg_ws,
invoke_script, params_info,
self.https_authority, self.reguser,
self.regpass, self.browser,
self.catalog, self.utils)
self.close_sessions.append(sessnum)
# log into the tool session container to get the list of parameters
        # passed into the test program. we check that the parameters were
# all found in our original parameters file we generated earlier.
ws = self.session.access(session_number=sessnum)
ws.execute('echo $SESSION')
ws_params_text = retrieve_container_parameters(ws)
ws_params_out = retrieve_program_output(ws,TEST_PROGRAM_PARAMS_FNAME)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
# check that toolparams started the correct tool based on
# parameters passed into the container.
assert expected_parameters == ws_params_out, \
"expected parameters: %s\nreceived parameters: %s" \
% (repr(expected_parameters),repr(ws_params_out))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_1_command_no_templates_with_parameters_file(self):
"""
launch a tool with an invoke script with one -C option that is not
templated. create a parameters file. no parameters should be passed
to the test program.
"""
homedir,es = self.reg_ws.execute('echo ${HOME}')
invoke_script = """#!/bin/sh
%(invoke_app_path)s -C %(test_program)s
""" % {'invoke_app_path' : INVOKE_APP_PATH,
'test_program' : TEST_PROGRAM_NAME}
params_info = {
'datafile1' : {
'text' : 'this is datafile1',
'type' : 'file(datafile1)',
'path' : "%s/datafile1" % (homedir),
},
}
expected_parameters = ''
sessnum,parameters_text = pass_parameters(self.apps_ws, self.reg_ws,
invoke_script, params_info,
self.https_authority, self.reguser,
self.regpass, self.browser,
self.catalog, self.utils)
self.close_sessions.append(sessnum)
# log into the tool session container to get the list of parameters
        # passed into the test program. we check that the parameters were
# all found in our original parameters file we generated earlier.
ws = self.session.access(session_number=sessnum)
ws.execute('echo $SESSION')
ws_params_text = retrieve_container_parameters(ws)
ws_params_out = retrieve_program_output(ws,TEST_PROGRAM_PARAMS_FNAME)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
# check that toolparams started the correct tool based on
# parameters passed into the container.
assert expected_parameters == ws_params_out, \
"expected parameters: %s\nreceived parameters: %s" \
% (repr(expected_parameters),repr(ws_params_out))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_1_template_1_default_run_default(self):
"""
launch a tool with an invoke script with two -C options. the first
-C option is templated and accepts a named file. the second -C option
is not templated. invoke_app should make the non-templated command the
default option for toolparams. toolparams should run the default option
because there is no TOOL_PARAMETERS file and no templates will match.
no TOOL_PARAMETERS file will be created. no parameters should be passed
to the test program.
"""
invoke_script = """#!/bin/sh
%(invoke_app_path)s -C "%(test_program)s @@file(datafile1)" -C %(test_program)s
""" % {'invoke_app_path' : INVOKE_APP_PATH,
'test_program' : TEST_PROGRAM_NAME}
params_info = {
}
expected_parameters = ''
sessnum,parameters_text = pass_parameters(self.apps_ws, self.reg_ws,
invoke_script, params_info,
self.https_authority, self.reguser,
self.regpass, self.browser,
self.catalog, self.utils)
self.close_sessions.append(sessnum)
# log into the tool session container to get the list of parameters
        # passed into the test program. we check that the parameters were
# all found in our original parameters file we generated earlier.
ws = self.session.access(session_number=sessnum)
ws.execute('echo $SESSION')
ws_params_text = retrieve_container_parameters(ws)
ws_params_out = retrieve_program_output(ws,TEST_PROGRAM_PARAMS_FNAME)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
# check that toolparams started the correct tool based on
# parameters passed into the container.
assert expected_parameters == ws_params_out, \
"expected parameters: %s\nreceived parameters: %s" \
% (repr(expected_parameters),repr(ws_params_out))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_1_template_1_default_run_template(self):
"""
launch a tool with an invoke script with two -C options. the first
-C option is templated and accepts a named file. the second -C option
is not templated. invoke_app should make the non-templated command the
default option for toolparams. toolparams should run the templated option
because it will match a value in the TOOL_PARAMETERS file.
one parameter should be passed to the test program.
"""
homedir,es = self.reg_ws.execute('echo ${HOME}')
invoke_script = """#!/bin/sh
%(invoke_app_path)s -C "%(test_program)s @@file(datafile1)" -C %(test_program)s
""" % {'invoke_app_path' : INVOKE_APP_PATH,
'test_program' : TEST_PROGRAM_NAME}
params_info = {
'datafile1' : {
'text' : 'this is datafile1',
'type' : 'file(datafile1)',
'path' : "%s/datafile1" % (homedir),
},
}
expected_parameters = params_info['datafile1']['path']
sessnum,parameters_text = pass_parameters(self.apps_ws, self.reg_ws,
invoke_script, params_info,
self.https_authority, self.reguser,
self.regpass, self.browser,
self.catalog, self.utils)
self.close_sessions.append(sessnum)
# log into the tool session container to get the list of parameters
        # passed into the test program. we check that the parameters were
# all found in our original parameters file we generated earlier.
ws = self.session.access(session_number=sessnum)
ws.execute('echo $SESSION')
ws_params_text = retrieve_container_parameters(ws)
ws_params_out = retrieve_program_output(ws,TEST_PROGRAM_PARAMS_FNAME)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
# check that toolparams started the correct tool based on
# parameters passed into the container.
assert expected_parameters == ws_params_out, \
"expected parameters: %s\nreceived parameters: %s" \
% (repr(expected_parameters),repr(ws_params_out))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_1_template_0_default_run_template_1(self):
"""
launching a tool with an invoke script with one -C template command
should launch toolparams to run the command. toolparams
should launch the tool with the templated argument.
"""
homedir,es = self.reg_ws.execute('echo ${HOME}')
invoke_script = """#!/bin/sh
%(invoke_app_path)s -C "%(test_program)s @@file(datafile1)"
""" % {'invoke_app_path' : INVOKE_APP_PATH,
'test_program' : TEST_PROGRAM_NAME}
params_info = {
'datafile1' : {
'text' : 'this is datafile1',
'type' : 'file(datafile1)',
'path' : "%s/datafile1" % (homedir),
},
}
expected_parameters = params_info['datafile1']['path']
sessnum,parameters_text = pass_parameters(self.apps_ws, self.reg_ws,
invoke_script, params_info,
self.https_authority, self.reguser,
self.regpass, self.browser,
self.catalog, self.utils)
self.close_sessions.append(sessnum)
# log into the tool session container to get the list of parameters
        # passed into the test program. we check that the parameters were
# all found in our original parameters file we generated earlier.
ws = self.session.access(session_number=sessnum)
ws.execute('echo $SESSION')
ws_params_text = retrieve_container_parameters(ws)
ws_params_out = retrieve_program_output(ws,TEST_PROGRAM_PARAMS_FNAME)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
# check that toolparams started the correct tool based on
# parameters passed into the container.
assert expected_parameters == ws_params_out, \
"expected parameters: %s\nreceived parameters: %s" \
% (repr(expected_parameters),repr(ws_params_out))
@pytest.mark.registereduser
@pytest.mark.appsuser
class TestParameterPassingUrl(TestCase2):
def setup_method(self,method):
self.remove_files = []
# get user account info
self.reguser,self.regpass = self.testdata.find_account_for('registeredworkspace')
self.appsuser,self.appspass = self.testdata.find_account_for('appsworkspace')
self.hubname = self.testdata.find_url_for('https')
# setup a web browser
self.browser.get(self.https_authority)
# setup access to tool session containers
cm = ContainerManager()
self.reg_ws = cm.access(host=self.hubname,
username=self.reguser,
password=self.regpass)
self.session = ToolSession(
host=self.hubname, username=self.reguser, password=self.regpass)
# get a list of session open before the test was run
        # in case the test fails unexpectedly, we can clean up
self.existing_sessions = self.session.get_open_session_detail()
self.close_sessions = []
def teardown_method(self,method):
# exit the workspace
# shut down the ssh connection
self.reg_ws.close()
# see if we can find any sessions accidentally left open.
open_sessions = self.session.get_open_session_detail()
open_sessions_numbers = []
for row in open_sessions.values():
open_sessions_numbers.append(row['session_number'])
if re.search(TOOL_NAME,row['name']) is not None:
# we found an open session that matches the name of our test tool
# check if it was open before we started the test.
old_session = False
for existing_row in self.existing_sessions.values():
if existing_row['session_number'] == row['session_number']:
old_session = True
break
if old_session is False:
# we found a session that was not open when the test started
# but is open now. there is a small chance it was opened by
# someone else.
# check if it is already in the list of sessions to be closed,
# if not, add the session to the list
if row['session_number'] not in self.close_sessions:
self.close_sessions.append(row['session_number'])
# close the parampass tool's container
for session_number in self.close_sessions:
# check if the session is still open before closing it
if session_number in open_sessions_numbers:
self.session.stop(session_number)
del self.session
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_no_parameters_file(self):
"""
        launch a tool with no parameters argument in the url.
"""
parameters_text = ''
sessnum = launch_tool(self.https_authority,self.reguser,self.regpass,
self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@pytest.mark.skipif(True, reason="we no longer do file validation")
@hubcheck.utils.hub_version(min_version='1.1.4')
def dnr_test_launch_tool_invalid_path_1(self):
"""
launch a tool with a parameter file with an invalid filename.
file(datafile1):/home/hubname/testuser/file_does_not_exist
"""
home_dir,es = self.reg_ws.execute('echo ${HOME}')
parameters_text = 'file(datafile1):%s/file_does_not_exist' % (home_dir)
try:
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
assert False, "while passing tool parameters, cms failed to" \
+ " catch invalid path: %s" % (repr(parameters_text))
except BadParameterError as e:
pass
@pytest.mark.skipif(True, reason="we no longer do file validation")
@hubcheck.utils.hub_version(min_version='1.1.4')
def dnr_test_launch_tool_invalid_path_2(self):
"""
launch a tool with a parameter file with an invalid user.
file(datafile1):/home/hubname/non_existent_fake_user/file_does_not_exist
"""
home_dir,es = self.reg_ws.execute('echo ${HOME}')
home_base = os.path.dirname(home_dir)
home_dir = os.path.join(home_base,'non_existent_fake_user')
parameters_text = 'file(datafile1):%s/file_does_not_exist' % (home_dir)
try:
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
assert False, "while passing tool parameters, cms failed to" \
+ " catch invalid path: %s" % (repr(parameters_text))
except BadParameterError as e:
pass
@pytest.mark.skipif(True, reason="we no longer do file validation")
@hubcheck.utils.hub_version(min_version='1.1.4')
def dnr_test_launch_tool_invalid_path_3(self):
"""
launch a tool with a parameter file with an invalid hubname.
file(datafile1):/home/bad_hubname/fake_user/file_does_not_exist
"""
home_dir,es = self.reg_ws.execute('echo ${HOME}')
home_base = os.path.dirname(os.path.dirname(home_dir))
home_dir = os.path.join(home_base,'bad_hubname','fake_user')
parameters_text = 'file(datafile1):%s/file_does_not_exist' % (home_dir)
try:
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
assert False, "while passing tool parameters, cms failed to" \
+ " catch invalid path: %s" % (repr(parameters_text))
except BadParameterError as e:
pass
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_invalid_path_4(self):
"""
launch a tool with a parameter file with an invalid hubname.
file(datafile1):/bad_home/bad_hubname/fake_user/file_does_not_exist
"""
home_dir = os.path.join('/','bad_home','bad_hubname','fake_user')
parameters_text = 'file(datafile1):%s/file_does_not_exist' % (home_dir)
try:
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
assert False, "while passing tool parameters, cms failed to" \
+ " catch invalid path: %s" % (repr(parameters_text))
except BadParameterError as e:
pass
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_blacklisted_path_1(self):
"""
        launch a tool with a parameter file with a blacklisted path.
file(datafile1):/etc/environ
"""
parameters_text = 'file(datafile1):/etc/environ'
try:
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
assert False, "while passing tool parameters, cms failed to" \
+ " catch blacklisted path: %s" % (repr(parameters_text))
except BadParameterError as e:
pass
@pytest.mark.skipif(
not hubcheck.utils.check_hub_hostname(['nees.org']),
reason="nees.org specific test")
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_whitelisted_path_1(self):
"""
launch a tool with a parameter file with a whitelisted path.
directory:/nees
"""
parameters_text = 'directory:/nees'
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@pytest.mark.skipif(
not hubcheck.utils.check_hub_hostname(['nees.org']),
reason="nees.org specific test")
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_whitelisted_path_2(self):
"""
launch a tool with a parameter file with a whitelisted path.
file:/nees/home/Public.groups/thumb_1235445883_Model-18EP-a.jpg
"""
parameters_text = 'file:/nees/home/Public.groups/thumb_1235445883_Model-18EP-a.jpg'
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_whitelisted_path_3(self):
"""
launch a tool with a parameter file with a whitelisted path.
directory:/home/blahh
"""
parameters_text = 'directory:/home/blahh'
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_home_expansion_1(self):
"""
launch a tool with a parameter file with a ~ in the path.
file(datafile1):~/.icewm/menu
"""
parameters_text = 'file(datafile1):~/.icewm/menu'
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_named_file_1(self):
"""
        launch a tool with a single named file parameter in the url.
"""
session_dir,es = self.reg_ws.execute('echo ${SESSIONDIR}')
parameters_text = 'file(datafile1):%s/resources' % (session_dir)
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_named_file_2(self):
"""
        launch a tool with multiple named file parameters in the url.
files are located in home directory
"""
session_dir,es = self.reg_ws.execute('echo ${SESSIONDIR}')
home_dir,es = self.reg_ws.execute('echo ${HOME}')
parameters_text = '\n'.join([
'file(datafile1):%s/resources' % (session_dir),
'file(datafile2):%s/.icewm/menu' % (home_dir),
])
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_file_format_1(self):
"""
        launch a tool with a single named file parameter and an
extra newline at the end of the file.
https://nees.org/groups/parampass/wiki/MainPage step 1 (b)
"""
home_dir,es = self.reg_ws.execute('echo ${HOME}')
parameters_text = '\n'.join([
'file(datafile2):%s/.icewm/menu' % (home_dir),
'',
])
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
parameters_text = shrink_space(parameters_text)
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_file_format_2(self):
"""
        launch a tool with a single named file parameter and
multiple extra newlines at the end of the file.
https://nees.org/groups/parampass/wiki/MainPage step 1 (b)
"""
home_dir,es = self.reg_ws.execute('echo ${HOME}')
parameters_text = '\n'.join([
'file(datafile2):%s/.icewm/menu' % (home_dir),
'',
'',
])
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
parameters_text = shrink_space(parameters_text)
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_file_format_3(self):
"""
        launch a tool with a single named file parameter
        surrounded by multiple extra newlines.
https://nees.org/groups/parampass/wiki/MainPage step 1 (b)
"""
home_dir,es = self.reg_ws.execute('echo ${HOME}')
parameters_text = '\n'.join([
'',
'',
'file(datafile2):%s/.icewm/menu' % (home_dir),
'',
'',
])
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
parameters_text = shrink_space(parameters_text)
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_file_format_4(self):
"""
        launch a tool with a single named file parameter
        preceded by multiple extra newlines.
https://nees.org/groups/parampass/wiki/MainPage step 1 (b)
"""
home_dir,es = self.reg_ws.execute('echo ${HOME}')
parameters_text = '\n'.join([
'',
'',
'file(datafile2):%s/.icewm/menu' % (home_dir),
])
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
parameters_text = shrink_space(parameters_text)
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_file_format_5(self):
"""
        launch a tool with multiple named file parameters and
        a single extra newline between the parameters.
https://nees.org/groups/parampass/wiki/MainPage step 1 (b)
"""
home_dir,es = self.reg_ws.execute('echo ${HOME}')
parameters_text = '\n'.join([
'file(datafile2):%s/.icewm/menu' % (home_dir),
'',
'file(datafile2):%s/.icewm/preferences' % (home_dir),
])
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
@hubcheck.utils.hub_version(min_version='1.1.4')
def test_launch_tool_file_format_6(self):
"""
        launch a tool with multiple named file parameters and
multiple extra newlines between the parameters.
https://nees.org/groups/parampass/wiki/MainPage step 1 (b)
"""
home_dir,es = self.reg_ws.execute('echo ${HOME}')
parameters_text = '\n'.join([
'file(datafile2):%s/.icewm/menu' % (home_dir),
'',
'',
'file(datafile2):%s/.icewm/preferences' % (home_dir),
])
sessnum = launch_tool(self.https_authority,self.reguser,
self.regpass,self.browser,self.catalog,self.utils,
TOOL_NAME,TOOL_REVISION,parameters_text)
self.close_sessions.append(sessnum)
ws = self.session.access(session_number=sessnum)
ws.execute('cd $SESSIONDIR')
ws_params_text = retrieve_container_parameters(ws)
ws.close()
# check that the TOOL_PARAMETERS file has the same info
        # as our parameters_text variable it was created from
assert parameters_text == ws_params_text, \
"TOOL_PARAMETERS file in container does not match data" \
+ " sent through url.\nexpected:\n%s\nreceived:\n%s\n" \
% (repr(parameters_text),repr(ws_params_text))
|
codedsk/hubcheck-hubzero-tests
|
hchztests/tests/test_website_parampass.py
|
Python
|
mit
| 47,868
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsOperations(object):
"""ExpressRoutePortsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
express_route_port_name=express_route_port_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRoutePort"
"""Retrieves the requested ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of ExpressRoutePort.
:type express_route_port_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePort, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.ExpressRoutePort
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "_models.ExpressRoutePort"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRoutePort"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRoutePort')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "_models.ExpressRoutePort"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRoutePort"]
"""Creates or updates the specified ExpressRoutePort resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param parameters: Parameters supplied to the create ExpressRoutePort operation.
:type parameters: ~azure.mgmt.network.v2019_02_01.models.ExpressRoutePort
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRoutePort or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_02_01.models.ExpressRoutePort]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_port_name=express_route_port_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
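    # Illustrative usage sketch (resource and variable names below are
    # placeholders, not part of this module):
    #   poller = network_client.express_route_ports.begin_create_or_update(
    #       'my-resource-group', 'my-express-route-port', parameters)
    #   express_route_port = poller.result()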
def _update_tags_initial(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRoutePort"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
express_route_port_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRoutePort"]
"""Update ExpressRoutePort tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_port_name: The name of the ExpressRoutePort resource.
:type express_route_port_name: str
:param parameters: Parameters supplied to update ExpressRoutePort resource tags.
:type parameters: ~azure.mgmt.network.v2019_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRoutePort or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_02_01.models.ExpressRoutePort]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
express_route_port_name=express_route_port_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRoutePortListResult"]
"""List all the ExpressRoutePort resources in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.ExpressRoutePortListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRoutePortListResult"]
"""List all the ExpressRoutePort resources in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.ExpressRoutePortListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_express_route_ports_operations.py
|
Python
|
mit
| 30,439
|
# -*- coding: utf-8 -*-
import pygame
from pibooth.utils import LOGGER
from pibooth.controls import GPIO
BUTTON_DOWN = pygame.USEREVENT + 1
class PtbButton(object):
"""Physical button management
"""
def __init__(self, pin, bouncetime=0.1):
self.pin = pin
# Use internal pull up/down resistors
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(self.pin, GPIO.FALLING,
callback=self.on_button_down,
bouncetime=int(bouncetime * 1000))
def __eq__(self, other):
"""Can compare button with its pin.
"""
if isinstance(other, PtbButton):
return self is other
else:
return self.pin == other
def on_button_down(self, pin):
"""Post a pygame event when the button is pressed.
"""
LOGGER.debug('Hardware button (pin %s) triggered', pin)
event = pygame.event.Event(BUTTON_DOWN, pin=pin)
pygame.event.post(event)
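if __name__ == '__main__':
    # Minimal usage sketch, not part of pibooth itself: poll the pygame
    # event queue for BUTTON_DOWN events posted by PtbButton. Assumes a
    # Raspberry Pi with a push button on BCM pin 11 (an arbitrary
    # illustration value) and an available pygame display.
    pygame.init()
    button = PtbButton(11)
    while True:
        for event in pygame.event.get():
            if event.type == BUTTON_DOWN:
                LOGGER.info('Button on pin %s pressed', event.pin)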
|
werdeil/pibooth
|
pibooth/controls/button.py
|
Python
|
mit
| 1,039
|
count = int(input("Enter the number of fruits: "))
fruits = []
for i in range(count):
    fruit = input("Enter the name of the fruit: ")
    fruits.append(fruit)
print(fruits)
|
yatingupta10/Dev.py
|
Week 1/Student Submissions/Ayush_Jain/q3.py
|
Python
|
mit
| 169
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lab01_authserver.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
Boris-Barboris/rsoi
|
lab01/authserver/lab01_authserver/manage.py
|
Python
|
mit
| 814
|
#!/usr/bin/env python3
class SpoonSliceModes(object):
""" Some Text """
PLAY='PLY'
RANDOM='RND'
MUTE='MTE'
SKIP='SKP'
REVERSE='RVS'
DOUBLE='DBL'
HALF='HLF'
def __init__(self):
return
    def test_mode(self):
        return SpoonSliceModes.PLAY
    def is_valid_mode(self, mode_to_test):
        valid_slice_modes = [
            SpoonSliceModes.PLAY,
            SpoonSliceModes.RANDOM,
            SpoonSliceModes.MUTE,
            SpoonSliceModes.SKIP,
            SpoonSliceModes.REVERSE,
            SpoonSliceModes.DOUBLE,
            SpoonSliceModes.HALF ]
        return mode_to_test in valid_slice_modes
class SpoonLoopModes(object):
# 6 characters wide
PLAY='PLAY' # free play
STOP='STOP' #not playing
RANDOM='RAND' #random order of slice playback
REVERSE='REVS' # reverse order of slice playback
PLAY_SLAVE='PLY SL' #playback synchronised to a master. if no master, free play
PLAY_MASTER='PLY MS' # free play, but slaves are synchronized to this.
def is_valid_mode(self,mode_to_test):
valid_loop_modes = [
SpoonLoopModes.PLAY,
SpoonLoopModes.STOP,
SpoonLoopModes.RANDOM,
SpoonLoopModes.REVERSE,
SpoonLoopModes.PLAY_SLAVE,
SpoonLoopModes.PLAY_MASTER ]
return mode_to_test in valid_loop_modes
class SpoonLoopIDs(object):
ONE='/loop/1'
TWO='/loop/2'
THREE='/loop/3'
FOUR='/loop/4'
class SpoonOSCNames(object):
LOOP='loop'
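if __name__ == '__main__':
    # Minimal sketch: exercise both validators with one valid mode and one
    # invalid string ('BOGUS' is an arbitrary illustration value).
    slice_modes = SpoonSliceModes()
    loop_modes = SpoonLoopModes()
    print(slice_modes.is_valid_mode(SpoonSliceModes.MUTE))  # True
    print(loop_modes.is_valid_mode('BOGUS'))                # False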
|
zarquin/SpoonFight
|
SpoonModes.py
|
Python
|
mit
| 1,625
|
import time
from .payload import Payload
from .worker import Worker
from .features import is_available
if is_available("concurrency"):
from gevent import monkey, pool
class Consumer(Worker):
source_handlers = {
"ready": "spawn_job"
}
outputs = ['results']
def __init__(self, *args, **kwargs):
super(Consumer, self).__init__(*args, **kwargs)
self.pool = None
def setup(self):
super(Consumer, self).setup()
if "concurrency" in self.config:
monkey.patch_all(thread=False)
self.pool = pool.Pool(self.config.concurrency)
def spawn_job(self, payload):
if self.pool is not None:
self.pool.spawn(self.run_job, payload)
else:
self.run_job(payload)
def run_job(self, payload):
self.logger.debug("Job started", extra={"payload": payload})
start_time = time.time()
try:
job = Payload.deserialize(payload)
except Exception:
self.logger.exception("Exception when loading job!")
else:
try:
job.run()
except Exception:
self.logger.exception("Exception when running job!")
end_time = time.time()
self.logger.debug(
"Job completed in %0.2fs seconds", end_time - start_time
)
self.outputs['results'].put(
{"payload": payload, "time": end_time - start_time}
)
|
wglass/rotterdam
|
rotterdam/consumer.py
|
Python
|
mit
| 1,475
|
import sys, os
delete = os.path.join('src','tools')
root_path = os.getcwd().replace(delete,'')
sys.path.append(root_path)
|
golsun/GPS
|
src/tools/add_root_path.py
|
Python
|
mit
| 123
|
import sys
def traduction(N, M, table_traduction, s):
n_div = N/3
s_liste = []
decr = ''
q = 0
for i in range(0,n_div):
for v in table_traduction:
if (s[q:q+3] == v[0]):
if (decr == ''):
decr = v[1]
else:
decr = decr + ' ' + v[1]
q = q + 3
print decr
return
if __name__ == '__main__':
N = int(raw_input())
M = int(raw_input())
table_traduction = [raw_input().split() for i in range(0,M)]
s = raw_input()
traduction(N, M, table_traduction, s)
|
Hugoo/Prologin
|
2010 - Machine/QCM/acides amines.py
|
Python
|
mit
| 618
|
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = r'''
---
module: hashivault_pki_crl_get
version_added: "4.5.0"
short_description: Hashicorp Vault PKI Read CRL Configuration
description:
- This module allows getting the duration for which the generated CRL should be marked valid.
options:
mount_point:
default: pki
description:
- location where secrets engine is mounted. also known as path
extends_documentation_fragment:
- hashivault
'''
EXAMPLES = r'''
---
- hosts: localhost
tasks:
- hashivault_pki_crl_get:
register: clr_config
- debug: msg="{{ clr_config }}"
'''
def main():
argspec = hashivault_argspec()
argspec['mount_point'] = dict(required=False, type='str', default='pki')
module = hashivault_init(argspec)
result = hashivault_pki_crl_get(module)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
@hashiwrapper
def hashivault_pki_crl_get(module):
params = module.params
client = hashivault_auth_client(params)
mount_point = params.get('mount_point').strip('/')
result = {"changed": False, "rc": 0}
from hvac.exceptions import InvalidPath
try:
result['data'] = client.secrets.pki.read_crl_configuration(mount_point=mount_point).get('data')
except InvalidPath:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"CRLs must be configured before reading"
except Exception as e:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"Exception: " + str(e)
return result
if __name__ == '__main__':
main()
|
TerryHowe/ansible-modules-hashivault
|
ansible/modules/hashivault/hashivault_pki_crl_get.py
|
Python
|
mit
| 1,987
|
#!/usr/bin/env python
import sys
import math
f = sys.argv[1]
out1 = '%s_out1.spi' %(f[:-4])
out2 = '%s_out2.spi' %(f[:-4])
f1 = open(f,'r')
o1 = open(out1,'w')
o2 = open(out2,'w')
loop = 1
for line in f1:
l = line.split()
s = float(l[0])
if s < 0:
print '%f is less than 0' %(s)
new = s - 1
n = (math.fabs(new))
o1.write('%s 1 %f\n' %(loop,new))
o2.write('%s 1 %f\n' %(loop,n))
loop = loop+1
continue
if s > 0:
print '%f is greater than 0' %(s)
new = s + 1
o1.write('%s 1 %f\n' %(loop,new))
o2.write('%s 1 %f\n' %(loop,new))
loop = loop + 1
continue
|
mcianfrocco/Cianfrocco_et_al._2013
|
Focused_classification_lobeA/renumber.py
|
Python
|
mit
| 630
|
__author__ = 'heni'
import os
# This function aims to find the specific label of a phrase in the sentence
def find_label(element, labels):
c = 0
found = False
while c < len(labels) and not found:
if labels[c] in element:
found = True
else:
c += 1
return labels[c]
# This function returns a stop index for a sentence's elements
def notLabeledElement_index(elements,labels):
index=0
not_relation=False
while index<len(elements) and not not_relation:
i=0
found=False
while i<len(labels) and not found:
if labels[i] in elements[index]:
found=True
else:
i+=1
if found:
index+=1
else:
not_relation=True
assert index<len(elements)
return index
# This function converts a label from the openIE output into an English part-of-sentence label
def convert_label(openIE_label, elementInd, relationInd, myLabels):
if openIE_label == 'SimpleArgument(':
if elementInd < relationInd:
label = myLabels[0]
else:
label = myLabels[2]
elif openIE_label == 'Relation(':
label = myLabels[1]
elif openIE_label == 'Context(':
label = myLabels[3]
elif openIE_label == 'TemporalArgument(':
label = myLabels[4]
else:
label = myLabels[5]
return label
# This function writes a specific sentence element to the IOB output file;
# the element is written with its corresponding label in the IOB format
def write_element_to_iob_file(element, new_label, old_label):
text_with_label = element.split(',List(')[0]
text = text_with_label.replace(old_label, '')
input_text_in_list = text.split(' ')
output_text = []
for i in range(0, len(input_text_in_list)):
if i == 0:
output_text.append(str(input_text_in_list[i]) + '\t\t' + 'B-' + str(new_label) + '\n')
else:
output_text.append(str(input_text_in_list[i]) + '\t\t' + 'I-' + str(new_label) + '\n')
return output_text
# This function takes as input an openIE file of the article
# it returns an output file under the IOB format
def convert_openie2iob(article_name):
    try:
        article = open('qa-jbt/data/openie/' + str(article_name) + '.openie','r')
    except IOError:
        raise Exception('The wikipedia article %s has not been processed by openIE' % article_name)
print('The openIE file of %s has been found and opened' %article_name)
lines = article.readlines()[1:] # the first line containing the date of the file's creation is ignored
iob_path = 'data/iob/' + str(article_name) + '.iob'
print('Output IOB file for %s has been created' %article_name)
if not os.path.isfile(iob_path):
iob_file = open(iob_path, 'w') # open new .iob file to write
simpleArg_label = 'SimpleArgument('
relation_label = 'Relation('
context_label = 'Context('
temporalArg_label = 'TemporalArgument('
spatialArg_label = 'SpatialArgument('
labels = [simpleArg_label, relation_label, context_label, temporalArg_label, spatialArg_label]
subject = 'SUB'
predicate = 'PRED'
obj = 'OBJ'
context = 'CONT'
time = 'TIME'
location = 'LOC'
my_labels = [subject, predicate, obj, context, time, location]
for i in range(0, len(lines)):
if i%100==0:
            print ('Line %d is being processed' % i)
line_elements = lines[i].split('\t')[2:]
iob_file.write(str(i)+'\t'+line_elements[-1])
try:
relation_index = [i for i, s in enumerate(line_elements) if relation_label in s][0]
        except IndexError:
print('Relation label not found in the sentence number %d' % i)
last_index=notLabeledElement_index(line_elements,labels)
line_elements=line_elements[:last_index]
for j in range(0, len(line_elements)):
element = line_elements[j]
element_label = find_label(element, labels)
new_label = convert_label(element_label, j, relation_index, my_labels)
to_write = write_element_to_iob_file(element, new_label, element_label)
for c in range(0, len(to_write)):
iob_file.write(to_write[c])
iob_file.write('\n')
article.close()
iob_file.close()
else:
        print('The IOB file for the article %s has been previously created. No need to do so again!' % article_name)
return
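if __name__ == '__main__':
    # Hypothetical invocation: 'Barack_Obama' is a placeholder article name;
    # a matching qa-jbt/data/openie/<name>.openie file must already exist.
    convert_openie2iob('Barack_Obama')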
|
hbenarab/mt-iebkg
|
preprocess/openie2iob_format.py
|
Python
|
mit
| 4,633
|
# -*- coding: utf-8 -*-
"""
Yahoo Fantasy Sports API
:copyright: (c) 2018 by Marvin Huang
"""
from app import yahoo_oauth
# from app.models import User, Team, League, Category
class YahooAPI(object):
def __init__(self, yahoo_oauth):
self.oauth = yahoo_oauth
def get_current_user_guid(self):
'''
Return user guid
'''
uri = 'users;use_login=1'
resp = self._get(uri)
# Cannot get user nick name and image_url from this uri,
# workaround:
# from user's team's manager we can get user nickname
# and image url. But there could be multiple managers, so
# we still need the user guid to identify the correct manager.
return resp['fantasy_content']['users']['0']['user'][0]['guid']
def get_current_user_teams(self):
'''
Return the current user and all owning teams
'''
current_user = {}
current_user['guid'] = self.get_current_user_guid()
        # user owning teams
teams = []
uri = 'users;use_login=1/games;game_keys=nba/teams'
resp = self._get(uri)
teams_content = resp['fantasy_content']['users']['0']['user'][1]['games']['0']['game'][1]['teams']
team_count = int(teams_content['count'])
for idx in range(0, team_count):
team_content = teams_content[str(idx)]['team'][0]
team = {} # team is a dict, only retrieve data we needed
team['team_key'] = team_content[0]['team_key']
team['team_id'] = int(team_content[1]['team_id'])
team['name'] = team_content[2]['name']
team['team_logo'] = team_content[5]['team_logos'][0]['team_logo']['url']
teams.append(team)
# search team managers to find current user's nick name and image url
managers_content = team_content[19]['managers']
for manager_content in managers_content:
guid = manager_content['manager']['guid']
if guid == current_user['guid']:
current_user['name'] = manager_content['manager']['nickname']
current_user['image_url'] = manager_content['manager']['image_url']
return current_user,teams
def get_current_user_leagues(self):
'''
Return all leagues of a user
'''
uri = 'users;use_login=1/games;game_keys=nba/leagues'
resp = self._get(uri)
leagues_content = resp['fantasy_content']['users']['0']['user'][1]['games']['0']['game'][1]['leagues']
league_count = int(leagues_content['count'])
leagues = []
for idx in range(0,league_count):
league_content = leagues_content[str(idx)]['league'][0]
league = {} # league is a dict, only retrieve data we needed
league['league_key'] = league_content['league_key']
league['league_id'] = int(league_content['league_id'])
league['name'] = league_content['name']
league['num_teams'] = int(league_content['num_teams'])
league['scoring_type'] = league_content['scoring_type']
league['start_week'] = int(league_content['start_week'])
league['end_week'] = int(league_content['end_week'])
league['current_week'] = int(league_content['current_week'])
leagues.append(league)
# sort by league id
leagues.sort(key = lambda league : int(league['league_id']))
return leagues
def get_league_teams(self, league_key):
'''
Return all teams and managers in a league
'''
uri = 'league/{}/teams'.format(league_key)
resp = self._get(uri)
teams_content = resp['fantasy_content']['league'][1]['teams']
team_count = int(teams_content['count'])
teams = []
for idx in range(0, team_count):
team_content = teams_content[str(idx)]['team'][0]
team = {}
team['team_key'] = team_content[0]['team_key']
team['team_id'] = int(team_content[1]['team_id'])
team['name'] = team_content[2]['name']
team['team_logo'] = team_content[5]['team_logos'][0]['team_logo']['url']
teams.append(team)
# sort by team id
teams.sort(key = lambda team : int(team['team_id']))
return teams
def get_league_stat_categories(self, league_key):
'''
Return all stat categories used in this league
'''
uri = 'game/nba/leagues;league_keys={}/settings'.format(league_key)
resp = self._get(uri)
settings = resp['fantasy_content']['game'][1]['leagues']['0']['league'][1]['settings'][0]
stat_categories = settings['stat_categories']['stats']
categories = []
for stat_category in stat_categories:
stat_content = stat_category['stat']
category = {}
category['stat_id'] = int(stat_content['stat_id'])
category['display_name'] = stat_content['display_name']
category['name'] = stat_content['name']
category['sort_order'] = int(stat_content['sort_order'])
if 'is_only_display_stat' in stat_content:
category['display_only'] = int(stat_content['is_only_display_stat'])
else:
category['display_only'] = 0
categories.append(category)
return categories
def get_game_stat_categories(self):
'''
Return all available stat categories of the game(NBA),
used to dynamically create the stat table.
'''
uri = 'game/nba/stat_categories'
resp = self._get(uri)
stat_categories = [ x['stat'] for x in resp['fantasy_content']['game'][1]['stat_categories']['stats'] ]
categories = []
for stat_category in stat_categories:
category = {}
category['stat_id'] = int(stat_category['stat_id'])
category['display_name'] = stat_category['display_name']
category['name'] = stat_category['name']
category['sort_order'] = int(stat_category['sort_order'])
if 'is_only_display_stat' in stat_category:
category['display_only'] = int(stat_category['is_only_display_stat'])
else:
category['display_only'] = 0
categories.append(category)
return categories
def get_team_stat(self, team, week=0):
'''
Return the stats of a team for a certain week, or the season(week==0)
'''
if week==0:
uri = 'team/{}/stats;type=season'.format(team.team_key)
else:
uri = 'team/{}/stats;type=week;week={}'.format(team.team_key, week)
resp = self._get(uri)
team_stats = resp['fantasy_content']['team'][1]['team_stats']['stats']
stats = []
for team_stat in team_stats:
stat = {}
stat['stat_id'] = team_stat['stat']['stat_id']
stat['value'] = team_stat['stat']['value']
stats.append(stat)
# print(stats)
return stats
def _get(self, uri):
base_url = 'https://fantasysports.yahooapis.com/fantasy/v2/'
uri = base_url + uri
# print('request', uri)
resp = self.oauth.request(uri, params={'format': 'json'}).json()
# print('resp', resp)
return resp
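if __name__ == '__main__':
    # Minimal sketch, assuming `yahoo_oauth` (imported above) is already an
    # authenticated session for the Yahoo Fantasy Sports API.
    api = YahooAPI(yahoo_oauth)
    current_user, teams = api.get_current_user_teams()
    print(current_user['name'], [team['name'] for team in teams])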
|
namiszh/fba
|
WebApp/app/yahoo_api.py
|
Python
|
mit
| 7,485
|
#
# Title : Data munging(AKA cleaning) the Titanic Data
# Author : Felan Carlo Garcia
#
# Notes:
# -- Code is based on the Kaggle Python Tutorial
# -- data cleaning prior to implementing a machine learning algorithm.
import numpy as np
import pandas as pd
def processdata(filename, outputname):
df = pd.read_csv(filename,header=0)
# Make a new column 'Gender' and EmbarkedNum to convert the string
# information into an integer value.
# We do this because general machine learning algorithms do not
# work on string values.
df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
  df['EmbarkedNum'] = df['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
# Executing the code:
  # --print df[df['Age'].isnull()]['Sex']-- shows that the titanic data contains
# some null values of the ages of the passengers.
# In this case, we can either drop the row or we can assign an arbitrary
# value to fill the missing data.
# For this code, arbitrary age data is obtained by using the median
# age data of the passengers. We make a new column 'AgeFill' and place
# the median data on the missing values instead of directly modifying
# the 'Age' column
df['AgeFill'] = df['Age']
for i in range(0, 2):
for j in range(0, 3):
median = df[(df['Gender'] == i) & (df['Pclass'] == j+1)]['Age'].dropna().median()
df.loc[ (df.Age.isnull()) & (df.Gender == i) & (df.Pclass == j+1),'AgeFill'] = median
# We add a new column 'AgeIsNull' to know which records has a missing
# values previously.
# We then interpolate the missing values from the 'Fare' column.
df['AgeIsNull'] = pd.isnull(df.Age).astype(int)
df['Fare'] = df['Fare'].interpolate()
# ------------- Feature Engineering Part --------------------
# Feature Engineering is the process of using domain/expert
# knowledge of the data to create features that make machine
# learning algorithms work better.
#
# In this case, studying the data shows that women and children
# have higher survival rates compared to men. Thus we add
# two additional features: 'Female' and 'Children', in an attempt
# to assist our learning model in its prediction.
# At the same time we add features Age*Class and FamilySize
# as additional engineered feature that may help our learning
# model
df['Children'] = df['AgeFill'].map(lambda x: 1 if x < 6.0 else 0)
df['Female'] = df['Gender'].map(lambda x: 1 if x == 0 else 0)
df['FamilySize'] = df['SibSp'] + df['Parch']
df['Age*Class'] = df['AgeFill'] * df['Pclass']
# Since most machine learning algorithms don't work on strings,
# we drop the columns in our pandas dataframe containing object
# datatypes.
# The code:
# --print df.dtypes[df.dtypes.map(lambda x: x=='object')]--
# will show which columns are made of object datatypes.
#
# In this case these are the following columns containing
# object.string:
# Age, Name, Sex, Ticket, Cabin, Embarked, Fare
#
# We drop the following objects columns along with the other data
# since they wont likely contribute to our machine learning
# prediction
df = df.drop(['Age','Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1)
df.to_csv(outputname, sep=',', index=False)
return df
def main():
print processdata('titanic-data-shuffled.csv', 'final-data.csv')
if __name__ == '__main__':
main()
|
cadrev/Titanic-Prediction
|
data-munging.py
|
Python
|
mit
| 3,412
|
import pypcd
import plyfile
import numpy
import scipy.spatial
import mcubes
import tempfile
import meshlabxml
from pyhull.convex_hull import ConvexHull
from curvox import pc_vox_utils
import curvox.cloud_conversions
import types
# Binvox functions
def binvox_to_ply(voxel_grid, **kwargs):
"""
:param voxel_grid:
:type voxel_grid: binvox_rw.Voxels
:param kwargs:
:return:
"""
percent_offset = kwargs.get("percent_offset", (0.5, 0.5, 0.45))
marching_cubes_resolution = kwargs.get("marching_cubes_resolution", 0.5)
patch_size = voxel_grid.dims[0]
pc_center_in_voxel_grid = (patch_size * percent_offset[0], patch_size * percent_offset[1], patch_size * percent_offset[2])
voxel_resolution = voxel_grid.scale / patch_size
center_point_in_voxel_grid = voxel_grid.translate + numpy.array(pc_center_in_voxel_grid) * voxel_resolution
vertices, faces = mcubes.marching_cubes(voxel_grid.data, marching_cubes_resolution)
vertices = vertices * voxel_resolution - numpy.array(pc_center_in_voxel_grid) * voxel_resolution + numpy.array(center_point_in_voxel_grid)
ply_data = generate_ply_data(vertices, faces)
# Export to plyfile type
return ply_data
def voxel_grid_to_ply(voxel_grid, **kwargs):
    """
    :param voxel_grid:
    :type voxel_grid: numpy.Array
    :param kwargs:
    :return:
    """
    marching_cubes_resolution = kwargs.get("marching_cubes_resolution", 0.5)
    # voxel_resolution scales mesh vertices from voxel indices to world
    # units; the default of 1.0 is an assumption, so pass the real grid
    # resolution when it is known.
    voxel_resolution = kwargs.get("voxel_resolution", 1.0)
    vertices, faces = mcubes.marching_cubes(voxel_grid, marching_cubes_resolution)
    vertices = vertices * voxel_resolution
    ply_data = generate_ply_data(vertices, faces)
    # Export to plyfile type
    return ply_data
# Helper functions
def generate_ply_data(points, faces):
"""
:param points:
:param faces:
:return:
"""
vertices = [(point[0], point[1], point[2]) for point in points]
faces = [(point,) for point in faces]
vertices_np = numpy.array(vertices, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
faces_np = numpy.array(faces, dtype=[('vertex_indices', 'i4', (3,))])
vertex_element = plyfile.PlyElement.describe(vertices_np, 'vertex')
face_element = plyfile.PlyElement.describe(faces_np, 'face')
return plyfile.PlyData([vertex_element, face_element], text=True)
def smooth_ply(ply_data):
# Store ply_data in temporary file
tmp_ply_filename = tempfile.mktemp(suffix=".ply")
with open(tmp_ply_filename, 'w') as tmp_ply_file:
ply_data.write(tmp_ply_file)
# Initialize meshlabserver and meshlabxml script
unsmoothed_mesh = meshlabxml.FilterScript(file_in=tmp_ply_filename, file_out=tmp_ply_filename, ml_version="1.3.2")
meshlabxml.smooth.laplacian(unsmoothed_mesh, iterations=6)
unsmoothed_mesh.run_script(print_meshlabserver_output=False, skip_error=True)
# Read back and store new data
with open(tmp_ply_filename, 'r') as tmp_ply_file:
ply_data_smoothed = plyfile.PlyData.read(tmp_ply_file)
return ply_data_smoothed
def _generate_gaussian_process_points(points, offsets, observed_value, offset_value):
offset_points = numpy.subtract(points, offsets)
new_points = numpy.concatenate([points, offset_points])
observations = numpy.ones((new_points.shape[0], 1))
# Assign first half to observed_value and second half to offset_value
observations[:points.shape[0]] = observed_value
observations[points.shape[0]:] = offset_value
return new_points, observations
def read_obj_file(obj_filepath):
with open(obj_filepath, 'r') as f:
lines = f.readlines()
verts = numpy.array([[float(v) for v in l.strip().split(' ')[1:]] for l in lines if
l[0] == 'v' and len(l.strip().split(' ')) == 4])
faces = numpy.array([[int(v) for v in l.strip().split(' ')[1:]] for l in lines if l[0] == 'f'])
faces -= 1
return verts, faces
def convert_obj_to_ply(obj_filepath):
verts, faces = read_obj_file(obj_filepath)
ply_data = generate_ply_data(verts, faces)
return ply_data
# Functions for saving and loading PCL and PLY files
def complete_pcd_file_and_save(pcd_filename, completion_method, **kwargs):
"""
:param pcd_filename:
:param completion_method:
:type completion_method: types.FunctionType
:param kwargs:
:return:
"""
if completion_method not in COMPLETION_METHODS:
raise ValueError("completion_method must be a completion method defined in this module")
suffix = kwargs.get("suffix", "")
    cloud = pypcd.PointCloud.from_path(pcd_filename)
points = curvox.cloud_conversions.pcl_to_np(cloud)
plydata = completion_method(points, **kwargs)
ply_filename = pcd_filename.replace(".pcd", suffix + ".ply")
plydata.write(open(ply_filename, 'wb'))
def complete_tactile_pcd_file_and_save(depth_pcd_filename, tactile_pcd_filename, completion_method, **kwargs):
"""
:param depth_pcd_filename:
:param tactile_pcd_filename:
:param completion_method:
:param kwargs:
:return:
"""
suffix = kwargs.get("suffix", "")
    depth_cloud = pypcd.PointCloud.from_path(depth_pcd_filename)
depth_points = curvox.cloud_conversions.pcl_to_np(depth_cloud)
    tactile_cloud = pypcd.PointCloud.from_path(tactile_pcd_filename)
tactile_points = curvox.cloud_conversions.pcl_to_np(tactile_cloud)
if completion_method not in TACTILE_COMPLETION_METHODS:
raise ValueError("completion_method must be a tactile completion method defined in this module")
plydata = completion_method(depth_points, tactile_points, **kwargs)
ply_filename = depth_pcd_filename.replace(".pcd", suffix + ".ply")
plydata.write(open(ply_filename, 'wb'))
# Depth only completions
def delaunay_completion(points, **kwargs):
"""
:param points:
:param kwargs:
:return:
"""
smooth = kwargs.get("smooth", False)
# Triangulate points in (x, y) coordinate plane
tri = scipy.spatial.Delaunay(points[:, 0:2])
ply_data = generate_ply_data(points, tri.simplices)
if smooth:
ply_data = smooth_ply(ply_data)
# Generate ply data
return ply_data
def marching_cubes_completion(points, **kwargs):
"""
:param points:
:param kwargs:
:return:
"""
patch_size = kwargs.get("patch_size", 120)
percent_offset = kwargs.get("percent_offset", (0.5, 0.5, 0.45))
percent_patch_size = kwargs.get("percent_patch_size", 0.8)
marching_cubes_resolution = kwargs.get("marching_cubes_resolution", 0.5)
smooth = kwargs.get("smooth", False)
voxel_grid, voxel_center, voxel_resolution, center_point_in_voxel_grid = pc_vox_utils.pc_to_binvox(
points=points,
patch_size=patch_size,
percent_offset=percent_offset,
percent_patch_size=percent_patch_size
)
vertices, faces = mcubes.marching_cubes(voxel_grid.data, marching_cubes_resolution)
vertices = pc_vox_utils.rescale_mesh(vertices, voxel_center, voxel_resolution, center_point_in_voxel_grid)
ply_data = generate_ply_data(vertices, faces)
# If we are smoothing use meshlabserver to smooth over mesh
if smooth:
ply_data = smooth_ply(ply_data)
# Export to plyfile type
return ply_data
def qhull_completion(points, **kwargs):
smooth = kwargs.get("smooth", False)
hull = ConvexHull(points)
# Fix inverted normals from pyhull
hull.vertices = [vertex[::-1] for vertex in hull.vertices]
ply_data = generate_ply_data(points, hull.vertices)
# If we are smoothing use meshlabserver to smooth over mesh
if smooth:
ply_data = smooth_ply(ply_data)
return ply_data
# Tactile completion methods
def delaunay_tactile_completion(depth_points, tactile_points, **kwargs):
points = numpy.concatenate([depth_points, tactile_points])
return delaunay_completion(points, **kwargs)
def marching_cubes_tactile_completion(depth_points, tactile_points, **kwargs):
points = numpy.concatenate([depth_points, tactile_points])
return marching_cubes_completion(points, **kwargs)
def qhull_tactile_completion(depth_points, tactile_points, **kwargs):
points = numpy.concatenate([depth_points, tactile_points])
return qhull_completion(points, **kwargs)
COMPLETION_METHODS = [
delaunay_completion,
marching_cubes_completion,
qhull_completion,
]
TACTILE_COMPLETION_METHODS = [
delaunay_tactile_completion,
marching_cubes_tactile_completion,
qhull_tactile_completion
]
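if __name__ == '__main__':
    # Minimal sketch: run two of the depth-only completions on a random
    # synthetic cloud (the 100-point size and /tmp paths are illustration
    # values, not part of the library).
    demo_points = numpy.random.rand(100, 3)
    for method in (delaunay_completion, qhull_completion):
        ply_data = method(demo_points)
        with open('/tmp/demo_{}.ply'.format(method.__name__), 'wb') as ply_file:
            ply_data.write(ply_file)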
|
CRLab/curvox
|
src/curvox/cloud_to_mesh_conversions.py
|
Python
|
mit
| 9,058
|
import numpy as np
import pandas as pd
import datetime
import util
class Participant:
# Need to update to have both network and retail tariffs as inputs
def __init__(self, participant_id, participant_type, retail_tariff_type, network_tariff_type,retailer):
self.participant_id = participant_id
self.participant_type = participant_type
self.retail_tariff_type = retail_tariff_type
self.network_tariff_type = network_tariff_type
self.retailer = retailer
def print_attributes(self):
print(self.participant_type, self.retail_tariff_type, self.network_tariff_type, self.retailer)
# TODO - make this work
def calc_net_export(self, date_time, interval_min):
return np.random.uniform(-10,10)
def get_id(self):
return self.participant_id
def get_retail_tariff_type(self):
return self.retail_tariff_type
def get_network_tariff_type(self):
return self.network_tariff_type
class CSV_Participant(Participant):
def __init__(self, participant_id, participant_type, retail_tariff_type, network_tariff_type, retailer, solar_path, load_path, solar_capacity):
Participant.__init__(self, participant_id, participant_type, retail_tariff_type, network_tariff_type, retailer)
self.solar_path = solar_path
self.load_path = load_path
solar_data = pd.read_csv(solar_path,index_col = 'date_time', parse_dates=True, date_parser=util.date_parser)
        load_data = pd.read_csv(load_path,index_col = 'date_time', parse_dates=True, date_parser=util.date_parser)
# Delete all cols not relevant to this participant
self.load_data = load_data[participant_id]
self.solar_data = solar_data[participant_id]
# Apply capacity to solar data
self.solar_data = self.solar_data * solar_capacity
# print solar_data
def calc_net_export(self, date_time, interval_min):
solar_data = float(self.solar_data.loc[date_time])
load_data = float(self.load_data.loc[date_time])
net_export = solar_data - load_data
return net_export
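if __name__ == '__main__':
    # Minimal sketch using the base Participant; every constructor argument
    # here is a placeholder, and calc_net_export is the random stub above.
    participant = Participant('house_1', 'consumer', 'flat_rate', 'flat_rate', 'retailer_a')
    participant.print_attributes()
    print(participant.calc_net_export(datetime.datetime(2017, 1, 1, 0, 30), 30))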
|
lukasmarshall/embedded-network-model
|
participant.py
|
Python
|
mit
| 2,188
|
from ._Calibrate import *
|
sbragagnolo/xsens
|
src/xsens/srv/__init__.py
|
Python
|
mit
| 26
|
"""Queries."""
from collections import ChainMap
from typing import Dict, List
from flask_sqlalchemy import BaseQuery
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm.util import AliasedClass
from sqlalchemy.sql.elements import BooleanClauseList
from pma_api.models import db, Characteristic, CharacteristicGroup, Country, \
Data, EnglishString, Geography, Indicator, Survey, Translation
# pylint: disable=too-many-public-methods
class DatalabData:
"""PmaData."""
char1 = aliased(Characteristic)
char2 = aliased(Characteristic)
char_grp1 = aliased(CharacteristicGroup)
char_grp2 = aliased(CharacteristicGroup)
@staticmethod
def all_joined(*select_args):
"""Datalab data joined."""
chr1 = DatalabData.char1
chr2 = DatalabData.char2
grp1 = DatalabData.char_grp1
grp2 = DatalabData.char_grp2
joined = db.session.query(*select_args) \
.select_from(Data) \
.join(Survey, Data.survey_id == Survey.id) \
.join(Geography, Survey.geography_id == Geography.id) \
.join(Country, Survey.country_id == Country.id) \
.join(Indicator, Data.indicator_id == Indicator.id) \
.outerjoin(chr1, Data.char1_id == chr1.id) \
.outerjoin(grp1, grp1.id == chr1.char_grp_id) \
.outerjoin(chr2, Data.char2_id == chr2.id) \
.outerjoin(grp2, grp2.id == chr2.char_grp_id)
return joined
@staticmethod
def series_query(survey_codes, indicator_code, char_grp_code, over_time):
"""Get the series based on supplied codes."""
json_list: List[Dict] = DatalabData.filter_minimal(
survey_codes, indicator_code, char_grp_code, over_time)
if over_time:
series_list = DatalabData.data_to_time_series(json_list)
else:
series_list = DatalabData.data_to_series(json_list)
return series_list
@staticmethod
def data_to_time_series(sorted_data):
"""Transform a sorted list of data into time series."""
curr_char = None
curr_geo = None
results = []
next_series = {}
for obj in sorted_data:
new_char = obj['characteristic.id'] != curr_char
new_geo = obj['geography.id'] != curr_geo
if new_char or new_geo:
if curr_char and curr_geo:
results.append(next_series)
next_series = {
'characteristic.id': obj.pop('characteristic.id'),
'characteristic.label.id':
obj.pop('characteristic.label.id'),
'geography.id': obj.pop('geography.id'),
'geography.label.id': obj.pop('geography.label.id'),
'country.id': obj.pop('country.id'),
'country.label.id': obj.pop('country.label.id'),
'values': [
{
'survey.id': obj.pop('survey.id'),
'survey.label.id': obj.pop('survey.label.id'),
'survey.date': obj.pop('survey.date'),
'value': obj.pop('value'),
}
]
}
curr_char = next_series['characteristic.id']
curr_geo = next_series['geography.id']
else:
next_series['values'].append({
'survey.id': obj.pop('survey.id'),
'survey.label.id': obj.pop('survey.label.id'),
'survey.date': obj.pop('survey.date'),
'value': obj.pop('value'),
})
if next_series:
results.append(next_series)
return results
@staticmethod
def data_to_series(sorted_data):
"""Transform a sorted list of data into series."""
curr_survey = None
results = []
next_series = {}
for obj in sorted_data:
if obj['survey.id'] != curr_survey:
if curr_survey:
results.append(next_series)
next_series = {
'survey.id': obj.pop('survey.id'),
'survey.label.id': obj.pop('survey.label.id'),
'geography.id': obj.pop('geography.id'),
'geography.label.id': obj.pop('geography.label.id'),
'country.id': obj.pop('country.id'),
'country.label.id': obj.pop('country.label.id'),
'values': [
{
'characteristic.label.id':
obj.pop('characteristic.label.id'),
'characteristic.id': obj.pop('characteristic.id'),
'value': obj.pop('value'),
}
]
}
curr_survey = next_series['survey.id']
else:
next_series['values'].append({
'characteristic.label.id':
obj.pop('characteristic.label.id'),
'characteristic.id': obj.pop('characteristic.id'),
'value': obj.pop('value'),
})
if next_series:
results.append(next_series)
return results
@staticmethod
def filter_readable(
survey_codes: str,
indicator_code: str,
char_grp_code: str,
lang=None
):
"""Get filtered Datalab data and return readable columns.
Args:
survey_codes (str): Comma-delimited list of survey codes
indicator_code (str): An indicator code
char_grp_code (str): A characteristic group code
lang (str): The language, if specified.
Filters the data based on the function arguments.
Returns:
A list of simple python objects, one for each record found by
applying the various filters.
"""
chr1: AliasedClass = DatalabData.char1
grp1: AliasedClass = DatalabData.char_grp1
grp2: AliasedClass = DatalabData.char_grp2
select_args: tuple = (Data, Survey, Indicator, grp1, chr1)
filtered: BaseQuery = DatalabData.all_joined(*select_args)
if survey_codes:
survey_sql = DatalabData.survey_list_to_sql(survey_codes)
filtered: BaseQuery = filtered.filter(survey_sql)
if indicator_code:
filtered: BaseQuery = \
filtered.filter(Indicator.code == indicator_code)
if char_grp_code:
filtered: BaseQuery = filtered.filter(grp1.code == char_grp_code)
# TODO (jkp, begin=2017-08-28): This will be grp2.code is None
# eventually when the Data show "none" for char_grp2 in excel import
# Remove E711 from .pycodestyle
# pylint: disable=singleton-comparison
filtered: BaseQuery = filtered.filter(grp2.code == None)
results: List = filtered.all()
json_results = []
for item in results:
precision = item[0].precision
if precision is None:
precision = 1
value = round(item[0].value, precision)
this_dict = {
'value': value,
'survey.id': item[1].code,
'survey.date': item[1].start_date.strftime('%m-%Y'),
'indicator.label': item[2].label.to_string(lang),
'characteristicGroup.label': item[3].label.to_string(lang),
'characteristic.label': item[4].label.to_string(lang)
}
json_results.append(this_dict)
return json_results
@staticmethod
def filter_minimal(
survey_codes: str,
indicator_code: str,
char_grp_code: str,
over_time
) -> List[Dict]:
"""Get filtered Datalab data and return minimal columns.
Args:
survey_codes (str): Comma-delimited list of survey codes
indicator_code (str): An indicator code
char_grp_code (str): A characteristic group code
over_time (bool): Filter charting over time?
Filters the data based on the function arguments. The returned data
are data value, the precision, the survey code, the indicator code,
the characteristic group code, and the characteristic code.
Returns:
A list of simple python objects, one for each record found by
applying the various filters.
"""
chr1: AliasedClass = DatalabData.char1 # Characteristic
grp1: AliasedClass = DatalabData.char_grp1 # CharacteristicGroup
grp2: AliasedClass = DatalabData.char_grp2 # CharacteristicGroup
# Tuple[Union[AliasedClass, InstrumentedAttribute, ApiModel]]
select_args: tuple = (
Data, Survey, Indicator.code, grp1.code, chr1, Geography, Country)
filtered: BaseQuery = DatalabData.all_joined(*select_args)
if survey_codes:
survey_sql: BooleanClauseList = \
DatalabData.survey_list_to_sql(survey_codes)
filtered: BaseQuery = filtered.filter(survey_sql)
if indicator_code:
filtered: BaseQuery = \
filtered.filter(Indicator.code == indicator_code)
if char_grp_code:
filtered: BaseQuery = filtered.filter(grp1.code == char_grp_code)
# TODO 2017.08.28-jkp: Remove E711 from .pycodestyle
# TO-DO: 2019-04-15-jef: For some reason, pycharm is still flagging
# this even with the noinspection.
# 'is None' rather than '== None' will yield an error here.
# pylint: disable=singleton-comparison
# noinspection PyComparisonWithNone,PyPep8
filtered: BaseQuery = filtered.filter(grp2.code == None)
if over_time:
# This ordering is very important!
ordered: BaseQuery = filtered\
.order_by(Geography.order)\
.order_by(chr1.order)\
.order_by(Survey.order)
# Perhaps order by the date of the survey?
else:
ordered: BaseQuery = filtered\
.order_by(Survey.order)\
.order_by(chr1.order)
mdl_results: List = ordered.all()
idx: Dict[str, int] = { # result:index map
'Data': 0, # Data
'Survey': 1, # Survey
'Indicator.id': 2, # str
'CharacteristicGroup.id': 3, # str
'Characteristic': 4, # Characteristic
            'Geography': 5, # Geography
'Country': 6, # Country
}
json_results: List[Dict] = []
for item in mdl_results:
this_dict = {
'value': item[idx['Data']].value,
'precision': item[idx['Data']].precision,
'survey.id': item[idx['Survey']].code,
'survey.date':
item[idx['Survey']].start_date.strftime('%m-%Y'),
'survey.label.id': item[idx['Survey']].label.code,
'indicator.id': item[idx['Indicator.id']],
'characteristicGroup.id': item[idx['CharacteristicGroup.id']],
'characteristic.id': item[idx['Characteristic']].code,
'characteristic.label.id':
item[idx['Characteristic']].label.code,
'geography.label.id': item[idx['Geography']].subheading.code,
'geography.id': item[idx['Geography']].code,
'country.label.id': item[idx['Country']].label.code,
'country.id': item[idx['Country']].code
}
json_results.append(this_dict)
return json_results
@staticmethod
def survey_list_to_sql(survey_list):
"""Turn a list of surveys passed through URL to SQL.
Args:
survey_list (str): A list of survey codes
Returns:
The SQLAlchemy object that represents these OR'd together.
"""
return DatalabData.api_list_to_sql_list(Survey, survey_list)
@staticmethod
def api_list_to_sql_list(model, query_values):
"""Convert generally query args to SQL.
Args:
model (db.Model): A model object with a code attribute
query_values (str): A list of codes joined by comma
Results:
The SQLAlchemy object that represents these OR'd together.
"""
# TODO (jkp 2017-08-28) Error checking on survey_list.
split = query_values.split(',')
sql_exprs = [model.code == code for code in split]
if len(sql_exprs) > 1:
full_sql = or_(*sql_exprs)
else:
full_sql = sql_exprs[0]
return full_sql
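    # Illustrative example (survey codes are placeholders):
    #   api_list_to_sql_list(Survey, 'PMA2013_CDR1,PMA2014_CDR2')
    # builds the clause
    #   or_(Survey.code == 'PMA2013_CDR1', Survey.code == 'PMA2014_CDR2')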
# pylint: disable=too-many-locals
@staticmethod
def combos_all(survey_list: List[str], indicator: str, char_grp: str):
"""Get lists of all valid datalab selections.
Based on a current selection in the datalab, this method returns lists
of what should be clickable in each of the three selection areas of
the datalab.
Args:
survey_list (list(str)): A list of survey codes. An empty list if
not provided.
indicator (str): An indicator code or None if not provided.
char_grp(str): An characteristic group code or None if not
provided.
Returns:
A dictionary with a survey list, an indicator list, and a
characteristic group list.
"""
def keep_survey(this_indicator: str, this_char_grp: str):
"""Determine whether a survey from the data is valid.
Args:
this_indicator (str): An indicator code from the data
this_char_grp (str): A characteristic code from the data
Returns:
True or False to say if the related survey code should be
included in the return set.
"""
if not indicator and not char_grp:
keep = True
            elif not indicator and char_grp:
keep = this_char_grp == char_grp
            elif indicator and not char_grp:
keep = this_indicator == indicator
else:
indicator_match = this_indicator == indicator
char_grp_match = this_char_grp == char_grp
keep = indicator_match and char_grp_match
return keep
def keep_indicator(this_survey, this_char_grp):
"""Determine whether an indicator from the data is valid.
Args:
this_survey (str): A survey code from the data
this_char_grp (str): A characteristic code from the data
Returns:
True or False to say if the related indicator code should be
included in the return set.
"""
if not survey_list and char_grp is None:
keep = True
elif not survey_list and char_grp is not None:
keep = this_char_grp == char_grp
elif survey_list and char_grp is None:
keep = this_survey in survey_list
else:
survey_match = this_survey in survey_list
char_grp_match = this_char_grp == char_grp
keep = survey_match and char_grp_match
return keep
def keep_char_grp(this_survey, this_indicator):
"""Determine whether a characterist group from the data is valid.
Args:
this_survey (str): A survey code from the data
this_indicator (str): An indicator code from the data
Returns:
True or False to say if the related characteristic group code
should be included in the return set.
"""
if not survey_list and indicator is None:
keep = True
elif not survey_list and indicator is not None:
keep = this_indicator == indicator
elif survey_list and indicator is None:
keep = this_survey in survey_list
else:
survey_match = this_survey in survey_list
indicator_match = this_indicator == indicator
keep = survey_match and indicator_match
return keep
select_args = (Survey.code, Indicator.code, DatalabData.char_grp1.code)
joined = DatalabData.all_joined(*select_args)
results = joined.distinct().all()
surveys = set()
indicators = set()
char_grps = set()
for survey_code, indicator_code, char_grp_code in results:
if keep_survey(indicator_code, char_grp_code):
surveys.add(survey_code)
if keep_indicator(survey_code, char_grp_code):
indicators.add(indicator_code)
if keep_char_grp(survey_code, indicator_code):
char_grps.add(char_grp_code)
json_obj = {
'survey.id': sorted(list(surveys)),
'indicator.id': sorted(list(indicators)),
'characteristicGroup.id': sorted(list(char_grps))
}
return json_obj
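    # Example return value (codes are hypothetical); each list is sorted:
    #   {'survey.id': ['GH2013_R1'], 'indicator.id': ['mcp_all'],
    #    'characteristicGroup.id': ['residence', 'wealth']}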
@staticmethod
def all_minimal() -> List[Dict]:
"""Get all datalab data in the minimal style.
Returns:
list(dict): Datalab data, filtered minimally
"""
results: List[Dict] = DatalabData.filter_minimal('', '', '', False)
return results
@staticmethod
def combos_indicator(indicator: str) -> Dict:
"""Get all valid combos of survey and characteristic group.
Args:
indicator (str): An indicator code
Returns:
A dictionary with two key names and list values.
"""
select_args = (Survey.code, DatalabData.char_grp1.code)
joined = DatalabData.all_joined(*select_args)
filtered = joined.filter(Indicator.code == indicator)
results = filtered.distinct().all()
survey_codes = set()
char_grp_codes = set()
for item in results:
survey_code = item[0]
survey_codes.add(survey_code)
char_grp_code = item[1]
char_grp_codes.add(char_grp_code)
to_return = {
'survey.id': sorted(list(survey_codes)),
'characteristicGroup.id': sorted(list(char_grp_codes))
}
return to_return
@staticmethod
def combos_char_grp(char_grp_code):
"""Get all valid combos of survey and indicator.
Args:
char_grp_code (str): A characteristic group code
Returns:
A dictionary with two key names and list values.
"""
select_args = (Survey.code, Indicator.code)
joined = DatalabData.all_joined(*select_args)
filtered = joined.filter(DatalabData.char_grp1.code == char_grp_code)
results = filtered.distinct().all()
survey_codes = set()
indicator_codes = set()
for item in results:
survey_code = item[0]
survey_codes.add(survey_code)
indicator_code = item[1]
indicator_codes.add(indicator_code)
to_return = {
'survey.id': sorted(list(survey_codes)),
'indicator.id': sorted(list(indicator_codes))
}
return to_return
@staticmethod
def combos_survey_list(survey_list):
# TODO (jkp 2017-08-29): make better. make hashmaps one to the other
"""Get all valid combos of indicator and characteristic groups.
Args:
survey_list (str): A list of survey codes, comma separated
Returns:
            A dict mapping each indicator to its valid characteristic
            groups, and each characteristic group to its valid indicators.
"""
select_args = (Indicator.code, DatalabData.char_grp1.code)
joined = DatalabData.all_joined(*select_args)
survey_list_sql = DatalabData.survey_list_to_sql(survey_list)
filtered = joined.filter(survey_list_sql)
results = filtered.distinct().all()
indicator_dict = {}
char_grp_dict = {}
for item in results:
this_indicator = item[0]
this_char_grp = item[1]
if this_indicator in indicator_dict:
indicator_dict[this_indicator].add(this_char_grp)
else:
indicator_dict[this_indicator] = {this_char_grp}
if this_char_grp in char_grp_dict:
char_grp_dict[this_char_grp].add(this_indicator)
else:
char_grp_dict[this_char_grp] = {this_indicator}
new_indicator_dict = {
k: sorted(list(v)) for k, v in indicator_dict.items()
}
new_char_grp_dict = {
k: sorted(list(v)) for k, v in char_grp_dict.items()
}
to_return = {
'indicators': new_indicator_dict,
'characteristicGroups': new_char_grp_dict
}
return to_return
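    # Example return value (codes are hypothetical): the two maps mirror
    # each other, listing valid pairings in both directions:
    #   {'indicators': {'mcp_all': ['residence', 'wealth']},
    #    'characteristicGroups': {'residence': ['mcp_all'],
    #                             'wealth': ['mcp_all']}}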
@staticmethod
def combos_indicator_char_grp(indicator_code, char_grp_code):
"""Get all valid surveys from supplied arguments.
Args:
indicator_code (str): An indicator code
char_grp_code (str): A characteristic group code
Returns:
A list of surveys that have data for the supplied indicator and
characteristic group
"""
select_arg = Survey.code
joined = DatalabData.all_joined(select_arg)
filtered = joined.filter(Indicator.code == indicator_code) \
.filter(DatalabData.char_grp1.code == char_grp_code)
results = filtered.distinct().all()
to_return = {
'survey.id': [item[0] for item in results]
}
return to_return
@staticmethod
def init_indicators():
"""Datalab init."""
select_args = Indicator
joined = DatalabData.all_joined(select_args)
ordered = joined.order_by(Indicator.order)
results = ordered.distinct().all()
indicator_categories = []
for ind in results:
for cat in indicator_categories:
if ind.level2.code == cat['label.id']:
cat['indicators'].append(ind.datalab_init_json())
break
else:
indicator_categories.append({
'label.id': ind.level2.code,
'indicators': [ind.datalab_init_json()]
})
return indicator_categories
@staticmethod
def init_char_grp():
"""Datalab init."""
select_args = DatalabData.char_grp1
joined = DatalabData.all_joined(select_args)
ordered = joined.order_by(DatalabData.char_grp1.order)
results = ordered.distinct().all()
chargrp_categories = []
for char_grp in results:
for cat in chargrp_categories:
if char_grp.category.code == cat['label.id']:
cat['characteristicGroups'].append(
char_grp.datalab_init_json())
break
else:
chargrp_categories.append({
'label.id': char_grp.category.code,
'characteristicGroups': [char_grp.datalab_init_json()]
})
return chargrp_categories
@staticmethod
def init_chars():
"""Datalab init."""
select_args = DatalabData.char1
joined = DatalabData.all_joined(select_args)
results = joined.distinct().all()
results = [record.datalab_init_json() if record is not None else "none"
for record in results]
return results
@staticmethod
def init_surveys():
# pylint: disable=too-many-locals
# TODO (2017-09-05 jkp) refactor so that this method is simpler
"""Datalab init."""
select_args = Survey
joined = DatalabData.all_joined(select_args)
ordered = joined.order_by(Country.order) \
.order_by(Geography.order) \
.order_by(Survey.order)
results = ordered.distinct().all()
country_order = []
country_map = {}
country_geo_map = {}
for survey in results:
country = survey.country
country_code = country.code
geo = survey.geography
geo_code = geo.code
country_geo_key = '|'.join((country_code, geo_code))
if country not in country_order:
country_order.append(country)
if country_code in country_map:
if geo not in country_map[country_code]:
country_map[country_code].append(geo)
elif country_code not in country_map:
country_map[country_code] = [geo]
if country_geo_key in country_geo_map:
country_geo_map[country_geo_key].append(survey)
else:
country_geo_map[country_geo_key] = [survey]
survey_country_list = []
for country in country_order:
this_country_geos = country_map[country.code]
geography_list = []
for geo in this_country_geos:
country_geo_key = '|'.join((country.code, geo.code))
surveys = country_geo_map[country_geo_key]
survey_list = [s.datalab_init_json() for s in surveys]
this_geo_obj = {
'label.id': geo.subheading.code,
'surveys': survey_list
}
geography_list.append(this_geo_obj)
this_country_obj = {
'label.id': country.label.code,
'geographies': geography_list
}
survey_country_list.append(this_country_obj)
return survey_country_list
# TODO: (jkp 2017-08-29) Get other languages. Needs: Nothing.
@staticmethod
def init_strings():
"""Datalab init."""
results = EnglishString.query.all()
results = [record.datalab_init_json() for record in results]
results = dict(ChainMap(*results))
return results
@staticmethod
def init_languages():
"""Datalab init."""
return Translation.languages()
@staticmethod
def datalab_init():
"""Datalab Init."""
return {
'indicatorCategories': DatalabData.init_indicators(),
'characteristicGroupCategories': DatalabData.init_char_grp(),
'characteristics': DatalabData.init_chars(),
'surveyCountries': DatalabData.init_surveys(),
'strings': DatalabData.init_strings(),
'languages': DatalabData.init_languages()
}
@staticmethod
def query_input(survey: str, indicator: str, char_grp: str) -> Dict:
"""Build up a dictionary of query input to return with API result.
Args:
survey (str): Comma-delimited list of survey codes
indicator (str): An indicator code
char_grp (str): A characteristic group code
Returns:
A dictionary with lists of input data. Data is from datalab init.
"""
survey_list = sorted(survey.split(',')) if survey else []
survey_records = Survey.get_by_code(survey_list) if survey_list else []
input_survey = \
[r.datalab_init_json(reduced=False) for r in survey_records]
indicator_records = Indicator.get_by_code(indicator)
if indicator_records:
input_indicator = [indicator_records[0].datalab_init_json()]
else:
input_indicator = None
char_grp_records = CharacteristicGroup.get_by_code(char_grp)
if char_grp_records:
input_char_grp = [char_grp_records[0].datalab_init_json()]
else:
input_char_grp = None
query_input = {
'surveys': input_survey,
'characteristicGroups': input_char_grp,
'indicators': input_indicator
}
return query_input
|
joeflack4/pma-api
|
pma_api/queries.py
|
Python
|
mit
| 28,165
|
from django.conf.urls import patterns, url
from admins import views
urlpatterns = patterns(
'',
# Control panels
url(r'^admin/overview/$', views.overview, name='admin_overview'),
)
|
Sult/evehub
|
admins/urls.py
|
Python
|
mit
| 197
|
"""A Dakotathon uncertainty quantification experiment with Hydrotrend.
This experiment uses the `Sampling`_ method to assess the effect of
uncertain mean annual temperature and total annual precipitation
values on the median value of suspended sediment load of the Waipaoa
River over a 10-year interval. The temperature (T) and precipitation
(P) values are assumed to be uniformly distributed random variables,
with bounds set at +/- 10 percent from their default values. One
hundred samples are chosen from the T-P parameter space using Latin
hypercube sampling, then used as inputs to the Hydrotrend model. A
time series of daily Qs values is generated for each 10-year
run. Dakota calculates the median Qs value for each of the 100 runs
and uses them to calculate moments, 95 percent confidence intervals,
and a PDF and a CDF of the Qs values. From these measures, we can
quantify the probability that Qs exceeds a threshold value due to
uncertainty in the input T and P parameters.
Example
--------
Run this experiment with::
$ python hydrotrend-sampling-study.py
Notes
-----
This experiment requires a WMT executor with PyMT installed. It also
requires Dakotathon and Hydrotrend installed as CSDMS components.
.. _Sampling
http://csdms-dakota.readthedocs.io/en/latest/analysis_methods.html#module-dakotathon.method.sampling
"""
import os
from pymt.components import Sampling, Hydrotrend
from dakotathon.utils import configure_parameters
model, dakota = Hydrotrend(), Sampling()
experiment = {
'component': type(model).__name__,
'run_duration': 10, # years
'auxiliary_files': 'HYDRO0.HYPS', # Waipaoa hypsometry
'samples': 100,
'sample_type': 'lhs',
'seed': 17,
'probability_levels': [0.05, 0.10, 0.33, 0.50, 0.67, 0.90, 0.95],
'response_levels': [5.0],
'descriptors': ['starting_mean_annual_temperature',
'total_annual_precipitation'],
'variable_type': 'uniform_uncertain',
'lower_bounds': [12.8, 1.4],
'upper_bounds': [15.8, 1.8],
'response_descriptors': 'channel_exit_water_sediment~suspended__mass_flow_rate',
'response_statistics': 'median',
}
dakota_parameters, model_parameters = configure_parameters(experiment)
dakota_parameters['run_directory'] = model.setup(os.getcwd(), **model_parameters)
cfg_file = 'HYDRO.IN' # get from pymt eventually
dakota_tmpl_file = cfg_file + '.dtmpl'
os.rename(cfg_file, dakota_tmpl_file)
dakota_parameters['template_file'] = dakota_tmpl_file
dakota.setup(dakota_parameters['run_directory'], **dakota_parameters)
dakota.initialize('dakota.yaml')
dakota.update()
dakota.finalize()
|
mdpiper/AGU-2016
|
hydrotrend-Qs-sampling-study/hydrotrend-sampling-study.py
|
Python
|
mit
| 2,647
|
__author__ = 'Tauren'
from flask import abort
from flask.ext.restful import Resource, marshal, fields
from .models import Place
from app import db
place_fields = {
'id': fields.Integer,
'city': fields.String,
'zip': fields.String,
'state': fields.String,
'county': fields.String,
'latitude': fields.Float,
'longitude': fields.Float
}
class PlaceApi(Resource):
    def __init__(self):
        pass
def get(self, city):
""" Get city data based on given city
:return:
"""
place_data = db.session.query(Place).filter(Place.city == city).all()
if not place_data:
abort(404)
return {'results': marshal(place_data, place_fields)}, 200
|
taurenk/PinPoint-Geocoder-Python
|
app/placeApi.py
|
Python
|
mit
| 725
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .sub_resource_py3 import SubResource
from .backend_address_pool_py3 import BackendAddressPool
from .inbound_nat_rule_py3 import InboundNatRule
from .security_rule_py3 import SecurityRule
from .network_interface_dns_settings_py3 import NetworkInterfaceDnsSettings
from .network_interface_py3 import NetworkInterface
from .network_security_group_py3 import NetworkSecurityGroup
from .route_py3 import Route
from .route_table_py3 import RouteTable
from .service_endpoint_properties_format_py3 import ServiceEndpointPropertiesFormat
from .public_ip_address_dns_settings_py3 import PublicIPAddressDnsSettings
from .public_ip_address_py3 import PublicIPAddress
from .ip_configuration_py3 import IPConfiguration
from .resource_navigation_link_py3 import ResourceNavigationLink
from .subnet_py3 import Subnet
from .network_interface_ip_configuration_py3 import NetworkInterfaceIPConfiguration
from .application_gateway_backend_address_py3 import ApplicationGatewayBackendAddress
from .application_gateway_backend_address_pool_py3 import ApplicationGatewayBackendAddressPool
from .application_gateway_connection_draining_py3 import ApplicationGatewayConnectionDraining
from .application_gateway_backend_http_settings_py3 import ApplicationGatewayBackendHttpSettings
from .application_gateway_backend_health_server_py3 import ApplicationGatewayBackendHealthServer
from .application_gateway_backend_health_http_settings_py3 import ApplicationGatewayBackendHealthHttpSettings
from .application_gateway_backend_health_pool_py3 import ApplicationGatewayBackendHealthPool
from .application_gateway_backend_health_py3 import ApplicationGatewayBackendHealth
from .application_gateway_sku_py3 import ApplicationGatewaySku
from .application_gateway_ssl_policy_py3 import ApplicationGatewaySslPolicy
from .application_gateway_ip_configuration_py3 import ApplicationGatewayIPConfiguration
from .application_gateway_authentication_certificate_py3 import ApplicationGatewayAuthenticationCertificate
from .application_gateway_ssl_certificate_py3 import ApplicationGatewaySslCertificate
from .application_gateway_frontend_ip_configuration_py3 import ApplicationGatewayFrontendIPConfiguration
from .application_gateway_frontend_port_py3 import ApplicationGatewayFrontendPort
from .application_gateway_http_listener_py3 import ApplicationGatewayHttpListener
from .application_gateway_path_rule_py3 import ApplicationGatewayPathRule
from .application_gateway_probe_health_response_match_py3 import ApplicationGatewayProbeHealthResponseMatch
from .application_gateway_probe_py3 import ApplicationGatewayProbe
from .application_gateway_request_routing_rule_py3 import ApplicationGatewayRequestRoutingRule
from .application_gateway_redirect_configuration_py3 import ApplicationGatewayRedirectConfiguration
from .application_gateway_url_path_map_py3 import ApplicationGatewayUrlPathMap
from .application_gateway_firewall_disabled_rule_group_py3 import ApplicationGatewayFirewallDisabledRuleGroup
from .application_gateway_web_application_firewall_configuration_py3 import ApplicationGatewayWebApplicationFirewallConfiguration
from .application_gateway_py3 import ApplicationGateway
from .application_gateway_firewall_rule_py3 import ApplicationGatewayFirewallRule
from .application_gateway_firewall_rule_group_py3 import ApplicationGatewayFirewallRuleGroup
from .application_gateway_firewall_rule_set_py3 import ApplicationGatewayFirewallRuleSet
from .application_gateway_available_waf_rule_sets_result_py3 import ApplicationGatewayAvailableWafRuleSetsResult
from .application_gateway_available_ssl_options_py3 import ApplicationGatewayAvailableSslOptions
from .application_gateway_ssl_predefined_policy_py3 import ApplicationGatewaySslPredefinedPolicy
from .resource_py3 import Resource
from .dns_name_availability_result_py3 import DnsNameAvailabilityResult
from .endpoint_service_result_py3 import EndpointServiceResult
from .express_route_circuit_authorization_py3 import ExpressRouteCircuitAuthorization
from .express_route_circuit_peering_config_py3 import ExpressRouteCircuitPeeringConfig
from .route_filter_rule_py3 import RouteFilterRule
from .express_route_circuit_stats_py3 import ExpressRouteCircuitStats
from .express_route_circuit_peering_py3 import ExpressRouteCircuitPeering
from .route_filter_py3 import RouteFilter
from .ipv6_express_route_circuit_peering_config_py3 import Ipv6ExpressRouteCircuitPeeringConfig
from .express_route_circuit_sku_py3 import ExpressRouteCircuitSku
from .express_route_circuit_service_provider_properties_py3 import ExpressRouteCircuitServiceProviderProperties
from .express_route_circuit_py3 import ExpressRouteCircuit
from .express_route_circuit_arp_table_py3 import ExpressRouteCircuitArpTable
from .express_route_circuits_arp_table_list_result_py3 import ExpressRouteCircuitsArpTableListResult
from .express_route_circuit_routes_table_py3 import ExpressRouteCircuitRoutesTable
from .express_route_circuits_routes_table_list_result_py3 import ExpressRouteCircuitsRoutesTableListResult
from .express_route_circuit_routes_table_summary_py3 import ExpressRouteCircuitRoutesTableSummary
from .express_route_circuits_routes_table_summary_list_result_py3 import ExpressRouteCircuitsRoutesTableSummaryListResult
from .express_route_service_provider_bandwidths_offered_py3 import ExpressRouteServiceProviderBandwidthsOffered
from .express_route_service_provider_py3 import ExpressRouteServiceProvider
from .frontend_ip_configuration_py3 import FrontendIPConfiguration
from .load_balancing_rule_py3 import LoadBalancingRule
from .probe_py3 import Probe
from .inbound_nat_pool_py3 import InboundNatPool
from .outbound_nat_rule_py3 import OutboundNatRule
from .load_balancer_py3 import LoadBalancer
from .error_details_py3 import ErrorDetails
from .error_py3 import Error
from .azure_async_operation_result_py3 import AzureAsyncOperationResult
from .effective_network_security_group_association_py3 import EffectiveNetworkSecurityGroupAssociation
from .effective_network_security_rule_py3 import EffectiveNetworkSecurityRule
from .effective_network_security_group_py3 import EffectiveNetworkSecurityGroup
from .effective_network_security_group_list_result_py3 import EffectiveNetworkSecurityGroupListResult
from .effective_route_py3 import EffectiveRoute
from .effective_route_list_result_py3 import EffectiveRouteListResult
from .network_watcher_py3 import NetworkWatcher
from .topology_parameters_py3 import TopologyParameters
from .topology_association_py3 import TopologyAssociation
from .topology_resource_py3 import TopologyResource
from .topology_py3 import Topology
from .verification_ip_flow_parameters_py3 import VerificationIPFlowParameters
from .verification_ip_flow_result_py3 import VerificationIPFlowResult
from .next_hop_parameters_py3 import NextHopParameters
from .next_hop_result_py3 import NextHopResult
from .security_group_view_parameters_py3 import SecurityGroupViewParameters
from .network_interface_association_py3 import NetworkInterfaceAssociation
from .subnet_association_py3 import SubnetAssociation
from .security_rule_associations_py3 import SecurityRuleAssociations
from .security_group_network_interface_py3 import SecurityGroupNetworkInterface
from .security_group_view_result_py3 import SecurityGroupViewResult
from .packet_capture_storage_location_py3 import PacketCaptureStorageLocation
from .packet_capture_filter_py3 import PacketCaptureFilter
from .packet_capture_parameters_py3 import PacketCaptureParameters
from .packet_capture_py3 import PacketCapture
from .packet_capture_result_py3 import PacketCaptureResult
from .packet_capture_query_status_result_py3 import PacketCaptureQueryStatusResult
from .troubleshooting_parameters_py3 import TroubleshootingParameters
from .query_troubleshooting_parameters_py3 import QueryTroubleshootingParameters
from .troubleshooting_recommended_actions_py3 import TroubleshootingRecommendedActions
from .troubleshooting_details_py3 import TroubleshootingDetails
from .troubleshooting_result_py3 import TroubleshootingResult
from .retention_policy_parameters_py3 import RetentionPolicyParameters
from .flow_log_status_parameters_py3 import FlowLogStatusParameters
from .flow_log_information_py3 import FlowLogInformation
from .connectivity_source_py3 import ConnectivitySource
from .connectivity_destination_py3 import ConnectivityDestination
from .connectivity_parameters_py3 import ConnectivityParameters
from .connectivity_issue_py3 import ConnectivityIssue
from .connectivity_hop_py3 import ConnectivityHop
from .connectivity_information_py3 import ConnectivityInformation
from .patch_route_filter_rule_py3 import PatchRouteFilterRule
from .patch_route_filter_py3 import PatchRouteFilter
from .bgp_community_py3 import BGPCommunity
from .bgp_service_community_py3 import BgpServiceCommunity
from .usage_name_py3 import UsageName
from .usage_py3 import Usage
from .virtual_network_peering_py3 import VirtualNetworkPeering
from .address_space_py3 import AddressSpace
from .dhcp_options_py3 import DhcpOptions
from .virtual_network_py3 import VirtualNetwork
from .ip_address_availability_result_py3 import IPAddressAvailabilityResult
from .virtual_network_usage_name_py3 import VirtualNetworkUsageName
from .virtual_network_usage_py3 import VirtualNetworkUsage
from .virtual_network_gateway_ip_configuration_py3 import VirtualNetworkGatewayIPConfiguration
from .virtual_network_gateway_sku_py3 import VirtualNetworkGatewaySku
from .vpn_client_root_certificate_py3 import VpnClientRootCertificate
from .vpn_client_revoked_certificate_py3 import VpnClientRevokedCertificate
from .vpn_client_configuration_py3 import VpnClientConfiguration
from .bgp_settings_py3 import BgpSettings
from .bgp_peer_status_py3 import BgpPeerStatus
from .gateway_route_py3 import GatewayRoute
from .virtual_network_gateway_py3 import VirtualNetworkGateway
from .vpn_client_parameters_py3 import VpnClientParameters
from .bgp_peer_status_list_result_py3 import BgpPeerStatusListResult
from .gateway_route_list_result_py3 import GatewayRouteListResult
from .tunnel_connection_health_py3 import TunnelConnectionHealth
from .local_network_gateway_py3 import LocalNetworkGateway
from .ipsec_policy_py3 import IpsecPolicy
from .virtual_network_gateway_connection_py3 import VirtualNetworkGatewayConnection
from .connection_reset_shared_key_py3 import ConnectionResetSharedKey
from .connection_shared_key_py3 import ConnectionSharedKey
from .virtual_network_connection_gateway_reference_py3 import VirtualNetworkConnectionGatewayReference
from .virtual_network_gateway_connection_list_entity_py3 import VirtualNetworkGatewayConnectionListEntity
except (SyntaxError, ImportError):
from .sub_resource import SubResource
from .backend_address_pool import BackendAddressPool
from .inbound_nat_rule import InboundNatRule
from .security_rule import SecurityRule
from .network_interface_dns_settings import NetworkInterfaceDnsSettings
from .network_interface import NetworkInterface
from .network_security_group import NetworkSecurityGroup
from .route import Route
from .route_table import RouteTable
from .service_endpoint_properties_format import ServiceEndpointPropertiesFormat
from .public_ip_address_dns_settings import PublicIPAddressDnsSettings
from .public_ip_address import PublicIPAddress
from .ip_configuration import IPConfiguration
from .resource_navigation_link import ResourceNavigationLink
from .subnet import Subnet
from .network_interface_ip_configuration import NetworkInterfaceIPConfiguration
from .application_gateway_backend_address import ApplicationGatewayBackendAddress
from .application_gateway_backend_address_pool import ApplicationGatewayBackendAddressPool
from .application_gateway_connection_draining import ApplicationGatewayConnectionDraining
from .application_gateway_backend_http_settings import ApplicationGatewayBackendHttpSettings
from .application_gateway_backend_health_server import ApplicationGatewayBackendHealthServer
from .application_gateway_backend_health_http_settings import ApplicationGatewayBackendHealthHttpSettings
from .application_gateway_backend_health_pool import ApplicationGatewayBackendHealthPool
from .application_gateway_backend_health import ApplicationGatewayBackendHealth
from .application_gateway_sku import ApplicationGatewaySku
from .application_gateway_ssl_policy import ApplicationGatewaySslPolicy
from .application_gateway_ip_configuration import ApplicationGatewayIPConfiguration
from .application_gateway_authentication_certificate import ApplicationGatewayAuthenticationCertificate
from .application_gateway_ssl_certificate import ApplicationGatewaySslCertificate
from .application_gateway_frontend_ip_configuration import ApplicationGatewayFrontendIPConfiguration
from .application_gateway_frontend_port import ApplicationGatewayFrontendPort
from .application_gateway_http_listener import ApplicationGatewayHttpListener
from .application_gateway_path_rule import ApplicationGatewayPathRule
from .application_gateway_probe_health_response_match import ApplicationGatewayProbeHealthResponseMatch
from .application_gateway_probe import ApplicationGatewayProbe
from .application_gateway_request_routing_rule import ApplicationGatewayRequestRoutingRule
from .application_gateway_redirect_configuration import ApplicationGatewayRedirectConfiguration
from .application_gateway_url_path_map import ApplicationGatewayUrlPathMap
from .application_gateway_firewall_disabled_rule_group import ApplicationGatewayFirewallDisabledRuleGroup
from .application_gateway_web_application_firewall_configuration import ApplicationGatewayWebApplicationFirewallConfiguration
from .application_gateway import ApplicationGateway
from .application_gateway_firewall_rule import ApplicationGatewayFirewallRule
from .application_gateway_firewall_rule_group import ApplicationGatewayFirewallRuleGroup
from .application_gateway_firewall_rule_set import ApplicationGatewayFirewallRuleSet
from .application_gateway_available_waf_rule_sets_result import ApplicationGatewayAvailableWafRuleSetsResult
from .application_gateway_available_ssl_options import ApplicationGatewayAvailableSslOptions
from .application_gateway_ssl_predefined_policy import ApplicationGatewaySslPredefinedPolicy
from .resource import Resource
from .dns_name_availability_result import DnsNameAvailabilityResult
from .endpoint_service_result import EndpointServiceResult
from .express_route_circuit_authorization import ExpressRouteCircuitAuthorization
from .express_route_circuit_peering_config import ExpressRouteCircuitPeeringConfig
from .route_filter_rule import RouteFilterRule
from .express_route_circuit_stats import ExpressRouteCircuitStats
from .express_route_circuit_peering import ExpressRouteCircuitPeering
from .route_filter import RouteFilter
from .ipv6_express_route_circuit_peering_config import Ipv6ExpressRouteCircuitPeeringConfig
from .express_route_circuit_sku import ExpressRouteCircuitSku
from .express_route_circuit_service_provider_properties import ExpressRouteCircuitServiceProviderProperties
from .express_route_circuit import ExpressRouteCircuit
from .express_route_circuit_arp_table import ExpressRouteCircuitArpTable
from .express_route_circuits_arp_table_list_result import ExpressRouteCircuitsArpTableListResult
from .express_route_circuit_routes_table import ExpressRouteCircuitRoutesTable
from .express_route_circuits_routes_table_list_result import ExpressRouteCircuitsRoutesTableListResult
from .express_route_circuit_routes_table_summary import ExpressRouteCircuitRoutesTableSummary
from .express_route_circuits_routes_table_summary_list_result import ExpressRouteCircuitsRoutesTableSummaryListResult
from .express_route_service_provider_bandwidths_offered import ExpressRouteServiceProviderBandwidthsOffered
from .express_route_service_provider import ExpressRouteServiceProvider
from .frontend_ip_configuration import FrontendIPConfiguration
from .load_balancing_rule import LoadBalancingRule
from .probe import Probe
from .inbound_nat_pool import InboundNatPool
from .outbound_nat_rule import OutboundNatRule
from .load_balancer import LoadBalancer
from .error_details import ErrorDetails
from .error import Error
from .azure_async_operation_result import AzureAsyncOperationResult
from .effective_network_security_group_association import EffectiveNetworkSecurityGroupAssociation
from .effective_network_security_rule import EffectiveNetworkSecurityRule
from .effective_network_security_group import EffectiveNetworkSecurityGroup
from .effective_network_security_group_list_result import EffectiveNetworkSecurityGroupListResult
from .effective_route import EffectiveRoute
from .effective_route_list_result import EffectiveRouteListResult
from .network_watcher import NetworkWatcher
from .topology_parameters import TopologyParameters
from .topology_association import TopologyAssociation
from .topology_resource import TopologyResource
from .topology import Topology
from .verification_ip_flow_parameters import VerificationIPFlowParameters
from .verification_ip_flow_result import VerificationIPFlowResult
from .next_hop_parameters import NextHopParameters
from .next_hop_result import NextHopResult
from .security_group_view_parameters import SecurityGroupViewParameters
from .network_interface_association import NetworkInterfaceAssociation
from .subnet_association import SubnetAssociation
from .security_rule_associations import SecurityRuleAssociations
from .security_group_network_interface import SecurityGroupNetworkInterface
from .security_group_view_result import SecurityGroupViewResult
from .packet_capture_storage_location import PacketCaptureStorageLocation
from .packet_capture_filter import PacketCaptureFilter
from .packet_capture_parameters import PacketCaptureParameters
from .packet_capture import PacketCapture
from .packet_capture_result import PacketCaptureResult
from .packet_capture_query_status_result import PacketCaptureQueryStatusResult
from .troubleshooting_parameters import TroubleshootingParameters
from .query_troubleshooting_parameters import QueryTroubleshootingParameters
from .troubleshooting_recommended_actions import TroubleshootingRecommendedActions
from .troubleshooting_details import TroubleshootingDetails
from .troubleshooting_result import TroubleshootingResult
from .retention_policy_parameters import RetentionPolicyParameters
from .flow_log_status_parameters import FlowLogStatusParameters
from .flow_log_information import FlowLogInformation
from .connectivity_source import ConnectivitySource
from .connectivity_destination import ConnectivityDestination
from .connectivity_parameters import ConnectivityParameters
from .connectivity_issue import ConnectivityIssue
from .connectivity_hop import ConnectivityHop
from .connectivity_information import ConnectivityInformation
from .patch_route_filter_rule import PatchRouteFilterRule
from .patch_route_filter import PatchRouteFilter
from .bgp_community import BGPCommunity
from .bgp_service_community import BgpServiceCommunity
from .usage_name import UsageName
from .usage import Usage
from .virtual_network_peering import VirtualNetworkPeering
from .address_space import AddressSpace
from .dhcp_options import DhcpOptions
from .virtual_network import VirtualNetwork
from .ip_address_availability_result import IPAddressAvailabilityResult
from .virtual_network_usage_name import VirtualNetworkUsageName
from .virtual_network_usage import VirtualNetworkUsage
from .virtual_network_gateway_ip_configuration import VirtualNetworkGatewayIPConfiguration
from .virtual_network_gateway_sku import VirtualNetworkGatewaySku
from .vpn_client_root_certificate import VpnClientRootCertificate
from .vpn_client_revoked_certificate import VpnClientRevokedCertificate
from .vpn_client_configuration import VpnClientConfiguration
from .bgp_settings import BgpSettings
from .bgp_peer_status import BgpPeerStatus
from .gateway_route import GatewayRoute
from .virtual_network_gateway import VirtualNetworkGateway
from .vpn_client_parameters import VpnClientParameters
from .bgp_peer_status_list_result import BgpPeerStatusListResult
from .gateway_route_list_result import GatewayRouteListResult
from .tunnel_connection_health import TunnelConnectionHealth
from .local_network_gateway import LocalNetworkGateway
from .ipsec_policy import IpsecPolicy
from .virtual_network_gateway_connection import VirtualNetworkGatewayConnection
from .connection_reset_shared_key import ConnectionResetSharedKey
from .connection_shared_key import ConnectionSharedKey
from .virtual_network_connection_gateway_reference import VirtualNetworkConnectionGatewayReference
from .virtual_network_gateway_connection_list_entity import VirtualNetworkGatewayConnectionListEntity
from .application_gateway_paged import ApplicationGatewayPaged
from .application_gateway_ssl_predefined_policy_paged import ApplicationGatewaySslPredefinedPolicyPaged
from .endpoint_service_result_paged import EndpointServiceResultPaged
from .express_route_circuit_authorization_paged import ExpressRouteCircuitAuthorizationPaged
from .express_route_circuit_peering_paged import ExpressRouteCircuitPeeringPaged
from .express_route_circuit_paged import ExpressRouteCircuitPaged
from .express_route_service_provider_paged import ExpressRouteServiceProviderPaged
from .load_balancer_paged import LoadBalancerPaged
from .backend_address_pool_paged import BackendAddressPoolPaged
from .frontend_ip_configuration_paged import FrontendIPConfigurationPaged
from .inbound_nat_rule_paged import InboundNatRulePaged
from .load_balancing_rule_paged import LoadBalancingRulePaged
from .network_interface_paged import NetworkInterfacePaged
from .probe_paged import ProbePaged
from .network_interface_ip_configuration_paged import NetworkInterfaceIPConfigurationPaged
from .network_security_group_paged import NetworkSecurityGroupPaged
from .security_rule_paged import SecurityRulePaged
from .network_watcher_paged import NetworkWatcherPaged
from .packet_capture_result_paged import PacketCaptureResultPaged
from .public_ip_address_paged import PublicIPAddressPaged
from .route_filter_paged import RouteFilterPaged
from .route_filter_rule_paged import RouteFilterRulePaged
from .route_table_paged import RouteTablePaged
from .route_paged import RoutePaged
from .bgp_service_community_paged import BgpServiceCommunityPaged
from .usage_paged import UsagePaged
from .virtual_network_paged import VirtualNetworkPaged
from .virtual_network_usage_paged import VirtualNetworkUsagePaged
from .subnet_paged import SubnetPaged
from .virtual_network_peering_paged import VirtualNetworkPeeringPaged
from .virtual_network_gateway_paged import VirtualNetworkGatewayPaged
from .virtual_network_gateway_connection_list_entity_paged import VirtualNetworkGatewayConnectionListEntityPaged
from .virtual_network_gateway_connection_paged import VirtualNetworkGatewayConnectionPaged
from .local_network_gateway_paged import LocalNetworkGatewayPaged
from .network_management_client_enums import (
TransportProtocol,
IPAllocationMethod,
IPVersion,
SecurityRuleProtocol,
SecurityRuleAccess,
SecurityRuleDirection,
RouteNextHopType,
ApplicationGatewayProtocol,
ApplicationGatewayCookieBasedAffinity,
ApplicationGatewayBackendHealthServerHealth,
ApplicationGatewaySkuName,
ApplicationGatewayTier,
ApplicationGatewaySslProtocol,
ApplicationGatewaySslPolicyType,
ApplicationGatewaySslPolicyName,
ApplicationGatewaySslCipherSuite,
ApplicationGatewayRequestRoutingRuleType,
ApplicationGatewayRedirectType,
ApplicationGatewayOperationalState,
ApplicationGatewayFirewallMode,
AuthorizationUseStatus,
ExpressRouteCircuitPeeringAdvertisedPublicPrefixState,
Access,
ExpressRouteCircuitPeeringType,
ExpressRouteCircuitPeeringState,
ExpressRouteCircuitSkuTier,
ExpressRouteCircuitSkuFamily,
ServiceProviderProvisioningState,
LoadDistribution,
ProbeProtocol,
NetworkOperationStatus,
EffectiveSecurityRuleProtocol,
EffectiveRouteSource,
EffectiveRouteState,
ProvisioningState,
AssociationType,
Direction,
Protocol,
NextHopType,
PcProtocol,
PcStatus,
PcError,
Origin,
Severity,
IssueType,
ConnectionStatus,
VirtualNetworkPeeringState,
VirtualNetworkGatewayType,
VpnType,
VirtualNetworkGatewaySkuName,
VirtualNetworkGatewaySkuTier,
VpnClientProtocol,
BgpPeerState,
ProcessorArchitecture,
AuthenticationMethod,
VirtualNetworkGatewayConnectionStatus,
VirtualNetworkGatewayConnectionType,
IpsecEncryption,
IpsecIntegrity,
IkeEncryption,
IkeIntegrity,
DhGroup,
PfsGroup,
)
__all__ = [
'SubResource',
'BackendAddressPool',
'InboundNatRule',
'SecurityRule',
'NetworkInterfaceDnsSettings',
'NetworkInterface',
'NetworkSecurityGroup',
'Route',
'RouteTable',
'ServiceEndpointPropertiesFormat',
'PublicIPAddressDnsSettings',
'PublicIPAddress',
'IPConfiguration',
'ResourceNavigationLink',
'Subnet',
'NetworkInterfaceIPConfiguration',
'ApplicationGatewayBackendAddress',
'ApplicationGatewayBackendAddressPool',
'ApplicationGatewayConnectionDraining',
'ApplicationGatewayBackendHttpSettings',
'ApplicationGatewayBackendHealthServer',
'ApplicationGatewayBackendHealthHttpSettings',
'ApplicationGatewayBackendHealthPool',
'ApplicationGatewayBackendHealth',
'ApplicationGatewaySku',
'ApplicationGatewaySslPolicy',
'ApplicationGatewayIPConfiguration',
'ApplicationGatewayAuthenticationCertificate',
'ApplicationGatewaySslCertificate',
'ApplicationGatewayFrontendIPConfiguration',
'ApplicationGatewayFrontendPort',
'ApplicationGatewayHttpListener',
'ApplicationGatewayPathRule',
'ApplicationGatewayProbeHealthResponseMatch',
'ApplicationGatewayProbe',
'ApplicationGatewayRequestRoutingRule',
'ApplicationGatewayRedirectConfiguration',
'ApplicationGatewayUrlPathMap',
'ApplicationGatewayFirewallDisabledRuleGroup',
'ApplicationGatewayWebApplicationFirewallConfiguration',
'ApplicationGateway',
'ApplicationGatewayFirewallRule',
'ApplicationGatewayFirewallRuleGroup',
'ApplicationGatewayFirewallRuleSet',
'ApplicationGatewayAvailableWafRuleSetsResult',
'ApplicationGatewayAvailableSslOptions',
'ApplicationGatewaySslPredefinedPolicy',
'Resource',
'DnsNameAvailabilityResult',
'EndpointServiceResult',
'ExpressRouteCircuitAuthorization',
'ExpressRouteCircuitPeeringConfig',
'RouteFilterRule',
'ExpressRouteCircuitStats',
'ExpressRouteCircuitPeering',
'RouteFilter',
'Ipv6ExpressRouteCircuitPeeringConfig',
'ExpressRouteCircuitSku',
'ExpressRouteCircuitServiceProviderProperties',
'ExpressRouteCircuit',
'ExpressRouteCircuitArpTable',
'ExpressRouteCircuitsArpTableListResult',
'ExpressRouteCircuitRoutesTable',
'ExpressRouteCircuitsRoutesTableListResult',
'ExpressRouteCircuitRoutesTableSummary',
'ExpressRouteCircuitsRoutesTableSummaryListResult',
'ExpressRouteServiceProviderBandwidthsOffered',
'ExpressRouteServiceProvider',
'FrontendIPConfiguration',
'LoadBalancingRule',
'Probe',
'InboundNatPool',
'OutboundNatRule',
'LoadBalancer',
'ErrorDetails',
'Error',
'AzureAsyncOperationResult',
'EffectiveNetworkSecurityGroupAssociation',
'EffectiveNetworkSecurityRule',
'EffectiveNetworkSecurityGroup',
'EffectiveNetworkSecurityGroupListResult',
'EffectiveRoute',
'EffectiveRouteListResult',
'NetworkWatcher',
'TopologyParameters',
'TopologyAssociation',
'TopologyResource',
'Topology',
'VerificationIPFlowParameters',
'VerificationIPFlowResult',
'NextHopParameters',
'NextHopResult',
'SecurityGroupViewParameters',
'NetworkInterfaceAssociation',
'SubnetAssociation',
'SecurityRuleAssociations',
'SecurityGroupNetworkInterface',
'SecurityGroupViewResult',
'PacketCaptureStorageLocation',
'PacketCaptureFilter',
'PacketCaptureParameters',
'PacketCapture',
'PacketCaptureResult',
'PacketCaptureQueryStatusResult',
'TroubleshootingParameters',
'QueryTroubleshootingParameters',
'TroubleshootingRecommendedActions',
'TroubleshootingDetails',
'TroubleshootingResult',
'RetentionPolicyParameters',
'FlowLogStatusParameters',
'FlowLogInformation',
'ConnectivitySource',
'ConnectivityDestination',
'ConnectivityParameters',
'ConnectivityIssue',
'ConnectivityHop',
'ConnectivityInformation',
'PatchRouteFilterRule',
'PatchRouteFilter',
'BGPCommunity',
'BgpServiceCommunity',
'UsageName',
'Usage',
'VirtualNetworkPeering',
'AddressSpace',
'DhcpOptions',
'VirtualNetwork',
'IPAddressAvailabilityResult',
'VirtualNetworkUsageName',
'VirtualNetworkUsage',
'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewaySku',
'VpnClientRootCertificate',
'VpnClientRevokedCertificate',
'VpnClientConfiguration',
'BgpSettings',
'BgpPeerStatus',
'GatewayRoute',
'VirtualNetworkGateway',
'VpnClientParameters',
'BgpPeerStatusListResult',
'GatewayRouteListResult',
'TunnelConnectionHealth',
'LocalNetworkGateway',
'IpsecPolicy',
'VirtualNetworkGatewayConnection',
'ConnectionResetSharedKey',
'ConnectionSharedKey',
'VirtualNetworkConnectionGatewayReference',
'VirtualNetworkGatewayConnectionListEntity',
'ApplicationGatewayPaged',
'ApplicationGatewaySslPredefinedPolicyPaged',
'EndpointServiceResultPaged',
'ExpressRouteCircuitAuthorizationPaged',
'ExpressRouteCircuitPeeringPaged',
'ExpressRouteCircuitPaged',
'ExpressRouteServiceProviderPaged',
'LoadBalancerPaged',
'BackendAddressPoolPaged',
'FrontendIPConfigurationPaged',
'InboundNatRulePaged',
'LoadBalancingRulePaged',
'NetworkInterfacePaged',
'ProbePaged',
'NetworkInterfaceIPConfigurationPaged',
'NetworkSecurityGroupPaged',
'SecurityRulePaged',
'NetworkWatcherPaged',
'PacketCaptureResultPaged',
'PublicIPAddressPaged',
'RouteFilterPaged',
'RouteFilterRulePaged',
'RouteTablePaged',
'RoutePaged',
'BgpServiceCommunityPaged',
'UsagePaged',
'VirtualNetworkPaged',
'VirtualNetworkUsagePaged',
'SubnetPaged',
'VirtualNetworkPeeringPaged',
'VirtualNetworkGatewayPaged',
'VirtualNetworkGatewayConnectionListEntityPaged',
'VirtualNetworkGatewayConnectionPaged',
'LocalNetworkGatewayPaged',
'TransportProtocol',
'IPAllocationMethod',
'IPVersion',
'SecurityRuleProtocol',
'SecurityRuleAccess',
'SecurityRuleDirection',
'RouteNextHopType',
'ApplicationGatewayProtocol',
'ApplicationGatewayCookieBasedAffinity',
'ApplicationGatewayBackendHealthServerHealth',
'ApplicationGatewaySkuName',
'ApplicationGatewayTier',
'ApplicationGatewaySslProtocol',
'ApplicationGatewaySslPolicyType',
'ApplicationGatewaySslPolicyName',
'ApplicationGatewaySslCipherSuite',
'ApplicationGatewayRequestRoutingRuleType',
'ApplicationGatewayRedirectType',
'ApplicationGatewayOperationalState',
'ApplicationGatewayFirewallMode',
'AuthorizationUseStatus',
'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState',
'Access',
'ExpressRouteCircuitPeeringType',
'ExpressRouteCircuitPeeringState',
'ExpressRouteCircuitSkuTier',
'ExpressRouteCircuitSkuFamily',
'ServiceProviderProvisioningState',
'LoadDistribution',
'ProbeProtocol',
'NetworkOperationStatus',
'EffectiveSecurityRuleProtocol',
'EffectiveRouteSource',
'EffectiveRouteState',
'ProvisioningState',
'AssociationType',
'Direction',
'Protocol',
'NextHopType',
'PcProtocol',
'PcStatus',
'PcError',
'Origin',
'Severity',
'IssueType',
'ConnectionStatus',
'VirtualNetworkPeeringState',
'VirtualNetworkGatewayType',
'VpnType',
'VirtualNetworkGatewaySkuName',
'VirtualNetworkGatewaySkuTier',
'VpnClientProtocol',
'BgpPeerState',
'ProcessorArchitecture',
'AuthenticationMethod',
'VirtualNetworkGatewayConnectionStatus',
'VirtualNetworkGatewayConnectionType',
'IpsecEncryption',
'IpsecIntegrity',
'IkeEncryption',
'IkeIntegrity',
'DhGroup',
'PfsGroup',
]
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/__init__.py
|
Python
|
mit
| 34,093
|
from mox3.mox import MoxTestBase, IgnoreArg
import pycares
import pycares.errno
import gevent
from gevent import select
from gevent.event import AsyncResult
from slimta.util.dns import DNSResolver, DNSError
class TestDNS(MoxTestBase):
def test_get_query_type(self):
self.assertEqual(pycares.QUERY_TYPE_MX,
DNSResolver._get_query_type('MX'))
self.assertEqual(13, DNSResolver._get_query_type(13))
def test_result_cb(self):
result = AsyncResult()
DNSResolver._result_cb(result, 13, None)
self.assertEqual(13, result.get())
def test_result_cb_error(self):
result = AsyncResult()
DNSResolver._result_cb(result, 13, pycares.errno.ARES_ENOTFOUND)
with self.assertRaises(DNSError) as cm:
result.get()
self.assertEqual('Domain name not found [ARES_ENOTFOUND]',
str(cm.exception))
def test_query(self):
channel = self.mox.CreateMock(pycares.Channel)
self.mox.StubOutWithMock(pycares, 'Channel')
self.mox.StubOutWithMock(gevent, 'spawn')
pycares.Channel().AndReturn(channel)
channel.query('example.com', 13, IgnoreArg())
gevent.spawn(IgnoreArg())
self.mox.ReplayAll()
DNSResolver.query('example.com', 13)
def test_wait_channel(self):
DNSResolver._channel = channel = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(select, 'select')
channel.getsock().AndReturn(('read', 'write'))
channel.timeout().AndReturn(1.0)
select.select('read', 'write', [], 1.0).AndReturn(
([1, 2, 3], [4, 5, 6], None))
for fd in [1, 2, 3]:
channel.process_fd(fd, pycares.ARES_SOCKET_BAD)
for fd in [4, 5, 6]:
channel.process_fd(pycares.ARES_SOCKET_BAD, fd)
channel.getsock().AndReturn(('read', 'write'))
channel.timeout().AndReturn(None)
channel.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD)
channel.getsock().AndReturn((None, None))
self.mox.ReplayAll()
DNSResolver._wait_channel()
def test_wait_channel_error(self):
DNSResolver._channel = channel = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(select, 'select')
channel.getsock().AndReturn(('read', 'write'))
channel.timeout().AndReturn(1.0)
select.select('read', 'write', [], 1.0).AndRaise(ValueError(13))
channel.cancel()
self.mox.ReplayAll()
with self.assertRaises(ValueError):
DNSResolver._wait_channel()
self.assertIsNone(DNSResolver._channel)
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
fisele/slimta-abusix
|
test/test_slimta_util_dns.py
|
Python
|
mit
| 2,692
|
#!/usr/bin/env python3
from PIL import Image, ImageDraw, ImageFont, ImageColor
from configparser import ConfigParser
import os,sys,time, csv, operator, tempfile, stat, hashlib, colorsys
scriptpath = os.path.join(os.getcwd(),os.path.dirname(sys.argv[0]));
config = ConfigParser()
config.read(os.path.join(scriptpath,'map.conf'))
path_to_db = os.path.expanduser(config['DEFAULT']['PathToIRPGDB'])
csv.register_dialect('irpg', delimiter='\t', quoting=csv.QUOTE_NONE)
class Map:
def __init__(self,config):
self.textEnabled = (config['EnableText'] == '1')
self.tailEnabled = (config['EnableTail'] == '1')
self.hashColors = (config['HashColors'] == '1')
self.scale = int(config['ImageScale'])
self.update = int(config['UpdateEvery'])
self.path = config['MapPath']
self.bg_path = os.path.join(scriptpath,config['BackgoundPath'])
font_path = os.path.join(scriptpath,config['Font'])
self.font = ImageFont.truetype(font_path, int(config['FontSize']))
self.pixel_width = int(config['PixelWidth'])
print("Created map ",self.path)
def playerPixel(self, player):
return (self.scale*player.x-self.pixel_width,self.scale*player.y-self.pixel_width,
self.scale*player.x+self.pixel_width,self.scale*player.y+self.pixel_width)
def render(self):
try:
myim = Image.open(str(self.bg_path))
except (IsADirectoryError, IOError):
myim = Image.new("RGB", (500*self.scale,500*self.scale), (255,255,255))
draw = ImageDraw.Draw(myim)
for player in players.values():
color = player.color if self.hashColors else (0,0,0)
if not player.online:
color = (120,0,0)
myim.paste(color, self.playerPixel(player))
description = [player.name, "level: " + player.level]
y = 0
if self.textEnabled:
for line in description:
draw.text((player.x*self.scale, player.y*self.scale+y),line, fill=color, font=self.font)
y = y + 12
if self.tailEnabled:
color = (0,128,0) if not self.hashColors else player.color
steps = int(config['DEFAULT']['TailHistory'])
colDif = (int(255/steps),int(128/steps),int(255/steps)) if not self.hashColors else player.colDif
curPos = (player.x*self.scale,player.y*self.scale)
for p in reversed(player.history):
pos = (p[0]*self.scale,p[1]*self.scale)
if (max(abs(curPos[0]-pos[0]),abs(curPos[1]-pos[1])) >
int(config['DEFAULT']['InternalInterval'])*self.update*2):
continue
draw.line([curPos,pos],fill=color,width=2)
curPos = pos
color = (color[0]+colDif[0],color[1]+colDif[1],color[2]+colDif[2])
tmp = tempfile.mkstemp('.png')
myim.save(tmp[1])
os.chmod(tmp[1],stat.S_IROTH | stat.S_IWUSR | stat.S_IRUSR)
os.rename(tmp[1],os.path.expanduser(self.path))
os.close(tmp[0])
class Player:
def __init__(self,data):
self.name = data["# username"]
dig = hashlib.sha256(self.name.encode('utf-8')).digest()
self.color = ImageColor.getrgb('hsl(%s,100%%,50%%)' % int(dig[-1]*1.4))
steps = int(config['DEFAULT']['TailHistory'])
self.colDif = (int((255-self.color[0])/steps),
int((255-self.color[1])/steps),
int((255-self.color[2])/steps))
self.history = []
self.processData(data)
def processData(self,data):
assert(self.name== data["# username"])
self.x = int(data["x pos"])
self.y = int(data["y pos"])
self.weapon = data["weapon"]
self.level = data["level"]
self.online = data["online"] != '0'
self.history += [(self.x,self.y)]
if len(self.history) > int(config['DEFAULT']['TailHistory']):
del self.history[0]
players = dict()
maps = list()
for s in config.sections():
maps.append(Map(config[s]))
iteration = 0
while True:
csvfile = open(path_to_db,'r')
reader = csv.DictReader(csvfile, dialect='irpg')
for p in reader:
try:
players[p["# username"]].processData(p)
        except KeyError:  # first sighting of this player
players[p["# username"]] = Player(p)
for m in maps:
if iteration % m.update == 0:
m.render()
iteration += 1
time.sleep(int(config['DEFAULT']['InternalInterval']))
|
lucaswo/idlerpgmap
|
map.py
|
Python
|
mit
| 4,662
|
#@result Submitted a few seconds ago • Score: 10.00 Status: Accepted Test Case #0: 0s Test Case #1: 0s
# Enter your code here. Read input from STDIN. Print output to STDOUT
a = int(raw_input())
b = int(raw_input())
print a / b
print a % b
print divmod(a, b)
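# e.g. with inputs a = 177 and b = 10 this prints 17, then 7, then (17, 7),
# since divmod(a, b) returns the (quotient, remainder) pair.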
|
FeiZhan/Algo-Collection
|
answers/hackerrank/Mod Divmod.py
|
Python
|
mit
| 262
|
from errno import EEXIST
import os
import sys
from functools import partial
is_python2 = lambda: sys.version_info < (3,)
if is_python2():
BUILTINS_NAME = "__builtin__"
def input(prompt):
return raw_input(prompt).decode("UTF-8")
def makedirs(name, exist_ok=False):
try:
os.makedirs(name=name)
except OSError as e:
if not exist_ok or e.errno != EEXIST or not os.path.isdir(name):
raise
else:
BUILTINS_NAME = "builtins"
input = input
makedirs = partial(os.makedirs)
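# Usage sketch: with this shim, makedirs('some/dir', exist_ok=True) behaves
# the same on Python 2 and 3 -- the Python 2 branch swallows EEXIST only
# when exist_ok is set and the path is already a directory.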
|
marcwebbie/pysswords
|
pysswords/python_two.py
|
Python
|
mit
| 555
|
# -*- coding: utf-8 -*-
'''Management commands.'''
import os
import shutil
from flask.ext.script import Manager
from killtheyak.main import app, freezer
manager = Manager(app)
build_dir = app.config['FREEZER_DESTINATION']
HERE = os.path.dirname(os.path.abspath(__file__))
@manager.command
def install():
'''Installs all required packages.'''
os.system('pip install -U -r requirements.txt')
@manager.command
def build():
"""Builds the static files."""
print("Freezing it up! Brr...")
freezer.freeze() # Freezes the project to build/
print('Copying CNAME...')
cname = os.path.join(HERE, 'CNAME')
shutil.copyfile(cname, os.path.join(build_dir, 'CNAME'))
print('...done')
@manager.command
def deploy(push=True):
'''Deploys the site to GitHub Pages.'''
build()
print('Deploying to GitHub pages...')
command = 'ghp-import -b master -m "[deploy] Build" '
if push:
command += '-p '
command += build_dir
os.system(command)
print('...done')
@manager.command
def test(unit=True, webtest=True):
"""Runs the tests.
"""
command = 'nosetests killtheyak/test/ --verbosity=2'
if not unit:
command += ' --exclude="unit_tests'
if not webtest:
command += ' --exclude="webtest_tests"'
os.system(command)
if __name__ == '__main__':
manager.run()
|
vfulco/killtheyak.github.io
|
manage.py
|
Python
|
mit
| 1,356
|
from configparser import RawConfigParser
import os.path
config = RawConfigParser(allow_no_value=True)
def init(defaults):
config.read_dict(defaults)
update()
def read(file):
config.read(file)
update()
def write(file):
new = not os.path.isfile(file)
with open(file, 'w+') as file:
config.write(file)
return new
def update():
config.slack = config['slack']
config.channels = config['channels']
config.logging = config['logging']
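# Usage sketch (hypothetical file name; section names follow update() above):
#   init({'slack': {}, 'channels': {}, 'logging': {}})
#   created = write('slake.conf')  # True if the file was newly created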
|
stephcd/slake
|
slake/config.py
|
Python
|
mit
| 485
|
# FirstReverse.py
# Using the Python language, have the function FirstReverse(str) take the str parameter being passed and return the string in reversed order. For example: if the input string is "Hello World and Coders" then your program should return the string sredoC dna dlroW olleH.
def FirstReverse(str):
# code goes here
ret_str = ""
for i in range(len(str)-1, -1, -1):
ret_str += str[i]
return ret_str
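# e.g. FirstReverse("Hello World and Coders") returns
# "sredoC dna dlroW olleH", per the problem statement above.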
# keep this function call here
print FirstReverse(raw_input())
|
JunoJunho/coderbyte_challenge
|
FirstReverse.py
|
Python
|
mit
| 507
|
# To change this license header, choose License Headers in Project Properties.
# Python Networking
# -----------------------------------------------------------------------------
# Python Networking Overview
# Telnet
# SSH
# SNMP
# Scapy (Creating your own packets)
#
#
#
# To change this template file, choose Tools | Templates
# and open the template in the editor.
if __name__ == "__main__":
print("Hello World Keeyana made me")
|
keeyanajones/Samples
|
pythonProject/NetworkPythonProject/src/networkpythonproject.py
|
Python
|
mit
| 441
|
# Generated by Django 3.2.5 on 2021-07-10 22:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("app", "0022_auto_20210710_1644"),
]
operations = [
migrations.AddField(
model_name="symbol",
name="length",
field=models.IntegerField(default=0),
preserve_default=False,
),
]
|
johntellsall/shotglass
|
shotglass/app/migrations/0023_add_length.py
|
Python
|
mit
| 415
|
for i in range(1, 13):
print(end=" ")
for j in range(1, 13):
print("{:4d}".format(i * j), end=" ")
print()
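# Each of the 12 printed rows holds the products i*1 .. i*12, right-aligned
# in 4-character fields; e.g. the first row reads: 1 2 3 ... 12.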
|
wchuanghard/CS110-Python_Programming
|
lab6/ex_8_4.py
|
Python
|
mit
| 127
|
#!/usr/bin/env python3
# Advent of Code 2016 - Day 8, Part One and Two
import sys
from itertools import chain
COLS = 50
ROWS = 6
def rect(display, x, y):
for r in range(y):
for c in range(x):
display[r][c] = '#'
def rotate_col(display, x, s):
d = list(zip(*display))
rotate_row(d, x, s)
display[:] = list(map(list, zip(*d)))
def rotate_row(display, y, s):
display[y] = display[y][-s:] + display[y][:-s]
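# e.g. rotating a row [1, 2, 3, 4, 5] with s=2 yields [4, 5, 1, 2, 3]:
# the last s elements wrap around to the front (standard slice rotation).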
def main(argv):
if len(argv) < 2:
print("Usage: {} puzzle.txt".format(argv[0]))
return 1
display = [[' ' for _ in range(COLS)] for _ in range(ROWS)]
with open(argv[1]) as f:
for line in f:
cmd, *args = line.strip().split()
print(line.strip())
if cmd == 'rect':
x, y = map(int, args[0].split('x'))
rect(display, x, y)
elif cmd == 'rotate':
d = int(args[1].split('=')[1])
s = int(args[3])
if args[0] == 'column':
rotate_col(display, d, s)
elif args[0] == 'row':
rotate_row(display, d, s)
else:
print('err?', cmd, args)
return 1
else:
print('err?', cmd, args)
return 1
for row in display:
print("".join(row))
print("".join(chain.from_iterable(display)).count('#'))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
mcbor/adventofcode
|
2016/day08/day08.py
|
Python
|
mit
| 1,538
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2013-2014 Peter Williams <peter@newton.cx> and collaborators.
# Licensed under the MIT License.
"""pwkit.tabfile - I/O with typed tables of uncertain measurements.
Functions:
read - Read a typed table file.
vizread - Read a headerless table file, with columns specified separately
write - Write a typed table file.
The table format is line-oriented text. Hashes denote comments. Initial lines
of the form "colname = value" set a column name that gets the same value for
every item in the table. The header line is prefixed with an @ sign.
Subsequent lines are data rows.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('read vizread write').split ()
import six
from . import Holder, PKError, io, msmt, reraise_context
def _getparser (lname):
a = lname.rsplit (':', 1)
if len (a) == 1:
a.append ('s')
return a[0], msmt.parsers[a[1]]
def _trimmedlines (path, **kwargs):
for line in io.pathlines (path, **kwargs):
line = line[:-1] # trailing newline
line = line.split ('#', 1)[0]
if not len (line):
continue
if line.isspace ():
continue
yield line
def read (path, tabwidth=8, **kwargs):
"""Read a typed tabular text file into a stream of Holders.
Arguments:
path
The path of the file to read.
tabwidth=8
The tab width to assume. Please don't monkey with it.
mode='rt'
The file open mode (passed to io.open()).
noexistok=False
If True and the file is missing, treat it as empty.
``**kwargs``
Passed to io.open ().
Returns a generator for a stream of `pwkit.Holder`s, each of which will
contain ints, strings, or some kind of measurement (cf `pwkit.msmt`).
"""
datamode = False
fixedcols = {}
for text in _trimmedlines (path, **kwargs):
text = text.expandtabs (tabwidth)
if datamode:
# table row
h = Holder ()
h.set (**fixedcols)
for name, cslice, parser in info:
try:
v = parser (text[cslice].strip ())
except:
reraise_context ('while parsing "%s"', text[cslice].strip ())
h.set_one (name, v)
yield h
elif text[0] != '@':
# fixed column
padnamekind, padval = text.split ('=', 1)
name, parser = _getparser (padnamekind.strip ())
fixedcols[name] = parser (padval.strip ())
else:
# column specification
n = len (text)
assert n > 1
start = 0
info = []
while start < n:
end = start + 1
while end < n and (not text[end].isspace ()):
end += 1
if start == 0:
namekind = text[start+1:end] # eat leading @
else:
namekind = text[start:end]
while end < n and text[end].isspace ():
end += 1
name, parser = _getparser (namekind)
if parser is None: # allow columns to be ignored
skippedlast = True
else:
skippedlast = False
info.append ((name, slice (start, end), parser))
start = end
datamode = True
if not skippedlast:
# make our last column go as long as the line goes
# (e.g. for "comments" columns)
# but if the real last column is ":x"-type, then info[-1]
# doesn't run up to the end of the line, so do nothing in that case.
lname, lslice, lparser = info[-1]
info[-1] = lname, slice (lslice.start, None), lparser
def _tabpad (text, width, tabwidth=8):
# note: assumes we're starting tab-aligned
l = len (text)
assert l <= width
if l == width:
return text
n = width - l
ntab = n // tabwidth
nsp = n - ntab * tabwidth
return ''.join ((text, ' ' * nsp, '\t' * ntab))
def write (stream, items, fieldnames, tabwidth=8):
"""Write a typed tabular text file to the specified stream.
Arguments:
stream
The destination stream.
items
An iterable of items to write. Two passes have to
be made over the items (to discover the needed column widths),
so this will be saved into a list.
fieldnames
Either a list of field name strings, or a single string.
If the latter, it will be split into a list with .split().
tabwidth=8
The tab width to use. Please don't monkey with it.
Returns nothing.
"""
if isinstance (fieldnames, six.string_types):
fieldnames = fieldnames.split ()
maxlens = [0] * len (fieldnames)
# We have to make two passes, so listify:
items = list (items)
# pass 1: get types and maximum lengths for each record. Pad by 1 to
# ensure there's at least one space between all columns.
coltypes = [None] * len (fieldnames)
for i in items:
for idx, fn in enumerate (fieldnames):
val = i.get (fn)
if val is None:
continue
typetag, text, inexact = msmt.fmtinfo (val)
maxlens[idx] = max (maxlens[idx], len (text) + 1)
if coltypes[idx] is None:
coltypes[idx] = typetag
continue
if coltypes[idx] == typetag:
continue
if coltypes[idx][-1] == 'f' and typetag[-1] == 'u':
# Can upcast floats to uvals
if coltypes[idx][:-1] == typetag[:-1]:
coltypes[idx] = coltypes[idx][:-1] + 'u'
continue
if coltypes[idx][-1] == 'u' and typetag[-1] == 'f':
if coltypes[idx][:-1] == typetag[:-1]:
continue
raise PKError ('irreconcilable column types: %s and %s', coltypes[idx], typetag)
# Compute column headers and their widths
headers = list (fieldnames)
headers[0] = '@' + headers[0]
for idx, fn in enumerate (fieldnames):
if coltypes[idx] != '':
headers[idx] += ':' + coltypes[idx]
maxlens[idx] = max (maxlens[idx], len (headers[idx]))
widths = [tabwidth * ((k + tabwidth - 1) // tabwidth) for k in maxlens]
# pass 2: write out
print (''.join (_tabpad (h, widths[idx], tabwidth)
for (idx, h) in enumerate (headers)), file=stream)
def ustr (i, f):
v = i.get (f)
if v is None:
return ''
return msmt.fmtinfo (v)[1]
for i in items:
print (''.join (_tabpad (ustr (i, fn), widths[idx], tabwidth)
for (idx, fn) in enumerate (fieldnames)), file=stream)
def vizread (descpath, descsection, tabpath, tabwidth=8, **kwargs):
"""Read a headerless tabular text file into a stream of Holders.
Arguments:
descpath
The path of the table description ini file.
descsection
The section in the description file to use.
tabpath
The path to the actual table data.
tabwidth=8
The tab width to assume. Please don't monkey with it.
mode='rt'
The table file open mode (passed to io.open()).
noexistok=False
If True and the file is missing, treat it as empty.
``**kwargs``
Passed to io.open ().
Returns a generator of a stream of `pwkit.Holder`s, each of which will
contain ints, strings, or some kind of measurement (cf `pwkit.msmt`). In
this version, the table file does not contain a header, as seen in Vizier
data files. The corresponding section in the description ini file has keys
of the form "colname = <start> <end> [type]", where <start> and <end> are
the **1-based** character numbers defining the column, and [type] is an
optional specifier of the measurement type of the column (one of the usual
b, i, f, u, Lu, Pu).
"""
from .inifile import read as iniread
cols = []
for i in iniread (descpath):
if i.section != descsection:
continue
for field, desc in six.iteritems (i.__dict__):
if field == 'section':
continue
a = desc.split ()
idx0 = int (a[0]) - 1
if len (a) == 1:
cols.append ((field, slice (idx0, idx0 + 1), msmt.parsers['s']))
continue
if len (a) == 2:
parser = msmt.parsers['s']
else:
parser = msmt.parsers[a[2]]
cols.append ((field, slice (idx0, int (a[1])), parser))
for text in _trimmedlines (tabpath, **kwargs):
text = text.expandtabs (tabwidth)
h = Holder ()
for name, cslice, parser in cols:
try:
v = parser (text[cslice].strip ())
except:
reraise_context ('while parsing "%s"', text[cslice].strip ())
h.set_one (name, v)
yield h
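# Minimal usage sketch, assuming a table file "sources.txt" written in the
# format described in read()'s docstring:
#
#   for row in read ('sources.txt'):
#       print (row.name, row.nobs)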
|
pkgw/pwkit
|
pwkit/tabfile.py
|
Python
|
mit
| 9,163
|
from urlparser import parse_string, MalformatUrlException
supported_schemes = ['http', 'https']
def _validate_url(url):
if url.get_scheme().lower() not in supported_schemes:
raise InvalidUrlException('Unsupported scheme.')
def normalize_url(input_url):
url = parse_string(input_url)
    _validate_url(url)
builder = url.builder()
builder.set_scheme(url.get_scheme().lower())
host = url.get_host().lower()
if host.endswith("."):
host = host[:-1]
builder.set_host(host)
builder.set_fragment(None)
# remove utm parameters
# https://support.google.com/analytics/answer/1033867
blacklist = ['utm_source', 'utm_medium', 'utm_term', 'utm_content', 'utm_campaign']
queries = filter(lambda x: x[0] not in blacklist, url.get_queries())
# sort queries
queries = sorted(queries)
builder.set_queries(queries)
return builder.build().get()
class InvalidUrlException(Exception):
pass
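# hypothetical usage sketch (exact behaviour depends on the urlparser package):
#
#   normalize_url('HTTP://Example.COM./page?utm_source=x&b=2&a=1#top')
#   # -> 'http://example.com/page?a=1&b=2'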
|
kafji/urlnormalizer
|
urlnormalizer.py
|
Python
|
mit
| 996
|
"""
https://www.hackerrank.com/challenges/two-two
"""
def strength(a, i, j):
if a[i] == 0:
return 0
return int(''.join([str(x) for x in a[i:j+1]]))
def power_of_2(num):
return (num != 0) and ((num & (num - 1)) == 0)
def twotwo(a):
count = 0
for i in range(len(a)):
if a[i] == 0:
continue
for j in range(i, len(a)):
            if a[j] not in [1, 2, 4, 6, 8]:  # powers of two end in 1, 2, 4, 6 or 8
                continue
if power_of_2(strength(a, i, j)):
count += 1
return count
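# worked example: twotwo([2, 2, 2, 2]) == 4, since each single "2" is a
# power of two while "22", "222" and "2222" are not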
n = int(raw_input())
while n > 0:
z = [int(x) for x in list(raw_input())]
print twotwo(z)
n -= 1
|
capsci/chrome
|
practice/python/TwoTwo.py
|
Python
|
mit
| 643
|
from __future__ import absolute_import
from __future__ import print_function
import os
import unittest
import functools
from six import PY3, binary_type, text_type
# noinspection PyUnresolvedReferences
from six.moves import range
from pybufrkit.decoder import Decoder
BASE_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(BASE_DIR, 'data')
def read_bufr_file(file_name):
with open(os.path.join(DATA_DIR, file_name), 'rb') as ins:
s = ins.read()
return s
class DecoderTests(unittest.TestCase):
def setUp(self):
self.decoder = Decoder()
self.filename_stubs = [
'IUSK73_AMMC_182300',
'rado_250', # uncompressed with 222000, 224000, 236000
'207003', # compressed with delayed replication
'amv2_87', # compressed with 222000
'b005_89', # compressed with 222000 and 224000 (1st order stats)
'profiler_european', # uncompressed with 204001 associated fields
'jaso_214', # compressed with 204001 associated fields
'uegabe', # uncompressed with 204004 associated fields
'asr3_190', # compressed with complex replication and 222000, 224000
'b002_95', # uncompressed with skipped local descriptors
'g2nd_208', # compressed with identical string values for all subsets
'ISMD01_OKPR', # compressed with different string values for subsets
'mpco_217',
]
def tearDown(self):
pass
def _compare(self, bufr_message, cmp_file_name):
with open(os.path.join(DATA_DIR, cmp_file_name)) as ins:
lines = ins.readlines()
next_line = functools.partial(next, iter(lines))
for idx_subset in range(len(bufr_message.template_data.value.decoded_values_all_subsets)):
for idx, value in enumerate(bufr_message.template_data.value.decoded_values_all_subsets[idx_subset]):
cmp_line = next_line().strip()
if value is None:
line = '{} {}'.format(idx + 1, repr(value))
assert line == cmp_line, \
'At line {}: {} != {}'.format(idx + 1, line, cmp_line)
elif isinstance(value, (binary_type, text_type)):
# TODO: better to decode all ascii bytes to unicode string
if isinstance(value, binary_type) and PY3:
line = '{} {}'.format(idx + 1, repr(value)[1:])
else:
line = '{} {}'.format(idx + 1, repr(value))
assert line == cmp_line, \
'At line {}: {} != {}'.format(idx + 1, line, cmp_line)
else:
field = cmp_line.split()[1]
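                    # cmp files written under Python 2 may carry a trailing
                    # long-literal suffix, e.g. "123L"; strip it before eval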
if field.endswith('L'):
field = field[:-1]
cmp_value = eval(field)
                    assert abs(value - cmp_value) < 1.0e-6, \
'At line {}: {} != {}'.format(idx + 1, value, cmp_value)
def _print_values(self, bufr_message):
for idx_subset in range(len(bufr_message.template_data.value.decoded_values_all_subsets)):
for idx, value in enumerate(bufr_message.template_data.value.decoded_values_all_subsets[idx_subset]):
print(idx + 1, repr(value))
def do_test(self, filename_stub):
s = read_bufr_file(filename_stub + '.bufr')
bufr_message = self.decoder.process(s, filename_stub)
self._compare(bufr_message, filename_stub + '.values.cmp')
def test_decode(self):
print()
for filename_stub in self.filename_stubs:
print(filename_stub)
self.do_test(filename_stub)
|
ywangd/pybufrkit
|
tests/test_Decoder.py
|
Python
|
mit
| 3,732
|
import pyfits as PF
import sys
import os
from numpy import *
from scipy import *
#sys.path.append("../LIHSPcommon")
#inpath: filepath of input flat spool
#filenames: list of input flat frame filenames
#masterdark: master dark frame array
#dexp: exposure time of the master dark
#outpath: filepath of masterflat
def rflat(filepath,filename):
filein = filepath + '/' + filename
if filename.find('mstrflat') < 0:
if filename.endswith('.fits'):
filename=filename[:-5]
filein=filepath+'/'+filename+'_mstrflat.fits'
flat=PF.open(filein)
return flat[0].data
def mkflat(inpath,filenames,masterdark,dexp,outpath):
fileout=outpath+'/'+filenames[0].replace('.fits','_mstrflat.fits')
try:
        masterflat = rflat(outpath,filenames[0])
print "Read masterflat :", fileout
except:
nimages=len(filenames)
flat=PF.open(inpath+'/'+filenames[0])
xarray=flat[0].header['NAXIS1']
yarray=flat[0].header['NAXIS2']
cube=zeros((nimages,yarray,xarray),float)
print "flat 1: ", filenames[0]
header=flat[0].header
if 'HBIN' in header:
binning=(float(header['HBIN']),float(header['VBIN']))
elif 'CCDXBIN' in header:
binning=(float(header['CCDXBIN']),float(header['CCDYBIN']))
if binning == (1,1):
bias=433.5
elif binning ==(2,2):
bias=385.5
else:
bias=400.0
print "WARNING: for this binning the bias is just a guess..."
cube[0]=(flat[0].data-bias)/header['EXPTIME']-(masterdark-bias)/dexp
i=0
for f in filenames[1:]:
print "flat ",str(i),": ", filenames[0]
i=i+1
flat=PF.open(inpath+'/'+f)
header=flat[0].header
cube[i]=flat[0].data/header['EXPTIME']-masterdark/dexp
print 'Taking median with 3-sigma clipping...'
header.update('FRMTYPE','FLAT')
header.update('EXPTIME',1.0)
header.update('METHOD','SIG-CLIP MED','Method of frame combination', after="FRMTYPE")
sig=std(cube, axis=0)
mn=cube.mean(axis=0)
kap=3
masterflat=zeros((yarray,xarray), float)
for i in range(yarray):
for j in range(xarray):
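                # keep pixels within +/- kappa sigma of the temporal mean,
                # then median-combine the survivors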
maskhi=cube[:,i,j]>(mn[i,j]-kap*sig[i,j])
masklo=cube[:,i,j]<(mn[i,j]+kap*sig[i,j])
masterflat[i,j]=median(cube[maskhi&masklo,i,j])
masterflat/=median(masterflat)
header.update('CLIPPING',kap,'Kappa coefficient of clipping',after='METHOD')
##### WRITE MASTERFLAT TO .FITS #####
print 'printing'
PF.writeto(fileout, masterflat, header, clobber=True) #creates master.fits at fileout
print 'Master flat written to %s' % fileout
return masterflat
|
fedhere/getlucky
|
LIHSPcommon/mflat.py
|
Python
|
mit
| 2,975
|
#Text parameters
XSMALL = 7
SMALL = 9
NORMAL = 11
BIG = 13
#PADDING (%)
PAD = .05
MPL_COLORMAP = 'brg'
#LAYOUT CONFIGURATION
LEFT = 0.04
BOTTOM = 0.07
RIGHT = 0.98
TOP = 0.94
WSPACE = 0.17
HSPACE = 1.
#Line Width
LW_THIN = 0.3
LW_MEDIUM = 0.5
LW_NORMAL = 1
LW_TICK = 2
MAXIMIZE = 'maximize'
MINIMIZE = 'minimize'
#COLORS
RED = 'red'
BLACK = 'black'
RED_DARK = '#AA2222'
GREEN = 'green'
ORANGE = 'orange'
BLUE = 'blue'
GRAY8 = '#888888'
#size of scatter point
SMAX = 50
SMIN = 10
ALPHA50 = 0.5
ALPHA70 = 0.7
ALPHA80 = 0.8
ALPHA100 = 1.
SCATTER_DEFAULT = dict(edgecolor=GRAY8,lw=1)
SCATTER_HIGH = dict(edgecolor=BLACK, lw=2)
LINE_DEFAULT = dict(alpha=ALPHA70, lw=LW_MEDIUM)
LINE_HIGH = dict(alpha=ALPHA100, lw=LW_TICK, zorder=101)
|
adailsonfilho/DarwinEye
|
config.py
|
Python
|
mit
| 785
|
from django.shortcuts import get_object_or_404
from django.template import Context, loader as template_loader
from django.conf import settings
from django.core.context_processors import csrf
from rest_framework import decorators, permissions, viewsets, status
from rest_framework.renderers import JSONPRenderer, JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from bookmarks.models import Bookmark
from builds.models import Version
from projects.models import Project
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer, JSONPRenderer, BrowsableAPIRenderer))
def footer_html(request):
project_slug = request.GET.get('project', None)
version_slug = request.GET.get('version', None)
page_slug = request.GET.get('page', None)
theme = request.GET.get('theme', False)
docroot = request.GET.get('docroot', '')
subproject = request.GET.get('subproject', False)
source_suffix = request.GET.get('source_suffix', '.rst')
new_theme = (theme == "sphinx_rtd_theme")
using_theme = (theme == "default")
project = get_object_or_404(Project, slug=project_slug)
version = get_object_or_404(Version.objects.public(request.user, project=project, only_active=False), slug=version_slug)
main_project = project.main_language_project or project
if page_slug and page_slug != "index":
if main_project.documentation_type == "sphinx_htmldir" or main_project.documentation_type == "mkdocs":
path = page_slug + "/"
elif main_project.documentation_type == "sphinx_singlehtml":
path = "index.html#document-" + page_slug
else:
path = page_slug + ".html"
else:
path = ""
host = request.get_host()
if settings.PRODUCTION_DOMAIN in host and request.user.is_authenticated():
show_bookmarks = True
try:
bookmark = Bookmark.objects.get(
user=request.user,
project=project,
version=version,
page=page_slug,
)
        except Exception:  # Bookmark.DoesNotExist, MultipleObjectsReturned, or anything else
bookmark = None
else:
show_bookmarks = False
bookmark = None
context = Context({
'show_bookmarks': show_bookmarks,
'bookmark': bookmark,
'project': project,
'path': path,
'downloads': version.get_downloads(pretty=True),
'current_version': version.slug,
'versions': Version.objects.public(user=request.user, project=project),
'main_project': main_project,
'translations': main_project.translations.all(),
'current_language': project.language,
'using_theme': using_theme,
'new_theme': new_theme,
'settings': settings,
'subproject': subproject,
'github_edit_url': version.get_github_url(docroot, page_slug, source_suffix, 'edit'),
'github_view_url': version.get_github_url(docroot, page_slug, source_suffix, 'view'),
'bitbucket_url': version.get_bitbucket_url(docroot, page_slug, source_suffix),
})
context.update(csrf(request))
html = template_loader.get_template('restapi/footer.html').render(context)
return Response({
'html': html,
'version_active': version.active,
'version_supported': version.supported,
})
|
apparena/docs
|
readthedocs/restapi/views/footer_views.py
|
Python
|
mit
| 3,436
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# jwt_apns_client documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import jwt_apns_client
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'JWT APNs Client'
copyright = u"2016, Mobelux LLC"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = jwt_apns_client.__version__
# The full version, including alpha/beta/rc tags.
release = jwt_apns_client.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'jwt_apns_clientdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'jwt_apns_client.tex',
u'JWT APNs Client Documentation',
u'Justin Michalicek', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jwt_apns_client',
u'JWT APNs Client Documentation',
[u'Justin Michalicek'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'jwt_apns_client',
u'JWT APNs Client Documentation',
u'Justin Michalicek',
'jwt_apns_client',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
Mobelux/jwt_apns_client
|
docs/conf.py
|
Python
|
mit
| 8,505
|
#!/usr/bin/env python3
"""Implements getch. From: https://gist.github.com/chao787/2652257"""
# Last modified: <2012-05-10 18:04:45 Thursday by richard>
# @version 0.1
# @author : Richard Wong
# Email: chao787@gmail.com
class _Getch:
"""
Gets a single character from standard input. Does not echo to
the screen.
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
except(AttributeError, ImportError):
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix:
def __init__(self):
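        # the import below presumably doubles as an availability check, so
        # the fallback chain in _Getch can fail fast on unsupported platforms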
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.buffer.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
# Windows sends 0xe0 followed by a single letter for many of the
# special keys (like left/right arrow) so we map these to the
# characters that MicroPythons readline function will work with.
self.keymap = {
b'G': b'\x01', # Control-A Home
b'K': b'\x02', # Control-B Left Arrow
b'S': b'\x04', # Control-D DEL
b'O': b'\x05', # Control-E End
b'M': b'\x06', # Control-F Right Arrow
b'P': b'\x0e', # Control-N Down Arrow (Next line in history)
b'H': b'\x10', # Control-P Up Arrow (Prev line in history)
}
def __call__(self):
import msvcrt
ch = msvcrt.getch()
if ch == b'\xe0':
ch = msvcrt.getch()
if ch in self.keymap:
ch = self.keymap[ch]
return ch
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt #see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0]==0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg & 0x000000FF)
getch = _Getch()
if __name__ == "__main__":
ch = getch()
print('ch =', ch)
|
dhylands/rshell
|
rshell/getch.py
|
Python
|
mit
| 3,120
|
import _plotly_utils.basevalidators
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="ticksuffix", parent_name="funnel.marker.colorbar", **kwargs
):
super(TicksuffixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/funnel/marker/colorbar/_ticksuffix.py
|
Python
|
mit
| 439
|
#! /usr/bin/env python
"""Date and time formatting and parsing functions.
"""
import datetime
from functional import compose
def convert_format(from_format, to_format):
"""convert_format(from_format, to_format)(timestr) -> str
Convert between two time formats.
>>> convert_format('%d/%m/%Y', '%Y-%m-%d')('21/12/2112')
'2112-12-21'
"""
return compose(strftime(to_format), strptime(from_format))
def strptime(from_format):
"""strptime(from_format)(timestr) -> datetime.datetime
Return datetime object from timestr according to from_format.
:: str -> str -> datetime.datetime
>>> strptime('%d/%m/%Y')('21/12/2112').date()
datetime.date(2112, 12, 21)
"""
return lambda timestr: datetime.datetime.strptime(timestr, from_format)
def strftime(to_format):
"""strftime(to_format)(dt_obj) -> str
Return datetime, time or date object dt_obj formatted with to_format.
:: str -> (datetime.datetime|datetime.date|datetime.time) -> str
>>> strftime('%Y-%m-%d')(datetime.date(2112, 12, 21))
'2112-12-21'
"""
return lambda dt_obj: dt_obj.strftime(to_format)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
nmbooker/python-funbox
|
funbox/dates.py
|
Python
|
mit
| 1,207
|
from thinglang.compiler.buffer import CompilationBuffer
from thinglang.compiler.opcodes import OpcodeJumpConditional, OpcodeJump
from thinglang.compiler.tracker import ResolvableIndex
from thinglang.lexer.blocks.loops import LexicalRepeatWhile
from thinglang.parser.nodes.base_node import BaseNode
from thinglang.parser.rule import ParserRule
from thinglang.utils.type_descriptors import ValueType
class Loop(BaseNode):
"""
A simple while loop
"""
def __init__(self, value, original_tokens=()):
super(Loop, self).__init__((value,) + original_tokens)
self.value = value
def __repr__(self):
return str(self.value)
def compile(self, context: CompilationBuffer):
loop_start = ResolvableIndex(context.next_index) # Jumps to the evaluation of the loop's expression
jump_out = ResolvableIndex() # Jumps out of the loop when done
conditional_jump = OpcodeJumpConditional(jump_out)
context.jump_out[self] = jump_out
context.jump_in[self] = loop_start
self.value.compile(context)
context.append(conditional_jump, self.source_ref) # Evaluation
super(Loop, self).compile(context)
context.append(OpcodeJump(loop_start.index), self.source_ref)
jump_out.index = context.next_index
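        # resulting opcode layout (sketch):
        #   loop_start:  <condition opcodes>
        #                JUMP_CONDITIONAL jump_out
        #                <body opcodes>
        #                JUMP loop_start
        #   jump_out:    <code after the loop>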
@staticmethod
@ParserRule.mark
def parse_loop(_: LexicalRepeatWhile, value: ValueType):
return Loop(value)
|
ytanay/thinglang
|
thinglang/parser/blocks/loop.py
|
Python
|
mit
| 1,434
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Resource'
db.create_table(u'workshop_resource', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('filename', self.gf('django.db.models.fields.CharField')(max_length=200)),
('is_public', self.gf('django.db.models.fields.BooleanField')(default=True)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')()),
('workshop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['workshop.Workshop'])),
))
db.send_create_signal(u'workshop', ['Resource'])
# Adding field 'Attendance.has_lunch'
db.add_column(u'workshop_attendance', 'has_lunch',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Adding field 'Workshop.lunch'
db.add_column(u'workshop_workshop', 'lunch',
self.gf('django.db.models.fields.TextField')(default='', max_length=50),
keep_default=False)
# Adding field 'Workshop.lunch_fee'
db.add_column(u'workshop_workshop', 'lunch_fee',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting model 'Resource'
db.delete_table(u'workshop_resource')
# Deleting field 'Attendance.has_lunch'
db.delete_column(u'workshop_attendance', 'has_lunch')
# Deleting field 'Workshop.lunch'
db.delete_column(u'workshop_workshop', 'lunch')
# Deleting field 'Workshop.lunch_fee'
db.delete_column(u'workshop_workshop', 'lunch_fee')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'workshop.attendance': {
'Meta': {'object_name': 'Attendance'},
'attendee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'date_registered': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'has_lunch': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'payment': ('django.db.models.fields.IntegerField', [], {}),
'transaction_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'workshop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['workshop.Workshop']"})
},
u'workshop.lecturing': {
'Meta': {'object_name': 'Lecturing'},
'date_determined': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lecturer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'payment': ('django.db.models.fields.IntegerField', [], {}),
'workshop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['workshop.Workshop']"})
},
u'workshop.resource': {
'Meta': {'object_name': 'Resource'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'workshop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['workshop.Workshop']"})
},
u'workshop.workshop': {
'Meta': {'object_name': 'Workshop'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'attended_workshops'", 'null': 'True', 'through': u"orm['workshop.Attendance']", 'to': u"orm['auth.User']"}),
'breif_description': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'fee': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'full_description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lecturers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'lectured_workshops'", 'null': 'True', 'through': u"orm['workshop.Lecturing']", 'to': u"orm['auth.User']"}),
'length': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'lunch': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '50'}),
'lunch_fee': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['workshop']
|
SSC-SUT/SSC-Site
|
src/ssc/workshop/migrations/0002_auto__add_resource__add_field_attendance_has_lunch__add_field_workshop.py
|
Python
|
mit
| 8,903
|
""" Register allocation scheme.
"""
import os, sys
from rpython.jit.backend.llsupport import symbolic
from rpython.jit.backend.llsupport.descr import CallDescr, unpack_arraydescr
from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc,
RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op,
valid_addressing_size, get_scale)
from rpython.jit.backend.x86 import rx86
from rpython.jit.backend.x86.arch import (WORD, JITFRAME_FIXED_SIZE, IS_X86_32,
IS_X86_64, DEFAULT_FRAME_BYTES)
from rpython.jit.backend.x86.jump import remap_frame_layout_mixed
from rpython.jit.backend.x86.regloc import (FrameLoc, RegLoc, ConstFloatLoc,
FloatImmedLoc, ImmedLoc, imm, imm0, imm1, ecx, eax, edx, ebx, esi, edi,
ebp, r8, r9, r10, r11, r12, r13, r14, r15, xmm0, xmm1, xmm2, xmm3, xmm4,
xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14,
X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG)
from rpython.jit.backend.x86.vector_ext import VectorRegallocMixin
from rpython.jit.codewriter import longlong
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.jit.metainterp.history import (Const, ConstInt, ConstPtr,
ConstFloat, INT, REF, FLOAT, VECTOR, TargetToken, AbstractFailDescr)
from rpython.jit.metainterp.resoperation import rop, ResOperation
from rpython.jit.metainterp.resume import AccumInfo
from rpython.rlib import rgc
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.rarithmetic import r_longlong, r_uint
from rpython.rtyper.annlowlevel import cast_instance_to_gcref
from rpython.rtyper.lltypesystem import lltype, rffi, rstr
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.jit.backend.x86.regloc import AddressLoc
class X86RegisterManager(RegisterManager):
box_types = [INT, REF]
all_regs = [ecx, eax, edx, ebx, esi, edi]
no_lower_byte_regs = [esi, edi]
save_around_call_regs = [eax, edx, ecx]
frame_reg = ebp
def call_result_location(self, v):
return eax
def convert_to_imm(self, c):
if isinstance(c, ConstInt):
return imm(c.value)
elif isinstance(c, ConstPtr):
if we_are_translated() and c.value and rgc.can_move(c.value):
not_implemented("convert_to_imm: ConstPtr needs special care")
return imm(rffi.cast(lltype.Signed, c.value))
else:
not_implemented("convert_to_imm: got a %s" % c)
class X86_64_RegisterManager(X86RegisterManager):
# r11 omitted because it's used as scratch
all_regs = [ecx, eax, edx, ebx, esi, edi, r8, r9, r10, r12, r13, r14, r15]
no_lower_byte_regs = []
save_around_call_regs = [eax, ecx, edx, esi, edi, r8, r9, r10]
class X86XMMRegisterManager(RegisterManager):
box_types = [FLOAT, INT] # yes INT!
all_regs = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7]
# we never need lower byte I hope
save_around_call_regs = all_regs
def convert_to_imm(self, c):
adr = self.assembler.datablockwrapper.malloc_aligned(8, 8)
x = c.getfloatstorage()
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = x
return ConstFloatLoc(adr)
def convert_to_imm_16bytes_align(self, c):
adr = self.assembler.datablockwrapper.malloc_aligned(16, 16)
x = c.getfloatstorage()
y = longlong.ZEROF
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = x
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[1] = y
return ConstFloatLoc(adr)
def expand_float(self, size, const):
if size == 4:
loc = self.expand_single_float(const)
else:
loc = self.expand_double_float(const)
return loc
def expand_double_float(self, f):
adr = self.assembler.datablockwrapper.malloc_aligned(16, 16)
fs = f.getfloatstorage()
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = fs
rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[1] = fs
return ConstFloatLoc(adr)
def expand_single_float(self, f):
adr = self.assembler.datablockwrapper.malloc_aligned(16, 16)
fs = rffi.cast(lltype.SingleFloat, f.getfloatstorage())
rffi.cast(rffi.CArrayPtr(lltype.SingleFloat), adr)[0] = fs
rffi.cast(rffi.CArrayPtr(lltype.SingleFloat), adr)[1] = fs
rffi.cast(rffi.CArrayPtr(lltype.SingleFloat), adr)[2] = fs
rffi.cast(rffi.CArrayPtr(lltype.SingleFloat), adr)[3] = fs
return ConstFloatLoc(adr)
def call_result_location(self, v):
return xmm0
class X86_64_XMMRegisterManager(X86XMMRegisterManager):
# xmm15 reserved for scratch use
all_regs = [xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14]
save_around_call_regs = all_regs
class X86FrameManager(FrameManager):
def __init__(self, base_ofs):
FrameManager.__init__(self)
self.base_ofs = base_ofs
def frame_pos(self, i, box_type):
return FrameLoc(i, get_ebp_ofs(self.base_ofs, i), box_type)
@staticmethod
def frame_size(box_type):
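        # on 32-bit x86 a double-precision float needs two 4-byte frame
        # slots; everything else fits in a single word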
if IS_X86_32 and box_type == FLOAT:
return 2
else:
return 1
@staticmethod
def get_loc_index(loc):
assert isinstance(loc, FrameLoc)
return loc.position
if WORD == 4:
gpr_reg_mgr_cls = X86RegisterManager
xmm_reg_mgr_cls = X86XMMRegisterManager
elif WORD == 8:
gpr_reg_mgr_cls = X86_64_RegisterManager
xmm_reg_mgr_cls = X86_64_XMMRegisterManager
else:
raise AssertionError("Word size should be 4 or 8")
gpr_reg_mgr_cls.all_reg_indexes = [-1] * WORD * 2 # eh, happens to be true
for _i, _reg in enumerate(gpr_reg_mgr_cls.all_regs):
gpr_reg_mgr_cls.all_reg_indexes[_reg.value] = _i
class RegAlloc(BaseRegalloc, VectorRegallocMixin):
def __init__(self, assembler, translate_support_code=False):
assert isinstance(translate_support_code, bool)
# variables that have place in register
self.assembler = assembler
self.translate_support_code = translate_support_code
# to be read/used by the assembler too
self.jump_target_descr = None
self.final_jump_op = None
def _prepare(self, inputargs, operations, allgcrefs):
for box in inputargs:
assert box.get_forwarded() is None
cpu = self.assembler.cpu
self.fm = X86FrameManager(cpu.get_baseofs_of_frame_field())
operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations,
allgcrefs)
# compute longevity of variables
longevity, last_real_usage = compute_vars_longevity(
inputargs, operations)
self.longevity = longevity
self.last_real_usage = last_real_usage
self.rm = gpr_reg_mgr_cls(self.longevity,
frame_manager = self.fm,
assembler = self.assembler)
self.xrm = xmm_reg_mgr_cls(self.longevity, frame_manager = self.fm,
assembler = self.assembler)
return operations
def prepare_loop(self, inputargs, operations, looptoken, allgcrefs):
operations = self._prepare(inputargs, operations, allgcrefs)
self._set_initial_bindings(inputargs, looptoken)
# note: we need to make a copy of inputargs because possibly_free_vars
# is also used on op args, which is a non-resizable list
self.possibly_free_vars(list(inputargs))
if WORD == 4: # see redirect_call_assembler()
self.min_bytes_before_label = 5
else:
self.min_bytes_before_label = 13
return operations
def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs,
frame_info):
operations = self._prepare(inputargs, operations, allgcrefs)
self._update_bindings(arglocs, inputargs)
self.min_bytes_before_label = 0
return operations
def ensure_next_label_is_at_least_at_position(self, at_least_position):
self.min_bytes_before_label = max(self.min_bytes_before_label,
at_least_position)
def get_final_frame_depth(self):
return self.fm.get_frame_depth()
def possibly_free_var(self, var):
if var.type == FLOAT or var.is_vector():
self.xrm.possibly_free_var(var)
else:
self.rm.possibly_free_var(var)
def possibly_free_vars_for_op(self, op):
for i in range(op.numargs()):
var = op.getarg(i)
if var is not None: # xxx kludgy
self.possibly_free_var(var)
if op.type != 'v':
self.possibly_free_var(op)
def possibly_free_vars(self, vars):
for var in vars:
if var is not None: # xxx kludgy
self.possibly_free_var(var)
def make_sure_var_in_reg(self, var, forbidden_vars=[],
selected_reg=None, need_lower_byte=False):
if var.type == FLOAT or var.is_vector():
if isinstance(var, ConstFloat):
return FloatImmedLoc(var.getfloatstorage())
return self.xrm.make_sure_var_in_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
else:
return self.rm.make_sure_var_in_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
def force_allocate_reg(self, var, forbidden_vars=[], selected_reg=None,
need_lower_byte=False):
if var.type == FLOAT or var.is_vector():
return self.xrm.force_allocate_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
else:
return self.rm.force_allocate_reg(var, forbidden_vars,
selected_reg, need_lower_byte)
def force_allocate_reg_or_cc(self, var):
assert var.type == INT
if self.next_op_can_accept_cc(self.operations, self.rm.position):
# hack: return the ebp location to mean "lives in CC". This
# ebp will not actually be used, and the location will be freed
# after the next op as usual.
self.rm.force_allocate_frame_reg(var)
return ebp
else:
# else, return a regular register (not ebp).
return self.rm.force_allocate_reg(var, need_lower_byte=True)
def force_spill_var(self, var):
if var.type == FLOAT:
return self.xrm.force_spill_var(var)
else:
return self.rm.force_spill_var(var)
def load_xmm_aligned_16_bytes(self, var, forbidden_vars=[]):
# Load 'var' in a register; but if it is a constant, we can return
# a 16-bytes-aligned ConstFloatLoc.
if isinstance(var, Const):
return self.xrm.convert_to_imm_16bytes_align(var)
else:
return self.xrm.make_sure_var_in_reg(var, forbidden_vars)
def _update_bindings(self, locs, inputargs):
# XXX this should probably go to llsupport/regalloc.py
used = {}
i = 0
for loc in locs:
if loc is None: # xxx bit kludgy
loc = ebp
arg = inputargs[i]
i += 1
if isinstance(loc, RegLoc):
if arg.type == FLOAT:
self.xrm.reg_bindings[arg] = loc
used[loc] = None
else:
if loc is ebp:
self.rm.bindings_to_frame_reg[arg] = None
else:
self.rm.reg_bindings[arg] = loc
used[loc] = None
else:
self.fm.bind(arg, loc)
self.rm.free_regs = []
for reg in self.rm.all_regs:
if reg not in used:
self.rm.free_regs.append(reg)
self.xrm.free_regs = []
for reg in self.xrm.all_regs:
if reg not in used:
self.xrm.free_regs.append(reg)
self.possibly_free_vars(list(inputargs))
self.fm.finish_binding()
self.rm._check_invariants()
self.xrm._check_invariants()
def perform(self, op, arglocs, result_loc):
if not we_are_translated():
self.assembler.dump('%s <- %s(%s)' % (result_loc, op, arglocs))
self.assembler.regalloc_perform(op, arglocs, result_loc)
def perform_llong(self, op, arglocs, result_loc):
if not we_are_translated():
self.assembler.dump('%s <- %s(%s)' % (result_loc, op, arglocs))
self.assembler.regalloc_perform_llong(op, arglocs, result_loc)
def perform_math(self, op, arglocs, result_loc):
if not we_are_translated():
self.assembler.dump('%s <- %s(%s)' % (result_loc, op, arglocs))
self.assembler.regalloc_perform_math(op, arglocs, result_loc)
def locs_for_fail(self, guard_op):
faillocs = [self.loc(arg) for arg in guard_op.getfailargs()]
descr = guard_op.getdescr()
if not descr:
return faillocs
assert isinstance(descr, AbstractFailDescr)
if descr.rd_vector_info:
accuminfo = descr.rd_vector_info
while accuminfo:
accuminfo.location = faillocs[accuminfo.getpos_in_failargs()]
loc = self.loc(accuminfo.getoriginal())
faillocs[accuminfo.getpos_in_failargs()] = loc
accuminfo = accuminfo.next()
return faillocs
def perform_guard(self, guard_op, arglocs, result_loc):
faillocs = self.locs_for_fail(guard_op)
if not we_are_translated():
if result_loc is not None:
self.assembler.dump('%s <- %s(%s)' % (result_loc, guard_op,
arglocs))
else:
self.assembler.dump('%s(%s)' % (guard_op, arglocs))
self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs,
result_loc,
self.fm.get_frame_depth())
self.possibly_free_vars(guard_op.getfailargs())
def perform_discard(self, op, arglocs):
if not we_are_translated():
self.assembler.dump('%s(%s)' % (op, arglocs))
self.assembler.regalloc_perform_discard(op, arglocs)
def walk_operations(self, inputargs, operations):
i = 0
self.operations = operations
while i < len(operations):
op = operations[i]
self.assembler.mc.mark_op(op)
assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES
self.rm.position = i
self.xrm.position = i
if rop.has_no_side_effect(op.opnum) and op not in self.longevity:
i += 1
self.possibly_free_vars_for_op(op)
continue
if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL:
self._consider_force_spill(op)
else:
oplist[op.getopnum()](self, op)
self.possibly_free_vars_for_op(op)
self.rm._check_invariants()
self.xrm._check_invariants()
i += 1
assert not self.rm.reg_bindings
assert not self.xrm.reg_bindings
if not we_are_translated():
self.assembler.mc.UD2()
self.flush_loop()
self.assembler.mc.mark_op(None) # end of the loop
self.operations = None
for arg in inputargs:
self.possibly_free_var(arg)
def flush_loop(self):
# Force the code to be aligned to a multiple of 16. Also,
# rare case: if the loop is too short, or if we are just after
# a GUARD_NOT_INVALIDATED, we need to make sure we insert enough
# NOPs. This is important to ensure that there are enough bytes
# produced, because GUARD_NOT_INVALIDATED or
# redirect_call_assembler() will maybe overwrite them. (In that
# rare case we don't worry too much about alignment.)
mc = self.assembler.mc
current_pos = mc.get_relative_pos()
target_pos = (current_pos + 15) & ~15
target_pos = max(target_pos, self.min_bytes_before_label)
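        # e.g. current_pos == 37 rounds up to target_pos == 48, so 11 NOP
        # bytes are emitted (assuming min_bytes_before_label <= 48)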
insert_nops = target_pos - current_pos
assert 0 <= insert_nops <= 15
for c in mc.MULTIBYTE_NOPs[insert_nops]:
mc.writechar(c)
def loc(self, v):
if v is None: # xxx kludgy
return None
if v.type == FLOAT or v.is_vector():
return self.xrm.loc(v)
return self.rm.loc(v)
def load_condition_into_cc(self, box):
if self.assembler.guard_success_cc == rx86.cond_none:
self.assembler.test_location(self.loc(box))
self.assembler.guard_success_cc = rx86.Conditions['NZ']
def _consider_guard_cc(self, op):
arg = op.getarg(0)
self.load_condition_into_cc(arg)
self.perform_guard(op, [], None)
consider_guard_true = _consider_guard_cc
consider_guard_false = _consider_guard_cc
consider_guard_nonnull = _consider_guard_cc
consider_guard_isnull = _consider_guard_cc
def consider_finish(self, op):
# the frame is in ebp, but we have to point where in the frame is
# the potential argument to FINISH
if op.numargs() == 1:
loc = self.make_sure_var_in_reg(op.getarg(0))
locs = [loc]
else:
locs = []
self.perform(op, locs, None)
def consider_guard_no_exception(self, op):
self.perform_guard(op, [], None)
def consider_guard_not_invalidated(self, op):
mc = self.assembler.mc
n = mc.get_relative_pos(break_basic_block=False)
self.perform_guard(op, [], None)
assert n == mc.get_relative_pos(break_basic_block=False)
# ensure that the next label is at least 5 bytes farther than
# the current position. Otherwise, when invalidating the guard,
# we would overwrite randomly the next label's position.
self.ensure_next_label_is_at_least_at_position(n + 5)
def consider_guard_exception(self, op):
loc = self.rm.make_sure_var_in_reg(op.getarg(0))
box = TempVar()
args = op.getarglist()
loc1 = self.rm.force_allocate_reg(box, args)
if op in self.longevity:
# this means, is it ever used
resloc = self.rm.force_allocate_reg(op, args + [box])
else:
resloc = None
self.perform_guard(op, [loc, loc1], resloc)
self.rm.possibly_free_var(box)
def consider_save_exception(self, op):
resloc = self.rm.force_allocate_reg(op)
self.perform(op, [], resloc)
consider_save_exc_class = consider_save_exception
def consider_restore_exception(self, op):
args = op.getarglist()
loc0 = self.rm.make_sure_var_in_reg(op.getarg(0), args) # exc class
loc1 = self.rm.make_sure_var_in_reg(op.getarg(1), args) # exc instance
self.perform_discard(op, [loc0, loc1])
consider_guard_no_overflow = consider_guard_no_exception
consider_guard_overflow = consider_guard_no_exception
consider_guard_not_forced = consider_guard_no_exception
def consider_guard_value(self, op):
x = self.make_sure_var_in_reg(op.getarg(0))
loc = self.assembler.cpu.all_reg_indexes[x.value]
op.getdescr().make_a_counter_per_value(op, loc)
y = self.loc(op.getarg(1))
self.perform_guard(op, [x, y], None)
def consider_guard_class(self, op):
assert not isinstance(op.getarg(0), Const)
x = self.rm.make_sure_var_in_reg(op.getarg(0))
y = self.loc(op.getarg(1))
self.perform_guard(op, [x, y], None)
consider_guard_nonnull_class = consider_guard_class
consider_guard_gc_type = consider_guard_class
def consider_guard_is_object(self, op):
x = self.make_sure_var_in_reg(op.getarg(0))
tmp_box = TempVar()
y = self.rm.force_allocate_reg(tmp_box, [op.getarg(0)])
self.rm.possibly_free_var(tmp_box)
self.perform_guard(op, [x, y], None)
def consider_guard_subclass(self, op):
x = self.make_sure_var_in_reg(op.getarg(0))
tmp_box = TempVar()
z = self.rm.force_allocate_reg(tmp_box, [op.getarg(0)])
y = self.loc(op.getarg(1))
self.rm.possibly_free_var(tmp_box)
self.perform_guard(op, [x, y, z], None)
def _consider_binop_part(self, op, symm=False):
x = op.getarg(0)
y = op.getarg(1)
argloc = self.loc(y)
#
# For symmetrical operations, if 'y' is already in a register
# and won't be used after the current operation finishes,
# then swap the role of 'x' and 'y'
if (symm and isinstance(argloc, RegLoc) and
self.rm.longevity[y][1] == self.rm.position):
x, y = y, x
argloc = self.loc(y)
#
args = op.getarglist()
loc = self.rm.force_result_in_reg(op, x, args)
return loc, argloc
def _consider_binop(self, op):
loc, argloc = self._consider_binop_part(op)
self.perform(op, [loc, argloc], loc)
def _consider_binop_symm(self, op):
loc, argloc = self._consider_binop_part(op, symm=True)
self.perform(op, [loc, argloc], loc)
def _consider_lea(self, op, loc):
argloc = self.loc(op.getarg(1))
resloc = self.force_allocate_reg(op)
self.perform(op, [loc, argloc], resloc)
def consider_int_add(self, op):
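        # Prefer LEA over ADD when the second argument is a small constant
        # and the first is already in a register: unlike ADD, LEA does not
        # force the result to overwrite one of its operands.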
loc = self.loc(op.getarg(0))
y = op.getarg(1)
if (isinstance(loc, RegLoc) and
isinstance(y, ConstInt) and rx86.fits_in_32bits(y.value)):
self._consider_lea(op, loc)
else:
self._consider_binop_symm(op)
consider_nursery_ptr_increment = consider_int_add
def consider_int_sub(self, op):
loc = self.loc(op.getarg(0))
y = op.getarg(1)
if (isinstance(loc, RegLoc) and
isinstance(y, ConstInt) and rx86.fits_in_32bits(-y.value)):
self._consider_lea(op, loc)
else:
self._consider_binop(op)
consider_int_mul = _consider_binop_symm
consider_int_and = _consider_binop_symm
consider_int_or = _consider_binop_symm
consider_int_xor = _consider_binop_symm
consider_int_mul_ovf = _consider_binop_symm
consider_int_sub_ovf = _consider_binop
consider_int_add_ovf = _consider_binop_symm
def consider_uint_mul_high(self, op):
arg1, arg2 = op.getarglist()
# should support all cases, but is optimized for (box, const)
if isinstance(arg1, Const):
arg1, arg2 = arg2, arg1
self.rm.make_sure_var_in_reg(arg2, selected_reg=eax)
l1 = self.loc(arg1)
# l1 is a register != eax, or stack_bp; or, just possibly, it
# can be == eax if arg1 is arg2
assert not isinstance(l1, ImmedLoc)
assert l1 is not eax or arg1 is arg2
#
# eax will be trash after the operation
self.rm.possibly_free_var(arg2)
tmpvar = TempVar()
self.rm.force_allocate_reg(tmpvar, selected_reg=eax)
self.rm.possibly_free_var(tmpvar)
#
self.rm.force_allocate_reg(op, selected_reg=edx)
self.perform(op, [l1], edx)
def consider_int_neg(self, op):
res = self.rm.force_result_in_reg(op, op.getarg(0))
self.perform(op, [res], res)
consider_int_invert = consider_int_neg
def consider_int_signext(self, op):
argloc = self.loc(op.getarg(0))
numbytesloc = self.loc(op.getarg(1))
resloc = self.force_allocate_reg(op)
self.perform(op, [argloc, numbytesloc], resloc)
def consider_int_lshift(self, op):
if isinstance(op.getarg(1), Const):
loc2 = self.rm.convert_to_imm(op.getarg(1))
else:
loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx)
args = op.getarglist()
loc1 = self.rm.force_result_in_reg(op, op.getarg(0), args)
self.perform(op, [loc1, loc2], loc1)
consider_int_rshift = consider_int_lshift
consider_uint_rshift = consider_int_lshift
def _consider_compop(self, op):
vx = op.getarg(0)
vy = op.getarg(1)
arglocs = [self.loc(vx), self.loc(vy)]
args = op.getarglist()
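        # If at least one operand is already in a register (or is a
        # constant), the comparison can use the locations as they are;
        # otherwise force the first operand into a register.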
if (vx in self.rm.reg_bindings or vy in self.rm.reg_bindings or
isinstance(vx, Const) or isinstance(vy, Const)):
pass
else:
arglocs[0] = self.rm.make_sure_var_in_reg(vx)
loc = self.force_allocate_reg_or_cc(op)
self.perform(op, arglocs, loc)
consider_int_lt = _consider_compop
consider_int_gt = _consider_compop
consider_int_ge = _consider_compop
consider_int_le = _consider_compop
consider_int_ne = _consider_compop
consider_int_eq = _consider_compop
consider_uint_gt = _consider_compop
consider_uint_lt = _consider_compop
consider_uint_le = _consider_compop
consider_uint_ge = _consider_compop
consider_ptr_eq = consider_instance_ptr_eq = _consider_compop
consider_ptr_ne = consider_instance_ptr_ne = _consider_compop
def _consider_float_op(self, op):
loc1 = self.xrm.loc(op.getarg(1))
args = op.getarglist()
loc0 = self.xrm.force_result_in_reg(op, op.getarg(0), args)
self.perform(op, [loc0, loc1], loc0)
consider_float_add = _consider_float_op # xxx could be _symm
consider_float_sub = _consider_float_op
consider_float_mul = _consider_float_op # xxx could be _symm
consider_float_truediv = _consider_float_op
def _consider_float_cmp(self, op):
vx = op.getarg(0)
vy = op.getarg(1)
arglocs = [self.loc(vx), self.loc(vy)]
if not (isinstance(arglocs[0], RegLoc) or
isinstance(arglocs[1], RegLoc)):
if isinstance(vx, Const):
arglocs[1] = self.xrm.make_sure_var_in_reg(vy)
else:
arglocs[0] = self.xrm.make_sure_var_in_reg(vx)
loc = self.force_allocate_reg_or_cc(op)
self.perform(op, arglocs, loc)
consider_float_lt = _consider_float_cmp
consider_float_le = _consider_float_cmp
consider_float_eq = _consider_float_cmp
consider_float_ne = _consider_float_cmp
consider_float_gt = _consider_float_cmp
consider_float_ge = _consider_float_cmp
def _consider_float_unary_op(self, op):
loc0 = self.xrm.force_result_in_reg(op, op.getarg(0))
self.perform(op, [loc0], loc0)
consider_float_neg = _consider_float_unary_op
consider_float_abs = _consider_float_unary_op
def consider_cast_float_to_int(self, op):
loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.rm.force_allocate_reg(op)
self.perform(op, [loc0], loc1)
def consider_cast_int_to_float(self, op):
loc0 = self.rm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.xrm.force_allocate_reg(op)
self.perform(op, [loc0], loc1)
def consider_cast_float_to_singlefloat(self, op):
loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.rm.force_allocate_reg(op)
tmpxvar = TempVar()
loctmp = self.xrm.force_allocate_reg(tmpxvar) # may be equal to loc0
self.xrm.possibly_free_var(tmpxvar)
self.perform(op, [loc0, loctmp], loc1)
consider_cast_singlefloat_to_float = consider_cast_int_to_float
def consider_convert_float_bytes_to_longlong(self, op):
if longlong.is_64_bit:
loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.rm.force_allocate_reg(op)
self.perform(op, [loc0], loc1)
else:
arg0 = op.getarg(0)
loc0 = self.xrm.loc(arg0)
loc1 = self.xrm.force_allocate_reg(op, forbidden_vars=[arg0])
self.perform(op, [loc0], loc1)
def consider_convert_longlong_bytes_to_float(self, op):
if longlong.is_64_bit:
loc0 = self.rm.make_sure_var_in_reg(op.getarg(0))
loc1 = self.xrm.force_allocate_reg(op)
self.perform(op, [loc0], loc1)
else:
arg0 = op.getarg(0)
loc0 = self.xrm.make_sure_var_in_reg(arg0)
loc1 = self.xrm.force_allocate_reg(op, forbidden_vars=[arg0])
self.perform(op, [loc0], loc1)
def _consider_llong_binop_xx(self, op):
# must force both arguments into xmm registers, because we don't
# know if they will be suitably aligned. Exception: if the second
# argument is a constant, we can ask it to be aligned to 16 bytes.
# xxx some of these operations could be '_symm'.
args = [op.getarg(1), op.getarg(2)]
loc1 = self.load_xmm_aligned_16_bytes(args[1])
loc0 = self.xrm.force_result_in_reg(op, args[0], args)
self.perform_llong(op, [loc0, loc1], loc0)
def _consider_llong_eq_ne_xx(self, op):
# must force both arguments into xmm registers, because we don't
# know if they will be suitably aligned. Exception: if they are
# constants, we can ask them to be aligned to 16 bytes.
args = [op.getarg(1), op.getarg(2)]
loc1 = self.load_xmm_aligned_16_bytes(args[0])
loc2 = self.load_xmm_aligned_16_bytes(args[1], args)
tmpxvar = TempVar()
loc3 = self.xrm.force_allocate_reg(tmpxvar, args)
self.xrm.possibly_free_var(tmpxvar)
loc0 = self.rm.force_allocate_reg(op, need_lower_byte=True)
self.perform_llong(op, [loc1, loc2, loc3], loc0)
def _maybe_consider_llong_lt(self, op):
# XXX just a special case for now
box = op.getarg(2)
if not isinstance(box, ConstFloat):
return False
if box.getfloat() != 0.0: # NaNs are also != 0.0
return False
# "x < 0.0" or maybe "x < -0.0" which is the same
box = op.getarg(1)
assert box.type == FLOAT
loc1 = self.xrm.make_sure_var_in_reg(box)
loc0 = self.rm.force_allocate_reg(op)
self.perform_llong(op, [loc1], loc0)
return True
def _consider_llong_to_int(self, op):
# accept an argument in a xmm register or in the stack
loc1 = self.xrm.loc(op.getarg(1))
loc0 = self.rm.force_allocate_reg(op)
self.perform_llong(op, [loc1], loc0)
def _loc_of_const_longlong(self, value64):
c = ConstFloat(value64)
return self.xrm.convert_to_imm(c)
def _consider_llong_from_int(self, op):
assert IS_X86_32
loc0 = self.xrm.force_allocate_reg(op)
box = op.getarg(1)
if isinstance(box, ConstInt):
loc1 = self._loc_of_const_longlong(r_longlong(box.value))
loc2 = None # unused
else:
loc1 = self.rm.make_sure_var_in_reg(box)
tmpxvar = TempVar()
loc2 = self.xrm.force_allocate_reg(tmpxvar, [op])
self.xrm.possibly_free_var(tmpxvar)
self.perform_llong(op, [loc1, loc2], loc0)
def _consider_llong_from_uint(self, op):
assert IS_X86_32
loc0 = self.xrm.force_allocate_reg(op)
loc1 = self.rm.make_sure_var_in_reg(op.getarg(1))
self.perform_llong(op, [loc1], loc0)
def _consider_math_sqrt(self, op):
loc0 = self.xrm.force_result_in_reg(op, op.getarg(1))
self.perform_math(op, [loc0], loc0)
def _consider_threadlocalref_get(self, op):
if self.translate_support_code:
offset = op.getarg(1).getint() # getarg(0) == 'threadlocalref_get'
calldescr = op.getdescr()
size = calldescr.get_result_size()
sign = calldescr.is_result_signed()
resloc = self.force_allocate_reg(op)
self.assembler.threadlocalref_get(offset, resloc, size, sign)
else:
self._consider_call(op)
def _call(self, op, arglocs, gc_level):
# we need to save registers on the stack:
#
# - at least the non-callee-saved registers
#
# - if gc_level > 0, we save also the callee-saved registers that
# contain GC pointers
#
# - gc_level == 2 for CALL_MAY_FORCE or CALL_ASSEMBLER. We
# have to save all regs anyway, in case we need to do
# cpu.force(). The issue is that grab_frame_values() would
# not be able to locate values in callee-saved registers.
#
save_all_regs = gc_level == 2
self.xrm.before_call(save_all_regs=save_all_regs)
if gc_level == 1:
gcrootmap = self.assembler.cpu.gc_ll_descr.gcrootmap
# we save all the registers for shadowstack and asmgcc for now
# --- for asmgcc too: we can't say "register x is a gc ref"
# without distinguishing call sites, which we don't do any
# more for now.
if gcrootmap: # and gcrootmap.is_shadow_stack:
save_all_regs = 2
self.rm.before_call(save_all_regs=save_all_regs)
if op.type != 'v':
if op.type == FLOAT:
resloc = self.xrm.after_call(op)
else:
resloc = self.rm.after_call(op)
else:
resloc = None
self.perform(op, arglocs, resloc)
def _consider_call(self, op, guard_not_forced=False, first_arg_index=1):
calldescr = op.getdescr()
assert isinstance(calldescr, CallDescr)
assert len(calldescr.arg_classes) == op.numargs() - first_arg_index
size = calldescr.get_result_size()
sign = calldescr.is_result_signed()
if sign:
sign_loc = imm1
else:
sign_loc = imm0
#
effectinfo = calldescr.get_extra_info()
if guard_not_forced:
gc_level = 2
elif effectinfo is None or effectinfo.check_can_collect():
gc_level = 1
else:
gc_level = 0
#
self._call(op, [imm(size), sign_loc] +
[self.loc(op.getarg(i)) for i in range(op.numargs())],
gc_level=gc_level)
def _consider_real_call(self, op):
effectinfo = op.getdescr().get_extra_info()
assert effectinfo is not None
oopspecindex = effectinfo.oopspecindex
if oopspecindex != EffectInfo.OS_NONE:
if IS_X86_32:
# support for some of the llong operations,
# which only exist on x86-32
if oopspecindex in (EffectInfo.OS_LLONG_ADD,
EffectInfo.OS_LLONG_SUB,
EffectInfo.OS_LLONG_AND,
EffectInfo.OS_LLONG_OR,
EffectInfo.OS_LLONG_XOR):
return self._consider_llong_binop_xx(op)
if oopspecindex == EffectInfo.OS_LLONG_TO_INT:
return self._consider_llong_to_int(op)
if oopspecindex == EffectInfo.OS_LLONG_FROM_INT:
return self._consider_llong_from_int(op)
if oopspecindex == EffectInfo.OS_LLONG_FROM_UINT:
return self._consider_llong_from_uint(op)
if (oopspecindex == EffectInfo.OS_LLONG_EQ or
oopspecindex == EffectInfo.OS_LLONG_NE):
return self._consider_llong_eq_ne_xx(op)
if oopspecindex == EffectInfo.OS_LLONG_LT:
if self._maybe_consider_llong_lt(op):
return
if oopspecindex == EffectInfo.OS_MATH_SQRT:
return self._consider_math_sqrt(op)
if oopspecindex == EffectInfo.OS_THREADLOCALREF_GET:
return self._consider_threadlocalref_get(op)
if oopspecindex == EffectInfo.OS_MATH_READ_TIMESTAMP:
return self._consider_math_read_timestamp(op)
self._consider_call(op)
consider_call_i = _consider_real_call
consider_call_r = _consider_real_call
consider_call_f = _consider_real_call
consider_call_n = _consider_real_call
def _consider_call_may_force(self, op):
self._consider_call(op, guard_not_forced=True)
consider_call_may_force_i = _consider_call_may_force
consider_call_may_force_r = _consider_call_may_force
consider_call_may_force_f = _consider_call_may_force
consider_call_may_force_n = _consider_call_may_force
def _consider_call_release_gil(self, op):
# [Const(save_err), func_addr, args...]
self._consider_call(op, guard_not_forced=True, first_arg_index=2)
consider_call_release_gil_i = _consider_call_release_gil
consider_call_release_gil_f = _consider_call_release_gil
consider_call_release_gil_n = _consider_call_release_gil
def consider_check_memory_error(self, op):
x = self.rm.make_sure_var_in_reg(op.getarg(0))
self.perform_discard(op, [x])
def _consider_call_assembler(self, op):
locs = self.locs_for_call_assembler(op)
self._call(op, locs, gc_level=2)
consider_call_assembler_i = _consider_call_assembler
consider_call_assembler_r = _consider_call_assembler
consider_call_assembler_f = _consider_call_assembler
consider_call_assembler_n = _consider_call_assembler
def consider_cond_call_gc_wb(self, op):
assert op.type == 'v'
args = op.getarglist()
N = len(args)
        # we force all arguments into a reg (unless they are Consts),
        # because they will be needed anyway by the following gc_load.
        # This avoids loading them twice from memory.
arglocs = [self.rm.make_sure_var_in_reg(op.getarg(i), args)
for i in range(N)]
self.perform_discard(op, arglocs)
consider_cond_call_gc_wb_array = consider_cond_call_gc_wb
def consider_cond_call(self, op):
# A 32-bit-only, asmgcc-only issue: 'cond_call_register_arguments'
# contains edi and esi, which are also in asmgcroot.py:ASM_FRAMEDATA.
# We must make sure that edi and esi do not contain GC pointers.
if IS_X86_32 and self.assembler._is_asmgcc():
for box, loc in self.rm.reg_bindings.items():
if (loc == edi or loc == esi) and box.type == REF:
self.rm.force_spill_var(box)
assert box not in self.rm.reg_bindings
#
args = op.getarglist()
assert 2 <= len(args) <= 4 + 2 # maximum 4 arguments
v_func = args[1]
assert isinstance(v_func, Const)
imm_func = self.rm.convert_to_imm(v_func)
        # Delicate ordering here.  First get the arguments' locations.
# If this also contains args[0], this returns the current
# location too.
arglocs = [self.loc(args[i]) for i in range(2, len(args))]
if op.type == 'v':
# a plain COND_CALL. Calls the function when args[0] is
# true. Often used just after a comparison operation.
gcmap = self.get_gcmap()
self.load_condition_into_cc(op.getarg(0))
resloc = None
else:
# COND_CALL_VALUE_I/R. Calls the function when args[0]
# is equal to 0 or NULL. Returns the result from the
# function call if done, or args[0] if it was not 0/NULL.
# Implemented by forcing the result to live in the same
# register as args[0], and overwriting it if we really do
# the call.
# Load the register for the result. Possibly reuse 'args[0]'.
# But the old value of args[0], if it survives, is first
# spilled away. We can't overwrite any of op.args[2:] here.
resloc = self.rm.force_result_in_reg(op, args[0],
forbidden_vars=args[2:])
# Get the gcmap here, possibly including the spilled
# location, and always excluding the 'resloc' register.
# Some more details: the only interesting case is the case
# where we're doing the call (if we are not, the gcmap is
# not used); and in this case, the gcmap must include the
# spilled location (it contains a valid GC pointer to fix
# during the call if a GC occurs), and never 'resloc'
# (it will be overwritten with the result of the call, which
# is not computed yet if a GC occurs).
#
# (Note that the spilled value is always NULL at the moment
# if the call really occurs, but it's not worth the effort to
# not list it in the gcmap and get crashes if we tweak
# COND_CALL_VALUE_R in the future)
gcmap = self.get_gcmap([resloc])
# Test the register for the result.
self.assembler.test_location(resloc)
self.assembler.guard_success_cc = rx86.Conditions['Z']
self.assembler.cond_call(gcmap, imm_func, arglocs, resloc)
consider_cond_call_value_i = consider_cond_call
consider_cond_call_value_r = consider_cond_call
def consider_call_malloc_nursery(self, op):
size_box = op.getarg(0)
assert isinstance(size_box, ConstInt)
size = size_box.getint()
# hint: try to move unrelated registers away from ecx and edx now
self.rm.spill_or_move_registers_before_call([ecx, edx])
# the result will be in ecx
self.rm.force_allocate_reg(op, selected_reg=ecx)
#
        # We need edx as a temporary, but otherwise don't save any more
        # registers.  See comments in _build_malloc_slowpath().
tmp_box = TempVar()
self.rm.force_allocate_reg(tmp_box, selected_reg=edx)
gcmap = self.get_gcmap([ecx, edx]) # allocate the gcmap *before*
self.rm.possibly_free_var(tmp_box)
#
gc_ll_descr = self.assembler.cpu.gc_ll_descr
self.assembler.malloc_cond(
gc_ll_descr.get_nursery_free_addr(),
gc_ll_descr.get_nursery_top_addr(),
size, gcmap)
def consider_call_malloc_nursery_varsize_frame(self, op):
size_box = op.getarg(0)
assert not isinstance(size_box, Const) # we cannot have a const here!
# sizeloc must be in a register, but we can free it now
# (we take care explicitly of conflicts with ecx or edx)
sizeloc = self.rm.make_sure_var_in_reg(size_box)
self.rm.spill_or_move_registers_before_call([ecx, edx]) # sizeloc safe
self.rm.possibly_free_var(size_box)
# the result will be in ecx
self.rm.force_allocate_reg(op, selected_reg=ecx)
# we need edx as a temporary
tmp_box = TempVar()
self.rm.force_allocate_reg(tmp_box, selected_reg=edx)
gcmap = self.get_gcmap([ecx, edx]) # allocate the gcmap *before*
self.rm.possibly_free_var(tmp_box)
#
gc_ll_descr = self.assembler.cpu.gc_ll_descr
self.assembler.malloc_cond_varsize_frame(
gc_ll_descr.get_nursery_free_addr(),
gc_ll_descr.get_nursery_top_addr(),
sizeloc, gcmap)
def consider_call_malloc_nursery_varsize(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
if not hasattr(gc_ll_descr, 'max_size_of_young_obj'):
raise Exception("unreachable code")
# for boehm, this function should never be called
arraydescr = op.getdescr()
length_box = op.getarg(2)
assert not isinstance(length_box, Const) # we cannot have a const here!
# can only use spill_or_move_registers_before_call() as a hint if
# we are sure that length_box stays alive and won't be freed now
# (it should always be the case, see below, but better safe than sorry)
if self.rm.stays_alive(length_box):
self.rm.spill_or_move_registers_before_call([ecx, edx])
# the result will be in ecx
self.rm.force_allocate_reg(op, selected_reg=ecx)
# we need edx as a temporary
tmp_box = TempVar()
self.rm.force_allocate_reg(tmp_box, selected_reg=edx)
gcmap = self.get_gcmap([ecx, edx]) # allocate the gcmap *before*
self.rm.possibly_free_var(tmp_box)
# length_box always survives: it's typically also present in the
# next operation that will copy it inside the new array. It's
# fine to load it from the stack too, as long as it is != ecx, edx.
lengthloc = self.rm.loc(length_box)
self.rm.possibly_free_var(length_box)
#
itemsize = op.getarg(1).getint()
maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2)
self.assembler.malloc_cond_varsize(
op.getarg(0).getint(),
gc_ll_descr.get_nursery_free_addr(),
gc_ll_descr.get_nursery_top_addr(),
lengthloc, itemsize, maxlength, gcmap, arraydescr)
def get_gcmap(self, forbidden_regs=[], noregs=False):
frame_depth = self.fm.get_frame_depth()
gcmap = allocate_gcmap(self.assembler, frame_depth, JITFRAME_FIXED_SIZE)
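        # The gcmap is a bitmap with one bit per word of the jitframe:
        # registers map to the first JITFRAME_FIXED_SIZE word indexes and
        # spilled frame locations follow; a set bit marks a live GC pointer.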
for box, loc in self.rm.reg_bindings.iteritems():
if loc in forbidden_regs:
continue
if box.type == REF and self.rm.is_still_alive(box):
assert not noregs
assert isinstance(loc, RegLoc)
val = gpr_reg_mgr_cls.all_reg_indexes[loc.value]
gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8))
for box, loc in self.fm.bindings.iteritems():
if box.type == REF and self.rm.is_still_alive(box):
assert isinstance(loc, FrameLoc)
val = loc.position + JITFRAME_FIXED_SIZE
gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8))
return gcmap
def consider_gc_store(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
size_box = op.getarg(3)
assert isinstance(size_box, ConstInt)
size = size_box.value
assert size >= 1
if size == 1:
need_lower_byte = True
else:
need_lower_byte = False
value_loc = self.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=need_lower_byte)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
self.perform_discard(op, [base_loc, ofs_loc, value_loc,
imm(size)])
def consider_gc_store_indexed(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
scale_box = op.getarg(3)
offset_box = op.getarg(4)
size_box = op.getarg(5)
assert isinstance(scale_box, ConstInt)
assert isinstance(offset_box, ConstInt)
assert isinstance(size_box, ConstInt)
factor = scale_box.value
offset = offset_box.value
size = size_box.value
assert size >= 1
if size == 1:
need_lower_byte = True
else:
need_lower_byte = False
value_loc = self.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=need_lower_byte)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
self.perform_discard(op, [base_loc, ofs_loc, value_loc,
imm(factor), imm(offset), imm(size)])
def consider_increment_debug_counter(self, op):
base_loc = self.loc(op.getarg(0))
self.perform_discard(op, [base_loc])
def _consider_gc_load(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
result_loc = self.force_allocate_reg(op)
size_box = op.getarg(2)
assert isinstance(size_box, ConstInt)
nsize = size_box.value # negative for "signed"
size_loc = imm(abs(nsize))
if nsize < 0:
sign_loc = imm1
else:
sign_loc = imm0
self.perform(op, [base_loc, ofs_loc, size_loc, sign_loc], result_loc)
consider_gc_load_i = _consider_gc_load
consider_gc_load_r = _consider_gc_load
consider_gc_load_f = _consider_gc_load
def _consider_gc_load_indexed(self, op):
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
result_loc = self.force_allocate_reg(op)
scale_box = op.getarg(2)
offset_box = op.getarg(3)
size_box = op.getarg(4)
assert isinstance(scale_box, ConstInt)
assert isinstance(offset_box, ConstInt)
assert isinstance(size_box, ConstInt)
scale = scale_box.value
offset = offset_box.value
nsize = size_box.value # negative for "signed"
size_loc = imm(abs(nsize))
if nsize < 0:
sign_loc = imm1
else:
sign_loc = imm0
locs = [base_loc, ofs_loc, imm(scale), imm(offset), size_loc, sign_loc]
self.perform(op, locs, result_loc)
consider_gc_load_indexed_i = _consider_gc_load_indexed
consider_gc_load_indexed_r = _consider_gc_load_indexed
consider_gc_load_indexed_f = _consider_gc_load_indexed
def consider_int_is_true(self, op):
# doesn't need arg to be in a register
argloc = self.loc(op.getarg(0))
resloc = self.force_allocate_reg_or_cc(op)
self.perform(op, [argloc], resloc)
consider_int_is_zero = consider_int_is_true
def _consider_same_as(self, op):
argloc = self.loc(op.getarg(0))
resloc = self.force_allocate_reg(op)
self.perform(op, [argloc], resloc)
consider_cast_ptr_to_int = _consider_same_as
consider_cast_int_to_ptr = _consider_same_as
consider_same_as_i = _consider_same_as
consider_same_as_r = _consider_same_as
consider_same_as_f = _consider_same_as
def consider_load_from_gc_table(self, op):
resloc = self.rm.force_allocate_reg(op)
self.perform(op, [], resloc)
def consider_int_force_ge_zero(self, op):
argloc = self.make_sure_var_in_reg(op.getarg(0))
resloc = self.force_allocate_reg(op, [op.getarg(0)])
self.perform(op, [argloc], resloc)
def consider_copystrcontent(self, op):
self._consider_copystrcontent(op, is_unicode=False)
def consider_copyunicodecontent(self, op):
self._consider_copystrcontent(op, is_unicode=True)
def _consider_copystrcontent(self, op, is_unicode):
# compute the source address
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(args[0], args)
ofs_loc = self.rm.make_sure_var_in_reg(args[2], args)
assert args[0] is not args[1] # forbidden case of aliasing
srcaddr_box = TempVar()
forbidden_vars = [args[1], args[3], args[4], srcaddr_box]
srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box, forbidden_vars)
self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc,
is_unicode=is_unicode)
# compute the destination address
base_loc = self.rm.make_sure_var_in_reg(args[1], forbidden_vars)
ofs_loc = self.rm.make_sure_var_in_reg(args[3], forbidden_vars)
forbidden_vars = [args[4], srcaddr_box]
dstaddr_box = TempVar()
dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, forbidden_vars)
self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc,
is_unicode=is_unicode)
# compute the length in bytes
length_box = args[4]
length_loc = self.loc(length_box)
if is_unicode:
forbidden_vars = [srcaddr_box, dstaddr_box]
bytes_box = TempVar()
bytes_loc = self.rm.force_allocate_reg(bytes_box, forbidden_vars)
scale = self._get_unicode_item_scale()
if not (isinstance(length_loc, ImmedLoc) or
isinstance(length_loc, RegLoc)):
self.assembler.mov(length_loc, bytes_loc)
length_loc = bytes_loc
self.assembler.load_effective_addr(length_loc, 0, scale, bytes_loc)
length_box = bytes_box
length_loc = bytes_loc
# call memcpy()
self.rm.before_call()
self.xrm.before_call()
self.assembler.simple_call_no_collect(imm(self.assembler.memcpy_addr),
[dstaddr_loc, srcaddr_loc, length_loc])
self.rm.possibly_free_var(length_box)
self.rm.possibly_free_var(dstaddr_box)
self.rm.possibly_free_var(srcaddr_box)
def _gen_address_inside_string(self, baseloc, ofsloc, resloc, is_unicode):
if is_unicode:
ofs_items, _, _ = symbolic.get_array_token(rstr.UNICODE,
self.translate_support_code)
scale = self._get_unicode_item_scale()
else:
ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
self.translate_support_code)
assert itemsize == 1
ofs_items -= 1 # for the extra null character
scale = 0
self.assembler.load_effective_addr(ofsloc, ofs_items, scale,
resloc, baseloc)
def _get_unicode_item_scale(self):
_, itemsize, _ = symbolic.get_array_token(rstr.UNICODE,
self.translate_support_code)
if itemsize == 4:
return 2
elif itemsize == 2:
return 1
else:
raise AssertionError("bad unicode item size")
def _consider_math_read_timestamp(self, op):
# hint: try to move unrelated registers away from eax and edx now
self.rm.spill_or_move_registers_before_call([eax, edx])
tmpbox_high = TempVar()
self.rm.force_allocate_reg(tmpbox_high, selected_reg=eax)
if longlong.is_64_bit:
# on 64-bit, use rax as temporary register and returns the
# result in rdx
result_loc = self.rm.force_allocate_reg(op,
selected_reg=edx)
self.perform_math(op, [], result_loc)
else:
# on 32-bit, use both eax and edx as temporary registers,
# use a temporary xmm register, and returns the result in
# another xmm register.
tmpbox_low = TempVar()
self.rm.force_allocate_reg(tmpbox_low, selected_reg=edx)
xmmtmpbox = TempVar()
xmmtmploc = self.xrm.force_allocate_reg(xmmtmpbox)
result_loc = self.xrm.force_allocate_reg(op)
self.perform_math(op, [xmmtmploc], result_loc)
self.xrm.possibly_free_var(xmmtmpbox)
self.rm.possibly_free_var(tmpbox_low)
self.rm.possibly_free_var(tmpbox_high)
def compute_hint_frame_locations(self, operations):
# optimization only: fill in the 'hint_frame_locations' dictionary
# of 'fm' based on the JUMP at the end of the loop, by looking
# at where we would like the boxes to be after the jump.
op = operations[-1]
if op.getopnum() != rop.JUMP:
return
self.final_jump_op = op
descr = op.getdescr()
assert isinstance(descr, TargetToken)
if descr._ll_loop_code != 0:
# if the target LABEL was already compiled, i.e. if it belongs
# to some already-compiled piece of code
self._compute_hint_frame_locations_from_descr(descr)
#else:
# The loop ends in a JUMP going back to a LABEL in the same loop.
# We cannot fill 'hint_frame_locations' immediately, but we can
        # wait until the corresponding consider_label() to know where
        # we would like the boxes to be after the jump.
def _compute_hint_frame_locations_from_descr(self, descr):
arglocs = descr._x86_arglocs
jump_op = self.final_jump_op
assert len(arglocs) == jump_op.numargs()
for i in range(jump_op.numargs()):
box = jump_op.getarg(i)
if not isinstance(box, Const):
loc = arglocs[i]
if isinstance(loc, FrameLoc):
self.fm.hint_frame_pos[box] = self.fm.get_loc_index(loc)
def consider_jump(self, op):
assembler = self.assembler
assert self.jump_target_descr is None
descr = op.getdescr()
assert isinstance(descr, TargetToken)
arglocs = descr._x86_arglocs
self.jump_target_descr = descr
# Part about non-floats
src_locations1 = []
dst_locations1 = []
# Part about floats
src_locations2 = []
dst_locations2 = []
# Build the four lists
for i in range(op.numargs()):
box = op.getarg(i)
src_loc = self.loc(box)
dst_loc = arglocs[i]
if box.type != FLOAT and not box.is_vector():
src_locations1.append(src_loc)
dst_locations1.append(dst_loc)
else:
src_locations2.append(src_loc)
dst_locations2.append(dst_loc)
# Do we have a temp var?
if IS_X86_64:
tmpreg = X86_64_SCRATCH_REG
xmmtmp = X86_64_XMM_SCRATCH_REG
else:
tmpreg = None
xmmtmp = None
# Do the remapping
remap_frame_layout_mixed(assembler,
src_locations1, dst_locations1, tmpreg,
src_locations2, dst_locations2, xmmtmp)
self.possibly_free_vars_for_op(op)
assembler.closing_jump(self.jump_target_descr)
def consider_enter_portal_frame(self, op):
self.assembler.enter_portal_frame(op)
def consider_leave_portal_frame(self, op):
self.assembler.leave_portal_frame(op)
def consider_jit_debug(self, op):
pass
def _consider_force_spill(self, op):
# This operation is used only for testing
self.force_spill_var(op.getarg(0))
def consider_force_token(self, op):
# XXX for now we return a regular reg
#self.rm.force_allocate_frame_reg(op)
self.assembler.force_token(self.rm.force_allocate_reg(op))
def consider_label(self, op):
descr = op.getdescr()
assert isinstance(descr, TargetToken)
inputargs = op.getarglist()
arglocs = [None] * len(inputargs)
#
# we use force_spill() on the boxes that are not going to be really
# used any more in the loop, but that are kept alive anyway
# by being in a next LABEL's or a JUMP's argument or fail_args
# of some guard
position = self.rm.position
for arg in inputargs:
assert not isinstance(arg, Const)
if self.last_real_usage.get(arg, -1) <= position:
self.force_spill_var(arg)
#
# we need to make sure that no variable is stored in ebp
for arg in inputargs:
if self.loc(arg) is ebp:
loc2 = self.fm.loc(arg)
self.assembler.mc.MOV(loc2, ebp)
self.rm.bindings_to_frame_reg.clear()
#
for i in range(len(inputargs)):
arg = inputargs[i]
assert not isinstance(arg, Const)
loc = self.loc(arg)
assert loc is not ebp
arglocs[i] = loc
if isinstance(loc, RegLoc):
self.fm.mark_as_free(arg)
#
# if we are too close to the start of the loop, the label's target may
# get overridden by redirect_call_assembler(). (rare case)
self.flush_loop()
#
descr._x86_arglocs = arglocs
descr._ll_loop_code = self.assembler.mc.get_relative_pos()
descr._x86_clt = self.assembler.current_clt
self.assembler.target_tokens_currently_compiling[descr] = None
self.possibly_free_vars_for_op(op)
self.assembler.label()
#
# if the LABEL's descr is precisely the target of the JUMP at the
# end of the same loop, i.e. if what we are compiling is a single
# loop that ends up jumping to this LABEL, then we can now provide
# the hints about the expected position of the spilled variables.
jump_op = self.final_jump_op
if jump_op is not None and jump_op.getdescr() is descr:
self._compute_hint_frame_locations_from_descr(descr)
def consider_guard_not_forced_2(self, op):
self.rm.before_call(op.getfailargs(), save_all_regs=True)
self.xrm.before_call(op.getfailargs(), save_all_regs=True)
fail_locs = [self.loc(v) for v in op.getfailargs()]
self.assembler.store_force_descr(op, fail_locs,
self.fm.get_frame_depth())
self.possibly_free_vars(op.getfailargs())
def consider_keepalive(self, op):
pass
def _scaled_addr(self, index_loc, itemsize_loc,
base_loc, ofs_loc):
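        # Builds an x86 address of the form 'base + index*itemsize + ofs'.
        # A constant index is folded into the displacement; a register
        # index relies on hardware scaling, which only supports item
        # sizes of 1, 2, 4 or 8 (hence the assert below).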
assert isinstance(itemsize_loc, ImmedLoc)
itemsize = itemsize_loc.value
if isinstance(index_loc, ImmedLoc):
temp_loc = imm(index_loc.value * itemsize)
shift = 0
else:
assert valid_addressing_size(itemsize), "rewrite did not correctly handle shift/mul!"
temp_loc = index_loc
shift = get_scale(itemsize)
assert isinstance(ofs_loc, ImmedLoc)
return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value)
def consider_zero_array(self, op):
_, baseofs, _ = unpack_arraydescr(op.getdescr())
length_box = op.getarg(2)
scale_box = op.getarg(3)
assert isinstance(scale_box, ConstInt)
start_itemsize = scale_box.value
len_scale_box = op.getarg(4)
assert isinstance(len_scale_box, ConstInt)
len_itemsize = len_scale_box.value
# rewrite handles the mul of a constant length box
constbytes = -1
if isinstance(length_box, ConstInt):
constbytes = length_box.getint()
args = op.getarglist()
base_loc = self.rm.make_sure_var_in_reg(args[0], args)
startindex_loc = self.rm.make_sure_var_in_reg(args[1], args)
if 0 <= constbytes <= 16 * 8:
if IS_X86_64:
null_loc = X86_64_XMM_SCRATCH_REG
else:
null_box = TempVar()
null_loc = self.xrm.force_allocate_reg(null_box)
self.xrm.possibly_free_var(null_box)
self.perform_discard(op, [base_loc, startindex_loc,
imm(constbytes), imm(start_itemsize),
imm(baseofs), null_loc])
else:
# base_loc and startindex_loc are in two regs here (or they are
# immediates). Compute the dstaddr_loc, which is the raw
# address that we will pass as first argument to memset().
# It can be in the same register as either one, but not in
# args[2], because we're still needing the latter.
dstaddr_box = TempVar()
dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, [args[2]])
itemsize_loc = imm(start_itemsize)
dst_addr = self._scaled_addr(startindex_loc, itemsize_loc,
base_loc, imm(baseofs))
self.assembler.mc.LEA(dstaddr_loc, dst_addr)
#
if constbytes >= 0:
length_loc = imm(constbytes)
else:
# load length_loc in a register different than dstaddr_loc
length_loc = self.rm.make_sure_var_in_reg(length_box,
[dstaddr_box])
if len_itemsize > 1:
# we need a register that is different from dstaddr_loc,
# but which can be identical to length_loc (as usual,
# only if the length_box is not used by future operations)
bytes_box = TempVar()
bytes_loc = self.rm.force_allocate_reg(bytes_box,
[dstaddr_box])
len_itemsize_loc = imm(len_itemsize)
b_adr = self._scaled_addr(length_loc, len_itemsize_loc, imm0, imm0)
self.assembler.mc.LEA(bytes_loc, b_adr)
length_box = bytes_box
length_loc = bytes_loc
#
# call memset()
self.rm.before_call()
self.xrm.before_call()
self.assembler.simple_call_no_collect(
imm(self.assembler.memset_addr),
[dstaddr_loc, imm0, length_loc])
self.rm.possibly_free_var(length_box)
self.rm.possibly_free_var(dstaddr_box)
def not_implemented_op(self, op):
not_implemented("not implemented operation: %s" % op.getopname())
oplist = [RegAlloc.not_implemented_op] * rop._LAST
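# Fill the dispatch table: every 'consider_XXX' method defined on RegAlloc
# (or on VectorRegallocMixin) becomes the handler for operation rop.XXX.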
import itertools
iterate = itertools.chain(RegAlloc.__dict__.iteritems(),
VectorRegallocMixin.__dict__.iteritems())
for name, value in iterate:
if name.startswith('consider_'):
name = name[len('consider_'):]
num = getattr(rop, name.upper())
oplist[num] = value
def get_ebp_ofs(base_ofs, position):
# Argument is a frame position (0, 1, 2...).
# Returns (ebp+20), (ebp+24), (ebp+28)...
# i.e. the n'th word beyond the fixed frame size.
return base_ofs + WORD * (position + JITFRAME_FIXED_SIZE)
def not_implemented(msg):
msg = '[x86/regalloc] %s\n' % msg
if we_are_translated():
llop.debug_print(lltype.Void, msg)
raise NotImplementedError(msg)
|
oblique-labs/pyVM
|
rpython/jit/backend/x86/regalloc.py
|
Python
|
mit
| 66,392
|
import requests
import json
import traceback
import os
import time
#Simple log function so we can log events
def log(text):
logFile = open("netflixScrapper.log", "ab+")
logFile.write(text+"\n")
logFile.close()
log("START")
#Use basic cookie authentication from visiting http://netflix.com
session = requests.Session()
response = session.get('http://netflix.com')
d= session.cookies.get_dict()
headers = {
'Pragma': 'no-cache',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36',
'Accept': '*/*',
'Referer': 'http://www.netflix.com/watch/',
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
}
log("PROGRESS: Saved cookies for http://netflix.com")
#Initially searched through all IDs in
#70000000-81000000
#for both movies and shows
#some movie IDs are less than 70000000, however
netID=70949421
log("PROGRESS: Started at ID #"+str(netID))
while netID<81000000:
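    # Walk the candidate ID space one ID at a time: query the series
    # endpoint first and fall back to the movies endpoint on a 404.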
try:
        #First try to find a show (the code below falls back to movies)
response = requests.get("https://api-global.netflix.com/catalog/titles/series/"+str(netID)+"?country=US&esn=NFAPPL-02&expand=%40seasons%2C%40episodes%2C%40box_art%2F%4088pix_w%2C%40box_art%2F%40350pix_w%2C%40cast%2C%40creators%2C%40queue_item%2C%40directors%2C%40formats%2C%40default_audio_subtitles%2C%40subtitle_languages%2C%40short_synopsis%2C%40thumbnails_hd&filters=http%3A%2F%2Fapi.netflix.com%2Fcategories%2Ftitle_formats%2Finstant&languages=en-US&output=json&routing=reject", headers=headers, cookies=d)
print response.status_code
if response.status_code== 403:
#Use basic cookie authentication from visiting http://netflix.com
session = requests.Session()
response = session.get('http://netflix.com')
d = session.cookies.get_dict()
headers = {
'Pragma': 'no-cache',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36',
'Accept': '*/*',
'Referer': 'http://www.netflix.com/watch/',
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
}
else:
#if the show is found
if response.status_code!= 404:
IDmatches=open("showFiles/"+str(netID)+".txt", "w")
IDmatches.write((response.text).encode('ascii', 'ignore'))
IDmatches.close()
        #If it didn't find a show at that ID, check whether it is a movie
else:
response = requests.get("https://api-global.netflix.com/catalog/titles/movies/"+str(netID)+"?country=US&esn=NFAPPL-02&expand=%40seasons%2C%40episodes%2C%40box_art%2F%4088pix_w%2C%40box_art%2F%40350pix_w%2C%40cast%2C%40creators%2C%40queue_item%2C%40directors%2C%40formats%2C%40default_audio_subtitles%2C%40subtitle_languages%2C%40short_synopsis%2C%40thumbnails_hd&filters=http%3A%2F%2Fapi.netflix.com%2Fcategories%2Ftitle_formats%2Finstant&languages=en-US&output=json&routing=reject", headers=headers, cookies=d)
if response.status_code!= 404:
IDmatches=open("movieFiles/"+str(netID)+".txt", "w")
IDmatches.write((response.text).encode('ascii', 'ignore'))
IDmatches.close()
netID+=1
print "\n"+str(netID)
except:
print traceback.format_exc()
log("ERROR: "+str(traceback.format_exc())+" Occurred when grabbing ID #"+str(netID))
time.sleep(10)
|
Healdb/healdb.github.io
|
grabNetflixFiles.py
|
Python
|
mit
| 3,849
|
from rest_framework import routers
from imagefy.wishes.views import OfferViewSet, WishViewSet
router = routers.SimpleRouter()
router.register(r'wishes', WishViewSet)
router.register(r'offers', OfferViewSet)
|
dvl/imagefy-web
|
imagefy/api.py
|
Python
|
mit
| 209
|
# -*- coding: utf-8 -*-
#Separate out the strategies
import os
import time
from decimal import Decimal
import csv
import copy
def moneyfmt(value, places=2, curr='', sep=',', dp='.',
pos='', neg='-', trailneg=''):
"""Decimal を通貨表現の文字列に変換します。
places: 小数点以下の値を表すのに必要な桁数
curr: 符号の前に置く通貨記号 (オプションで、空でもかまいません)
sep: 桁のグループ化に使う記号、オプションです (コンマ、ピリオド、
スペース、または空)
dp: 小数点 (コンマまたはピリオド)
小数部がゼロの場合には空にできます。
pos: 正数の符号オプション: '+', 空白または空文字列
neg: 負数の符号オプション: '-', '(', 空白または空文字列
trailneg:後置マイナス符号オプション: '-', ')', 空白または空文字列
>>> d = Decimal('-1234567.8901')
>>> moneyfmt(d, curr='')
'-1,234,567.89'
>>> moneyfmt(d, places=0, sep='.', dp='', neg='', trailneg='-')
'1.234.568-'
>>> moneyfmt(d, curr='', neg='(', trailneg=')')
'(1,234,567.89)'
>>> moneyfmt(Decimal(123456789), sep=' ')
'123 456 789.00'
>>> moneyfmt(Decimal('-0.02'), neg='<', trailneg='>')
'<0.02>'
"""
    q = Decimal(10) ** -places       # 2 places --> '0.01'
sign, digits, exp = value.quantize(q).as_tuple()
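    # quantize() rounds the value to the requested number of places;
    # as_tuple() then yields the sign flag and the individual digits.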
result = []
digits = map(str, digits)
build, next = result.append, digits.pop
if sign:
build(trailneg)
for i in range(places):
build(next() if digits else '0')
build(dp)
if not digits:
build('0')
i = 0
while digits:
build(next())
i += 1
if i == 3 and digits:
i = 0
build(sep)
build(curr)
build(neg if sign else pos)
return ''.join(reversed(result))
class ItemPrototype(object):
def __init__(self, code, name, price):
self.item_code = code
self.item_name = name
self.price = price
def getCode(self):
return self.item_code
def getName(self):
return self.item_name
def getPrice(self):
return self.price
def setDetail(self, detail):
self.detail = detail
def getDetail(self):
return self.detail
def dumpData(self):
print self.getName()
        print 'Item code: ' + self.getCode()
print '\\' + moneyfmt(Decimal(self.getPrice()), 0, dp="") + '-'
print self.detail.comment
    #Create a new instance by cloning the current one (Prototype pattern).
    #The original called a nonexistent _clone() helper; copy.deepcopy is
    #used here as the Python equivalent.
    def newInstance(self):
        new_instance = copy.deepcopy(self)
        return new_instance
#Corresponds to Strategy
class ReadItemDataStrategy(object):
def __init__(self, filename):
self.filename = filename
    #Reads the data file and returns an array of objects
    #Method provided to the Context
    #@param string data file name
    #@return array of data objects
def getData(self):
if os.access(self.filename, os.R_OK) == False:
raise Exception, 'file [' + self.getFilename() + '] is not readable !'
return self.readData(self.getFilename())
    #Returns the file name
    #@return file name
def getFilename(self):
return self.filename
#Reads fixed-length data
#Corresponds to ConcreteStrategy
class ReadFixedLengthDataStrategy(ReadItemDataStrategy):
    #Reads the data file and returns an array of objects
    #@param string data file name
    #@return array of data objects
def readData(self, filename):
return_value = []
with file(filename) as lines:
reader = csv.reader(lines)
try:
next(lines)
except Exception as e:
return return_value
for line in reader:
item_name = line[0]
item_code = line[1]
price = int(line[2])
release_date = line[3]
                #Build the object for the return value
obj = type('lamdbaobject', (object,), {})()
obj.item_name = item_name
obj.item_code = item_code
obj.price = price
obj.release_date = time.strptime(release_date, '%Y%m%d')
return_value.append(obj)
return return_value
class ReadTabSeparatedDataStrategy(ReadItemDataStrategy):
    #Reads the data file and returns an array of objects
    #@param string data file name
    #@return array of data objects
def readData(self, filename):
return_value = []
with file(filename) as lines:
try:
next(lines)
except Exception as e:
return return_value
for line in lines:
item_list = line.split('\t')
                #Build the object for the return value
obj = type('lamdbaobject', (object,), {})()
obj.item_code = item_list.pop(0)
obj.item_name = item_list.pop(0)
obj.price = int(item_list.pop(0))
obj.release_date = time.strptime(item_list.pop(0).strip(), '%Y/%m/%d')
return_value.append(obj)
return return_value
#Corresponds to Context
class ItemDataContext(object):
    #Constructor
    #@param ReadItemDataStrategy a ReadItemDataStrategy object
def __init__(self, strategy):
self.strategy = strategy
    #Returns the item data as an array of objects
    #@return array of data objects
def getItemData(self):
return self.strategy.getData()
def dumpData(data):
for object in data:
        print 'Item code: ' + object.item_code
        print '\\' + moneyfmt(Decimal(object.price), 0, dp="") + '-'
        print time.strftime('%Y/%m/%d', object.release_date) + ' released'
if __name__ == "__main__":
    #Read fixed-length data
strategy1 = ReadFixedLengthDataStrategy('fixed_length_data.txt')
context1 = ItemDataContext(strategy1)
dumpData(context1.getItemData())
    #Read tab-separated data
strategy2 = ReadTabSeparatedDataStrategy('tab_separated_data.txt')
context2 = ItemDataContext(strategy2)
dumpData(context2.getItemData())
|
t10471/python
|
practice/src/design_pattern/Strategy.py
|
Python
|
mit
| 6,535
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
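	# Expand $(KEY) references in 'token' using values from 'config',
	# scanning left to right; expansion stops at the first unknown key.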
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','jp.msmc.imagecollectionview.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
path = os.path.basename(js_file)
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
method = compiler.compile_commonjs_file(path,js_file)
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
method += '\treturn filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);'
f = os.path.join(cwd,'Classes','JpMsmcImagecollectionviewModuleAssets.m')
c = open(f).read()
templ_search = ' moduleAsset\n{\n'
idx = c.find(templ_search) + len(templ_search)
before = c[0:idx]
after = """
}
@end
"""
newc = before + method + after
if newc!=c:
x = open(f,'w')
x.write(newc)
x.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	path = os.path.join(cwd,'manifest')
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README','jp.msmc.imagecollectionview.js']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e)==2 and e[1]=='.pyc':continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
for dn in ('assets','example','platform'):
if os.path.exists(dn):
zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
atsusy/TiImageCollectionView
|
build.py
|
Python
|
mit
| 6,549
|
import pytest
from marshmallow import Schema, fields
from flask_resty import Api, ApiView
from flask_resty.fields import DelimitedList
from flask_resty.testing import assert_response
# -----------------------------------------------------------------------------
@pytest.fixture
def schemas():
class NameSchema(Schema):
name = fields.String(required=True)
class NameListSchema(Schema):
names = fields.List(fields.String(), data_key="name", required=True)
class NameDelimitedListSchema(Schema):
names = DelimitedList(fields.String(), data_key="name", required=True)
class NameDefaultSchema(Schema):
name = fields.String(missing="foo")
return {
"name": NameSchema(),
"name_list": NameListSchema(),
"name_delimited_list": NameDelimitedListSchema(),
"name_default": NameDefaultSchema(),
}
@pytest.fixture
def views(app, schemas):
class NameView(ApiView):
args_schema = schemas["name"]
def get(self):
return self.make_response(self.request_args["name"])
class NameListView(ApiView):
args_schema = schemas["name_list"]
def get(self):
return self.make_response(self.request_args["names"])
class NameDelimitedListView(ApiView):
args_schema = schemas["name_delimited_list"]
def get(self):
return self.make_response(self.request_args["names"])
class NameDefaultView(ApiView):
args_schema = schemas["name_default"]
def get(self):
return self.make_response(self.request_args["name"])
return {
"name": NameView,
"names": NameListView,
"names_delimited": NameDelimitedListView,
"name_default": NameDefaultView,
}
@pytest.fixture(autouse=True)
def routes(app, views):
api = Api(app)
api.add_resource("/name", views["name"])
api.add_resource("/names", views["names"])
api.add_resource("/names_delimited", views["names_delimited"])
api.add_resource("/name_default", views["name_default"])
# -----------------------------------------------------------------------------
def test_get_name_one(client):
response = client.get("/name?name=foo")
assert_response(response, 200, "foo")
def test_get_name_extra(client):
response = client.get("/name?name=foo&ignored=bar")
assert_response(response, 200, "foo")
def test_get_names_one(client):
response = client.get("/names?name=foo")
assert_response(response, 200, ["foo"])
def test_get_names_many(client):
response = client.get("/names?name=foo&name=bar")
assert_response(response, 200, ["foo", "bar"])
def test_get_names_many_delimited(client):
response = client.get("/names_delimited?name=foo,bar")
assert_response(response, 200, ["foo", "bar"])
def test_get_name_default(client):
response = client.get("/name_default")
assert_response(response, 200, "foo")
def test_get_name_default_specified(client):
response = client.get("/name_default?name=bar")
assert_response(response, 200, "bar")
def test_caching(app, views):
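    # request_args should be parsed once per view instance and memoized,
    # so repeated access returns the identical cached object.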
with app.test_request_context("/?name=foo"):
name_view = views["name"]()
names_view = views["names"]()
name_view_request_args = name_view.request_args
names_view_request_args = names_view.request_args
assert name_view_request_args == {"name": "foo"}
assert names_view_request_args == {"names": ["foo"]}
assert name_view.request_args is name_view_request_args
assert names_view.request_args is names_view_request_args
# -----------------------------------------------------------------------------
def test_error_get_name_missing(client):
response = client.get("/name")
assert_response(
response,
422,
[{"code": "invalid_parameter", "source": {"parameter": "name"}}],
)
def test_error_get_names_missing(client):
response = client.get("/names")
assert_response(
response,
422,
[{"code": "invalid_parameter", "source": {"parameter": "name"}}],
)
|
4Catalyzer/flask-jsonapiview
|
tests/test_args.py
|
Python
|
mit
| 4,107
|
# Copyright 2010-2012 the SGC project developers.
# See the LICENSE file at the top-level directory of this distribution
# and at http://program.sambull.org/sgc/license.html.
"""
Dialog window, creates a popup window.
"""
import pygame.mouse
from pygame.locals import *
from pygame import draw
from ._locals import *
from .base_widget import Simple
class Dialog(Simple):
"""
Dialog Window
If ``surf`` is not given, window will be large enough to fit the
given widget.
Images:
'close_off': The close button in the normal state.
'close_over': The close button when the cursor is hovering over.
"""
_can_focus = True
_modal = True
_layered = True
_extra_images = {"close_off": ((0, 16), (0, 16)), "close_over": "close_off"}
_settings_default = {"title": None, "widget": None, "col_bg": (240,240,240),
"col_border": (50,40,90), "show_button": True}
_drag = _over = False
def _config(self, **kwargs):
"""
widget: Widget that should be displayed in the dialog window.
title: ``str`` Text to display in the title bar.
col_border: ``tuple`` (r,g,b) Window decoration colour.
col_bg: ``tuple`` (r,g,b) Background colour.
modal: ``bool`` ``True`` if window should be modal.
Defaults to ``True``.
"""
if "widget" in kwargs:
self._settings["widget"] = kwargs["widget"]
self._settings["widget"]._parent = self
self._settings["widget"].pos = (2, 20)
if not hasattr(self, "image"):
r = self._settings["widget"].rect
self._create_base_images((r.w + 4, r.h + 22))
if "modal" in kwargs:
self._modal = kwargs["modal"]
if "show_button" in kwargs:
self._settings["show_button"] = kwargs["show_button"]
if not kwargs["show_button"]:
self._images["close_over"]._show = False
self._images["close_off"]._show = False
for key in ("title", "col_border", "col_bg"):
if key in kwargs:
self._settings[key] = kwargs[key]
def _draw_base(self):
# Draw window
inner_rect = Rect((2,20), (self.rect.w-4,self.rect.h-22))
self._images["image"].fill(self._settings["col_border"])
self._images["image"].fill(self._settings["col_bg"], inner_rect)
def _draw_close_off(self, image, size):
image.fill(self._settings["col_border"])
draw.circle(image, (140,6,15), (size[0]//2, size[1]//2), 8)
draw.line(image, (0,0,1), (5,5), (11,11), 3)
draw.line(image, (0,0,1), (5,11), (11,5), 3)
def _draw_close_over(self, image, size):
image.fill(self._settings["col_border"])
draw.circle(image, (234,14,50), (size[0]//2, size[1]//2), 8)
draw.line(image, (0,0,1), (5,5), (11,11), 5)
draw.line(image, (0,0,1), (5,11), (11,5), 5)
def _draw_final(self):
self._images["close_off"].pos = (2,2)
self._images["close_over"].pos = (2,2)
self._set_over()
if self._settings["title"]:
t = Simple(Font["widget"].render(
self._settings["title"], True, Font.col))
t.rect.x = self._images["close_off"].rect.right
self._images["image"].blit(t.image, t.pos)
def _set_over(self, over=None):
"""Set over state and show/hide close button images."""
if self._settings["show_button"]:
if over is not None: self._over = over
self._images["close_over"]._show = self._over
self._images["close_off"]._show = not self._over
def on_close(self):
"""
Called when the dialog window is closed.
Emits an event with attribute 'gui_type' == "close".
Override this function to use as a callback handler.
"""
pygame.event.post(self._create_event("close"))
def update(self, time):
"""Update dialog window each frame."""
r = self._images["close_off"].rect_abs
if not self._over and r.collidepoint(pygame.mouse.get_pos()):
# Display over button
self._set_over(True)
elif self._over and not r.collidepoint(pygame.mouse.get_pos()):
# Display normal button
self._set_over(False)
self._settings["widget"].update(time)
self.image.blit(self._settings["widget"].image,
self._settings["widget"].pos)
def _event(self, event):
"""Respond to events."""
minus_pos = lambda p1, p2: (p1[0] - p2[0], p1[1] - p2[1])
if event.type == MOUSEBUTTONDOWN and event.button == 1 and \
self.rect.collidepoint(event.pos) and event.pos[1] < self.rect.y + 20:
# Clicking title bar of window
self._settings["widget"]._focus_exit()
if (self._settings["show_button"] and
self._images["close_off"].rect_abs.collidepoint(event.pos)):
# Close button
self.remove()
self.on_close()
else:
# Initialise window drag
self._offset = minus_pos(event.pos, self.pos)
self._drag = True
elif event.type == MOUSEMOTION and self._drag:
# Move window
self.pos = minus_pos(event.pos, self._offset)
elif event.type == MOUSEBUTTONUP and event.button == 1 and self._drag:
# Stop moving window
self.pos = minus_pos(event.pos, self._offset)
self._drag = False
else:
self._settings["widget"]._event(event)
|
OneOneFour/ICSP_Monte_Carlo
|
sgc/sgc/widgets/dialog.py
|
Python
|
mit
| 5678
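The title-bar drag in `Dialog._event` above reduces to one pattern: cache the cursor-to-window offset on MOUSEBUTTONDOWN, re-derive the window position from it on every MOUSEMOTION, and clear it on MOUSEBUTTONUP. A self-contained sketch of that pattern in plain pygame (not the sgc API; the 20px title-bar height and colours mirror the widget above):
import pygame

pygame.init()
screen = pygame.display.set_mode((400, 300))
window = pygame.Rect(50, 50, 200, 120)  # stand-in for the dialog rect
offset = None                           # cursor-to-window offset while dragging

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif (event.type == pygame.MOUSEBUTTONDOWN and event.button == 1
                and window.collidepoint(event.pos)
                and event.pos[1] < window.y + 20):  # title bar only
            offset = (event.pos[0] - window.x, event.pos[1] - window.y)
        elif event.type == pygame.MOUSEMOTION and offset:
            window.topleft = (event.pos[0] - offset[0], event.pos[1] - offset[1])
        elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
            offset = None
    screen.fill((0, 0, 0))
    pygame.draw.rect(screen, (240, 240, 240), window)
    pygame.draw.rect(screen, (50, 40, 90), (window.x, window.y, window.w, 20))
    pygame.display.flip()

pygame.quit()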
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBaseInfiniiVision import *
from .. import ivi
from .. import fgen
ScreenshotImageFormatMapping = {
'bmp': 'bmp',
'bmp24': 'bmp',
'bmp8': 'bmp8bit',
'png': 'png',
'png24': 'png'}
OutputMode = set(['function'])
OperationMode = set(['continuous', 'burst'])
StandardWaveformMapping = {
'sine': 'sin',
'square': 'squ',
#'triangle': 'tri',
'ramp_up': 'ramp',
#'ramp_down',
#'dc'
'pulse': 'puls',
'noise': 'nois',
'dc': 'dc'
}
class agilent2000A(agilentBaseInfiniiVision, fgen.Base, fgen.StdFunc, fgen.ModulateAM, fgen.ModulateFM):
"Agilent InfiniiVision 2000A series IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(agilent2000A, self).__init__(*args, **kwargs)
self._analog_channel_name = list()
self._analog_channel_count = 4
self._digital_channel_name = list()
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 200e6
self._horizontal_divisions = 10
self._vertical_divisions = 8
self._display_screenshot_image_format_mapping = ScreenshotImageFormatMapping
# wavegen option
self._output_count = 1
self._output_standard_waveform_mapping = StandardWaveformMapping
self._identity_description = "Agilent InfiniiVision 2000A X-series IVI oscilloscope driver"
self._identity_supported_instrument_models = ['DSOX2002A','DSOX2004A','DSOX2012A',
'DSOX2014A','DSOX2022A','DSOX2024A','MSOX2002A','MSOX2004A','MSOX2012A','MSOX2014A',
'MSOX2022A','MSOX2024A']
self._init_outputs()
def _init_outputs(self):
try:
super(agilent2000A, self)._init_outputs()
except AttributeError:
pass
self._output_name = list()
self._output_operation_mode = list()
self._output_enabled = list()
self._output_impedance = list()
self._output_mode = list()
self._output_reference_clock_source = list()
for i in range(self._output_count):
if self._output_count == 1:
self._output_name.append("wgen")
else:
self._output_name.append("wgen%d" % (i+1))
self._output_operation_mode.append('continuous')
self._output_enabled.append(False)
self._output_impedance.append(50)
self._output_mode.append('function')
self._output_reference_clock_source.append('')
self.outputs._set_list(self._output_name)
# wavegen option
def _get_output_operation_mode(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_operation_mode[index]
def _set_output_operation_mode(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in OperationMode:
raise ivi.ValueNotSupportedException()
self._output_operation_mode[index] = value
def _get_output_enabled(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:output?" % self._output_name[index])
self._output_enabled[index] = bool(int(resp))
self._set_cache_valid(index=index)
return self._output_enabled[index]
def _set_output_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
if not self._driver_operation_simulate:
self._write(":%s:output %d" % (self._output_name[index], value))
self._output_enabled[index] = value
self._set_cache_valid(index=index)
def _get_output_impedance(self, index):
        index = ivi.get_index(self._output_name, index)  # was _analog_channel_name; the impedance list is sized per output
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
val = self._ask(":%s:output:load?" % self._output_name[index])
if val == 'ONEM':
self._output_impedance[index] = 1000000
elif val == 'FIFT':
self._output_impedance[index] = 50
self._set_cache_valid(index=index)
return self._output_impedance[index]
def _set_output_impedance(self, index, value):
value = float(value)
        index = ivi.get_index(self._output_name, index)  # was _analog_channel_name; see getter above
if value != 50 and value != 1000000:
raise Exception('Invalid impedance selection')
if not self._driver_operation_simulate:
if value == 1000000:
self._write(":%s:output:load onemeg" % self._output_name[index])
elif value == 50:
self._write(":%s:output:load fifty" % self._output_name[index])
self._output_impedance[index] = value
self._set_cache_valid(index=index)
self._set_cache_valid(False, 'output_standard_waveform_amplitude', index)
def _get_output_mode(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_mode[index]
def _set_output_mode(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in OutputMode:
raise ivi.ValueNotSupportedException()
self._output_mode[index] = value
def _get_output_reference_clock_source(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_reference_clock_source[index]
def _set_output_reference_clock_source(self, index, value):
index = ivi.get_index(self._output_name, index)
value = 'internal'
self._output_reference_clock_source[index] = value
def abort_generation(self):
pass
def initiate_generation(self):
pass
def _get_output_standard_waveform_amplitude(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:voltage?" % self._output_name[index])
self._output_standard_waveform_amplitude[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_amplitude[index]
def _set_output_standard_waveform_amplitude(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:voltage %e" % (self._output_name[index], value))
self._output_standard_waveform_amplitude[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_dc_offset(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:voltage:offset?" % self._output_name[index])
self._output_standard_waveform_dc_offset[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_dc_offset[index]
def _set_output_standard_waveform_dc_offset(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:voltage:offset %e" % (self._output_name[index], value))
self._output_standard_waveform_dc_offset[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_duty_cycle_high(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:function:square:dcycle?" % self._output_name[index])
self._output_standard_waveform_duty_cycle_high[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_duty_cycle_high[index]
def _set_output_standard_waveform_duty_cycle_high(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 20.0 or value > 80.0:
raise ivi.OutOfRangeException()
if not self._driver_operation_simulate:
self._write(":%s:function:square:dcycle %e" % (self._output_name[index], value))
self._output_standard_waveform_duty_cycle_high[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_start_phase(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_standard_waveform_start_phase[index]
def _set_output_standard_waveform_start_phase(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_standard_waveform_start_phase[index] = value
def _get_output_standard_waveform_frequency(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:frequency?" % self._output_name[index])
self._output_standard_waveform_frequency[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_frequency[index]
def _set_output_standard_waveform_frequency(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:frequency %e" % (self._output_name[index], value))
self._output_standard_waveform_frequency[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_waveform(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":%s:function?" % self._output_name[index])
value = resp.lower()
value = [k for k,v in self._output_standard_waveform_mapping.items() if v==value][0]
self._output_standard_waveform_waveform[index] = value
self._set_cache_valid(index=index)
return self._output_standard_waveform_waveform[index]
def _set_output_standard_waveform_waveform(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in self._output_standard_waveform_mapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":%s:function %s" % (self._output_name[index], self._output_standard_waveform_mapping[value]))
self._output_standard_waveform_waveform[index] = value
self._set_cache_valid(index=index)
def _get_output_am_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_am_enabled[index]
def _set_output_am_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_am_enabled[index] = value
def _get_output_am_source(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_am_source[index]
def _set_output_am_source(self, index, value):
index = ivi.get_index(self._output_name, index)
value = 'internal'
self._output_am_source[index] = value
def _get_am_internal_depth(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask(":%s:modulation:am:depth?" % self._output_name[index])
self._am_internal_depth = float(resp)
self._set_cache_valid()
return self._am_internal_depth
def _set_am_internal_depth(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:modulation:am:depth %e" % (self._output_name[index], value))
self._am_internal_depth = value
self._set_cache_valid()
def _get_am_internal_frequency(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask(":%s:modulation:am:frequency?" % self._output_name[index])
self._am_internal_frequency = float(resp)
self._set_cache_valid()
return self._am_internal_frequency
def _set_am_internal_frequency(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:modulation:am:frequency %e" % (self._output_name[index], value))
self._am_internal_frequency = value
self._set_cache_valid()
def _get_am_internal_waveform(self):
return self._am_internal_waveform
def _set_am_internal_waveform(self, value):
        value = str(value)  # waveform is a name (e.g. 'sine'), not a float
self._am_internal_waveform = value
def _get_output_fm_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_fm_enabled[index]
def _set_output_fm_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_fm_enabled[index] = value
def _get_output_fm_source(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_fm_source[index]
def _set_output_fm_source(self, index, value):
index = ivi.get_index(self._output_name, index)
value = 'internal'
self._output_fm_source[index] = value
def _get_fm_internal_deviation(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask(":%s:modulation:fm:deviation?" % self._output_name[index])
self._fm_internal_deviation = float(resp)
self._set_cache_valid()
return self._fm_internal_deviation
def _set_fm_internal_deviation(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:modulation:fm:deviation %e" % (self._output_name[index], value))
self._fm_internal_deviation = value
self._set_cache_valid()
def _get_fm_internal_frequency(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask(":%s:modulation:fm:frequency?" % self._output_name[index])
self._fm_internal_frequency = float(resp)
self._set_cache_valid()
return self._fm_internal_frequency
def _set_fm_internal_frequency(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":%s:modulation:fm:frequency %e" % (self._output_name[index], value))
self._fm_internal_frequency = value
self._set_cache_valid()
def _get_fm_internal_waveform(self):
return self._fm_internal_waveform
def _set_fm_internal_waveform(self, value):
        value = str(value)  # waveform is a name (e.g. 'sine'), not a float
self._fm_internal_waveform = value
|
dracorpg/python-ivi
|
ivi/agilent/agilent2000A.py
|
Python
|
mit
| 16685
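Nearly every property in the driver above follows the same write-through/read-miss caching discipline: a getter queries the instrument only when the cache-valid check fails, and a setter writes to the device and then marks the cache fresh. A stripped-down sketch of that pattern (a hypothetical stand-in class, not python-ivi's actual base API):
class Instrument:
    def __init__(self, ask, write):
        self._ask = ask            # callable that queries the device (SCPI)
        self._write = write        # callable that sends a command
        self._cache = {}           # name -> last known value
        self._cache_valid = set()  # names whose cached value is trusted

    def get(self, name, query, convert=float):
        if name not in self._cache_valid:  # cache miss: ask the instrument
            self._cache[name] = convert(self._ask(query))
            self._cache_valid.add(name)
        return self._cache[name]

    def set(self, name, command, value):
        self._write(command % value)       # write through to the device
        self._cache[name] = value
        self._cache_valid.add(name)        # cache now reflects the device

# stubbed I/O to show the behaviour:
inst = Instrument(ask=lambda q: '1e3', write=print)
print(inst.get('wgen_freq', ':wgen:frequency?'))  # queried once -> 1000.0
print(inst.get('wgen_freq', ':wgen:frequency?'))  # served from the cache
inst.set('wgen_freq', ':wgen:frequency %e', 2e3)  # write-through, cache stays fresh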
|
import json
from flask import request, Response
from werkzeug.exceptions import BadRequest
from werkzeug.test import EnvironBuilder
class Aggregator(object):
def __init__(self, app=None, endpoint=None):
self.url_map = {}
self.endpoint = endpoint or "/aggregate"
if app:
self.init_app(app)
def init_app(self, app):
app.add_url_rule(self.endpoint, view_func=self.post, methods=["POST"],
defaults={"app": app})
def get_response(self, app, route):
query_string = ""
if '?' in route:
route, query_string = route.split('?', 1)
builder = EnvironBuilder(path=route, query_string=query_string)
app.request_context(builder.get_environ()).push()
return app.dispatch_request()
def post(self, app):
try:
data = request.data.decode('utf-8')
routes = json.loads(data)
if not isinstance(routes, list):
raise TypeError
        except (ValueError, TypeError):
raise BadRequest("Can't get requests list.")
def __generate():
data = None
for route in routes:
yield data + ', ' if data else '{'
response = self.get_response(app, route)
json_response = json.dumps(response)
data = '"{}": {}'.format(route, json_response)
            yield data + '}' if data else '{}'  # an empty routes list still yields valid JSON
return Response(__generate(), mimetype='application/json')
|
ramnes/flask-aggregator
|
flask_aggregator.py
|
Python
|
mit
| 1514
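Exercising the `Aggregator` above with Flask's test client looks roughly like this (a sketch; note the aggregated views must return `json.dumps`-able values, since `post` serialises each dispatched result directly):
import json
from flask import Flask, request
from flask_aggregator import Aggregator

app = Flask(__name__)
Aggregator(app)  # registers POST /aggregate on the app

@app.route("/hello")
def hello():
    return "world"  # plain string: json.dumps-able

@app.route("/echo")
def echo():
    return request.args.get("msg", "")  # args come from the pushed context

client = app.test_client()
resp = client.post("/aggregate", data=json.dumps(["/hello", "/echo?msg=hi"]))
print(resp.get_data(as_text=True))  # {"/hello": "world", "/echo?msg=hi": "hi"}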
|
from django.contrib import admin
from . import models
class BatchJobAdmin(admin.ModelAdmin):
list_display = ['email', 'timestamp', 'completed', 'job']
search_fields = ['email']
list_filter = ['job', 'completed']
ordering = ['-timestamp']
admin.site.register(models.BatchJob, BatchJobAdmin)
admin.site.register(models.GroupList)
|
riltsken/python-constant-contact
|
python_constantcontact/django_constantcontact/admin.py
|
Python
|
mit
| 327
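A hypothetical `models.py` consistent with the admin registration above (field names come from `list_display`/`list_filter`; the types are inferred, not taken from the actual package, and the classes need a configured Django project to run):
from django.db import models

class BatchJob(models.Model):
    email = models.EmailField()
    timestamp = models.DateTimeField(auto_now_add=True)
    completed = models.BooleanField(default=False)
    job = models.CharField(max_length=100)

class GroupList(models.Model):
    name = models.CharField(max_length=100)  # inferred; actual fields unknown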
|
from theme_converter.base import BaseConverter
import os
import os.path
import shutil
class StatusConverter(BaseConverter):
_available_statuses = {
'Generic Available':'pidgin-status-available',
'Free for Chat':'pidgin-status-chat',
'Available for Friends Only':'',
'Generic Away':'pidgin-status-away',
'Extended Away':'pidgin-status-xa',
'Away for Friends Only':'pidgin-status-away',
'DND':'pidgin-status-busy',
'Not Available':'pidgin-status-busy',
'Occupied':'pidgin-status-busy',
'BRB':'',
'Busy':'pidgin-status-busy',
'Phone':'pidgin-status-busy',
'Lunch':'pidgin-status-busy',
'Not At Home':'pidgin-status-xa',
'Not At Desk':'pidgin-status-xa',
'Not In Office':'pidgin-status-xa',
'Vacation':'pidgin-status-xa',
'Stepped Out':'pidgin-status-xa',
'Idle And Away':'pidgin-status-busy',
'Idle':'pidgin-status-busy',
'Invisible':'pidgin-status-invisible',
'Offline':'pidgin-status-offline',
'Unknown':''
}
def get_default_theme_dir(self):
return os.path.expanduser('~/.purple/themes/%s/purple/status-icon' % self.theme_name)
def save_theme(self, path):
import xml.etree.ElementTree
import datetime
_tree = xml.etree.ElementTree
theme = _tree.Element('theme')
description = _tree.SubElement(theme, 'description')
        description.text = 'Generated at %s in Affinity Status converter.' % datetime.datetime.now().strftime('%m-%d-%Y %H:%M')
theme.attrib = {
'type': 'pidgin status icon',
'name': self.theme_name,
'author': 'affinity status converter'
}
icons = []
if not path: path = self.make_theme_dir()
new_path = '%s/16' % path
try:
os.mkdir(new_path)
except OSError:
print('Cannot make dir: %s\nMaybe the theme already exists?' % new_path)
for icon_id in self.plist['List']:
try:
if self._available_statuses[icon_id]:
sauce = '%s/%s' % (self.path, self.plist['List'][icon_id])
target_file = os.path.basename(sauce)
icons.append(_tree.SubElement(theme, 'icon'))
icons[-1].attrib = {
'id': self._available_statuses[icon_id],
'file': target_file
}
icons.append(_tree.SubElement(theme, 'icon'))
icons[-1].attrib = {
'id': self._available_statuses[icon_id] + '-i',
'file': target_file
}
shutil.copy(sauce, '%s/%s' % (new_path, target_file))
else:
print('Cannot find Adium\'s %s pidgin equivalent' % icon_id)
except KeyError:
print('Unknown Adium status icon id: %s' % icon_id)
tree = _tree.ElementTree(theme)
file_path = '%s/theme.xml' % path
print('Saving theme.xml in: %s' % file_path)
tree.write(file_path)
os.system('xmllint --format "{0}" --output "{0}"'.format(file_path))
|
hectron/adium_theme_converter
|
theme_converter/status_converter.py
|
Python
|
mit
| 3259
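`save_theme` above builds the whole Pidgin theme as an ElementTree in memory and serialises it once at the end. The same construction in isolation (hypothetical theme name and icon file; the `-i` suffix mirrors the inactive-icon variant the converter emits):
import xml.etree.ElementTree as ET

theme = ET.Element('theme', attrib={
    'type': 'pidgin status icon',
    'name': 'example-theme',                 # hypothetical theme name
    'author': 'affinity status converter',
})
ET.SubElement(theme, 'description').text = 'Generated example.'
for icon_id, filename in [('pidgin-status-available', 'available.png')]:
    ET.SubElement(theme, 'icon', attrib={'id': icon_id, 'file': filename})
    # the inactive variant points at the same file, as in save_theme:
    ET.SubElement(theme, 'icon', attrib={'id': icon_id + '-i', 'file': filename})
ET.ElementTree(theme).write('theme.xml')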
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Automated Expanded Attention + Multi-Objective.
Combines both expanded attention and multi-objective training objective
"""
from parlai.zoo.light_whoami.whoami_download import download_with_model_type
def download(datapath):
download_with_model_type(datapath, 'expanded_and_multiobjective_1024', 'v1.0')
|
facebookresearch/ParlAI
|
parlai/zoo/light_whoami/expanded_and_multiobjective_1024.py
|
Python
|
mit
| 514
|
from django.conf import settings
from . import defaults
__title__ = 'events.contrib.plugins.form_elements.fields.' \
'checkbox_select_multiple.conf'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('get_setting',)
def get_setting(setting, override=None):
"""Get setting.
Get a setting from
`fobi.contrib.plugins.form_elements.fields.checkbox_select_multiple` conf
module, falling back to the default.
If override is not None, it will be used instead of the setting.
:param setting: String with setting name
:param override: Value to use when no setting is available. Defaults
to None.
:return: Setting value.
"""
if override is not None:
return override
if hasattr(
settings,
'FOBI_FORM_ELEMENT_CHECKBOX_SELECT_MULTIPLE_{0}'.format(setting)
):
return getattr(
settings,
'FOBI_FORM_ELEMENT_CHECKBOX_SELECT_MULTIPLE_{0}'.format(setting)
)
else:
return getattr(defaults, setting)
|
mansonul/events
|
events/contrib/plugins/form_elements/fields/checkbox_select_multiple/conf.py
|
Python
|
mit
| 1137
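In use, `get_setting('FOO')` resolves `FOBI_FORM_ELEMENT_CHECKBOX_SELECT_MULTIPLE_FOO` from Django settings, falling back to the plugin's `defaults` module. A self-contained sketch of the same lookup (stand-in `settings`/`defaults` objects and a hypothetical setting name):
class defaults:        # stand-in for the plugin's defaults module
    IGNORED_MODELS = []

class settings:        # stand-in for django.conf.settings
    FOBI_FORM_ELEMENT_CHECKBOX_SELECT_MULTIPLE_IGNORED_MODELS = ['auth.User']

def get_setting(setting, override=None):
    if override is not None:
        return override
    key = 'FOBI_FORM_ELEMENT_CHECKBOX_SELECT_MULTIPLE_{0}'.format(setting)
    return getattr(settings, key, getattr(defaults, setting))

print(get_setting('IGNORED_MODELS'))               # ['auth.User'] from settings
print(get_setting('IGNORED_MODELS', override=[]))  # [] via the override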
|
print "From package1.__init__.py"
from . import z_test
|
bdlamprecht/python_class
|
neteng/package1/__init__.py
|
Python
|
mit
| 54
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pyhkp documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 28 20:01:20 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyhkp'
copyright = '2014, Emre Adil'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyhkpdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyhkp.tex', 'pyhkp Documentation',
'Emre Adil', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyhkp', 'pyhkp Documentation',
['Emre Adil'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyhkp', 'pyhkp Documentation',
'Emre Adil', 'pyhkp', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'pyhkp'
epub_author = 'Emre Adil'
epub_publisher = 'Emre Adil'
epub_copyright = '2014, Emre Adil'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'pyhkp'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
eadil/pyhkp
|
doc/conf.py
|
Python
|
mit
| 10210
|
from .local import *
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
tranhnb/template
|
project_name/project_name/settings/local_heroku.py
|
Python
|
mit
| 509
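For reference, `dj_database_url.config()` above reads the `DATABASE_URL` environment variable (which Heroku sets) and expands it into a Django `DATABASES` entry, roughly as follows (illustrative URL and values; the exact `ENGINE` string depends on the dj-database-url version):
import os
os.environ['DATABASE_URL'] = 'postgres://user:secret@db.example.com:5432/appdb'

import dj_database_url
cfg = dj_database_url.config()
# cfg is a dict along the lines of:
# {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'appdb',
#  'USER': 'user', 'PASSWORD': 'secret', 'HOST': 'db.example.com', 'PORT': 5432}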
|
"""Test multiballs and multiball_locks."""
from mpf.tests.MpfGameTestCase import MpfGameTestCase
class TestMultiBall(MpfGameTestCase):
def get_config_file(self):
return 'config.yaml'
def get_machine_path(self):
return 'tests/machine_files/multiball/'
def get_platform(self):
return 'smart_virtual'
def testSimpleMultiball(self):
self.mock_event("multiball_mb1_ended")
self.mock_event("multiball_mb1_ball_lost")
# prepare game
self.fill_troughs()
self.assertFalse(self.machine.multiballs["mb1"].enabled)
# start game
self.start_game()
        # mb1 should not start because it's not enabled
self.post_event("mb1_start")
self.assertEqual(0, self.machine.multiballs["mb1"].balls_added_live)
self.assertFalse(self.machine.multiballs["mb1"].enabled)
self.post_event("mb1_enable")
# multiball should be enabled now but not started
self.assertTrue(self.machine.multiballs["mb1"].enabled)
self.assertEqual(0, self.machine.multiballs["mb1"].balls_added_live)
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# ball drains right away
self.drain_one_ball()
self.advance_time_and_run(1)
# multiball not started. game should end
self.assertEqual(None, self.machine.game)
# start game again
self.start_game()
self.post_event("mb1_enable")
# multiball should be enabled
self.assertTrue(self.machine.multiballs["mb1"].enabled)
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
self.post_event("mb1_disable")
# nothing happens
self.post_event("mb1_start")
self.assertEqual(0, self.machine.multiballs["mb1"].balls_added_live)
# mb start
self.post_event("mb1_enable")
self.post_event("mb1_start")
self.assertEqual(1, self.machine.multiballs["mb1"].balls_added_live)
# another ball should be ejected to pf
self.advance_time_and_run(10)
self.assertEqual(2, self.machine.playfield.balls)
self.assertBallsInPlay(2)
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
# game should not end
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# it should be readded because of shoot again
self.advance_time_and_run(10)
self.assertEqual(2, self.machine.playfield.balls)
self.assertBallsInPlay(2)
# two balls drain
self.drain_one_ball()
self.drain_one_ball()
self.assertEqual(0, self._events['multiball_mb1_ended'])
# they should be readded because of shoot again
self.advance_time_and_run(10)
self.assertEqual(2, self.machine.playfield.balls)
self.assertEventNotCalled("multiball_mb1_ball_lost")
self.assertBallsInPlay(2)
# shoot again ends
self.advance_time_and_run(10)
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
self.assertEventCalled("multiball_mb1_ball_lost", 1)
self.assertBallsInPlay(1)
# mb ends
self.assertEqual(1, self._events['multiball_mb1_ended'])
# the other ball also drains
self.drain_one_ball()
# game should end
self.advance_time_and_run(1)
self.assertEqual(None, self.machine.game)
def testRestartMultiball(self):
self.mock_event("multiball_mb1_ended")
# prepare game
self.fill_troughs()
self.assertFalse(self.machine.multiballs["mb1"].enabled)
# start game
self.start_game()
self.post_event("mb1_enable")
# multiball should be enabled
self.assertTrue(self.machine.multiballs["mb1"].enabled)
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# mb started
self.post_event("mb1_start")
self.assertEqual(1, self.machine.multiballs["mb1"].balls_added_live)
# another ball should be ejected to pf
self.advance_time_and_run(10)
self.assertEqual(2, self.machine.playfield.balls)
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
# game should not end
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# it should be readded because of shoot again
self.advance_time_and_run(10)
self.assertEqual(2, self.machine.playfield.balls)
self.assertEqual(2, self.machine.playfield.available_balls)
# mb cannot start again/nothing happens
self.post_event("mb1_start")
self.assertEqual(1, self.machine.multiballs["mb1"].balls_added_live)
self.assertEqual(2, self.machine.playfield.available_balls)
# shoot again ends
self.advance_time_and_run(10)
# mb cannot start again because balls are still in play
self.post_event("mb1_start")
self.assertEqual(1, self.machine.multiballs["mb1"].balls_added_live)
self.assertEqual(2, self.machine.playfield.available_balls)
self.assertEqual(0, self._events['multiball_mb1_ended'])
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
# mb ends
self.assertEqual(0, self.machine.multiballs["mb1"].balls_added_live)
self.assertEqual(1, self._events['multiball_mb1_ended'])
# restart mb
self.post_event("mb1_start")
self.advance_time_and_run(1)
self.assertEqual(1, self.machine.multiballs["mb1"].balls_added_live)
self.assertEqual(2, self.machine.playfield.available_balls)
self.assertEqual(1, self.machine.playfield.balls)
self.advance_time_and_run(40)
self.assertEqual(2, self.machine.playfield.balls)
# two balls drains
self.drain_one_ball()
self.drain_one_ball()
# game should end
self.advance_time_and_run(1)
self.assertEqual(2, self._events['multiball_mb1_ended'])
self.assertEqual(None, self.machine.game)
def testUnlimitedShootAgain(self):
self.mock_event("multiball_mb2_ended")
# prepare game
self.fill_troughs()
# start game
self.start_game()
self.post_event("mb2_enable")
# multiball should be enabled
self.assertTrue(self.machine.multiballs["mb2"].enabled)
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# mb started
self.post_event("mb2_start")
self.assertEqual(2, self.machine.multiballs["mb2"].balls_added_live)
# another two balls should be ejected to pf
self.advance_time_and_run(10)
self.assertEqual(3, self.machine.playfield.balls)
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
# game should not end
self.assertNotEqual(None, self.machine.game)
self.assertEqual(2, self.machine.playfield.balls)
# it should be readded because of shoot again
self.advance_time_and_run(10)
self.assertEqual(3, self.machine.playfield.balls)
# two balls drain
self.drain_one_ball()
self.drain_one_ball()
self.advance_time_and_run(1)
self.assertEqual(0, self._events['multiball_mb2_ended'])
# they should be readded because of shoot again
self.advance_time_and_run(10)
self.assertEqual(3, self.machine.playfield.balls)
# shoot again forever
self.advance_time_and_run(100)
# three balls drain
self.drain_one_ball()
self.drain_one_ball()
self.drain_one_ball()
self.advance_time_and_run(1)
self.assertEqual(0, self.machine.playfield.balls)
# they should be readded because of shoot again
self.advance_time_and_run(20)
self.assertEqual(3, self.machine.playfield.balls)
# end mb
self.post_event("mb2_stop")
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
# mb does not end yet
self.assertEqual(0, self._events['multiball_mb2_ended'])
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
# mb ends
self.assertEqual(1, self._events['multiball_mb2_ended'])
# the other ball also drains
self.drain_one_ball()
# game should end
self.advance_time_and_run(1)
self.assertEqual(None, self.machine.game)
def testSimultaneousMultiballs(self):
self.mock_event("multiball_mb2_ended")
self.mock_event("multiball_mb3_ended")
# prepare game
self.fill_troughs()
# start game
self.start_game()
self.post_event("mb2_enable")
self.post_event("mb3_enable")
# multiballs should be enabled
self.assertTrue(self.machine.multiballs["mb2"].enabled)
self.assertTrue(self.machine.multiballs["mb3"].enabled)
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# mb started
self.post_event("mb2_start")
self.assertEqual(2, self.machine.multiballs["mb2"].balls_added_live)
# another two balls should be ejected to pf
self.advance_time_and_run(10)
self.assertEqual(3, self.machine.playfield.balls)
# start mb3
self.post_event("mb3_start")
self.assertEqual(1, self.machine.multiballs["mb3"].balls_added_live)
# another ball should appear
self.advance_time_and_run(10)
self.assertEqual(4, self.machine.playfield.balls)
self.assertTrue(self.machine.multiballs["mb2"].shoot_again)
self.assertFalse(self.machine.multiballs["mb3"].shoot_again)
self.post_event("mb2_stop")
self.assertFalse(self.machine.multiballs["mb2"].shoot_again)
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
# game should not end
self.assertNotEqual(None, self.machine.game)
self.assertEqual(3, self.machine.playfield.balls)
self.assertEqual(0, self._events['multiball_mb2_ended'])
self.assertEqual(0, self._events['multiball_mb3_ended'])
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(2, self.machine.playfield.balls)
self.assertEqual(0, self._events['multiball_mb2_ended'])
self.assertEqual(0, self._events['multiball_mb3_ended'])
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
self.assertEqual(1, self._events['multiball_mb2_ended'])
self.assertEqual(1, self._events['multiball_mb3_ended'])
# last ball drains
self.drain_one_ball()
# game should end
self.advance_time_and_run(1)
self.assertEqual(None, self.machine.game)
def testMultiballInMode(self):
self.mock_event("multiball_mb4_ended")
# prepare game
self.fill_troughs()
# start game
self.start_game()
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# mode not loaded. mb4 should not enable or start
self.post_event("mb4_enable")
self.post_event("mb4_start")
self.advance_time_and_run(4)
self.assertEqual(1, self.machine.playfield.available_balls)
# start mode
self.post_event("start_mode1")
# mode loaded. mb4 should enable and start
self.post_event("mb4_enable")
self.post_event("mb4_start")
self.assertTrue(self.machine.multiballs["mb4"].enabled)
self.advance_time_and_run(4)
self.assertEqual(2, self.machine.playfield.available_balls)
# another ball should be ejected to pf
self.advance_time_and_run(10)
self.assertEqual(2, self.machine.playfield.balls)
self.drain_one_ball()
self.advance_time_and_run(1)
# it should come back
self.assertEqual(2, self.machine.playfield.available_balls)
self.assertEqual(0, self._events['multiball_mb4_ended'])
# stop mode
self.post_event("stop_mode1")
        # mode end should stop mb
self.assertFalse(self.machine.multiballs["mb4"].shoot_again)
self.assertFalse(self.machine.multiballs["mb4"].enabled)
self.assertEqual(0, self._events['multiball_mb4_ended'])
# next drain should end mb
self.drain_one_ball()
self.advance_time_and_run(1)
self.assertEqual(1, self.machine.playfield.available_balls)
self.assertEqual(1, self._events['multiball_mb4_ended'])
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
# game should end
self.advance_time_and_run(1)
self.assertEqual(None, self.machine.game)
def testMultiballInModeSimple(self):
self.mock_event("multiball_mb5_ended")
# prepare game
self.fill_troughs()
# start game
self.start_game()
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
self.advance_time_and_run(4)
self.assertEqual(1, self.machine.playfield.available_balls)
# start mode
self.post_event("start_mode2")
        # mode loaded. mb5 should be enabled but not started
self.assertTrue(self.machine.multiballs["mb5"].enabled)
self.assertEqual(0, self.machine.multiballs["mb5"].balls_added_live)
# start it
self.post_event("mb5_start")
self.assertEqual(1, self.machine.multiballs["mb5"].balls_added_live)
self.advance_time_and_run(4)
self.assertEqual(2, self.machine.playfield.available_balls)
# drain a ball
self.drain_one_ball()
self.advance_time_and_run(1)
# it should come back
self.assertEqual(2, self.machine.playfield.available_balls)
self.assertEqual(0, self._events['multiball_mb5_ended'])
# stop mode
self.post_event("stop_mode2")
# mode end should stop mb
self.assertFalse(self.machine.multiballs["mb5"].shoot_again)
self.assertFalse(self.machine.multiballs["mb5"].enabled)
self.assertEqual(0, self._events['multiball_mb5_ended'])
# next drain should end mb
self.drain_one_ball()
self.advance_time_and_run(1)
self.assertEqual(1, self.machine.playfield.available_balls)
self.assertEqual(1, self._events['multiball_mb5_ended'])
# ball drains
self.drain_one_ball()
self.advance_time_and_run(1)
# game should end
self.advance_time_and_run(1)
self.assertEqual(None, self.machine.game)
def testMultiballWithLock(self):
# prepare game
self.fill_troughs()
self.assertFalse(self.machine.multiballs["mb6"].enabled)
# start game
self.start_game()
# start mode
self.post_event("start_mode1")
# multiball should be enabled
self.assertTrue(self.machine.multiballs["mb6"].enabled)
# lock should be enabled
self.assertTrue(self.machine.multiball_locks["lock_mb6"].enabled)
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# lock one ball and another one should go to pf
self.hit_switch_and_run("s_lock1", 10)
self.assertEqual(1, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(1, self.machine.playfield.balls)
self.assertEqual(1, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
# start mb
self.post_event("mb6_start")
self.assertEqual(2, self.machine.multiballs["mb6"].balls_added_live)
# three balls on pf
self.advance_time_and_run(10)
self.assertEqual(3, self.machine.playfield.balls)
self.assertEqual(0, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(0, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
# game ends (because of slam tilt)
self.machine.game.end_ball()
self.advance_time_and_run()
# this should not crash
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["bd_trough"])
self.advance_time_and_run()
def test_total_ball_count(self):
# prepare game
self.fill_troughs()
self.assertFalse(self.machine.multiballs["mb10"].enabled)
self.assertFalse(self.machine.multiballs["mb11"].enabled)
# start game
self.start_game()
# start mode
self.post_event("start_mode1")
# multiball should be enabled
self.assertTrue(self.machine.multiballs["mb10"].enabled)
self.assertTrue(self.machine.multiballs["mb11"].enabled)
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# start mb10
self.post_event("mb10_start")
self.assertEqual(2, self.machine.multiballs["mb10"].balls_added_live)
self.assertEqual(3, self.machine.multiballs["mb10"].balls_live_target)
self.assertTrue(self.machine.multiballs["mb10"].shoot_again)
# three balls on pf
self.advance_time_and_run(10)
self.assertEqual(3, self.machine.playfield.balls)
# drain one. should come back
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertEqual(3, self.machine.playfield.balls)
self.assertEqual(2, self.machine.multiballs["mb10"].balls_added_live)
self.assertEqual(3, self.machine.multiballs["mb10"].balls_live_target)
self.assertTrue(self.machine.multiballs["mb10"].shoot_again)
# no more shoot again
self.advance_time_and_run(5)
self.assertFalse(self.machine.multiballs["mb10"].shoot_again)
# start mb11
self.post_event("mb11_start")
self.assertEqual(0, self.machine.multiballs["mb11"].balls_added_live)
self.assertEqual(2, self.machine.multiballs["mb11"].balls_live_target)
self.advance_time_and_run(5)
self.assertEqual(3, self.machine.playfield.balls)
self.assertEqual(3, self.machine.game.balls_in_play)
self.assertTrue(self.machine.multiballs["mb11"].shoot_again)
# drain one. should not come back
self.drain_one_ball()
self.advance_time_and_run(4)
self.assertEqual(2, self.machine.playfield.balls)
self.assertEqual(2, self.machine.game.balls_in_play)
self.assertTrue(self.machine.multiballs["mb11"].shoot_again)
# but the second one should come back
self.drain_one_ball()
self.advance_time_and_run(4)
self.assertEqual(2, self.machine.playfield.balls)
self.assertEqual(2, self.machine.game.balls_in_play)
self.assertTrue(self.machine.multiballs["mb11"].shoot_again)
# shoot again ends
self.advance_time_and_run(10)
self.assertFalse(self.machine.multiballs["mb10"].shoot_again)
self.assertFalse(self.machine.multiballs["mb11"].shoot_again)
self.assertEqual(3, self.machine.multiballs["mb10"].balls_live_target)
self.assertEqual(2, self.machine.multiballs["mb11"].balls_live_target)
self.assertEqual(2, self.machine.game.balls_in_play)
# drain one balls
self.drain_one_ball()
self.advance_time_and_run()
self.assertEqual(1, self.machine.game.balls_in_play)
# both mbs should end
self.assertEqual(0, self.machine.multiballs["mb10"].balls_live_target)
self.assertEqual(0, self.machine.multiballs["mb11"].balls_live_target)
def test_total_ball_count_with_lock(self):
# prepare game
self.fill_troughs()
self.assertFalse(self.machine.multiballs["mb10"].enabled)
self.assertFalse(self.machine.multiballs["mb11"].enabled)
# start game
self.start_game()
# start mode
self.post_event("start_mode1")
# multiball should be enabled
self.assertTrue(self.machine.multiballs["mb10"].enabled)
self.assertTrue(self.machine.multiballs["mb11"].enabled)
# lock should be enabled
self.assertTrue(self.machine.multiball_locks["lock_mb6"].enabled)
# takes roughly 4s to get ball confirmed
self.advance_time_and_run(4)
self.assertNotEqual(None, self.machine.game)
self.assertEqual(1, self.machine.playfield.balls)
# lock one ball and another one should go to pf
self.hit_switch_and_run("s_lock1", 10)
self.assertEqual(1, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(1, self.machine.playfield.balls)
# start mb10
self.post_event("mb10_start")
self.assertEqual(2, self.machine.multiballs["mb10"].balls_added_live)
self.assertEqual(3, self.machine.multiballs["mb10"].balls_live_target)
# three balls on pf and one in lock
self.advance_time_and_run(10)
self.assertEqual(3, self.machine.playfield.balls)
self.assertEqual(1, self.machine.ball_devices["bd_lock"].balls)
# start mb12. eject lock
self.post_event("mb12_start")
self.assertEqual(1, self.machine.multiballs["mb12"].balls_added_live)
self.assertEqual(4, self.machine.multiballs["mb12"].balls_live_target)
self.advance_time_and_run(5)
self.assertEqual(4, self.machine.playfield.balls)
self.assertEqual(4, self.machine.game.balls_in_play)
self.assertTrue(self.machine.multiballs["mb12"].shoot_again)
def testAddABall(self):
self.mock_event("multiball_mb_add_a_ball_ended")
# prepare game
self.fill_troughs()
# start game
self.start_game()
self.post_event("start_or_add")
self.advance_time_and_run(10)
self.assertBallsOnPlayfield(2)
self.post_event("start_or_add")
self.advance_time_and_run(5)
self.assertBallsOnPlayfield(3)
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertBallsOnPlayfield(2)
self.assertEventNotCalled("multiball_mb_add_a_ball_ended")
self.post_event("add_ball")
self.advance_time_and_run(5)
self.assertBallsOnPlayfield(3)
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertBallsOnPlayfield(2)
self.assertEventNotCalled("multiball_mb_add_a_ball_ended")
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertBallsOnPlayfield(1)
self.assertEventCalled("multiball_mb_add_a_ball_ended")
self.post_event("add_ball")
self.advance_time_and_run(5)
self.assertBallsOnPlayfield(1)
def testMultiballLockFullMultiplayer(self):
self.machine.config['game']['balls_per_game'] = self.machine.placeholder_manager.build_int_template(3)
self.mock_event("multiball_lock_lock_mb6_full")
self.fill_troughs()
self.start_two_player_game()
self.assertPlayerNumber(1)
self.assertBallNumber(1)
self.post_event("start_mode1")
# lock ball
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["bd_lock"])
self.advance_time_and_run(5)
# machine should request a new ball and the lock keeps one
self.assertEqual(1, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
self.assertEqual(0, self.machine.game.player_list[1]["lock_mb6_locked_balls"])
self.assertEqual(1, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(4, self.machine.ball_devices["bd_trough"].balls)
self.assertBallsOnPlayfield(1)
self.assertBallsInPlay(1)
# drain ball. player 2 should be up
self.drain_one_ball()
self.advance_time_and_run(5)
self.post_event("start_mode1")
self.assertPlayerNumber(2)
self.assertBallNumber(1)
# also lock a ball
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["bd_lock"])
self.advance_time_and_run(5)
# lock should not keep the ball but count it for the player
self.assertEqual(1, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
self.assertEqual(1, self.machine.game.player_list[1]["lock_mb6_locked_balls"])
self.assertEqual(1, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(4, self.machine.ball_devices["bd_trough"].balls)
self.assertBallsOnPlayfield(1)
self.assertBallsInPlay(1)
self.assertEventNotCalled("multiball_lock_lock_mb6_full")
# lock another ball. lock should keep it
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["bd_lock"])
self.advance_time_and_run(5)
self.assertEqual(1, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
self.assertEqual(2, self.machine.game.player_list[1]["lock_mb6_locked_balls"])
self.assertEqual(2, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(3, self.machine.ball_devices["bd_trough"].balls)
self.assertBallsOnPlayfield(1)
self.assertBallsInPlay(1)
self.assertEventCalled("multiball_lock_lock_mb6_full")
        # drain ball. lock should release a ball because player 1 needs to be able to complete it
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertEqual(1, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
self.assertEqual(2, self.machine.game.player_list[1]["lock_mb6_locked_balls"])
self.assertEqual(1, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(4, self.machine.ball_devices["bd_trough"].balls)
self.assertBallsOnPlayfield(1)
self.assertBallsInPlay(0)
# ball from lock drains
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertPlayerNumber(1)
self.assertBallNumber(2)
self.assertEqual(1, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(4, self.machine.ball_devices["bd_trough"].balls)
self.assertBallsOnPlayfield(1)
self.assertBallsInPlay(1)
self.post_event("start_mode1")
# lock another ball. lock should keep it
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["bd_lock"])
self.advance_time_and_run(5)
self.assertEqual(2, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
self.assertEqual(2, self.machine.game.player_list[1]["lock_mb6_locked_balls"])
self.assertEqual(2, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(3, self.machine.ball_devices["bd_trough"].balls)
self.assertBallsOnPlayfield(1)
self.assertBallsInPlay(1)
# start MB
self.post_event("mb6_start")
self.advance_time_and_run(5)
self.assertBallsOnPlayfield(3)
self.assertBallsInPlay(3)
self.assertEqual(0, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(3, self.machine.ball_devices["bd_trough"].balls)
# drain ball
self.drain_one_ball()
self.advance_time_and_run()
self.assertPlayerNumber(1)
self.assertBallNumber(2)
self.assertBallsOnPlayfield(2)
self.assertBallsInPlay(2)
# drain ball
self.drain_one_ball()
self.advance_time_and_run()
self.assertPlayerNumber(1)
self.assertBallNumber(2)
self.assertBallsOnPlayfield(1)
self.assertBallsInPlay(1)
# drain ball
self.drain_one_ball()
self.advance_time_and_run(5)
self.post_event("start_mode1")
self.assertPlayerNumber(2)
self.assertBallNumber(2)
self.assertEqual(0, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(5, self.machine.ball_devices["bd_trough"].balls)
self.assertEqual(0, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
self.assertEqual(2, self.machine.game.player_list[1]["lock_mb6_locked_balls"])
# start mb without balls in lock
self.post_event("mb6_start")
self.advance_time_and_run(15)
self.assertBallsOnPlayfield(3)
self.assertBallsInPlay(3)
self.assertEqual(0, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(3, self.machine.ball_devices["bd_trough"].balls)
self.assertEqual(0, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
self.assertEqual(0, self.machine.game.player_list[1]["lock_mb6_locked_balls"])
# drain ball
self.drain_one_ball()
self.advance_time_and_run()
self.assertPlayerNumber(2)
self.assertBallNumber(2)
self.assertBallsOnPlayfield(2)
self.assertBallsInPlay(2)
# drain ball
self.drain_one_ball()
self.advance_time_and_run()
self.assertPlayerNumber(2)
self.assertBallNumber(2)
self.assertBallsOnPlayfield(1)
self.assertBallsInPlay(1)
# drain last ball
self.drain_one_ball()
self.advance_time_and_run(5)
self.post_event("start_mode1")
self.assertPlayerNumber(1)
self.assertBallNumber(3)
self.assertEqual(0, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(5, self.machine.ball_devices["bd_trough"].balls)
self.assertBallsInPlay(1)
# lock ball
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["bd_lock"])
self.advance_time_and_run(5)
self.assertEqual(1, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(4, self.machine.ball_devices["bd_trough"].balls)
self.assertBallsInPlay(1)
# drain again
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertPlayerNumber(2)
self.assertBallNumber(3)
self.assertEqual(1, self.machine.game.player_list[0]["lock_mb6_locked_balls"])
self.assertEqual(0, self.machine.game.player_list[1]["lock_mb6_locked_balls"])
# drain again. game should end
self.drain_one_ball()
# lock should eject all balls
self.advance_time_and_run(5)
self.assertGameIsNotRunning()
self.assertEqual(0, self.machine.ball_devices["bd_lock"].balls)
self.assertEqual(5, self.machine.ball_devices["bd_trough"].balls)
self.assertBallsOnPlayfield(1)
# game should not start yet
self.assertGameIsNotRunning()
self.hit_and_release_switch("s_start")
self.advance_time_and_run()
self.assertGameIsNotRunning()
        # ball from lock drains
self.drain_one_ball()
self.advance_time_and_run()
# start new game
self.start_game()
self.post_event("start_mode1")
self.drain_one_ball()
self.advance_time_and_run()
def testModeWithMultiballAutostart(self):
# prepare game
self.fill_troughs()
# start game
self.start_game()
self.post_event("start_mode3")
self.advance_time_and_run(1)
        # multiball should be enabled and, thanks to autostart, already running
self.assertTrue(self.machine.multiballs["mb_autostart"].enabled)
self.assertEqual(1, self.machine.multiballs["mb_autostart"].balls_added_live)
def testMultiballWhichStartsAfterLock(self):
self.mock_event("multiball_mb_autostart_ended")
self.mock_event("multiball_mb_autostart_ball_lost")
# prepare game
self.fill_troughs()
self.assertFalse(self.machine.multiballs["mb4_autostart"].enabled)
# start game
self.start_game()
# start mode
self.post_event("start_mode4")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(1)
self.assertEqual(5, self.machine.ball_devices["bd_trough"].available_balls)
# multiball should be enabled now but not started
self.assertTrue(self.machine.multiballs["mb4_autostart"].enabled)
self.assertEqual(0, self.machine.multiballs["mb4_autostart"].balls_added_live)
# lock a ball
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices["bd_lock"])
self.advance_time_and_run(1)
# mb should start
self.assertTrue(self.machine.multiballs["mb4_autostart"].enabled)
self.assertEqual(1, self.machine.multiballs["mb4_autostart"].balls_added_live)
self.assertBallsInPlay(2)
self.assertAvailableBallsOnPlayfield(2)
# lock should eject
self.assertEqual(4, self.machine.ball_devices["bd_trough"].available_balls)
self.assertEqual(0, self.machine.ball_devices["bd_lock"].available_balls)
# both balls drain
self.drain_all_balls()
# game should end
self.advance_time_and_run(1)
self.assertEqual(None, self.machine.game)
def testMultiballStateInPlaceholder(self):
self.fill_troughs()
self.start_game()
self.post_event("start_default")
self.mock_event("should_post_when_enabled")
self.mock_event("should_post_when_disabled")
self.mock_event("should_not_post_when_enabled")
self.mock_event("should_not_post_when_disabled")
mb = self.machine.multiballs["mb1"]
self.assertFalse(mb.enabled)
self.post_event("test_event_when_disabled")
self.assertEventCalled("should_post_when_disabled")
self.assertEventNotCalled("should_not_post_when_disabled")
mb.enable()
self.assertTrue(mb.enabled)
self.post_event("test_event_when_enabled")
self.assertEventCalled("should_post_when_enabled")
self.assertEventNotCalled("should_not_post_when_enabled")
def testShootAgainPlaceholder(self):
self.fill_troughs()
self.start_game()
self.assertAvailableBallsOnPlayfield(1)
# start mb with no shoot again set in machine var
self.mock_event("multiball_mb_placeholder_shoot_again_ended")
self.mock_event("multiball_mb_placeholder_ended")
self.post_event("mb_placeholder_start")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(2)
# shoot again should end instantly
self.assertEventCalled("multiball_mb_placeholder_shoot_again_ended")
# drain one ball
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(1)
# mb should end
self.assertEventCalled("multiball_mb_placeholder_ended")
# set shoot again time
self.machine.variables.set_machine_var("shoot_again_sec", 30)
# start mb again
self.mock_event("multiball_mb_placeholder_shoot_again_ended")
self.mock_event("multiball_mb_placeholder_ended")
self.post_event("mb_placeholder_start")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(2)
# shoot again should not end instantly
self.assertEventNotCalled("multiball_mb_placeholder_shoot_again_ended")
# drain one ball
self.drain_one_ball()
self.advance_time_and_run(5)
# shoot again should bring it back
self.assertAvailableBallsOnPlayfield(2)
self.assertEventNotCalled("multiball_mb_placeholder_ended")
# wait 30s for shoot again to end
self.advance_time_and_run(30)
self.assertEventCalled("multiball_mb_placeholder_shoot_again_ended")
# drain one ball
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(1)
# mb should end
self.assertEventCalled("multiball_mb_placeholder_ended")
def testShootAgainHurryUpAndGracePeriod(self):
self.fill_troughs()
self.start_game()
self.assertAvailableBallsOnPlayfield(1)
self.mock_event("multiball_mb_alltimers_ended")
self.mock_event("multiball_mb_alltimers_shoot_again_ended")
self.mock_event("multiball_mb_alltimers_grace_period")
self.mock_event("multiball_mb_alltimers_hurry_up")
# start mb 30s shoot again, 10s hurry up, 5s grace
self.post_event("mb_alltimers_start")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(2)
# drain one ball
self.drain_one_ball()
self.advance_time_and_run(5)
# shoot again should bring it back
self.assertAvailableBallsOnPlayfield(2)
self.assertEventNotCalled("multiball_mb_alltimers_ended")
self.assertEventNotCalled("multiball_mb_alltimers_shoot_again_ended")
self.assertEventNotCalled("multiball_mb_alltimers_grace_period")
self.assertEventNotCalled("multiball_mb_alltimers_hurry_up")
        # advance time to hurry up
self.advance_time_and_run(10)
self.assertEventCalled("multiball_mb_alltimers_hurry_up")
self.assertAvailableBallsOnPlayfield(2)
self.assertEventNotCalled("multiball_mb_alltimers_ended")
self.assertEventNotCalled("multiball_mb_alltimers_shoot_again_ended")
self.assertEventNotCalled("multiball_mb_alltimers_grace_period")
# drain one ball
self.drain_one_ball()
self.advance_time_and_run(5)
# shoot again should bring it back
self.assertAvailableBallsOnPlayfield(2)
# wait 7s for shoot again to end, but within grace period
self.advance_time_and_run(7)
self.assertEventCalled("multiball_mb_alltimers_grace_period")
self.assertEventNotCalled("multiball_mb_alltimers_ended")
self.assertEventNotCalled("multiball_mb_alltimers_shoot_again_ended")
# drain one ball after grace period has ended
self.advance_time_and_run(5)
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(1)
# mb should end
self.assertEventCalled("multiball_mb_alltimers_ended")
def testShootAgainModeEnd(self):
self.fill_troughs()
self.start_game()
self.assertAvailableBallsOnPlayfield(1)
self.mock_event("multiball_mb_mode5_ended")
self.mock_event("multiball_mb_mode5_shoot_again_ended")
self.mock_event("multiball_mb_mode5_grace_period")
self.mock_event("multiball_mb_mode5_hurry_up")
        # start Mode5
self.post_event("start_mode5")
# start mb 30s shoot again, 10s hurry up, 5s grace
self.post_event("mb_mode5_start")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(2)
self.assertEventNotCalled("multiball_mb_mode5_ended")
self.assertEventNotCalled("multiball_mb_mode5_shoot_again_ended")
self.assertEventNotCalled("multiball_mb_mode5_grace_period")
self.assertEventNotCalled("multiball_mb_mode5_hurry_up")
        # stop Mode5
self.post_event("stop_mode5")
self.advance_time_and_run(5)
self.assertEventNotCalled("multiball_mb_mode5_ended")
self.assertEventCalled("multiball_mb_mode5_shoot_again_ended")
self.assertEventCalled("multiball_mb_mode5_grace_period")
self.assertEventCalled("multiball_mb_mode5_hurry_up")
# drain one ball
self.drain_one_ball()
self.advance_time_and_run(5)
# shoot again should not bring it back
self.assertAvailableBallsOnPlayfield(1)
self.assertEventCalled("multiball_mb_mode5_ended")
def testShootAgainModeEndNoGracePeriodOrHurryUp(self):
self.fill_troughs()
self.start_game()
self.assertAvailableBallsOnPlayfield(1)
self.mock_event("multiball_mb_mode5_lean_ended")
self.mock_event("multiball_mb_mode5_lean_shoot_again_ended")
self.mock_event("multiball_mb_mode5_lean_grace_period")
self.mock_event("multiball_mb_mode5_lean_hurry_up")
        # start Mode5
self.post_event("start_mode5")
# start mb 30s shoot again
self.post_event("mb_mode5_lean_start")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(2)
self.assertEventNotCalled("multiball_mb_mode5_lean_ended")
self.assertEventNotCalled("multiball_mb_mode5_lean_shoot_again_ended")
self.assertEventNotCalled("multiball_mb_mode5_lean_grace_period")
self.assertEventNotCalled("multiball_mb_mode5_lean_hurry_up")
        # stop Mode5
self.post_event("stop_mode5")
self.advance_time_and_run(5)
self.assertEventNotCalled("multiball_mb_mode5_lean_ended")
self.assertEventCalled("multiball_mb_mode5_lean_shoot_again_ended")
self.assertEventNotCalled("multiball_mb_mode5_lean_grace_period")
self.assertEventNotCalled("multiball_mb_mode5_lean_hurry_up")
# drain one ball
self.drain_one_ball()
self.advance_time_and_run(5)
# shoot again should not bring it back
self.assertAvailableBallsOnPlayfield(1)
self.assertEventCalled("multiball_mb_mode5_lean_ended")
self.assertEventNotCalled("multiball_mb_mode5_lean_grace_period")
self.assertEventNotCalled("multiball_mb_mode5_lean_hurry_up")
def testAddABallSaver(self):
self.fill_troughs()
self.start_game()
self.assertAvailableBallsOnPlayfield(1)
self.mock_event("multiball_mb_add_a_ball_timers_ended")
self.mock_event("multiball_mb_add_a_ball_timers_shoot_again_ended")
self.mock_event("multiball_mb_add_a_ball_timers_grace_period")
self.mock_event("multiball_mb_add_a_ball_timers_hurry_up")
self.mock_event("ball_save_mb_add_a_ball_timers_timer_start")
self.mock_event("ball_save_mb_add_a_ball_timers_add_a_ball_timer_start")
# start mb 30s shoot again, 10s hurry up, 5s grace
self.post_event("mb_add_a_ball_timers_start")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(2)
self.assertEventCalled("ball_save_mb_add_a_ball_timers_timer_start")
# end ball save
self.advance_time_and_run(35)
self.assertEventCalled("multiball_mb_add_a_ball_timers_shoot_again_ended")
        # add a ball - ball save 20, hurry up 5, grace 10
self.post_event("add_ball")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(3)
self.assertEventCalled("ball_save_mb_add_a_ball_timers_timer_start",1)
self.assertEventCalled("ball_save_mb_add_a_ball_timers_add_a_ball_timer_start")
self.assertEventNotCalled("multiball_mb_add_a_ball_timers_ended")
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(3)
        # hurry up
self.advance_time_and_run(7)
self.assertEventCalled("multiball_mb_add_a_ball_timers_hurry_up")
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(3)
        # grace period
self.assertEventCalled("multiball_mb_add_a_ball_timers_grace_period")
self.drain_one_ball()
self.advance_time_and_run(10)
self.assertAvailableBallsOnPlayfield(3)
self.assertEventCalled("multiball_mb_add_a_ball_timers_shoot_again_ended")
        # drain out and mb should end
self.drain_one_ball()
self.drain_one_ball()
self.advance_time_and_run(5)
self.assertEventCalled("multiball_mb_add_a_ball_timers_ended")
def testAddABallSaverDuringShootAgain(self):
self.fill_troughs()
self.start_game()
self.assertAvailableBallsOnPlayfield(1)
self.mock_event("ball_save_mb_add_a_ball_timers_timer_start")
self.mock_event("ball_save_mb_add_a_ball_timers_add_a_ball_timer_start")
# start mb 30s shoot again, 10s hurry up, 5s grace
self.post_event("mb_add_a_ball_timers_start")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(2)
self.assertEventCalled("ball_save_mb_add_a_ball_timers_timer_start")
# add a ball
self.post_event("add_ball")
self.advance_time_and_run(5)
self.assertAvailableBallsOnPlayfield(3)
self.assertEventCalled("ball_save_mb_add_a_ball_timers_timer_start", 1)
self.assertEventNotCalled("ball_save_mb_add_a_ball_timers_add_a_ball_timer_start")
|
missionpinball/mpf
|
mpf/tests/test_MultiBall.py
|
Python
|
mit
| 45,871
|
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import helper
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/sheets.googleapis.com-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = os.path.expanduser('~') + '/.credentials/client_secret.json'
APPLICATION_NAME = 'Portfolio Analyzer'
def _get_credentials():
"""
Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        # tools.run() was removed in oauth2client 4.0; run_flow() is the
        # maintained equivalent.
        credentials = tools.run_flow(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def _get_service():
"""
Creates a Google Sheets API service object.
"""
credentials = _get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
return service
def parse_spreadsheet(sheetID, indices):
"""
Parses a given spreadsheet.
sheetID -- the google spreadsheet ID
indices -- dictionary mapping the six required fields to their indices
"""
if not sheetID or not indices:
raise ValueError("Invalid input arguments")
titles = _get_titles(sheetID)
rows = []
for title in titles:
data = _parse_worksheet(sheetID, title, indices)
if data:
rows.extend(data)
return rows
def _parse_worksheet(sheetID, title, indices):
"""
Gets the data for a given worksheet.
sheetID -- the google spreadsheet ID
title -- the worksheet title
indices -- dictionary mapping the six required fields to their indices
"""
result = _get_worksheet(sheetID, title)
if result:
values = result.get('values', [])
return helper.get_rows(values, indices)
def _get_titles(sheetID):
    """
    Gets the titles of the first two worksheets for the given spreadsheetId.
    """
    service = _get_service()
    result = service.spreadsheets().get(
        spreadsheetId=sheetID, fields='sheets.properties').execute()
    titles = [d['properties']['title'] for d in result.get('sheets', [])]
    # Only the first two worksheets are parsed.
    return titles[0:2]
def _get_worksheet(sheetID, title):
service = _get_service()
range_name = title + '!A2:H'
    try:
        return service.spreadsheets().values().get(
            spreadsheetId=sheetID, range=range_name).execute()
    except Exception:
        # A missing or unreadable worksheet is treated as having no data.
        return None
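# Hedged usage sketch (not part of the original module). The sheet ID and the
# column indices below are illustrative assumptions; the actual required field
# names depend on helper.get_rows().
#     indices = {'date': 0, 'symbol': 1, 'type': 2,
#                'price': 3, 'shares': 4, 'fees': 5}  # hypothetical mapping
#     rows = parse_spreadsheet('your-spreadsheet-id', indices)
#     for row in rows:
#         print(row)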
|
vikramraman/portfolio-analyzer
|
analyzer/sheetsapi.py
|
Python
|
mit
| 3,414
|
# -*- coding: utf-8 -*-
from gluon import *
from s3 import S3CustomController
THEME = "RedHat"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
output = {}
# Allow editing of page content from browser using CMS module
if current.deployment_settings.has_module("cms"):
system_roles = current.auth.get_system_roles()
ADMIN = system_roles.ADMIN in current.session.s3.roles
s3db = current.s3db
table = s3db.cms_post
ltable = s3db.cms_post_module
module = "default"
resource = "index"
query = (ltable.module == module) & \
((ltable.resource == None) | \
(ltable.resource == resource)) & \
(ltable.post_id == table.id) & \
(table.deleted != True)
item = current.db(query).select(table.body,
table.id,
limitby=(0, 1)).first()
if item:
if ADMIN:
item = DIV(XML(item.body),
BR(),
A(current.T("Edit"),
_href=URL(c="cms", f="post",
args=[item.id, "update"]),
_class="action-btn"))
else:
item = DIV(XML(item.body))
elif ADMIN:
if current.response.s3.crud.formstyle == "bootstrap":
_class = "btn"
else:
_class = "action-btn"
item = A(current.T("Edit"),
_href=URL(c="cms", f="post", args="create",
vars={"module": module,
"resource": resource
}),
_class="%s cms-edit" % _class)
else:
item = ""
else:
item = ""
output["item"] = item
self._view(THEME, "index.html")
current.response.s3.stylesheets.append("../themes/CERT/homepage.css")
T = current.T
# @ToDo: Add event/human_resource - but this requires extending event_human_resource to link to event.
menus = [{"title":T("Incidents"),
"icon":"bolt",
"description":T("Manage communication for specific incidents"),
"module":"deploy",
"function":"mission",
"buttons":[{"args":"summary",
"icon":"list",
"label":T("View"),
},
{"args":"create",
"icon":"plus-sign",
"label":T("Create"),
}]
},
{"title":T("Messaging"),
"icon":"envelope-alt",
"description":T("Send messages to individuals and groups"),
"module":"msg",
"function":"index",
"args":None,
"buttons":[{"function":"inbox",
"args":None,
"icon":"inbox",
"label":T("Inbox"),
},
{"function":"compose",
"args":None,
"icon":"plus-sign",
"label":T("Compose"),
}]
},
{"title":T("Staff"),
"icon":"group",
"description":T("The staff of your Organization and your partners"),
"module":"deploy",
"function":"human_resource",
"buttons":[{"args":"summary",
"icon":"list",
"label":T("View"),
},
{"args":"create",
"icon":"plus-sign",
"label":T("Create"),
}]
},
{"title":T("Offices"),
"icon":"building",
"description":T("Your and your partners' offices around the world"),
"module":"org",
"function":"office",
"buttons":[{"args":"summary",
"icon":"list",
"label":T("View"),
},
{"args":"create",
"icon":"plus-sign",
"label":T("Create"),
}]
},
]
        return dict(item=item,
                    menus=menus)
# END =========================================================================
|
flavour/RedHat
|
modules/templates/RedHat/controllers.py
|
Python
|
mit
| 5,254
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/views.ui'
#
# Created: Fri Sep 4 01:29:58 2015
# by: PyQt4 UI code generator 4.11.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_viewsWidget(object):
def setupUi(self, viewsWidget):
viewsWidget.setObjectName(_fromUtf8("viewsWidget"))
viewsWidget.resize(500, 490)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(6)
sizePolicy.setVerticalStretch(6)
sizePolicy.setHeightForWidth(viewsWidget.sizePolicy().hasHeightForWidth())
viewsWidget.setSizePolicy(sizePolicy)
viewsWidget.setMinimumSize(QtCore.QSize(500, 0))
self.verticalLayout = QtGui.QVBoxLayout(viewsWidget)
self.verticalLayout.setMargin(6)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.currentViews = QtGui.QWidget(viewsWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.currentViews.sizePolicy().hasHeightForWidth())
self.currentViews.setSizePolicy(sizePolicy)
self.currentViews.setObjectName(_fromUtf8("currentViews"))
self.currentViewsLayout = QtGui.QGridLayout(self.currentViews)
self.currentViewsLayout.setMargin(0)
self.currentViewsLayout.setObjectName(_fromUtf8("currentViewsLayout"))
self.tleft = DCurrentView(self.currentViews)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(6)
sizePolicy.setVerticalStretch(6)
sizePolicy.setHeightForWidth(self.tleft.sizePolicy().hasHeightForWidth())
self.tleft.setSizePolicy(sizePolicy)
self.tleft.setAcceptDrops(True)
self.tleft.setFrameShape(QtGui.QFrame.StyledPanel)
self.tleft.setFrameShadow(QtGui.QFrame.Raised)
self.tleft.setObjectName(_fromUtf8("tleft"))
self.tleftLayout = QtGui.QHBoxLayout(self.tleft)
self.tleftLayout.setMargin(0)
self.tleftLayout.setObjectName(_fromUtf8("tleftLayout"))
self.tleftLabel = Marker(self.tleft)
font = QtGui.QFont()
font.setPointSize(14)
self.tleftLabel.setFont(font)
self.tleftLabel.setStyleSheet(_fromUtf8("color: rgb(102, 102, 102);"))
self.tleftLabel.setAlignment(QtCore.Qt.AlignCenter)
self.tleftLabel.setObjectName(_fromUtf8("tleftLabel"))
self.tleftLayout.addWidget(self.tleftLabel)
self.currentViewsLayout.addWidget(self.tleft, 0, 0, 1, 1)
self.bleft = DCurrentView(self.currentViews)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(6)
sizePolicy.setVerticalStretch(4)
sizePolicy.setHeightForWidth(self.bleft.sizePolicy().hasHeightForWidth())
self.bleft.setSizePolicy(sizePolicy)
self.bleft.setAcceptDrops(True)
self.bleft.setFrameShape(QtGui.QFrame.StyledPanel)
self.bleft.setFrameShadow(QtGui.QFrame.Raised)
self.bleft.setObjectName(_fromUtf8("bleft"))
self.bleftLayout = QtGui.QHBoxLayout(self.bleft)
self.bleftLayout.setMargin(0)
self.bleftLayout.setObjectName(_fromUtf8("bleftLayout"))
self.bleftLabel = Marker(self.bleft)
font = QtGui.QFont()
font.setPointSize(14)
self.bleftLabel.setFont(font)
self.bleftLabel.setStyleSheet(_fromUtf8("color: rgb(102, 102, 102);\n"
""))
self.bleftLabel.setAlignment(QtCore.Qt.AlignCenter)
self.bleftLabel.setObjectName(_fromUtf8("bleftLabel"))
self.bleftLayout.addWidget(self.bleftLabel)
self.currentViewsLayout.addWidget(self.bleft, 1, 0, 1, 1)
self.bright = DCurrentView(self.currentViews)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(4)
sizePolicy.setVerticalStretch(4)
sizePolicy.setHeightForWidth(self.bright.sizePolicy().hasHeightForWidth())
self.bright.setSizePolicy(sizePolicy)
self.bright.setAcceptDrops(True)
self.bright.setFrameShape(QtGui.QFrame.StyledPanel)
self.bright.setFrameShadow(QtGui.QFrame.Raised)
self.bright.setObjectName(_fromUtf8("bright"))
self.brightLayout = QtGui.QHBoxLayout(self.bright)
self.brightLayout.setMargin(0)
self.brightLayout.setObjectName(_fromUtf8("brightLayout"))
self.brightLabel = Marker(self.bright)
font = QtGui.QFont()
font.setPointSize(14)
self.brightLabel.setFont(font)
self.brightLabel.setStyleSheet(_fromUtf8("color: rgb(102, 102, 102);"))
self.brightLabel.setAlignment(QtCore.Qt.AlignCenter)
self.brightLabel.setObjectName(_fromUtf8("brightLabel"))
self.brightLayout.addWidget(self.brightLabel)
self.currentViewsLayout.addWidget(self.bright, 1, 1, 1, 1)
self.tright = DCurrentView(self.currentViews)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(4)
sizePolicy.setVerticalStretch(6)
sizePolicy.setHeightForWidth(self.tright.sizePolicy().hasHeightForWidth())
self.tright.setSizePolicy(sizePolicy)
self.tright.setAcceptDrops(True)
self.tright.setFrameShape(QtGui.QFrame.StyledPanel)
self.tright.setFrameShadow(QtGui.QFrame.Raised)
self.tright.setObjectName(_fromUtf8("tright"))
self.trightLayout = QtGui.QHBoxLayout(self.tright)
self.trightLayout.setMargin(0)
self.trightLayout.setObjectName(_fromUtf8("trightLayout"))
self.trightLabel = Marker(self.tright)
font = QtGui.QFont()
font.setPointSize(14)
self.trightLabel.setFont(font)
self.trightLabel.setStyleSheet(_fromUtf8("color: rgb(102, 102, 102);"))
self.trightLabel.setAlignment(QtCore.Qt.AlignCenter)
self.trightLabel.setObjectName(_fromUtf8("trightLabel"))
self.trightLayout.addWidget(self.trightLabel)
self.currentViewsLayout.addWidget(self.tright, 0, 1, 1, 1)
self.verticalLayout.addWidget(self.currentViews)
self.buttonViews = QtGui.QHBoxLayout()
self.buttonViews.setObjectName(_fromUtf8("buttonViews"))
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
self.buttonViews.addItem(spacerItem)
self.four = QtGui.QPushButton(viewsWidget)
self.four.setMinimumSize(QtCore.QSize(44, 44))
self.four.setStyleSheet(_fromUtf8("QPushButton{ \n"
" background-color: rgb(255, 255, 255);\n"
" border-style: outset;\n"
" border-width: 1px;\n"
" border-radius: 6px;\n"
" border-color: rgb(193, 193, 193);\n"
" border-style: solid;\n"
" padding: 6px;\n"
" \n"
"}\n"
"QPushButton:pressed { \n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-radius: 6px;\n"
" background-color: rgb(48, 131, 251);\n"
" color: rgb(255, 255, 255);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" border-color: rgb(164, 205, 255);\n"
" border-radius: 6px;\n"
" border-width: 3px;\n"
" border-style: solid;\n"
"}"))
self.four.setCheckable(True)
self.four.setAutoExclusive(True)
self.four.setObjectName(_fromUtf8("four"))
self.buttonViews.addWidget(self.four)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
self.buttonViews.addItem(spacerItem1)
self.vert = QtGui.QPushButton(viewsWidget)
self.vert.setMinimumSize(QtCore.QSize(44, 44))
self.vert.setStyleSheet(_fromUtf8("QPushButton{ \n"
" background-color: rgb(255, 255, 255);\n"
" border-style: outset;\n"
" border-width: 1px;\n"
" border-radius: 6px;\n"
" border-color: rgb(193, 193, 193);\n"
" border-style: solid;\n"
" padding: 6px;\n"
" \n"
"}\n"
"QPushButton:pressed { \n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-radius: 6px;\n"
" background-color: rgb(48, 131, 251);\n"
" color: rgb(255, 255, 255);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" border-color: rgb(164, 205, 255);\n"
" border-radius: 6px;\n"
" border-width: 3px;\n"
" border-style: solid;\n"
"}"))
self.vert.setCheckable(True)
self.vert.setAutoExclusive(True)
self.vert.setObjectName(_fromUtf8("vert"))
self.buttonViews.addWidget(self.vert)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
self.buttonViews.addItem(spacerItem2)
self.hor = QtGui.QPushButton(viewsWidget)
self.hor.setMinimumSize(QtCore.QSize(44, 44))
self.hor.setStyleSheet(_fromUtf8("QPushButton{ \n"
" background-color: rgb(255, 255, 255);\n"
" border-style: outset;\n"
" border-width: 1px;\n"
" border-radius: 6px;\n"
" border-color: rgb(193, 193, 193);\n"
" border-style: solid;\n"
" padding: 6px;\n"
" \n"
"}\n"
"QPushButton:pressed { \n"
" border-style: solid;\n"
" border-width: 1px;\n"
" border-radius: 6px;\n"
" background-color: rgb(48, 131, 251);\n"
" color: rgb(255, 255, 255);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" border-color: rgb(164, 205, 255);\n"
" border-radius: 6px;\n"
" border-width: 3px;\n"
" border-style: solid;\n"
"}"))
self.hor.setCheckable(True)
self.hor.setChecked(True)
self.hor.setAutoExclusive(True)
self.hor.setObjectName(_fromUtf8("hor"))
self.buttonViews.addWidget(self.hor)
spacerItem3 = QtGui.QSpacerItem(40, 10, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
self.buttonViews.addItem(spacerItem3)
self.verticalLayout.addLayout(self.buttonViews)
self.viewsGroup = FocusGroupBox(viewsWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.viewsGroup.sizePolicy().hasHeightForWidth())
self.viewsGroup.setSizePolicy(sizePolicy)
self.viewsGroup.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setPointSize(11)
self.viewsGroup.setFont(font)
self.viewsGroup.setCheckable(True)
self.viewsGroup.setObjectName(_fromUtf8("viewsGroup"))
self.viewsGroupLayout = QtGui.QHBoxLayout(self.viewsGroup)
self.viewsGroupLayout.setSpacing(0)
self.viewsGroupLayout.setMargin(0)
self.viewsGroupLayout.setObjectName(_fromUtf8("viewsGroupLayout"))
self.availableViews = DAvailableView(self.viewsGroup)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.availableViews.sizePolicy().hasHeightForWidth())
self.availableViews.setSizePolicy(sizePolicy)
self.availableViews.setAcceptDrops(True)
self.availableViews.setFrameShape(QtGui.QFrame.NoFrame)
self.availableViews.setObjectName(_fromUtf8("availableViews"))
self.availableViewsLayout = QtGui.QHBoxLayout(self.availableViews)
self.availableViewsLayout.setMargin(1)
self.availableViewsLayout.setObjectName(_fromUtf8("availableViewsLayout"))
self.viewsGroupLayout.addWidget(self.availableViews)
self.verticalLayout.addWidget(self.viewsGroup)
self.retranslateUi(viewsWidget)
QtCore.QObject.connect(self.viewsGroup, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.availableViews.setVisible)
QtCore.QMetaObject.connectSlotsByName(viewsWidget)
def retranslateUi(self, viewsWidget):
viewsWidget.setWindowTitle(_translate("viewsWidget", "Form", None))
self.tleft.setAccessibleName(_translate("viewsWidget", "MV", None))
self.tleftLabel.setText(_translate("viewsWidget", "Drag any view here", None))
self.bleft.setAccessibleName(_translate("viewsWidget", "MV", None))
self.bleftLabel.setText(_translate("viewsWidget", "Drag any view here", None))
self.bright.setAccessibleName(_translate("viewsWidget", "MV", None))
self.brightLabel.setText(_translate("viewsWidget", "Drag any view here", None))
self.tright.setAccessibleName(_translate("viewsWidget", "MV", None))
self.trightLabel.setText(_translate("viewsWidget", "Drag any view here", None))
self.four.setText(_translate("viewsWidget", "┼", None))
self.vert.setText(_translate("viewsWidget", "│", None))
self.hor.setText(_translate("viewsWidget", "─", None))
self.viewsGroup.setAccessibleName(_translate("viewsWidget", "AV", None))
self.viewsGroup.setTitle(_translate("viewsWidget", "Available Views", None))
self.availableViews.setAccessibleName(_translate("viewsWidget", "AV", None))
from aui.mi.visual import FocusGroupBox
from aui.utilities.DropView import DCurrentView, Marker, DAvailableView
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
viewsWidget = QtGui.QWidget()
ui = Ui_viewsWidget()
ui.setupUi(viewsWidget)
viewsWidget.show()
sys.exit(app.exec_())
|
argenortega/AUI
|
aui/gui/views/ui_views.py
|
Python
|
mit
| 14,083
|
from struct import Struct
from collections import namedtuple
from shapy.framework.netlink.message import Attr
from shapy.framework.utils import nl_ticks2us, nl_us2ticks
from shapy.framework.netlink.constants import *
class NetemOptions(Attr):
#struct tc_netem_qopt {
# __u32 latency; /* added delay (us) */
# __u32 limit; /* fifo limit (packets) */
# __u32 loss; /* random packet loss (0=none ~0=100%) */
# __u32 gap; /* re-ordering gap (0 for none) */
# __u32 duplicate; /* random packet dup (0=none ~0=100%) */
# __u32 jitter; /* random jitter in latency (us) */
#};
data_format = Struct("6I")
data_struct = namedtuple('tc_netem_qopt',
"latency limit loss gap duplicate jitter")
@classmethod
def unpack(cls, data):
attr, rest = Attr.unpack(data)
opts = cls.data_struct._make(cls.data_format.unpack(attr.payload))
opts = opts._replace(latency=nl_ticks2us(opts.latency))
return cls(*opts), rest
def __init__(self, latency, limit=1000, loss=0, gap=0, duplicate=0, jitter=0):
"""Latency is in microseconds [us]"""
latency_ticks = nl_us2ticks(latency)
jitter_ticks = nl_us2ticks(jitter)
data = self.data_format.pack(latency_ticks, limit, loss,
gap, duplicate, jitter_ticks)
Attr.__init__(self, TCA_OPTIONS, data)
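# Hedged usage sketch (not part of the original module): build the netem qdisc
# options attribute for 100 ms of added delay with 10 ms of jitter. Values are
# microseconds, per the struct comment above; nl_us2ticks() converts them to
# kernel ticks inside __init__.
#     opts = NetemOptions(latency=100000, jitter=10000)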
|
praus/shapy
|
shapy/framework/netlink/netem.py
|
Python
|
mit
| 1,477
|
# By Justin Walgran
# Copyright (c) 2012 Azavea, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import unittest
import os
import shutil
import tempfile
from helpers import create_file_with_content
from blend import YUICompressorMinifier, Resource
class TestYUICompressorMinifier(unittest.TestCase):
def setUp(self):
self.test_env_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_env_dir)
def make_an_empty_js_file(self):
test_file_path = os.path.join(self.test_env_dir, 'test.js')
create_file_with_content(test_file_path, '')
return test_file_path
def make_a_js_file(self, content='var answer = 42;'):
test_file_path = os.path.join(self.test_env_dir, 'test.js')
create_file_with_content(test_file_path, content)
return test_file_path
def make_a_minified_js_file(self, separator='.'):
test_file_path = os.path.join(self.test_env_dir, 'test%smin.js' % separator)
create_file_with_content(test_file_path, 'var a=42;')
return test_file_path
def test_analysis_fails_when_lib_dir_is_not_found(self):
invalid_lib_path = '/some/invalid/path'
yuic = YUICompressorMinifier({'lib_path': invalid_lib_path})
test_resource = Resource(self.make_a_js_file())
minification = yuic.minify(test_resource)
self.assertFalse(minification.good, 'Expected the minification to be bad since the tools could not be found')
        self.assertEqual(1, len(minification.errors))
        self.assertEqual('A YUI Compressor .jar file could not be found in %s.' % invalid_lib_path, minification.errors[0])
def test_minifying_an_empty_resource_returns_empty_minification_content(self):
test_resource = Resource(self.make_an_empty_js_file())
yuic = YUICompressorMinifier()
minification = yuic.minify(test_resource)
self.assertTrue(minification.good)
self.assertEqual('', minification.content)
def test_minifying_an_already_minified_resource_returns_a_message_and_unmodified_content(self):
test_resource = Resource(self.make_a_minified_js_file())
self.assertTrue(test_resource.minified)
yuic = YUICompressorMinifier()
minification = yuic.minify(test_resource)
self.assertTrue(minification.good)
self.assertEqual(test_resource.content, minification.content)
self.assertEqual('The resource %s is already minified.' % test_resource.path_to_file,
minification.errors_warnings_and_messages_as_string)
def test_compressor(self):
test_resource = Resource(self.make_a_js_file(
content='var answer = 42;\nvar question = "what is " +\n "6 times 7";'))
yuic = YUICompressorMinifier()
minification = yuic.minify(test_resource)
self.assertTrue(minification.good)
self.assertEqual('var answer=42;var question="what is 6 times 7";', minification.content)
def test_compressor_failure(self):
test_resource = Resource(self.make_a_js_file(
content='var obj = { function: "failure" }')) # 'function' is not a legal property name
yuic = YUICompressorMinifier()
minification = yuic.minify(test_resource)
self.assertFalse(minification.good)
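# Hedged note (not part of the original module): this is a standard unittest
# suite, so under the assumed package layout it can be run with e.g.
#     python -m unittest blend.test.TestYUICompressorMinifier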
|
azavea/blend
|
blend/test/TestYUICompressorMinifier.py
|
Python
|
mit
| 4,311
|
def powers(x, n0, n1=None):
    """Return [x**n0, ..., x**n1]; powers(x, n) is shorthand for powers(x, 1, n)."""
if n1 is None:
n1 = n0
n0 = 1
if n0 == 0:
r = 1
elif n0 == 1:
r = x
else:
r = x ** n0
rs = [r]
for _ in range(n1 - n0):
r *= x
rs.append(r)
return rs
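# Hedged illustration (not part of the original module):
#     powers(2, 3)     -> [2, 4, 8]       # x**1 .. x**3
#     powers(2, 0, 3)  -> [1, 2, 4, 8]    # x**0 .. x**3
#     powers(3, 2, 4)  -> [9, 27, 81]     # x**2 .. x**4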
|
frostburn/frostsynth
|
frostsynth/sequence.py
|
Python
|
mit
| 262
|
import datetime
from .basic import Base
from .tables import *
class LoginInfo(Base):
__table__ = login_info
class User(Base):
__table__ = wbuser
def __init__(self, uid):
self.uid = uid
class SeedIds(Base):
__table__ = seed_ids
class KeyWords(Base):
__table__ = keywords
class KeyWordsTimerange(Base):
__table__ = keywords_timerange
class WeiboData(Base):
__table__ = weibo_data
def __repr__(self):
return 'weibo url:{};mid:{};uid:{};weibo content:{};' \
'location:{};create_time:{};weibo_img:{};weibo_img_path:{};' \
'weibo_video:{};repost_num:{};comment_num:{};praise_num:{};' \
'is_origin:{};device:{}'.format(
self.weibo_url, self.weibo_id, self.uid, self.weibo_cont,
self.weibo_location, str(self.create_time), self.weibo_img, self.weibo_img_path,
self.weibo_video, str(self.repost_num), str(self.comment_num), str(self.praise_num),
str(self.is_origin), self.device)
class KeywordsWbdata(Base):
__table__ = keywords_wbdata
class KeywordsTimerangeWbdata(Base):
__table__ = keywords_wbdata_timerange
# class Datastreams(Base):
# __table__ = datastreams
class WeiboComment(Base):
__table__ = weibo_comment
def __repr__(self):
return 'weibo_id:{},comment_id:{},comment_cont:{}'.format(self.weibo_id, self.comment_id, self.comment_cont)
class WeiboPraise(Base):
__table__ = weibo_praise
def __repr__(self):
return 'user_id:{},weibo_id:{}'.format(self.user_id, self.weibo_id)
class WeiboRepost(Base):
__table__ = weibo_repost
def __repr__(self):
return 'id:{},user_id:{},user_name:{},parent_user_id:{},parent_user_name:{}, weibo_url:{},weibo_id:{},' \
'repost_time:{},repost_cont:{}'.format(self.id, self.user_id, self.user_name, self.parent_user_id,
self.parent_user_name, self.weibo_url, self.weibo_id,
self.repost_time, self.repost_cont)
class UserRelation(Base):
__table__ = user_relation
def __init__(self, uid, other_id, type, from_where, crawl_time=True):
self.user_id = uid
self.follow_or_fans_id = other_id
self.type = type
self.from_where = from_where
if crawl_time:
self.crawl_time = datetime.datetime.now()
else:
self.crawl_time = None
def __repr__(self):
return 'user_id:{},follow_or_fans_id:{},type:{},from_where:{}'.format(self.user_id, self.follow_or_fans_id,
self.type, self.from_where)
class WeiboDialogue(Base):
__table__ = weibo_dialogue
def __repr__(self):
return 'weibo_id:{},dialogue_id:{},dialogue_cont:{}'.format(self.weibo_id, self.dialogue_id, self.dialogue_cont)
class HomeCollections(Base):
__table__ = home_collections
class HomeIds(Base):
__table__ = home_ids
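# Hedged usage sketch (not part of the original module); the argument values
# below are hypothetical:
#     relation = UserRelation(uid='12345', other_id='67890',
#                             type=1, from_where='fans_page')
#     print(relation)  # formatted by the __repr__ defined above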
|
yzsz/weibospider
|
db/models.py
|
Python
|
mit
| 3,047
|
from itertools import product
import pytest
from hatch.plugin.manager import PluginManager
from hatch.project.config import ProjectConfig
from hatch.project.env import RESERVED_OPTIONS
from hatch.utils.structures import EnvVars
from hatch.version.scheme.standard import StandardScheme
from hatchling.version.source.regex import RegexSource
ARRAY_OPTIONS = [o for o, t in RESERVED_OPTIONS.items() if t is list]
BOOLEAN_OPTIONS = [o for o, t in RESERVED_OPTIONS.items() if t is bool]
MAPPING_OPTIONS = [o for o, t in RESERVED_OPTIONS.items() if t is dict]
STRING_OPTIONS = [o for o, t in RESERVED_OPTIONS.items() if t is str and o != 'matrix-name-format']
def construct_matrix_data(env_name, config, overrides=None):
config = dict(config[env_name])
config.pop('overrides', None)
matrices = config.pop('matrix')
final_matrix_name_format = config.pop('matrix-name-format', '{value}')
    # e.g. matrices == [{'version': ['9000']}, {'feature': ['bar']}]
envs = {}
for matrix in matrices:
matrix = dict(matrix)
variables = {}
python_selected = False
for variable in ('py', 'python'):
if variable in matrix:
python_selected = True
variables[variable] = matrix.pop(variable)
break
variables.update(matrix)
for result in product(*variables.values()):
variable_values = dict(zip(variables, result))
env_name_parts = []
for j, (variable, value) in enumerate(variable_values.items()):
if j == 0 and python_selected:
env_name_parts.append(value if value.startswith('py') else f'py{value}')
else:
env_name_parts.append(final_matrix_name_format.format(variable=variable, value=value))
new_env_name = '-'.join(env_name_parts)
if env_name != 'default':
new_env_name = f'{env_name}.{new_env_name}'
envs[new_env_name] = variable_values
config.update(overrides or {})
config.setdefault('type', 'virtual')
return {'config': config, 'envs': envs}
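# Hedged illustration (not part of the original tests): with
#     config = {'foo': {'matrix': [{'version': ['9000', '3.14']}]}}
# construct_matrix_data('foo', config) returns
#     {'config': {'type': 'virtual'},
#      'envs': {'foo.9000': {'version': '9000'},
#               'foo.3.14': {'version': '3.14'}}}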
class TestEnv:
def test_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.env` must be a table'):
_ = ProjectConfig(isolation, {'env': 9000}).env
def test_default(self, isolation):
project_config = ProjectConfig(isolation, {})
assert project_config.env == project_config.env == {}
class TestEnvCollectors:
def test_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.env.collectors` must be a table'):
_ = ProjectConfig(isolation, {'env': {'collectors': 9000}}).env_collectors
def test_collector_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.env.collectors.foo` must be a table'):
_ = ProjectConfig(isolation, {'env': {'collectors': {'foo': 9000}}}).env_collectors
def test_default(self, isolation):
project_config = ProjectConfig(isolation, {})
assert project_config.env_collectors == project_config.env_collectors == {'default': {}}
def test_defined(self, isolation):
project_config = ProjectConfig(isolation, {'env': {'collectors': {'foo': {'bar': {'baz': 9000}}}}})
assert project_config.env_collectors == {'default': {}, 'foo': {'bar': {'baz': 9000}}}
assert list(project_config.env_collectors) == ['default', 'foo']
class TestEnvs:
def test_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs` must be a table'):
_ = ProjectConfig(isolation, {'envs': 9000}, PluginManager()).envs
def test_config_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo` must be a table'):
_ = ProjectConfig(isolation, {'envs': {'foo': 9000}}, PluginManager()).envs
def test_unknown_collector(self, isolation):
with pytest.raises(ValueError, match='Unknown environment collector: foo'):
_ = ProjectConfig(isolation, {'env': {'collectors': {'foo': {}}}}, PluginManager()).envs
def test_unknown_template(self, isolation):
with pytest.raises(
ValueError, match='Field `tool.hatch.envs.foo.template` refers to an unknown environment `bar`'
):
_ = ProjectConfig(isolation, {'envs': {'foo': {'template': 'bar'}}}, PluginManager()).envs
def test_default_undefined(self, isolation):
project_config = ProjectConfig(isolation, {}, PluginManager())
assert project_config.envs == project_config.envs == {'default': {'type': 'virtual'}}
assert project_config.matrices == project_config.matrices == {}
def test_default_partially_defined(self, isolation):
env_config = {'default': {'option': True}}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
assert project_config.envs == {'default': {'option': True, 'type': 'virtual'}}
def test_default_defined(self, isolation):
env_config = {'default': {'type': 'foo'}}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
assert project_config.envs == {'default': {'type': 'foo'}}
def test_basic(self, isolation):
env_config = {'foo': {'option': True}}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
assert project_config.envs == {'default': {'type': 'virtual'}, 'foo': {'option': True, 'type': 'virtual'}}
def test_basic_override(self, isolation):
env_config = {'foo': {'type': 'baz'}}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
assert project_config.envs == {'default': {'type': 'virtual'}, 'foo': {'type': 'baz'}}
def test_multiple_inheritance(self, isolation):
env_config = {
'foo': {'option1': 'foo'},
'bar': {'template': 'foo', 'option2': 'bar'},
'baz': {'template': 'bar', 'option3': 'baz'},
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
assert project_config.envs == {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'option1': 'foo'},
'bar': {'type': 'virtual', 'option1': 'foo', 'option2': 'bar'},
'baz': {'type': 'virtual', 'option1': 'foo', 'option2': 'bar', 'option3': 'baz'},
}
def test_circular_inheritance(self, isolation):
with pytest.raises(
ValueError, match='Circular inheritance detected for field `tool.hatch.envs.*.template`: foo -> bar -> foo'
):
_ = ProjectConfig(
isolation, {'envs': {'foo': {'template': 'bar'}, 'bar': {'template': 'foo'}}}, PluginManager()
).envs
def test_scripts_inheritance(self, isolation):
env_config = {
'default': {'scripts': {'cmd1': 'bar', 'cmd2': 'baz'}},
'foo': {'scripts': {'cmd1': 'foo'}},
'bar': {'template': 'foo', 'scripts': {'cmd3': 'bar'}},
'baz': {},
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
assert project_config.envs == {
'default': {'type': 'virtual', 'scripts': {'cmd1': 'bar', 'cmd2': 'baz'}},
'foo': {'type': 'virtual', 'scripts': {'cmd1': 'foo', 'cmd2': 'baz'}},
'bar': {'type': 'virtual', 'scripts': {'cmd1': 'foo', 'cmd2': 'baz', 'cmd3': 'bar'}},
'baz': {'type': 'virtual', 'scripts': {'cmd1': 'bar', 'cmd2': 'baz'}},
}
def test_matrices_not_array(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo.matrix` must be an array'):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix': 9000}}}, PluginManager()).envs
def test_matrix_not_table(self, isolation):
with pytest.raises(TypeError, match='Entry #1 in field `tool.hatch.envs.foo.matrix` must be a table'):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix': [9000]}}}, PluginManager()).envs
def test_matrix_empty(self, isolation):
with pytest.raises(ValueError, match='Matrix #1 in field `tool.hatch.envs.foo.matrix` cannot be empty'):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix': [{}]}}}, PluginManager()).envs
def test_matrix_variable_empty_string(self, isolation):
with pytest.raises(
ValueError, match='Variable #1 in matrix #1 in field `tool.hatch.envs.foo.matrix` cannot be an empty string'
):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix': [{'': []}]}}}, PluginManager()).envs
def test_matrix_variable_not_array(self, isolation):
with pytest.raises(
TypeError, match='Variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` must be an array'
):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix': [{'bar': 9000}]}}}, PluginManager()).envs
def test_matrix_variable_array_empty(self, isolation):
with pytest.raises(
ValueError, match='Variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` cannot be empty'
):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix': [{'bar': []}]}}}, PluginManager()).envs
def test_matrix_variable_entry_not_string(self, isolation):
with pytest.raises(
TypeError,
match='Value #1 of variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` must be a string',
):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix': [{'bar': [9000]}]}}}, PluginManager()).envs
def test_matrix_variable_entry_empty_string(self, isolation):
with pytest.raises(
ValueError,
match=(
'Value #1 of variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` '
'cannot be an empty string'
),
):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix': [{'bar': ['']}]}}}, PluginManager()).envs
def test_matrix_variable_entry_duplicate(self, isolation):
with pytest.raises(
ValueError,
match='Value #2 of variable `bar` in matrix #1 in field `tool.hatch.envs.foo.matrix` is a duplicate',
):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix': [{'bar': ['1', '1']}]}}}, PluginManager()).envs
def test_matrix_multiple_python_variables(self, isolation):
with pytest.raises(
ValueError,
match='Matrix #1 in field `tool.hatch.envs.foo.matrix` cannot contain both `py` and `python` variables',
):
_ = ProjectConfig(
isolation,
{'envs': {'foo': {'matrix': [{'py': ['39', '310'], 'python': ['39', '311']}]}}},
PluginManager(),
).envs
def test_matrix_name_format_not_string(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo.matrix-name-format` must be a string'):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix-name-format': 9000}}}, PluginManager()).envs
def test_matrix_name_format_invalid(self, isolation):
with pytest.raises(
ValueError,
match='Field `tool.hatch.envs.foo.matrix-name-format` must contain at least the `{value}` placeholder',
):
_ = ProjectConfig(isolation, {'envs': {'foo': {'matrix-name-format': 'bar'}}}, PluginManager()).envs
def test_overrides_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo.overrides` must be a table'):
_ = ProjectConfig(isolation, {'envs': {'foo': {'overrides': 9000}}}, PluginManager()).envs
def test_overrides_platform_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo.overrides.platform` must be a table'):
_ = ProjectConfig(isolation, {'envs': {'foo': {'overrides': {'platform': 9000}}}}, PluginManager()).envs
def test_overrides_env_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo.overrides.env` must be a table'):
_ = ProjectConfig(isolation, {'envs': {'foo': {'overrides': {'env': 9000}}}}, PluginManager()).envs
def test_overrides_matrix_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo.overrides.matrix` must be a table'):
_ = ProjectConfig(
isolation,
{'envs': {'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': 9000}}}},
PluginManager(),
).envs
def test_overrides_platform_entry_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo.overrides.platform.bar` must be a table'):
_ = ProjectConfig(
isolation, {'envs': {'foo': {'overrides': {'platform': {'bar': 9000}}}}}, PluginManager()
).envs
def test_overrides_env_entry_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo.overrides.env.bar` must be a table'):
_ = ProjectConfig(isolation, {'envs': {'foo': {'overrides': {'env': {'bar': 9000}}}}}, PluginManager()).envs
def test_overrides_matrix_entry_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.envs.foo.overrides.matrix.bar` must be a table'):
_ = ProjectConfig(
isolation,
{'envs': {'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'bar': 9000}}}}},
PluginManager(),
).envs
def test_matrix_simple_no_python(self, isolation):
env_config = {'foo': {'option': True, 'matrix': [{'version': ['9000', '3.14']}]}}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', 'option': True},
'foo.3.14': {'type': 'virtual', 'option': True},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
def test_matrix_simple_no_python_custom_name_format(self, isolation):
env_config = {
'foo': {
'option': True,
'matrix-name-format': '{variable}_{value}',
'matrix': [{'version': ['9000', '3.14']}],
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.version_9000': {'type': 'virtual', 'option': True},
'foo.version_3.14': {'type': 'virtual', 'option': True},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('indicator', ['py', 'python'])
def test_matrix_simple_only_python(self, isolation, indicator):
env_config = {'foo': {'option': True, 'matrix': [{indicator: ['39', '310']}]}}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.py39': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py310': {'type': 'virtual', 'option': True, 'python': '310'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('indicator', ['py', 'python'])
def test_matrix_simple(self, isolation, indicator):
env_config = {'foo': {'option': True, 'matrix': [{'version': ['9000', '3.14'], indicator: ['39', '310']}]}}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.py39-9000': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py39-3.14': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py310-9000': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-3.14': {'type': 'virtual', 'option': True, 'python': '310'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('indicator', ['py', 'python'])
def test_matrix_simple_custom_name_format(self, isolation, indicator):
env_config = {
'foo': {
'option': True,
'matrix-name-format': '{variable}_{value}',
'matrix': [{'version': ['9000', '3.14'], indicator: ['39', '310']}],
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.py39-version_9000': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py39-version_3.14': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py310-version_9000': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-version_3.14': {'type': 'virtual', 'option': True, 'python': '310'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
def test_matrix_multiple_non_python(self, isolation):
env_config = {
'foo': {
'option': True,
'matrix': [{'version': ['9000', '3.14'], 'py': ['39', '310'], 'foo': ['baz', 'bar']}],
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.py39-9000-baz': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py39-9000-bar': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py39-3.14-baz': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py39-3.14-bar': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py310-9000-baz': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-9000-bar': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-3.14-baz': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-3.14-bar': {'type': 'virtual', 'option': True, 'python': '310'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
def test_matrix_series(self, isolation):
env_config = {
'foo': {
'option': True,
'matrix': [
{'version': ['9000', '3.14'], 'py': ['39', '310'], 'foo': ['baz', 'bar']},
{'version': ['9000'], 'py': ['310'], 'baz': ['foo', 'test'], 'bar': ['foobar']},
],
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.py39-9000-baz': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py39-9000-bar': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py39-3.14-baz': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py39-3.14-bar': {'type': 'virtual', 'option': True, 'python': '39'},
'foo.py310-9000-baz': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-9000-bar': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-3.14-baz': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-3.14-bar': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-9000-foo-foobar': {'type': 'virtual', 'option': True, 'python': '310'},
'foo.py310-9000-test-foobar': {'type': 'virtual', 'option': True, 'python': '310'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
def test_matrices_not_inherited(self, isolation):
env_config = {
'foo': {'option1': True, 'matrix': [{'py': ['39']}]},
'bar': {'template': 'foo', 'option2': False},
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.py39': {'type': 'virtual', 'option1': True, 'python': '39'},
'bar': {'type': 'virtual', 'option1': True, 'option2': False},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
def test_matrix_default_naming(self, isolation):
env_config = {'default': {'option': True, 'matrix': [{'version': ['9000', '3.14']}]}}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'9000': {'type': 'virtual', 'option': True},
'3.14': {'type': 'virtual', 'option': True},
}
assert project_config.envs == expected_envs
assert project_config.matrices['default'] == construct_matrix_data('default', env_config)
def test_matrix_pypy_naming(self, isolation):
env_config = {'foo': {'option': True, 'matrix': [{'py': ['python3.9', 'pypy3']}]}}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.python3.9': {'type': 'virtual', 'option': True, 'python': 'python3.9'},
'foo.pypy3': {'type': 'virtual', 'option': True, 'python': 'pypy3'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=f'Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must be a string or an array',
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: 9000}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_entry_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a string or an inline table'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [9000]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_no_key(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f'Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must have an option named `key`'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: [{}]}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_key_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Option `key` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a string'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'key': 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_key_empty_string(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f'Option `key` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'cannot be an empty string'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'key': ''}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_value_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a string'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'key': 'foo', 'value': 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_table_entry_if_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Option `if` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be an array'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {
'matrix': {'version': {option: [{'key': 'foo', 'value': 'bar', 'if': 9000}]}}
},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_invalid_type(self, isolation, option):
with pytest.raises(
TypeError, match=f'Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must be an array'
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: 9000}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_table_entry_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f'Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must have an option named `value`'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: [{}]}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_table_entry_value_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a string'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'value': 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_table_entry_value_empty_string(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f'Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'cannot be an empty string'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'value': ''}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_table_entry_if_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Option `if` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be an array'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'value': 'foo', 'if': 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_entry_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a string or an inline table'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [9000]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a string, inline table, or an array'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: 9000}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_table_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=f'Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must have an option named `value`',
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: {}}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_table_value_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=f'Option `value` in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must be a string',
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: {'value': 9000}}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_array_entry_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a string or an inline table'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [9000]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_array_table_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f'Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must have an option named `value`'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: [{}]}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_array_table_value_not_string(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a string'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'value': 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_array_table_if_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Option `if` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be an array'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'value': 'foo', 'if': 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a boolean, inline table, or an array'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: 9000}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=f'Field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must have an option named `value`',
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: {}}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_value_not_boolean(self, isolation, option):
with pytest.raises(
TypeError,
match=f'Option `value` in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` must be a boolean',
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: {'value': 9000}}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_entry_invalid_type(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a boolean or an inline table'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [9000]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_no_value(self, isolation, option):
with pytest.raises(
ValueError,
match=(
f'Entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must have an option named `value`'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {'matrix': [{'version': ['9000']}], 'overrides': {'matrix': {'version': {option: [{}]}}}}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_value_not_boolean(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Option `value` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be a boolean'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'value': 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_if_not_array(self, isolation, option):
with pytest.raises(
TypeError,
match=(
f'Option `if` in entry #1 in field `tool.hatch.envs.foo.overrides.matrix.version.{option}` '
f'must be an array'
),
):
_ = ProjectConfig(
isolation,
{
'envs': {
'foo': {
'matrix': [{'version': ['9000']}],
'overrides': {'matrix': {'version': {option: [{'value': True, 'if': 9000}]}}},
}
}
},
PluginManager(),
).envs
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_string_with_value(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: 'FOO=ok'}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'FOO': 'ok'}},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
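    # Behavior shown below: a mapping entry given without an explicit value defaults
    # to the matrix variable's value for each generated environment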
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_string_without_value(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: 'FOO'}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'FOO': '9000'}},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_string_override(self, isolation, option):
env_config = {
'foo': {
option: {'TEST': 'baz'},
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: 'TEST'}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'TEST': '9000'}},
'foo.bar': {'type': 'virtual', option: {'TEST': 'baz'}},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_string_with_value(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: ['FOO=ok']}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'FOO': 'ok'}},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_string_without_value(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: ['FOO']}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'FOO': '9000'}},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_string_override(self, isolation, option):
env_config = {
'foo': {
option: {'TEST': 'baz'},
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: ['TEST']}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'TEST': '9000'}},
'foo.bar': {'type': 'virtual', option: {'TEST': 'baz'}},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_table_key_with_value(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'key': 'FOO', 'value': 'ok'}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'FOO': 'ok'}},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_table_key_without_value(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'key': 'FOO'}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'FOO': '9000'}},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_table_override(self, isolation, option):
env_config = {
'foo': {
option: {'TEST': 'baz'},
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'key': 'TEST'}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'TEST': '9000'}},
'foo.bar': {'type': 'virtual', option: {'TEST': 'baz'}},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
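    # Behavior shown below: an `if` list restricts an override entry to the matrix
    # variable values it names; environments for other values keep the original option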
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_array_table_conditional(self, isolation, option):
env_config = {
'foo': {
option: {'TEST': 'baz'},
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'key': 'TEST', 'if': ['42']}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'TEST': 'baz'}},
'foo.42': {'type': 'virtual', option: {'TEST': '42'}},
'foo.bar': {'type': 'virtual', option: {'TEST': 'baz'}},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
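    # Behavior shown below: a `set-` prefixed option replaces the existing value
    # outright instead of merging into or extending it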
@pytest.mark.parametrize('option', MAPPING_OPTIONS)
def test_overrides_matrix_mapping_overwrite(self, isolation, option):
env_config = {
'foo': {
option: {'TEST': 'baz'},
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {f'set-{option}': ['FOO=bar', {'key': 'BAZ'}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: {'FOO': 'bar', 'BAZ': '9000'}},
'foo.bar': {'type': 'virtual', option: {'TEST': 'baz'}},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_string(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: ['run foo']}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: ['run foo']},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_string_existing_append(self, isolation, option):
env_config = {
'foo': {
option: ['run baz'],
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: ['run foo']}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: ['run baz', 'run foo']},
'foo.bar': {'type': 'virtual', option: ['run baz']},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_table(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': 'run foo'}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: ['run foo']},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_table_existing_append(self, isolation, option):
env_config = {
'foo': {
option: ['run baz'],
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': 'run foo'}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: ['run baz', 'run foo']},
'foo.bar': {'type': 'virtual', option: ['run baz']},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_table_conditional(self, isolation, option):
env_config = {
'foo': {
option: ['run baz'],
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': 'run foo', 'if': ['42']}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: ['run baz']},
'foo.42': {'type': 'virtual', option: ['run baz', 'run foo']},
'foo.bar': {'type': 'virtual', option: ['run baz']},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', ARRAY_OPTIONS)
def test_overrides_matrix_array_overwrite(self, isolation, option):
env_config = {
'foo': {
option: ['run baz'],
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {f'set-{option}': ['run foo', {'value': 'run bar'}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: ['run foo', 'run bar']},
'foo.bar': {'type': 'virtual', option: ['run baz']},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_string_create(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: 'baz'}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_string_overwrite(self, isolation, option):
env_config = {
'foo': {
option: 'test',
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: 'baz'}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual', option: 'test'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_table_create(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: {'value': 'baz'}}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_table_override(self, isolation, option):
env_config = {
'foo': {
option: 'test',
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: {'value': 'baz'}}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual', option: 'test'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_table_conditional(self, isolation, option):
env_config = {
'foo': {
option: 'test',
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: {'value': 'baz', 'if': ['42']}}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'test'},
'foo.42': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual', option: 'test'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_array_table_create(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': 'baz'}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_array_table_override(self, isolation, option):
env_config = {
'foo': {
option: 'test',
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': 'baz'}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual', option: 'test'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_array_table_conditional(self, isolation, option):
env_config = {
'foo': {
option: 'test',
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': 'baz', 'if': ['42']}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'test'},
'foo.42': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual', option: 'test'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
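    # Behavior shown below: for arrays of candidate string values, the first entry
    # whose `if` condition matches (or that has no condition) wins; later entries
    # are ignored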
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_array_table_conditional_eager_string(self, isolation, option):
env_config = {
'foo': {
option: 'test',
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: ['baz', {'value': 'foo', 'if': ['42']}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'baz'},
'foo.42': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual', option: 'test'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', STRING_OPTIONS)
def test_overrides_matrix_string_array_table_conditional_eager_table(self, isolation, option):
env_config = {
'foo': {
option: 'test',
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': 'baz', 'if': ['42']}, 'foo']}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: 'foo'},
'foo.42': {'type': 'virtual', option: 'baz'},
'foo.bar': {'type': 'virtual', option: 'test'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_boolean_create(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: True}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_boolean_overwrite(self, isolation, option):
env_config = {
'foo': {
option: False,
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: True}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual', option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_create(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: {'value': True}}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_override(self, isolation, option):
env_config = {
'foo': {
option: False,
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: {'value': True}}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual', option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_table_conditional(self, isolation, option):
env_config = {
'foo': {
option: False,
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: {'value': True, 'if': ['42']}}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: False},
'foo.42': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual', option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_create(self, isolation, option):
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': True}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_override(self, isolation, option):
env_config = {
'foo': {
option: False,
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': True}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual', option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_conditional(self, isolation, option):
env_config = {
'foo': {
option: False,
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': True, 'if': ['42']}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: False},
'foo.42': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual', option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
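    # Boolean candidates follow the same first-match semantics as the string eager
    # tests above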
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_conditional_eager_boolean(self, isolation, option):
env_config = {
'foo': {
option: False,
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [True, {'value': False, 'if': ['42']}]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: True},
'foo.42': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual', option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
@pytest.mark.parametrize('option', BOOLEAN_OPTIONS)
def test_overrides_matrix_boolean_array_table_conditional_eager_table(self, isolation, option):
env_config = {
'foo': {
option: False,
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {'matrix': {'version': {option: [{'value': True, 'if': ['42']}, False]}}},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', option: False},
'foo.42': {'type': 'virtual', option: True},
'foo.bar': {'type': 'virtual', option: False},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
    # We assert full type coverage using matrix variable overrides; for the other
    # override sources we just test one type
def test_overrides_platform_boolean_boolean_create(self, isolation, current_platform):
env_config = {
'foo': {
'overrides': {'platform': {'bar': {'dependencies': ['baz']}, current_platform: {'skip-install': True}}}
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': True},
}
assert project_config.envs == expected_envs
def test_overrides_platform_boolean_boolean_overwrite(self, isolation, current_platform):
env_config = {
'foo': {
'skip-install': True,
'overrides': {
'platform': {'bar': {'dependencies': ['baz']}, current_platform: {'skip-install': False}}
},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': False},
}
assert project_config.envs == expected_envs
def test_overrides_platform_boolean_table_create(self, isolation, current_platform):
env_config = {
'foo': {
'overrides': {
'platform': {
'bar': {'dependencies': ['baz']},
current_platform: {'skip-install': [{'value': True}]},
}
}
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': True},
}
assert project_config.envs == expected_envs
def test_overrides_platform_boolean_table_overwrite(self, isolation, current_platform):
env_config = {
'foo': {
'skip-install': True,
'overrides': {
'platform': {
'bar': {'dependencies': ['baz']},
current_platform: {'skip-install': [{'value': False}]},
}
},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': False},
}
assert project_config.envs == expected_envs
def test_overrides_env_boolean_boolean_create(self, isolation):
env_var_exists = 'OVERRIDES_ENV_FOO'
env_var_missing = 'OVERRIDES_ENV_BAR'
env_config = {
'foo': {
'overrides': {
'env': {env_var_missing: {'dependencies': ['baz']}, env_var_exists: {'skip-install': True}}
}
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': True},
}
with EnvVars({env_var_exists: 'any'}):
assert project_config.envs == expected_envs
def test_overrides_env_boolean_boolean_overwrite(self, isolation):
env_var_exists = 'OVERRIDES_ENV_FOO'
env_var_missing = 'OVERRIDES_ENV_BAR'
env_config = {
'foo': {
'skip-install': True,
'overrides': {
'env': {env_var_missing: {'dependencies': ['baz']}, env_var_exists: {'skip-install': False}}
},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': False},
}
with EnvVars({env_var_exists: 'any'}):
assert project_config.envs == expected_envs
def test_overrides_env_boolean_table_create(self, isolation):
env_var_exists = 'OVERRIDES_ENV_FOO'
env_var_missing = 'OVERRIDES_ENV_BAR'
env_config = {
'foo': {
'overrides': {
'env': {
env_var_missing: {'dependencies': ['baz']},
env_var_exists: {'skip-install': [{'value': True}]},
}
}
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': True},
}
with EnvVars({env_var_exists: 'any'}):
assert project_config.envs == expected_envs
def test_overrides_env_boolean_table_overwrite(self, isolation):
env_var_exists = 'OVERRIDES_ENV_FOO'
env_var_missing = 'OVERRIDES_ENV_BAR'
env_config = {
'foo': {
'skip-install': True,
'overrides': {
'env': {
env_var_missing: {'dependencies': ['baz']},
env_var_exists: {'skip-install': [{'value': False}]},
}
},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': False},
}
with EnvVars({env_var_exists: 'any'}):
assert project_config.envs == expected_envs
def test_overrides_env_boolean_conditional(self, isolation):
env_var_exists = 'OVERRIDES_ENV_FOO'
env_var_missing = 'OVERRIDES_ENV_BAR'
env_config = {
'foo': {
'overrides': {
'env': {
env_var_missing: {'dependencies': ['baz']},
env_var_exists: {'skip-install': [{'value': True, 'if': ['foo']}]},
}
}
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': True},
}
with EnvVars({env_var_exists: 'foo'}):
assert project_config.envs == expected_envs
    # Tests for override source precedence: matrix takes precedence over env,
    # which takes precedence over platform
def test_overrides_matrix_precedence_over_platform(self, isolation, current_platform):
env_config = {
'foo': {
'skip-install': False,
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {
'platform': {current_platform: {'skip-install': True}},
'matrix': {'version': {'skip-install': [{'value': False, 'if': ['42']}]}},
},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', 'skip-install': True},
'foo.42': {'type': 'virtual', 'skip-install': False},
'foo.bar': {'type': 'virtual', 'skip-install': True},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config, {'skip-install': True})
def test_overrides_matrix_precedence_over_env(self, isolation):
env_var = 'OVERRIDES_ENV_FOO'
env_config = {
'foo': {
'skip-install': False,
'matrix': [{'version': ['9000', '42']}, {'feature': ['bar']}],
'overrides': {
'env': {env_var: {'skip-install': True}},
'matrix': {'version': {'skip-install': [{'value': False, 'if': ['42']}]}},
},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', 'skip-install': True},
'foo.42': {'type': 'virtual', 'skip-install': False},
'foo.bar': {'type': 'virtual', 'skip-install': True},
}
with EnvVars({env_var: 'any'}):
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config, {'skip-install': True})
def test_overrides_env_precedence_over_platform(self, isolation, current_platform):
env_var = 'OVERRIDES_ENV_FOO'
env_config = {
'foo': {
'overrides': {
'platform': {current_platform: {'skip-install': True}},
'env': {env_var: {'skip-install': [{'value': False, 'if': ['foo']}]}},
},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo': {'type': 'virtual', 'skip-install': False},
}
with EnvVars({env_var: 'foo'}):
assert project_config.envs == expected_envs
    # Test for options defined by environment plugins: unknown options are deferred
    # and only applied once finalize_env_overrides declares their types
def test_overrides_for_environment_plugins(self, isolation, current_platform):
env_var = 'OVERRIDES_ENV_FOO'
env_config = {
'foo': {
'matrix': [{'version': ['9000']}, {'feature': ['bar']}],
'overrides': {
'platform': {current_platform: {'foo': True}},
'env': {env_var: {'bar': [{'value': 'foobar', 'if': ['foo']}]}},
'matrix': {'version': {'baz': 'BAR=ok'}},
},
}
}
project_config = ProjectConfig(isolation, {'envs': env_config}, PluginManager())
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual'},
'foo.bar': {'type': 'virtual'},
}
with EnvVars({env_var: 'foo'}):
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
project_config.finalize_env_overrides({'foo': bool, 'bar': str, 'baz': dict})
expected_envs = {
'default': {'type': 'virtual'},
'foo.9000': {'type': 'virtual', 'foo': True, 'bar': 'foobar', 'baz': {'BAR': 'ok'}},
'foo.bar': {'type': 'virtual', 'foo': True, 'bar': 'foobar'},
}
assert project_config.envs == expected_envs
assert project_config.matrices['foo'] == construct_matrix_data('foo', env_config)
class TestPublish:
def test_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.publish` must be a table'):
_ = ProjectConfig(isolation, {'publish': 9000}).publish
def test_config_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.publish.foo` must be a table'):
_ = ProjectConfig(isolation, {'publish': {'foo': 9000}}).publish
def test_default(self, isolation):
project_config = ProjectConfig(isolation, {})
assert project_config.publish == project_config.publish == {}
def test_defined(self, isolation):
project_config = ProjectConfig(isolation, {'publish': {'foo': {'bar': 'baz'}}})
assert project_config.publish == {'foo': {'bar': 'baz'}}
class TestScripts:
def test_not_table(self, isolation):
config = {'scripts': 9000}
project_config = ProjectConfig(isolation, config)
with pytest.raises(TypeError, match='Field `tool.hatch.scripts` must be a table'):
_ = project_config.scripts
def test_name_contains_spaces(self, isolation):
config = {'scripts': {'foo bar': []}}
project_config = ProjectConfig(isolation, config)
with pytest.raises(
ValueError, match='Script name `foo bar` in field `tool.hatch.scripts` must not contain spaces'
):
_ = project_config.scripts
def test_default(self, isolation):
project_config = ProjectConfig(isolation, {})
assert project_config.scripts == project_config.scripts == {}
def test_single_commands(self, isolation):
config = {'scripts': {'foo': 'command1', 'bar': 'command2'}}
project_config = ProjectConfig(isolation, config)
assert project_config.scripts == {'foo': ['command1'], 'bar': ['command2']}
def test_multiple_commands(self, isolation):
config = {'scripts': {'foo': 'command1', 'bar': ['command3', 'command2']}}
project_config = ProjectConfig(isolation, config)
assert project_config.scripts == {'foo': ['command1'], 'bar': ['command3', 'command2']}
def test_multiple_commands_not_string(self, isolation):
config = {'scripts': {'foo': [9000]}}
project_config = ProjectConfig(isolation, config)
with pytest.raises(TypeError, match='Command #1 in field `tool.hatch.scripts.foo` must be a string'):
_ = project_config.scripts
def test_config_invalid_type(self, isolation):
config = {'scripts': {'foo': 9000}}
project_config = ProjectConfig(isolation, config)
with pytest.raises(TypeError, match='Field `tool.hatch.scripts.foo` must be a string or an array of strings'):
_ = project_config.scripts
def test_command_expansion_basic(self, isolation):
config = {'scripts': {'foo': 'command1', 'bar': ['command3', 'foo']}}
project_config = ProjectConfig(isolation, config)
assert project_config.scripts == {'foo': ['command1'], 'bar': ['command3', 'command1']}
def test_command_expansion_multiple_nested(self, isolation):
config = {
'scripts': {
'foo': 'command3',
'baz': ['command5', 'bar', 'foo', 'command1'],
'bar': ['command4', 'foo', 'command2'],
}
}
project_config = ProjectConfig(isolation, config)
assert project_config.scripts == {
'foo': ['command3'],
'baz': ['command5', 'command4', 'command3', 'command2', 'command3', 'command1'],
'bar': ['command4', 'command3', 'command2'],
}
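    # Behavior shown below: when a script reference carries extra arguments, those
    # arguments are appended to every command the reference expands to, and they
    # accumulate through nested expansion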
def test_command_expansion_modification(self, isolation):
config = {
'scripts': {
'foo': 'command3',
'baz': ['command5', 'bar world', 'foo', 'command1'],
'bar': ['command4', 'foo hello', 'command2'],
}
}
project_config = ProjectConfig(isolation, config)
assert project_config.scripts == {
'foo': ['command3'],
'baz': ['command5', 'command4 world', 'command3 hello world', 'command2 world', 'command3', 'command1'],
'bar': ['command4', 'command3 hello', 'command2'],
}
def test_command_expansion_circular_inheritance(self, isolation):
config = {'scripts': {'foo': 'bar', 'bar': 'foo'}}
project_config = ProjectConfig(isolation, config)
with pytest.raises(
ValueError, match='Circular expansion detected for field `tool.hatch.scripts`: foo -> bar -> foo'
):
_ = project_config.scripts
class TestVersionConfig:
def test_missing(self, isolation):
with pytest.raises(ValueError, match='Missing `tool.hatch.version` configuration'):
_ = ProjectConfig(isolation, {}).version
def test_not_table(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.version` must be a table'):
_ = ProjectConfig(isolation, {'version': 9000}).version
def test_parse(self, isolation):
project_config = ProjectConfig(isolation, {'version': {'foo': 'bar'}})
assert project_config.version.config == project_config.version.config == {'foo': 'bar'}
class TestVersionSourceName:
def test_empty(self, isolation):
with pytest.raises(
ValueError, match='The `source` option under the `tool.hatch.version` table must not be empty if defined'
):
_ = ProjectConfig(isolation, {'version': {'source': ''}}).version.source_name
    def test_not_string(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.version.source` must be a string'):
_ = ProjectConfig(isolation, {'version': {'source': 9000}}).version.source_name
def test_correct(self, isolation):
project_config = ProjectConfig(isolation, {'version': {'source': 'foo'}})
assert project_config.version.source_name == project_config.version.source_name == 'foo'
def test_default(self, isolation):
project_config = ProjectConfig(isolation, {'version': {}})
assert project_config.version.source_name == project_config.version.source_name == 'regex'
class TestVersionSchemeName:
    def test_empty(self, isolation):
with pytest.raises(
ValueError, match='The `scheme` option under the `tool.hatch.version` table must not be empty if defined'
):
_ = ProjectConfig(isolation, {'version': {'scheme': ''}}).version.scheme_name
    def test_not_string(self, isolation):
with pytest.raises(TypeError, match='Field `tool.hatch.version.scheme` must be a string'):
_ = ProjectConfig(isolation, {'version': {'scheme': 9000}}).version.scheme_name
def test_correct(self, isolation):
project_config = ProjectConfig(isolation, {'version': {'scheme': 'foo'}})
assert project_config.version.scheme_name == project_config.version.scheme_name == 'foo'
def test_default(self, isolation):
project_config = ProjectConfig(isolation, {'version': {}})
assert project_config.version.scheme_name == project_config.version.scheme_name == 'standard'
class TestVersionSource:
def test_unknown(self, isolation):
with pytest.raises(ValueError, match='Unknown version source: foo'):
_ = ProjectConfig(isolation, {'version': {'source': 'foo'}}, PluginManager()).version.source
def test_cached(self, isolation):
project_config = ProjectConfig(isolation, {'version': {}}, PluginManager())
assert project_config.version.source is project_config.version.source
assert isinstance(project_config.version.source, RegexSource)
class TestVersionScheme:
def test_unknown(self, isolation):
with pytest.raises(ValueError, match='Unknown version scheme: foo'):
_ = ProjectConfig(isolation, {'version': {'scheme': 'foo'}}, PluginManager()).version.scheme
def test_cached(self, isolation):
project_config = ProjectConfig(isolation, {'version': {}}, PluginManager())
assert project_config.version.scheme is project_config.version.scheme
assert isinstance(project_config.version.scheme, StandardScheme)
|
ofek/hatch
|
tests/project/test_config.py
|
Python
|
mit
| 93,385
|
from .entity import Entity
class Message(Entity):
"""
Represents a single message.
Fields:
- id (string, max 34 characters)
* ID of the message
* Read-only
- direction
* Direction of the message: incoming messages are sent from one of your contacts to
your phone; outgoing messages are sent from your phone to one of your contacts
* Allowed values: incoming, outgoing
* Read-only
- status
* Current status of the message
* Allowed values: ignored, processing, received, sent, queued, failed, failed_queued,
cancelled, delivered, not_delivered
* Read-only
- message_type
* Type of the message
* Allowed values: sms, mms, ussd, call, service
* Read-only
- source
* How the message originated within Telerivet
* Allowed values: phone, provider, web, api, service, webhook, scheduled, integration
* Read-only
- time_created (UNIX timestamp)
* The time that the message was created on Telerivet's servers
* Read-only
- time_sent (UNIX timestamp)
* The time that the message was reported to have been sent (null for incoming messages
and messages that have not yet been sent)
* Read-only
- time_updated (UNIX timestamp)
* The time that the message was last updated in Telerivet.
* Read-only
- from_number (string)
* The phone number that the message originated from (your number for outgoing
messages, the contact's number for incoming messages)
* Read-only
- to_number (string)
* The phone number that the message was sent to (your number for incoming messages,
the contact's number for outgoing messages)
* Read-only
- content (string)
* The text content of the message (null for USSD messages and calls)
* Read-only
- starred (bool)
* Whether this message is starred in Telerivet
* Updatable via API
- simulated (bool)
* Whether this message was simulated within Telerivet for testing (and not actually
sent to or received by a real phone)
* Read-only
- label_ids (array)
* List of IDs of labels applied to this message
* Read-only
- vars (dict)
* Custom variables stored for this message
* Updatable via API
- priority (int)
* Priority of this message. Telerivet will attempt to send messages with higher
priority numbers first. Only defined for outgoing messages.
* Read-only
- error_message
* A description of the error encountered while sending a message. (This field is
omitted from the API response if there is no error message.)
* Updatable via API
- external_id
* The ID of this message from an external SMS gateway provider (e.g. Twilio or Nexmo),
if available.
* Read-only
- price (number)
* The price of this message, if known.
* Read-only
- price_currency
* The currency of the message price, if applicable.
* Read-only
- duration (number)
* The duration of the call in seconds, if known, or -1 if the call was not answered.
* Read-only
- ring_time (number)
* The length of time the call rang in seconds before being answered or hung up, if
known.
* Read-only
- audio_url
* For voice calls, the URL of an MP3 file to play when the contact answers the call
* Read-only
- tts_lang
* For voice calls, the language of the text-to-speech voice
* Allowed values: en-US, en-GB, en-GB-WLS, en-AU, en-IN, da-DK, nl-NL, fr-FR, fr-CA,
de-DE, is-IS, it-IT, pl-PL, pt-BR, pt-PT, ru-RU, es-ES, es-US, sv-SE
* Read-only
- tts_voice
* For voice calls, the text-to-speech voice
* Allowed values: female, male
* Read-only
- mms_parts (array)
* A list of parts in the MMS message, the same as returned by the
[getMMSParts](#Message.getMMSParts) method.
Note: This property is only present when retrieving an individual
MMS message by ID, not when querying a list of messages. In other cases, use
[getMMSParts](#Message.getMMSParts).
* Read-only
- track_clicks (boolean)
* If true, URLs in the message content are short URLs that redirect to a destination
URL.
* Read-only
- short_urls (array)
* For text messages containing short URLs, this is an array of objects with the
properties `short_url`, `link_type`, and `time_clicked` (the first time that URL was
clicked). If `link_type` is "redirect", the object also contains a `destination_url`
property. If `link_type` is "media", the object also contains a `media_index`
property (the index in the media array). If `link_type` is "service", the object also
contains a `service_id` property. This property is undefined for messages that do not
contain short URLs.
* Read-only
- media (array)
* For text messages containing media files, this is an array of objects with the
properties `url`, `type` (MIME type), `filename`, and `size` (file size in bytes).
Unknown properties are null. This property is undefined for messages that do not
contain media files. Note: For files uploaded via the Telerivet web app, the URL is
temporary and may not be valid for more than 1 day.
* Read-only
- time_clicked (UNIX timestamp)
* If the message contains any short URLs, this is the first time that a short URL in
the message was clicked. This property is undefined for messages that do not contain
short URLs.
* Read-only
- service_id (string, max 34 characters)
* ID of the service that handled the message (for voice calls, the service defines the
call flow)
* Read-only
- phone_id (string, max 34 characters)
* ID of the phone (basic route) that sent or received the message
* Read-only
- contact_id (string, max 34 characters)
* ID of the contact that sent or received the message
* Read-only
- route_id (string, max 34 characters)
* ID of the custom route that sent the message (if applicable)
* Read-only
- broadcast_id (string, max 34 characters)
* ID of the broadcast that this message is part of (if applicable)
* Read-only
- scheduled_id (string, max 34 characters)
* ID of the scheduled message that created this message (if applicable)
* Read-only
- user_id (string, max 34 characters)
* ID of the Telerivet user who sent the message (if applicable)
* Read-only
- project_id
* ID of the project this message belongs to
* Read-only
"""
def hasLabel(self, label):
"""
Returns true if this message has a particular label, false otherwise.
Arguments:
- label (Label)
* Required
Returns:
bool
"""
self.load()
return label.id in self._label_ids_set
def addLabel(self, label):
"""
Adds a label to the given message.
Arguments:
- label (Label)
* Required
"""
self._api.doRequest("PUT", label.getBaseApiPath() + "/messages/" + self.id);
self._label_ids_set[label.id] = True
def removeLabel(self, label):
"""
Removes a label from the given message.
Arguments:
- label (Label)
* Required
"""
self._api.doRequest("DELETE", label.getBaseApiPath() + "/messages/" + self.id)
if label.id in self._label_ids_set:
del self._label_ids_set[label.id]
def delete(self):
"""
Deletes this message.
"""
self._api.doRequest("DELETE", self.getBaseApiPath())
def getMMSParts(self):
"""
Retrieves a list of MMS parts for this message (empty for non-MMS messages).
Each MMS part in the list is an object with the following
properties:
- cid: MMS content-id
- type: MIME type
- filename: original filename
- size (int): number of bytes
- url: URL where the content for this part is stored (secret but
publicly accessible, so you could link/embed it in a web page without having to re-host it
yourself)
Returns:
array
"""
return self._api.doRequest("GET", self.getBaseApiPath() + "/mms_parts")
def save(self):
"""
Saves any fields that have changed for this message.
"""
super(Message, self).save()
def resend(self, **options):
"""
Resends a message, for example if the message failed to send or if it was not delivered. If
the message was originally in the queued, retrying, failed, or cancelled states, then
Telerivet will return the same message object. Otherwise, Telerivet will create and return a
new message object.
Arguments:
- route_id
* ID of the phone or route to send the message from
Returns:
Message
"""
from .message import Message
return Message(self._api, self._api.doRequest("POST", self.getBaseApiPath() + "/resend", options))
def cancel(self):
"""
Cancels sending a message that has not yet been sent. Returns the updated message object.
Only valid for outgoing messages that are currently in the queued, retrying, or cancelled
states. For other messages, the API will return an error with the code 'not_cancellable'.
Returns:
Message
"""
from .message import Message
return Message(self._api, self._api.doRequest("POST", self.getBaseApiPath() + "/cancel"))
def getBaseApiPath(self):
return "/projects/%(project_id)s/messages/%(id)s" % {'project_id': self.project_id, 'id': self.id}
def _setData(self, data):
super(Message, self)._setData(data)
self._label_ids_set = {}
if 'label_ids' in data:
for label_id in data['label_ids']:
self._label_ids_set[label_id] = True
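# A minimal usage sketch (hypothetical IDs; `tr` is assumed to be a configured
# telerivet.API client, and the getter names below are assumptions based on
# the client's conventions, not guaranteed by this file):
#
# project = tr.getProjectById('PJxxxxxxxxxxxxxxxx')
# message = project.getMessageById('SMxxxxxxxxxxxxxxxx')
# label = project.getOrCreateLabel('Important')
# if not message.hasLabel(label):
# message.addLabel(label)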
|
Telerivet/telerivet-python-client
|
telerivet/message.py
|
Python
|
mit
| 11,412
|
from rest_framework import serializers
from .models import Performance, TopImage
class PerformanceSerializer(serializers.ModelSerializer):
class Meta:
model = Performance
fields = ('detector', 'average_precision', 'precision',
'recall', 'test_set')
class TopImageSerializer(serializers.ModelSerializer):
class Meta:
model = TopImage
fields = ('id', 'image_jpeg',
'image_height', 'image_width',
'box_x', 'box_y',
'box_width', 'box_height',
'detector')
# DRF expects `read_only_fields`; the old `read_only` Meta option is not
# recognized by ModelSerializer.
read_only_fields = ('uploaded_at', 'image_height', 'image_width')
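# Minimal usage sketch (assumes a saved Performance instance `perf`; purely
# illustrative, not part of the app):
#
# serializer = PerformanceSerializer(perf)
# serializer.data # dict with the fields declared in Meta.fields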
|
mingot/detectme_server
|
detectme/leaderboards/serializers.py
|
Python
|
mit
| 652
|
# python3
r"""Learners of generalized method of moments with deep networks.
References:
- DeepGMMLearner:
Andrew Bennett, Nathan Kallus, and Tobias Schnabel. Deep generalized method of
moments for instrumental variable analysis. In Advances in Neural Information
Processing Systems, pages 3559–3569, 2019.
- AdversarialSEMLearner
L. Liao, Y. L. Chen, Z. Yang, B. Dai, Z. Wang and M. Kolar, 2020. Provably
efficient neural estimation of structural equation model: An adversarial
approach. arXiv preprint arXiv:2007.01290.
- AGMMLearner
Dikkala, N., Lewis, G., Mackey, L. and Syrgkanis, V., 2020. Minimax estimation
of conditional moment models. Advances in Neural Information Processing
Systems, 33.
"""
import datetime
from typing import Dict, List, Optional, Tuple
from acme import core
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow.compat.v2 as tf
from src.ope.deep_gmm.oadam import oadam
# Extend the checkpoint TTL to 30 days (the Acme default is 5 days).
_CHECKPOINT_TTL = int(datetime.timedelta(days=30).total_seconds())
def _optimizer_class(name):
return oadam.OAdam if name == 'OAdam' else getattr(snt.optimizers, name)
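# For illustration: _optimizer_class('OAdam') returns the optimistic Adam
# implementation imported above, while e.g. _optimizer_class('SGD') resolves
# to snt.optimizers.SGD via getattr.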
def _td_error(critic_network, o_tm1, a_tm1, r_t, d_t, o_t,
a_t) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
"""Computes TD error."""
q_tm1 = critic_network(o_tm1, a_tm1)
q_t = critic_network(o_t, a_t)
if q_tm1.shape != q_t.shape:
raise ValueError(f'Shape of q_tm1 {q_tm1.shape.as_list()} does not '
f'match that of q_t {q_t.shape.as_list()}')
d_t = tf.reshape(d_t, q_tm1.shape)
g = q_tm1 - d_t * q_t
r_t = tf.reshape(r_t, q_tm1.shape)
td_error = r_t - g
return td_error, q_tm1, q_t
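# Worked example with illustrative scalars (not from the paper): for
# q_tm1 = 1.0, q_t = 0.5, r_t = 0.2 and d_t = 0.9, the TD error is
# r_t - (q_tm1 - d_t * q_t) = 0.2 - (1.0 - 0.45) = -0.35.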
def _orthogonal_regularization(network: snt.Module,) -> tf.Tensor:
"""Copied from third_party/py/dice_rl/estimators/neural_dice.py."""
reg = 0.
for w in network.trainable_variables:
if w.name.endswith('/w:0'):
prod = tf.matmul(tf.transpose(w), w)
reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))
return reg
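# E.g. for W = [[1., 1.], [0., 1.]], W^T W = [[1., 1.], [1., 2.]]; zeroing the
# diagonal leaves [[0., 1.], [1., 0.]], so the penalty is 1 + 1 = 2, and it
# vanishes exactly when the columns of W are orthogonal.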
def _l2_regularization(network: snt.Module,) -> tf.Tensor:
"""Copied from third_party/py/dice_rl/estimators/neural_dice.py."""
reg = 0.
for w in network.trainable_variables:
if w.name.endswith('/w:0'):
reg += tf.reduce_sum(tf.square(w))
return reg
class DeepGMMLearnerBase(core.Learner):
"""Deep GMM learner base class."""
def __init__(self,
policy_network: snt.Module,
critic_network: snt.Module,
f_network: snt.Module,
discount: float,
dataset: tf.data.Dataset,
use_tilde_critic: bool,
tilde_critic_network: snt.Module = None,
tilde_critic_update_period: int = None,
critic_optimizer_class: str = 'OAdam',
critic_lr: float = 1e-4,
critic_beta1: float = 0.5,
critic_beta2: float = 0.9,
f_optimizer_class: str = 'OAdam',
f_lr: float = None, # Either f_lr or f_lr_multiplier must be
# None.
f_lr_multiplier: Optional[float] = 1.0,
f_beta1: float = 0.5,
f_beta2: float = 0.9,
critic_regularizer: float = 0.0,
f_regularizer: float = 1.0, # Ignored if use_tilde_critic = True
critic_ortho_regularizer: float = 0.0,
f_ortho_regularizer: float = 0.0,
critic_l2_regularizer: float = 0.0,
f_l2_regularizer: float = 0.0,
checkpoint_interval_minutes: float = 10.0,
clipping: bool = True,
clipping_action: bool = True,
bre_check_period: int = 0, # Bellman residual error check.
bre_check_num_actions: int = 0, # Number of sampled actions.
dev_dataset: tf.data.Dataset = None,
counter: counting.Counter = None,
logger: loggers.Logger = None,
checkpoint: bool = True):
self._policy_network = policy_network
self._critic_network = critic_network
self._f_network = f_network
self._discount = discount
self._clipping = clipping
self._clipping_action = clipping_action
self._bre_check_period = bre_check_period
self._bre_check_num_actions = bre_check_num_actions
# Development dataset for hyper-parameter selection.
self._dev_dataset = dev_dataset
self._dev_actions_dataset = self._sample_actions()
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter()
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
# Necessary to track when to update tilde critic networks.
self._num_steps = tf.Variable(0, dtype=tf.int32)
self._use_tilde_critic = use_tilde_critic
self._tilde_critic_network = tilde_critic_network
self._tilde_critic_update_period = tilde_critic_update_period
if use_tilde_critic and tilde_critic_update_period is None:
raise ValueError('tilde_critic_update_period must be provided if '
'use_tilde_critic is True.')
# Batch dataset and create iterator.
self._iterator = iter(dataset)
# Create optimizers if they aren't given.
self._critic_optimizer = _optimizer_class(critic_optimizer_class)(
critic_lr, beta1=critic_beta1, beta2=critic_beta2)
if f_lr is not None:
if f_lr_multiplier is not None:
raise ValueError(f'Either f_lr ({f_lr}) or f_lr_multiplier '
f'({f_lr_multiplier}) must be None.')
else:
f_lr = f_lr_multiplier * critic_lr
# Prevent unreasonable value in hyper-param search.
f_lr = max(min(f_lr, 1e-2), critic_lr)
self._f_optimizer = _optimizer_class(f_optimizer_class)(
f_lr, beta1=f_beta1, beta2=f_beta2)
# Regularization on network values.
self._critic_regularizer = critic_regularizer
self._f_regularizer = f_regularizer
# Orthogonal regularization strength.
self._critic_ortho_regularizer = critic_ortho_regularizer
self._f_ortho_regularizer = f_ortho_regularizer
# L2 regularization strength.
self._critic_l2_regularizer = critic_l2_regularizer
self._f_l2_regularizer = f_l2_regularizer
# Expose the variables.
self._variables = {
'critic': self._critic_network.variables,
}
# Create a checkpointer object.
self._checkpointer = None
self._snapshotter = None
if checkpoint:
objects_to_save = {
'counter': self._counter,
'critic': self._critic_network,
'f': self._f_network,
'tilde_critic': self._tilde_critic_network,
'critic_optimizer': self._critic_optimizer,
'f_optimizer': self._f_optimizer,
'num_steps': self._num_steps,
}
self._checkpointer = tf2_savers.Checkpointer(
objects_to_save={k: v for k, v in objects_to_save.items()
if v is not None},
time_delta_minutes=checkpoint_interval_minutes,
checkpoint_ttl_seconds=_CHECKPOINT_TTL)
self._snapshotter = tf2_savers.Snapshotter(
objects_to_save={
'critic': self._critic_network,
'f': self._f_network,
},
time_delta_minutes=60.)
def _check_bellman_residual_error(self, q_tm1, r_t, d_t, o_t):
"""Estimate of mean squared Bellman residual error."""
# Ignore stochasticity in s'.
# E[err(a')|s,a,s']^2 = E[\bar{err}^2] - Var(err) / N.
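# (The squared sample mean \bar{err}^2 overestimates E[err]^2 by Var(err)/N,
# which is why bre_mse subtracts td_var / N below.)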
if (self._bre_check_period > 0 and
tf.math.mod(self._num_steps, self._bre_check_period) == 0):
q_t_sum = 0.
q_t_sq_sum = 0.
for _ in range(self._bre_check_num_actions):
a_t = self._policy_network(o_t)
if self._clipping_action:
if not a_t.dtype.is_floating:
raise ValueError(f'Action dtype ({a_t.dtype}) is not floating.')
a_t = tf.clip_by_value(a_t, -1., 1.)
q_t = self._critic_network(o_t, a_t)
if q_tm1.shape != q_t.shape:
raise ValueError(f'Shape of q_tm1 {q_tm1.shape.as_list()} does not '
f'match that of q_t {q_t.shape.as_list()}')
q_t_sum += q_t
q_t_sq_sum += tf.square(q_t)
q_t_mean = q_t_sum / self._bre_check_num_actions
q_t_var = q_t_sq_sum / self._bre_check_num_actions - tf.square(q_t_mean)
d_t = tf.reshape(d_t, q_tm1.shape)
r_t = tf.reshape(r_t, q_tm1.shape)
td_error = r_t - (q_tm1 - d_t * q_t_mean)
td_mse = tf.reduce_mean(tf.square(td_error))
td_var = tf.reduce_mean(tf.square(d_t) * tf.reduce_mean(q_t_var))
bre_mse = td_mse - td_var / self._bre_check_num_actions
else:
bre_mse = tf.constant(0., dtype=tf.float32)
return bre_mse
@tf.function
def _step(self) -> Dict[str, tf.Tensor]:
# Get data from replay (dropping extras if any). Note there is no
# extra data here because we do not insert any into Reverb.
sample = next(self._iterator)
o_tm1, a_tm1, r_t, d_t, o_t = sample.data[:5]
a_t = self._policy_network(o_t)
if self._clipping_action:
if not a_t.dtype.is_floating:
raise ValueError(f'Action dtype ({a_t.dtype}) is not floating.')
a_t = tf.clip_by_value(a_t, -1., 1.)
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(self._discount, dtype=d_t.dtype)
d_t = discount * d_t
if self._use_tilde_critic:
tilde_td_error = _td_error(
self._tilde_critic_network, o_tm1, a_tm1, r_t, d_t, o_t, a_t)[0]
# In the same shape as tilde_td_error.
f_regularizer = 0.25 * tf.square(tilde_td_error)
else:
# Scalar.
tilde_td_error = 0.
f_regularizer = self._f_regularizer
with tf.GradientTape() as tape:
td_error, q_tm1, q_t = _td_error(
self._critic_network, o_tm1, a_tm1, r_t, d_t, o_t, a_t)
f = self._f_network(o_tm1, a_tm1)
if f.shape != td_error.shape:
raise ValueError(f'Shape of f {f.shape.as_list()} does not '
f'match that of td_error {td_error.shape.as_list()}')
moment = tf.reduce_mean(f * td_error)
f_reg_loss = tf.reduce_mean(f_regularizer * tf.square(f))
u = moment - f_reg_loss
# Add regularizations.
# Regularization on critic net output values.
if self._critic_regularizer > 0.:
critic_reg_loss = self._critic_regularizer * (
tf.reduce_mean(tf.square(q_tm1)) +
tf.reduce_mean(tf.square(q_t))) / 2.
else:
critic_reg_loss = 0.
# Ortho regularization on critic net.
if self._critic_ortho_regularizer > 0.:
critic_ortho_reg_loss = (
self._critic_ortho_regularizer *
_orthogonal_regularization(self._critic_network))
else:
critic_ortho_reg_loss = 0.
# Ortho regularization on f net.
if self._f_ortho_regularizer > 0.:
f_ortho_reg_loss = (
self._f_ortho_regularizer *
_orthogonal_regularization(self._f_network))
else:
f_ortho_reg_loss = 0.
# L2 regularization on critic net.
if self._critic_l2_regularizer > 0.:
critic_l2_reg_loss = (
self._critic_l2_regularizer *
_l2_regularization(self._critic_network))
else:
critic_l2_reg_loss = 0.
# L2 regularization on f net.
if self._f_l2_regularizer > 0.:
f_l2_reg_loss = (
self._f_l2_regularizer *
_l2_regularization(self._f_network))
else:
f_l2_reg_loss = 0.
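# The f-side regularizers enter `loss` with a minus sign: the same scalar is
# minimized w.r.t. the critic and maximized w.r.t. f (its gradients are
# negated below), so subtraction is what actually penalizes f.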
loss = (u + critic_reg_loss
+ critic_ortho_reg_loss - f_ortho_reg_loss
+ critic_l2_reg_loss - f_l2_reg_loss)
bre_mse = self._check_bellman_residual_error(q_tm1, r_t, d_t, o_t)
# Get trainable variables.
critic_variables = self._critic_network.trainable_variables
f_variables = self._f_network.trainable_variables
# Compute gradients.
gradients = tape.gradient(loss, critic_variables + f_variables)
critic_gradients = gradients[:len(critic_variables)]
f_gradients = gradients[len(critic_variables):]
# Maybe clip gradients.
if self._clipping:
# # clip_by_global_norm
# critic_gradients = tf.clip_by_global_norm(critic_gradients, 40.)[0]
# f_gradients = tf.clip_by_global_norm(f_gradients, 40.)[0]
# clip_by_value
critic_gradients = [tf.clip_by_value(g, -1.0, 1.0)
for g in critic_gradients]
f_gradients = [tf.clip_by_value(g, -1.0, 1.0) for g in f_gradients]
# Apply critic gradients to minimize the loss.
self._critic_optimizer.apply(critic_gradients, critic_variables)
# Apply f gradients to maximize the loss.
f_gradients = [-g for g in f_gradients]
self._f_optimizer.apply(f_gradients, f_variables)
if self._use_tilde_critic:
if tf.math.mod(self._num_steps, self._tilde_critic_update_period) == 0:
source_variables = self._critic_network.variables
tilde_variables = self._tilde_critic_network.variables
# Make online -> tilde network update ops.
for src, dest in zip(source_variables, tilde_variables):
dest.assign(src)
self._num_steps.assign_add(1)
# Losses to track.
results = {
'loss': loss,
'u': u,
'f_reg_loss': f_reg_loss,
'td_mse': tf.reduce_mean(tf.square(td_error)),
'f_ms': tf.reduce_mean(tf.square(f)),
'moment': moment,
'global_steps': tf.convert_to_tensor(self._num_steps),
'bre_mse': bre_mse,
}
if self._use_tilde_critic:
results.update({
'tilde_td_mse': tf.reduce_mean(tf.square(tilde_td_error))})
if self._critic_regularizer > 0.:
results.update({'critic_reg_loss': critic_reg_loss})
if self._critic_ortho_regularizer > 0.:
results.update({'critic_ortho_reg_loss': critic_ortho_reg_loss})
if self._f_ortho_regularizer > 0.:
results.update({'f_ortho_reg_loss': f_ortho_reg_loss})
if self._critic_l2_regularizer > 0.:
results.update({'critic_l2_reg_loss': critic_l2_reg_loss})
if self._f_l2_regularizer > 0.:
results.update({'f_l2_reg_loss': f_l2_reg_loss})
return results
def num_steps(self):
return self._num_steps.numpy()
def step(self):
# Run the learning step.
fetches = self._step()
# Update our counts and record it.
counts = self._counter.increment(steps=1)
fetches.update(counts)
# Checkpoint and attempt to write the logs.
if self._checkpointer is not None:
self._checkpointer.save()
if self._snapshotter is not None:
self._snapshotter.save()
self._logger.write(fetches)
def get_variables(self, names: List[str]) -> List[List[np.ndarray]]:
return [tf2_utils.to_numpy(self._variables[name]) for name in names]
def _sample_actions(self):
if self._dev_dataset is not None:
a_ts = []
for sample in self._dev_dataset:
o_t = sample.data[4]
a_t = self._policy_network(o_t)
if self._clipping_action:
if not a_t.dtype.is_floating:
raise ValueError(f'Action dtype ({a_t.dtype}) is not floating.')
a_t = tf.clip_by_value(a_t, -1., 1.)
a_ts.append(a_t)
return tf.data.Dataset.from_tensor_slices(a_ts)
else:
return None
def dev_td_error_and_f_values(self):
"""Return TD error and f values from the dev dataset."""
td_errors = []
fs = []
for sample, a_t in zip(self._dev_dataset, self._dev_actions_dataset):
o_tm1, a_tm1, r_t, d_t, o_t = sample.data[:5]
# Cast the additional discount to match the environment discount dtype.
discount = tf.cast(self._discount, dtype=d_t.dtype)
d_t = discount * d_t
td_error, _, _ = _td_error(
self._critic_network, o_tm1, a_tm1, r_t, d_t, o_t, a_t)
f = self._f_network(o_tm1, a_tm1)
if f.shape != td_error.shape:
raise ValueError(f'Shape of f {f.shape.as_list()} does not '
f'match that of td_error {td_error.shape.as_list()}')
td_errors.append(td_error)
fs.append(f)
td_errors = tf.concat(td_errors, axis=0)
fs = tf.concat(fs, axis=0)
return {
'td_errors': td_errors,
'fs': fs,
'global_steps': tf.convert_to_tensor(self._num_steps),
}
class DeepGMMLearner(DeepGMMLearnerBase):
r"""Deep GMM learner.
Reference:
A. Bennett, N. Kallus, and T. Schnabel. Deep generalized method of moments for
instrumental variable analysis. In Advances in Neural Information Processing
Systems 32, pages 3564–3574. 2019.
Open source code: https://github.com/CausalML/DeepGMM
Objective (with 0.25 scaling on the original value)
argmin_q sup_f U
U = E[f * (R - Q_tm1 + gamma * Q_t)]
- 0.25 * E[f^2 * (R - \tilde{Q}_tm1 + gamma * \tilde{Q}_t)^2]
Additional optional losses include:
- L2 regularization on Q values.
- Orthogonal loss for Q net.
- Orthogonal loss for f net.
If not using the tilde Q net, one can specify a L2 regularizer for f
values with a constant strength to replace the second term in U.
"""
def __init__(self,
policy_network: snt.Module,
critic_network: snt.Module,
f_network: snt.Module,
discount: float,
dataset: tf.data.Dataset,
tilde_critic_network: snt.Module,
tilde_critic_update_period: int = 1,
critic_optimizer_class: str = 'OAdam',
critic_lr: float = 1e-4,
critic_beta1: float = 0.5, # From open-sourced code.
critic_beta2: float = 0.9, # From open-sourced code.
f_optimizer_class: str = 'OAdam',
f_lr: float = None, # Either f_lr or f_lr_multiplier must be
# None.
f_lr_multiplier: Optional[float] = 1.0,
f_beta1: float = 0.5, # From open-sourced code.
f_beta2: float = 0.9, # From open-sourced code.
checkpoint_interval_minutes: float = 10.0,
clipping: bool = True,
clipping_action: bool = True,
bre_check_period: int = 0, # Bellman residual error check.
bre_check_num_actions: int = 0, # Number of sampled actions.
dev_dataset: tf.data.Dataset = None,
counter: counting.Counter = None,
logger: loggers.Logger = None,
checkpoint: bool = True):
super().__init__(
policy_network=policy_network,
critic_network=critic_network,
f_network=f_network,
discount=discount,
dataset=dataset,
use_tilde_critic=True,
tilde_critic_network=tilde_critic_network,
tilde_critic_update_period=tilde_critic_update_period,
critic_optimizer_class=critic_optimizer_class,
critic_lr=critic_lr,
critic_beta1=critic_beta1,
critic_beta2=critic_beta2,
f_optimizer_class=f_optimizer_class,
f_lr=f_lr,
f_lr_multiplier=f_lr_multiplier,
f_beta1=f_beta1,
f_beta2=f_beta2,
checkpoint_interval_minutes=checkpoint_interval_minutes,
clipping=clipping,
clipping_action=clipping_action,
bre_check_period=bre_check_period,
bre_check_num_actions=bre_check_num_actions,
dev_dataset=dev_dataset,
counter=counter,
logger=logger,
checkpoint=checkpoint)
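# A minimal construction sketch (hypothetical helpers; `make_critic()` is
# assumed to build an snt.Module mapping (observation, action) -> scalar Q,
# and `dataset` to yield reverb-style samples of (o_tm1, a_tm1, r_t, d_t, o_t)):
#
# learner = DeepGMMLearner(
# policy_network=policy_net,
# critic_network=make_critic(),
# f_network=make_critic(),
# tilde_critic_network=make_critic(),
# discount=0.99,
# dataset=dataset,
# checkpoint=False)
# for _ in range(num_steps):
# learner.step()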
class AdversarialSEMLearner(DeepGMMLearnerBase):
"""Adversarial SEM learner.
Reference:
L. Liao, Y. L. Chen, Z. Yang, B. Dai, Z. Wang and M. Kolar, 2020. Provably
efficient neural estimation of structural equation model: An adversarial
approach. arXiv preprint arXiv:2007.01290.
"""
def __init__(self,
policy_network: snt.Module,
critic_network: snt.Module,
f_network: snt.Module,
discount: float,
dataset: tf.data.Dataset,
critic_optimizer_class: str = 'OAdam',
critic_lr: float = 1e-4,
critic_beta1: float = 0.,
critic_beta2: float = 0.01,
f_optimizer_class: str = 'OAdam',
f_lr: float = None, # Either f_lr or f_lr_multiplier must be
# None.
f_lr_multiplier: Optional[float] = 1.0,
f_beta1: float = 0.,
f_beta2: float = 0.01,
critic_regularizer: float = 0.0,
f_regularizer: float = 0.5, # From paper.
critic_l2_regularizer: float = 1e-4,
f_l2_regularizer: float = 1e-4,
checkpoint_interval_minutes: float = 10.0,
clipping: bool = True,
clipping_action: bool = True,
bre_check_period: int = 0, # Bellman residual error check.
bre_check_num_actions: int = 0, # Number of sampled actions.
dev_dataset: tf.data.Dataset = None,
counter: counting.Counter = None,
logger: loggers.Logger = None,
checkpoint: bool = True):
super().__init__(
policy_network=policy_network,
critic_network=critic_network,
f_network=f_network,
discount=discount,
dataset=dataset,
use_tilde_critic=False,
critic_optimizer_class=critic_optimizer_class,
critic_lr=critic_lr,
critic_beta1=critic_beta1,
critic_beta2=critic_beta2,
f_optimizer_class=f_optimizer_class,
f_lr=f_lr,
f_lr_multiplier=f_lr_multiplier,
f_beta1=f_beta1,
f_beta2=f_beta2,
critic_regularizer=critic_regularizer,
f_regularizer=f_regularizer,
critic_l2_regularizer=critic_l2_regularizer,
f_l2_regularizer=f_l2_regularizer,
checkpoint_interval_minutes=checkpoint_interval_minutes,
clipping=clipping,
clipping_action=clipping_action,
bre_check_period=bre_check_period,
bre_check_num_actions=bre_check_num_actions,
dev_dataset=dev_dataset,
counter=counter,
logger=logger,
checkpoint=checkpoint)
class AGMMLearner(DeepGMMLearnerBase):
"""AGMM learner.
Reference:
Dikkala, N., Lewis, G., Mackey, L. and Syrgkanis, V., 2020. Minimax estimation
of conditional moment models. Advances in Neural Information Processing
Systems, 33.
Open source code: https://github.com/microsoft/AdversarialGMM
"""
def __init__(self,
policy_network: snt.Module,
critic_network: snt.Module,
f_network: snt.Module,
discount: float,
dataset: tf.data.Dataset,
critic_optimizer_class: str = 'OAdam',
critic_lr: float = 1e-4,
critic_beta1: float = 0., # From open-sourced code.
critic_beta2: float = 0.01, # From open-sourced code.
f_optimizer_class: str = 'OAdam',
f_lr: float = None, # Either f_lr or f_lr_multiplier must be
# None.
f_lr_multiplier: Optional[float] = 1.0,
f_beta1: float = 0., # From open-sourced code.
f_beta2: float = 0.01, # From open-sourced code.
f_regularizer: float = 1.0, # From open-sourced code.
critic_l2_regularizer: float = 1e-4,
f_l2_regularizer: float = 1e-4,
checkpoint_interval_minutes: float = 10.0,
clipping: bool = True,
clipping_action: bool = True,
bre_check_period: int = 0, # Bellman residual error check.
bre_check_num_actions: int = 0, # Number of sampled actions.
dev_dataset: tf.data.Dataset = None,
counter: counting.Counter = None,
logger: loggers.Logger = None,
checkpoint: bool = True):
super().__init__(
policy_network=policy_network,
critic_network=critic_network,
f_network=f_network,
discount=discount,
dataset=dataset,
use_tilde_critic=False,
critic_optimizer_class=critic_optimizer_class,
critic_lr=critic_lr,
critic_beta1=critic_beta1,
critic_beta2=critic_beta2,
f_optimizer_class=f_optimizer_class,
f_lr=f_lr,
f_lr_multiplier=f_lr_multiplier,
f_beta1=f_beta1,
f_beta2=f_beta2,
f_regularizer=f_regularizer,
critic_l2_regularizer=critic_l2_regularizer,
f_l2_regularizer=f_l2_regularizer,
checkpoint_interval_minutes=checkpoint_interval_minutes,
clipping=clipping,
clipping_action=clipping_action,
bre_check_period=bre_check_period,
bre_check_num_actions=bre_check_num_actions,
dev_dataset=dev_dataset,
counter=counter,
logger=logger,
checkpoint=checkpoint)
|
liyuan9988/IVOPEwithACME
|
src/ope/deep_gmm/learner.py
|
Python
|
mit
| 25,153
|
import requests
url = 'http://educacao.dadosabertosbr.com/api/cidades/ce'
# requests parses the JSON response body directly.
cidades = requests.get(url).json()
for cidade in cidades:
codigo, nome = cidade.split(':')
print(nome)
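# Each element of `cidades` is expected to be a 'codigo:nome' string (e.g. a
# hypothetical '2300101:Abaiara'), which is why split(':') recovers both fields.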
|
santiagosilas/propython
|
raspagem/random/lista_cidades.py
|
Python
|
mit
| 252
|