repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
dariomangoni/chrono | src/demos/python/fea/demo_FEA_shells.py | Python | bsd-3-clause | 15,226 | 0.008669 | # =============================================================================
# PROJECT CHRONO - http:#projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http:#projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Alessandro Tasora
# =============================================================================
#
# FEA for shells of Reissner 6-field type
#
# =============================================================================
import pychrono as chrono
import pychrono.fea as fea
import pychrono.pardisomkl as mkl
import pychrono.irrlicht as chronoirr
import os
import numpy as np
import matplotlib.pyplot as plt
# Output directory
out_dir = chrono.GetChronoOutputPath() + "FEA_SHELLS"
print( "Copyright (c) 2017 projectchrono.org")
# Create (if needed) output directory
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
if not os.path.isdir(out_dir):
raise NameError("Error creating directory " )
# Create a Chrono::Engine physical system
my_system = chrono.ChSystemSMC()
# Create the Irrlicht visualization (open the Irrlicht device,
# bind a simple user interface, etc. etc.)
application = chronoirr.ChIrrApp(my_system, "Shells FEA", chronoirr.dimension2du(800, 600), False, True)
# Easy shortcuts to add camera, lights, logo and sky in Irrlicht scene:
application.AddTypicalLogo()
application.AddTypicalSky()
application.AddTypicalLights()
application.AddTypicalCamera(chronoirr.vector3df(0, 6.0, -10))
# Create a mesh, that is a container for groups
# of elements and their referenced nodes.
my_mesh = fea.ChMesh()
# Remember to add the mesh to the system!
my_system.Add(my_mesh)
# my_system.Set_G_acc(VNULL) or
my_mesh.SetAutomaticGravity(False)
nodePlotA = fea.ChNodeFEAxyzrot()
nodePlotB = fea.ChNodeFEAxyzrot()
nodesLoad = []
ref_X = chrono.ChFunction_Recorder()
ref_Y = chrono.ChFunction_Recorder()
load_torque = chrono.ChVectorD()
load_force = chrono.ChVectorD()
bench1 = False
bench2 = True
bench3 = False
#
# BENCHMARK n.1
#
# Add an EANS SHELL cantilever:
#
if (bench1): # set as 'True' to execute this
rect_thickness = 0.10
rect_L = 10.0
rect_W = 1
# Create a material
rho = 0.0
E = 1.2e6
nu = 0.0
melasticity =fea.ChElasticityReissnerIsothropic(E, nu, 1.0, 0.01)
mat = fea.ChMaterialShellReissner(melasticity)
# In case you need also damping it would add...
#mdamping = chrono_types::make_shared<ChDampingReissnerRayleigh>(melasticity,0.01)
#mat = chrono_types::make_shared<ChMaterialShellReissner>(melasticity, nullptr, mdamping)
mat.SetDensity(rho)
# Create the nodes
nels_L = 12
nels_W = 1
elarray = [fea.ChElementShellReissner4]*(nels_L * nels_W)
nodearray = [fea.ChNodeFEAxyzrot]*((nels_L + 1) * (nels_W + 1))
nodes_start = [fea.ChNodeFEAxyzrot]*(nels_W + 1)
nodes_end = [fea.ChNodeFEAxyzrot]*(nels_W + 1)
for il in range(nels_L+1) :
for iw in range(nels_W +1):
# Make nodes
nodepos = chrono.ChVectorD(rect_L * (il / nels_L), 0, rect_W * (iw / nels_W))
noderot = chrono.ChQuaternionD(chrono.QUNIT)
nodeframe = chrono.ChFrameD(nodepos, noderot)
mnode = fea.ChNodeFEAxyzrot(nodeframe)
my_mesh.AddNode(mnode)
for i in range(3):
mnode.GetInertia()[i,i] = 0 # approx]
mnode.SetMass(0)
nodearray[il * (nels_W + 1) + iw] = mnode
if (il == 0):
nodes_start[iw] = mnode
if (il == nels_L):
nodes_end[iw] = mnode
# Make elements
if (il > 0 and iw > 0) :
melement = fea.ChElementShellReissner4()
my_mesh.AddElement(melement)
melement.SetNodes(nodearray[(il - 1) * (nels_W + 1) + (iw - 1)],
nodearray[(il) * (nels_W + 1) + (iw - 1)], nodearray[(il) * (nels_W + 1) + ( | iw)],
nodearray[(il - 1) * (nels_W + 1) + (iw)])
melement.AddLayer(rect_thickness, 0 * chrono.CH_C_DEG_TO_RAD, mat)
elarray[(il - 1) * (nels_W) + (iw - 1)] = melement
nodesLoad = nodes_end
nodePlotA = nodes_end[0]
nodePlotB = nodes_end[-1]
for startnode in( nodes_start):
startnode | .SetFixed(True)
# applied load
# load_force = chrono.ChVectorD(200000,0, 20000)
load_force = chrono.ChVectorD(0, 4, 0)
# load_torque = chrono.ChVectorD(0, 0, 50*CH_C_PI/3.0)
# reference solution for (0, 4, 0) shear to plot
ref_Y.AddPoint(0.10, 1.309)
ref_X.AddPoint(0.40, 0.103)
ref_Y.AddPoint(0.20, 2.493)
ref_X.AddPoint(0.80, 0.381)
ref_Y.AddPoint(0.30, 3.488)
ref_X.AddPoint(1.20, 0.763)
ref_Y.AddPoint(0.40, 4.292)
ref_X.AddPoint(1.60, 1.184)
ref_Y.AddPoint(0.50, 4.933)
ref_X.AddPoint(2.00, 1.604)
ref_Y.AddPoint(0.60, 5.444)
ref_X.AddPoint(2.40, 2.002)
ref_Y.AddPoint(0.70, 5.855)
ref_X.AddPoint(2.80, 2.370)
ref_Y.AddPoint(0.80, 6.190)
ref_X.AddPoint(3.20, 2.705)
ref_Y.AddPoint(0.90, 6.467)
ref_X.AddPoint(3.60, 3.010)
ref_Y.AddPoint(1.00, 6.698)
ref_X.AddPoint(4.00, 3.286)
#
# BENCHMARK n.2
#
# Add a SLIT ANNULAR PLATE:
#
if (bench2): # set as 'True' to execute this
plate_thickness = 0.03
plate_Ri = 6
plate_Ro = 10
# Create a material
rho = 0.0
E = 21e6
nu = 0.0
mat = fea.ChMaterialShellReissnerIsothropic(rho, E, nu, 1.0, 0.01)
# Create the nodes
nels_U = 60
nels_W = 10
arc = chrono.CH_C_2PI * 1
elarray = [fea.ChElementShellReissner4]*(nels_U * nels_W)
nodearray = [fea.ChNodeFEAxyzrot]*((nels_U + 1) * (nels_W + 1))
nodes_start = [fea.ChNodeFEAxyzrot]*(nels_W + 1)
nodes_end = [fea.ChNodeFEAxyzrot]*(nels_W + 1)
for iu in range(nels_U +1):
for iw in range(nels_W +1) :
# Make nodes
u = iu / nels_U
w = iw / nels_W
nodepos = chrono.ChVectorD((plate_Ri + (plate_Ro - plate_Ri) * w) * np.cos(u * arc), 0,
(plate_Ri + (plate_Ro - plate_Ri) * w) * np.sin(u * arc))
noderot = chrono.ChQuaternionD(chrono.QUNIT)
nodeframe = chrono.ChFrameD(nodepos, noderot)
mnode = fea.ChNodeFEAxyzrot(nodeframe)
my_mesh.AddNode(mnode)
for i in range(3):
mnode.GetInertia()[i,i] = 0
mnode.SetMass(0)
nodearray[iu * (nels_W + 1) + iw] = mnode
if (iu == 0):
nodes_start[iw] = mnode
if (iu == nels_U):
nodes_end[iw] = mnode
# Make elements
if (iu > 0 and iw > 0) :
melement = fea.ChElementShellReissner4()
my_mesh.AddElement(melement)
melement.SetNodes(nodearray[(iu) * (nels_W + 1) + (iw)], nodearray[(iu - 1) * (nels_W + 1) + (iw)],
nodearray[(iu - 1) * (nels_W + 1) + (iw - 1)],
nodearray[(iu) * (nels_W + 1) + (iw - 1)])
melement.AddLayer(plate_thickness, 0 * chrono.CH_C_DEG_TO_RAD, mat)
elarray[(iu - 1) * (nels_W) + (iw - 1)] = melement
nodesLoad = nodes_end
nodePlotA = nodes_end[0]
nodePlotB = nodes_end[-1]
for mstartnode in nodes_start :
mstartnode.SetFixed(True)
load_force = chrono.ChVectorD(0, 0.8 * 4, 0)
load_torque = chrono.VNULL
# reference solution to plot
ref_X.AddPoint(0.025, 1.305)
ref_Y.AddPoint(0.025, 1.789)
ref_X.AddPoint(0.10, 4.277)
ref_Y.AddPoint(0.10, 5.876)
ref_X.AddPoint(0.20, 6.725)
ref_Y.AddPoint(0.20, 9.160)
ref_X.AddPoint(0.30, 8.340)
ref_Y.AddPoint(0.30, 11.213)
ref_X.AddPoint(0.40, 9.529)
ref_Y.AddPoint(0.40, 12.661)
ref_X.AddPoint(0.50, 10.468)
ref_Y.AddPoint(0.50, 13.768)
ref_X.AddPoint(0.60, 11.257)
ref_Y.AddPoint(0.60, 14.674)
ref_X.AddPoint(0.70, 11.970)
ref_Y.AddPoint(0.70, 15.469)
ref_X.AddPoint(0.80, 12.642)
|
USGS-EROS/lcmap-firebird | test/test_cli.py | Python | unlicense | 367 | 0.00545 | from ccdc import cli
from ccdc import cli
from click.testing import CliRunner
import test
@test.vcr.use_cassette(test.cassette)
def test_changed | etection():
runner = CliRunner()
result = runner.invoke(cli.entrypoint, ['changedetection', '-x 0', '-y 0', '-n 1'])
print(result.output)
assert result.exit_code == 0
def test_classification():
pass | |
SuperMass/distOS-lab3 | src/integrated/client2/client_config.py | Python | gpl-3.0 | 334 | 0.005988 | remote_s | erver_ips = ('127.0.0.1', '127.0.0.1')
remote_server_ports = (8005, 8006)
assigned_server_index = 0 # in real system, client is distributed by a load balancing server in general; here I just simulate the balancing policy.
process_id = 2
client_addr = ('127.0.0.1', 7002)
poisson_lambda = 5
simu_len = 60
get_sco | re_pb = 0.8
|
costastf/blind-Pyrsync | pytxt2pdf/pyText2Pdf.py | Python | lgpl-3.0 | 22,089 | 0.012495 | #!/usr/bin/env python
#-*- coding: UTF-8 -*-
# File: pyText2Pdf.py
#
# Derived from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189858
__docformat__ = 'plaintext'
__date__ = '04/10/2013'
'''
pyText2Pdf - Python script to convert plain text files into Adobe
Acrobat PDF files.
Version 1.2
Author: Anand B Pillai <abpillai at lycos dot com>
Keywords: python, tools, converter, pdf, text2pdf, adobe, acrobat,
processing.
Copyright (C) 2003-2004 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.'''
import sys, os
import string
import time
import optparse
import re
LF_EXTRA=0
LINE_END='\015'
# form feed character (^L)
FF=chr(12)
ENCODING_STR = """\
/Encoding <<
/Differences [ 0 /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /space /exclam
/quotedbl /numbersign /dollar /percent /ampersand
/quoteright /parenleft /parenright /asterisk /plus /comma
/hyphen /period /slash /zero /one /two /three /four /five
/six /seven /eight /nine /colon /semicolon /less /equal
/greater /question /at /A /B /C /D /E /F /G /H /I /J /K /L
/M /N /O /P /Q /R /S /T /U /V /W /X /Y /Z /bracketleft
/backslash /bracketright /asciicircum /underscore
/quoteleft /a /b /c /d /e /f /g /h /i /j /k /l /m /n /o /p
/q /r /s /t /u /v /w /x /y /z /braceleft /bar /braceright
/asciitilde /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/.notdef /.notdef /.notdef /.notdef /.notdef /.notdef
/dotlessi /grave /acute /circumflex /tilde /macron /breve
/dotaccent /dieresis /.notdef /ring /cedilla /.notdef
/hungarumlaut /ogonek /caron /space /exclamdown /cent
/sterling /currency /yen /brokenbar /section /dieresis
/copyright /ordfeminine /guillemotleft /logicalnot /hyphen
/registered /macron /degree /plusminus /twosuperior
/threesuperior /acute /mu /paragraph /periodcentered
/cedilla /onesuperior /ordmasculine /guillemotright
/onequarter /onehalf /threequarters /questiondown /Agrave
/Aacute /Acircumflex /Atilde /Adieresis /Aring /AE
/Ccedilla /Egrave /Eacute /Ecircumflex /Edieresis /Igrave
/Iacute /Icircumflex /Idieresis /Eth /Ntilde /Ograve
/Oacute /Ocircumflex /Otilde /Odieresis /multiply /Oslash
/Ugrave /Uacute /Ucircumflex /Udieresis /Yacute /Thorn
/germandbls /agrave /aacute /acircumflex /atilde /adieresis
/aring /ae /ccedilla /egrave /eacute /ecircumflex
/edieresis /igrave /iacute /icircumflex /idieresis /eth
/ntilde /ograve /oacute /ocircumflex /otilde /odieresis
/divide /oslash /ugrave /uacute /ucircumflex /udieresis
/yacute /thorn /ydieresis ]
>>
"""
INTRO="""\
%prog [options] filename
PyText2Pdf makes a 7-bit clean PDF file from any input file.
It reads from a named file, and writes the PDF file to a file specified by
the user, otherwise to a file with '.pdf' appended to the input file.
Author: Anand B Pillai."""
class PyText2Pdf(object):
""" Text2pdf converter in pure Python """
def __init__(self, **kwargs):
# version number
self._version="1.3"
# iso encoding flag
self._IsoEnc=False
# formfeeds flag
self._doFFs=False
self._progname="PyText2Pdf"
self._appname = " ".join((self._progname,str(self._version)))
# default font
self._font="/Courier"
# default font size
self._ptSize=10
# default vert space
self._vertSpace=12
self._lines=0
# number of characters in a row
self._cols=80
self._columns=1
# page ht
self._pageHt=792
# page wd
self._pageWd=612
# input file
self._ifile=""
if 'ifilename' in kwargs:
self._ifile = kwargs['ifilename']
# output file
self._ofile=""
| if 'ofile' in kwargs:
self._ofile = kwargs['ofile']
# default tab width
self._tab=4
# input file descriptor
self._ifs=None
# output file descriptor
self._ofs=None
self.buffers = False
if 'buffers' in kwargs and kwargs['buffers']:
self._ifs=kwargs['ifile']
self._ofs=kwargs['ofile']
self.buffers = True
# landscape flag
self._landscap | e=False
# Subject
self._subject = ''
# Author
self._author = ''
# Keywords
self._keywords = []
# Custom regexp for page breaks
self._pagebreakre = None
# marker objects
self._curobj = 5
self._pageObs = [0]
self._locations = [0,0,0,0,0,0]
self._pageNo=0
# file position marker
self._fpos=0
def parse_args(self):
""" Callback function called by argument parser.
Helps to remove duplicate code """
if len(sys.argv)<2:
sys.argv.append('-h')
parser = optparse.OptionParser(usage=INTRO)
parser.add_option('-o','--output',dest='outfile',help='Direct output to file OUTFILE',metavar='OUTFILE')
parser.add_option('-f','--font',dest='font',help='Use Postscript font FONT (must be in standard 14, default: Courier)',
default='Courier')
parser.add_option('-I','--isolatin',dest='isolatin',help='Use ISO latin-1 encoding',default=False,action='store_true')
parser.add_option('-s','--size',dest='fontsize',help='Use font at PTSIZE points (default=>10)',metavar='PTSIZE',default=10)
parser.add_option('-v','--linespace',dest='linespace',help='Use line spacing LINESPACE (deault 12)',metavar='LINESPACE',default=12)
parser.add_option('-l','--lines',dest='lines',help='Lines per page (default 60, determined automatically if unspecified)',default=60, metavar=None)
parser.add_option('-c','--chars',dest='chars',help='Maximum characters per line (default 80)',default=80,metavar=None)
parser.add_option('-t','--tab',dest='tabspace',help='Spaces per tab character (default 4)',default=4,metavar=None)
parser.add_option('-F','--ignoreff',dest='formfeed',help='Ignore formfeed character ^L (i.e, accept formfeed characters as pagebreaks)',default=False,action='store_true')
parser.add_option('-P','--papersize',dest='papersize',help='Set paper size (default is letter, accepted values are "A4" or "A3")')
parser.add_option('-W','--width',dest='width',help='Independent paper width in points',metavar=None,default=612)
parser.add_option('-H','--height',dest='height',help='Independent paper height in points',metavar=None,default=792)
parser.add_option('-2','--twocolumns',dest='twocolumns',help='Format as two columns',metavar=None,default=False,action='store_true')
parser.add_option('-L','--landscape',dest='landscape',help='Format in landscape mode',metavar=None,default=False,action='store_true')
parser.add_option('-R','--regexp',dest='pageregexp',help='Regular expression string to determine page breaks (if supplied, this will be used to split text into pages, instead of using line count)',metavar=None)
parser.add_option('-S','--subject',dest='subject',help='Optional subject for the document',metavar=None)
parser.add_option('-A','--author',dest='author',help='Optional author for the document',metavar=None)
parser.add_opt |
ENCODE-DCC/encoded | src/encoded/tests/fixtures/schemas/genetic_modification.py | Python | mit | 19,673 | 0.000305 | import pytest
@pytest.fixture
def genetic_modification(testapp, lab, award):
item = {
'award': award['@id'],
'lab': lab['@id'],
'modified_site_by_coordinates': {
'assembly': 'GRCh38',
'chromosome': '11',
'start': 20000,
'end': 21000
},
'purpose': 'repression',
'category': 'deletion',
'method': 'CRISPR',
'zygosity': 'homozygous'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def genetic_modification_RNAi(testapp, lab, award):
item = {
'award': award['@id'],
'lab': lab['@id'],
'modified_site_by_coordinates': {
'assembly': 'GRCh38',
'chromosome': '11',
'start': 20000,
'end': 21000
},
'purpose': 'repression',
'category': 'deletion',
'method': 'RNAi'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def genetic_modification_source(testapp, lab, award, source, gene):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'introduced_gene': gene['@id'],
'purpose': 'expression',
'method': 'CRISPR',
'reagents': [
{
'source': source['@id'],
'identifier': 'sigma:ABC123'
}
]
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def crispr_deletion(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'deletion',
'purpose': 'repression',
'method': 'CRISPR'
}
@pytest.fixture
def crispr_deletion_1(testapp, lab, award, target):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'deletion',
'purpose': 'repression',
'method': 'CRISPR',
'modified_site_by_target_id': target['@id'],
'guide_rna_sequences': ['ACCGGAGA']
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def tale_deletion(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'deletion',
'purpose': 'repression',
'method': 'TALEN',
'zygosity': 'heterozygous'
}
@py | test.fixture
def crispr_tag(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'method': 'CRISPR'
}
@pytest.fixture
def bombardment_tag(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'ins | ertion',
'purpose': 'tagging',
'nucleic_acid_delivery_method': ['bombardment']
}
@pytest.fixture
def recomb_tag(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'method': 'site-specific recombination'
}
@pytest.fixture
def transfection_tag(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'nucleic_acid_delivery_method': ['stable transfection']
}
@pytest.fixture
def crispri(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'interference',
'purpose': 'repression',
'method': 'CRISPR'
}
@pytest.fixture
def rnai(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'interference',
'purpose': 'repression',
'method': 'RNAi'
}
@pytest.fixture
def mutagen(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'mutagenesis',
'purpose': 'repression',
'method': 'mutagen treatment'
}
@pytest.fixture
def tale_replacement(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'replacement',
'purpose': 'characterization',
'method': 'TALEN',
'zygosity': 'heterozygous'
}
@pytest.fixture
def mpra(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'characterization',
'nucleic_acid_delivery_method': ['transduction']
}
@pytest.fixture
def starr_seq(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'episome',
'purpose': 'characterization',
'nucleic_acid_delivery_method': ['transient transfection']
}
@pytest.fixture
def introduced_elements(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'category': 'episome',
'purpose': 'characterization',
'nucleic_acid_delivery_method': ['transient transfection'],
'introduced_elements': 'genomic DNA regions'
}
@pytest.fixture
def crispr_tag_1(testapp, lab, award, ctcf):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'method': 'CRISPR',
'modified_site_by_gene_id': ctcf['@id'],
'introduced_tags': [{'name': 'mAID-mClover', 'location': 'C-terminal'}]
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def mpra_1(testapp, lab, award):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'characterization',
'nucleic_acid_delivery_method': ['transduction'],
'introduced_elements': 'synthesized DNA',
'modified_site_nonspecific': 'random'
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def recomb_tag_1(testapp, lab, award, target, treatment_5, document):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'insertion',
'purpose': 'tagging',
'method': 'site-specific recombination',
'modified_site_by_target_id': target['@id'],
'modified_site_nonspecific': 'random',
'category': 'insertion',
'treatments': [treatment_5['@id']],
'documents': [document['@id']],
'introduced_tags': [{'name': 'eGFP', 'location': 'C-terminal'}]
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def rnai_1(testapp, lab, award, source, target):
item = {
'lab': lab['@id'],
'award': award['@id'],
'category': 'interference',
'purpose': 'repression',
'method': 'RNAi',
'reagents': [{'source': source['@id'], 'identifier': 'addgene:12345'}],
'rnai_sequences': ['ATTACG'],
'modified_site_by_target_id': target['@id']
}
return testapp.post_json('/genetic_modification', item).json['@graph'][0]
@pytest.fixture
def genetic_modification_1(lab, award):
return {
'modification_type': 'deletion',
'award': award['uuid'],
'lab': lab['uuid'],
'modifiction_description': 'some description'
}
@pytest.fixture
def genetic_modification_2(lab, award):
return {
'modification_type': 'deletion',
'award': award['uuid'],
'lab': lab['uuid'],
'modification_description': 'some description',
'modification_zygocity': 'homozygous',
'modification_purpose': 'tagging',
'modification_treatments': [],
'modification_genome_coordinates': [{
'chromosome': '11',
'start': 5309435,
'end': 5309451
}]
}
@pytest.fixture
def crispr_gm(lab, award, source):
return {
'lab': lab['uuid'],
'award': award['uuid'],
'source': source['uuid'],
'guide_rna_sequences': [
"ACA",
"GCG"
],
'insert_sequence': 'TCGA',
'aliases': ['encode:crispr |
ericawright/bedrock | bedrock/newsletter/templatetags/helpers.py | Python | mpl-2.0 | 2,361 | 0.000424 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one a | t http://mozilla.org/MPL/2.0/.
import logging
imp | ort jinja2
from django.template.loader import render_to_string
from django_jinja import library
from lib.l10n_utils import get_locale
from bedrock.newsletter.forms import NewsletterFooterForm
log = logging.getLogger(__name__)
@library.global_function
@jinja2.contextfunction
def email_newsletter_form(ctx, newsletters='mozilla-and-you', title=None,
subtitle=None, desc=None, include_country=True,
include_language=True, details=None,
use_thankyou=True, thankyou_head=None,
thankyou_content=None, footer=True,
process_form=True, include_title=None,
submit_text=None, button_class=None,
spinner_color=None, protocol_component=False,
email_label=None, email_placeholder=None):
request = ctx['request']
context = ctx.get_all()
success = bool(ctx.get('success'))
if success and not use_thankyou:
return
form = ctx.get('newsletter_form', None)
if not form:
form = NewsletterFooterForm(newsletters, get_locale(request))
context.update(dict(
id=newsletters,
title=title,
subtitle=subtitle, # nested in/depends on include_title
desc=desc, # nested in/depends on include_title
include_country=include_country,
include_language=include_language,
details=details,
use_thankyou=use_thankyou,
thankyou_head=thankyou_head,
thankyou_content=thankyou_content,
footer=footer,
include_title=include_title if include_title is not None else footer,
form=form,
submit_text=submit_text,
button_class=button_class,
spinner_color=spinner_color,
success=success,
email_label=email_label,
email_placeholder=email_placeholder,
))
template_name = 'newsletter/includes/form-protocol.html' if protocol_component else 'newsletter/includes/form.html'
html = render_to_string(template_name, context, request=request)
return jinja2.Markup(html)
|
astrobin/astrobin | astrobin_apps_platesolving/migrations/0002_auto_20171109_2307.py | Python | agpl-3.0 | 468 | 0.002137 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-11-09 23:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('astrobin_apps_platesolving', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='solution',
| name='skyplot_ | zoom1',
field=models.ImageField(blank=True, null=True, upload_to=b'images/skyplots'),
),
]
|
souviik/NetSecure | vulnerability.py | Python | gpl-3.0 | 4,720 | 0.021186 | #!/usr/bin/env python
import urllib
import re
from site_fetch import * #importing everything from site_fetch module
def testUrl(url, payloads, check): # to split the url and try the append payloads in every parameter value.
opener = urllib.urlopen(url)
vulnerbs = 0
if opener.code == 999: # Detetcing the WebKnight WAF from the StatusCode.
print color.red +" WebKnight Web Application Firewall Detected!"+color.end
print color.red +" Delaying 3 seconds between every request"+color.end
time.sleep(3) #Delaying 3 seconds between every request
for evry_parameter in url.split("?")[1].split("&"):
| for payload in payloads:
bugs = url.replace(evry_parameter, e | vry_parameter + str(payload).strip()) #inserting payload after every parameter
request = useragent.open(bugs) #opening the URL with the insserted payload
html = request.readlines() #checking for errors on the resulted web page
for line in html:
checker = re.findall(check, line) #checking for errors using regex pattern
if len(checker) !=0:
print color.red+" Vulnerability Found . . ."+color.end
print color.red+" Payload Injected: " ,payload +color.end
print color.green+" Fault Code Snippet: " +color.end + line.strip()
print color.blue+" Proof of Concept: "+color.end + bugs
print color.green+"________________________________________________________________________________"+color.end
vulnerbs +=1
if vulnerbs == 0:
print color.green+" Target is not vulnerable!"+color.end
else:
print color.blue+" %i Bugs found :-) " % (vulnerbs) +color.end
#function for vulnerability and detection payloads.
def remoteCodeExec(url): #function to detect remote code application
print color.bold+"\n Scanning for Remote Code/Command Execution "+color.end
print color.blue+" Please wait ...."+color.end
# Remote Code Injection Payloads
payloads = [';${@print(md5(souvik0))}', ';${@print(md5("souvik0"))}']
# Below is the Encrypted Payloads to bypass some Security Filters & WAF's
payloads += ['%253B%2524%257B%2540print%2528md5%2528%2522souvik0%2522%2529%2529%257D%253B']
# Remote Command Execution Payloads
payloads += [';uname;', '&&dir', '&&type C:\\boot.ini', ';phpinfo();', ';phpinfo']
check = re.compile("51107ed95250b4099a0f481221d56497|Linux|eval\(\)|SERVER_ADDR|Volume.+Serial|\[boot", re.I) #appending the payload to check variable
testUrl(url, payloads, check) #calling the test URL function with check parameter
def xssExploit(url): #fn to detect XSS
print color.bold+"\n Scanning for XSS vulnerability"+color.end
print color.blue+" Please wait ...."+color.end
8
#Paylod souvik="css();" added for XSS in <a href TAG's
payloads = ['%27%3Esouvik0%3Csvg%2Fonload%3Dconfirm%28%2Fsouvik0%2F%29%3Eweb', '%78%22%78%3e%78'] #defining payload for XSS
payloads += ['%22%3Esouvik0%3Csvg%2Fonload%3Dconfirm%28%2Fsouvik0%2F%29%3Eweb', 'souvik0%3Csvg%2Fonload%3Dconfirm%28%2Fsouvik0%2F%29%3Eweb'] #encoding payload
check = re.compile('souvik0<svg|x>x', re.I)
testUrl(url, payloads, check)
def sqliExploit(url): #fn to detect SQL vulnerabilities
print color.bold+"\n Scanning for Error Based SQL Injection "+color.end
print color.blue+" Please wait ...."+color.end
# %5c encoded version of: \
# space - %20, ! - %21, ' - %27 , ( - %28, ) - %29
# reference check - http://www.w3schools.com/TAGS/ref_urlencode.asp
payloads = ["3'", "3%5c", "3%27%22%28%29", "3'><", "3%22%5C%27%5C%22%29%3B%7C%5D%2A%7B%250d%250a%3C%2500%3E%25bf%2527%27"]
check = re.compile("Incorrect syntax|Syntax error|Unclosed.+mark|unterminated.+qoute|SQL.+Server|Microsoft.+Database|Fatal.+error", re.I)
testUrl(url, payloads, check)
|
joshuaeitan/massivedata | PS04/forensicswiki_geolocate.py | Python | cc0-1.0 | 1,874 | 0.013874 | #!/usr/bin/spark-submit
import sys
from operator import add
from pyspark import SparkContext
from pyspark import join
import socket, struct
from bisect import bisect_left
if __name__ == "__main__":
infileIp = 's3://gu-anly502/maxmind/GeoLite2-Country-Blocks-IPv4.csv'
infileCountry = 's3://gu-anly502/maxmind/GeoLite2-Country-Locations-en.csv'
sc = SparkContext( appName="Wikipedia Geolocate" )
## Part A
linesIp = sc.textFile(infileIp)
linesCountry = sc.textFile(infileCountry)
fieldsIp = linesIp.map(lambda line: line.split(',')).filter(lambda line: line[0]!='network')
fieldsCountry = linesCountry.map(lambda line: line.split(',')).filter(lambda line: line[0]!='geoname_id')
geonameIdIp = fieldsIp.map(lambda line: line[1])
networkIp = fieldsIp.map(lambda line: line[0].split('/'))
networkIp = networkIp.map(lambda line: line[0])
geonameIdCountry = fieldsCountry.map(lambda line: line[0])
nameCountry = fieldsCountry.map(lambda line: line[5])
geonameNetwork = geonameIdIp.zip(networkIp)
geonameCountry = geonameIdCountry.zip(nameCountry)
maxMind = geonameNetwork.join(geonameCountry)
## Part B; from http://stackoverflow.com/questions/9590965/convert-an-ip-string-to-a-number-and-vice-versa
## Convert an IP string to long
def ip2long(ip):
packedIP = socket.inet_aton(ip)
return struct.unpack("!L", packedIP)[0]
ipLong = maxMind.map(lambda ip: (ip2long(ip[1][0]),ip[1][1])).collect()
## Part C
bcIpLong = sc.broadcas | t(ipLong)
## Part D
infileFwiki = 's3: | //gu-anly502/ps03/forensicswiki.2012.txt'
linesFwiki = sc.textFile(infileFwiki)
## Part E
# Convert IPs in fwiki to long
fwiki = linesFwiki.map(lambda line: line.split(' - -'))
fwikiIpLong = fwiki.map(lambda line: (ip2long(line[0]),line[1]))
|
zjost/antsystem | functions/tau_branch.py | Python | gpl-2.0 | 184 | 0.038043 | import numpy as np
def tau_branch(tau,epp):
N = len | (tau[0,:])
branch_cnt = []
for i in range(N):
branch_cnt.append(np.sum(tau[i,:]>epp | ))
return branch_cnt |
xelphene/swaf | swaf/resp.py | Python | gpl-3.0 | 2,300 | 0.047391 |
codes = {
307: 'Temporary Redirect',
303: 'See Other',
302: 'Found',
301: 'Moved Permanently'
}
def authRequired(realm):
return {
'status': 401,
'reason': 'Authentication Required',
'headers': [
('Content-type','text/plain'),
('WWW-Authenticate', 'Basic realm="%s"' % realm)
],
'body': 'Authentication required.'
}
def redirect(url, status=303):
if status not in codes.keys():
raise ValueError('redirect called with unknown status value')
return {
'status': status,
'reason': codes[status],
'headers': [
('Content-type', 'text/plain'),
('Location', url)
],
'body': 'Moved to %s' % url
}
def wrongMethod():
return {
'status': 405,
'reason': 'Method Not Allowed',
'headers': [('Content-type', 'text/plain')],
'body': 'The request was issued with a method not allowed for this resource.'
}
def css(body):
return ok('text/css', body)
def plain(body):
return ok('text/plain', body)
def html(body):
return ok('text/html', body)
def ok(ctype, body):
return {
'status': 200,
'reason': 'OK',
'headers': [('Content-type',ctype)],
'body': body
}
def notFound():
return {
'status': 404,
'reason': 'Not Found',
'headers': [('Content-type','text/plain')],
'body': 'The requested resource cannot be found.'
}
notfound = notFound
def forbidden():
    """Build a 403 Forbidden response."""
    message = 'You do not have access to the requested resource.'
    return {
        'status': 403,
        'reason': 'Forbidden',
        'headers': [('Content-type', 'text/plain')],
        'body': message,
    }
def is_a_resp(x):
    """Return True when *x* is a dict with every mandatory response key
    ('status', 'reason', 'body', 'headers'), False otherwise.

    Bug fixes vs. the original: it fell off the end and returned None
    (falsy, but not False) when all keys were present, and it used
    dict.has_key(), which no longer exists on Python 3.
    """
    # type() comparison kept deliberately: the original rejected dict
    # subclasses too, and callers may rely on that.
    if type(x) != dict:
        return False
    for key in ('status', 'reason', 'body', 'headers'):
        if key not in x:
            return False
    return True
def error_verbose(code=None, report=None):
    """Build a 500 response that appends the full error *report*.

    Args:
        code: opaque error identifier echoed into the body.
        report: iterable of strings joined with newlines into the body.

    Fixes extraction garbling ("'Content-t | ype'", "'\\ | n\\n") that had
    broken the original string literals.
    """
    r = {
        'status': 500,
        'reason': 'Internal Server Error',
        'headers': [('Content-type', 'text/plain')],
        'body': '500 Internal Server Error. Error code: %s.' % str(code)
    }
    r['body'] += '\n\n-------------------------------------------\n'
    r['body'] += 'Error Detail:\n\n'
    r['body'] += '\n'.join(report)
    return r
def error_vague(code=None):
    """Build a 500 response that reveals only the error *code*."""
    summary = '500 Internal Server Error. Error code: %s.' % str(code)
    return {
        'status': 500,
        'reason': 'Internal Server Error',
        'headers': [('Content-type', 'text/plain')],
        'body': summary,
    }
|
molmod/zeobuilder | zeobuilder/nodes/reference.py | Python | gpl-3.0 | 6,367 | 0.002042 | # -*- coding: utf-8 -*-
# Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
from zeobuilder.nodes.node import Node, NodeInfo
from zeobuilder.nodes.glmixin import GLTransformationMixin
from zeobuilder.nodes.analysis import bridge as tree_bridge
from zeobuilder.gui import load_image
from molmod import Translation
__all__ = [
"TargetError", "Reference", "SpatialReference"
]
class TargetError(Exception):
    """Raised when a reference target is missing, already set or rejected."""
    pass
class Reference(Node):
    """Tree node that points at another node in the model (its target).

    While both nodes belong to a model, the reference registers itself in
    the target's ``references`` list and mirrors the target's icon/name.

    This rewrite heals two extraction-garbled lines in the original
    ("Empty refer | ence..." and "references.remove | (self)").
    """

    info = NodeInfo("SelectTargets")
    overlay_icon = load_image("reference.svg", (20, 20))

    def __init__(self, prefix):
        Node.__init__(self)
        self.target = None
        self.prefix = prefix
        self.icon = self.overlay_icon

    def set_target(self, target):
        """Point this reference at *target*; None clears the current target."""
        if self.target is None and target is not None:
            self.define_target(target)
        elif self.target is not None and target is not None:
            self.undefine_target()
            self.define_target(target)
        elif self.target is not None and target is None:
            self.undefine_target()
        else:
            # Both old and new target are None: nothing to do.
            return

    #
    # Tree
    #

    def get_name(self):
        if self.target is None:
            return "Empty reference. This should never happen. Contact the authors."
        else:
            return self.prefix + ": " + self.target.name

    def set_model(self, model, parent, index):
        Node.set_model(self, model, parent, index)
        if self.target is not None:
            self.target.references.append(self)

    def unset_model(self):
        Node.unset_model(self)
        if self.target is not None:
            self.target.references.remove(self)

    #
    # Targets
    #

    def define_target(self, new_target):
        """Attach *new_target*; raises TargetError when unset/invalid/duplicate."""
        if self.target is not None:
            raise TargetError("Reference already has a target.")
        if new_target is None:
            raise TargetError("Must assign a target.")
        if not self.check_target(new_target):
            raise TargetError("Target %s not accepted." % new_target)
        self.target = new_target
        if self.model is not None:
            self.target.references.append(self)
        self.icon = self.target.reference_icon
        self.parent.define_target(self, new_target)

    def undefine_target(self):
        """Detach the current target; raises TargetError when none is set."""
        if self.target is None:
            raise TargetError("Reference has no target to undefine.")
        old_target = self.target
        if self.model is not None:
            old_target.references.remove(self)
        self.target = None
        self.icon = self.overlay_icon
        self.parent.undefine_target(self, old_target)

    def check_target(self, new_target):
        """Return True when *new_target* may become this reference's target."""
        # References may never point at other references.
        if isinstance(new_target, Reference): return False
        if self.parent is not None:
            return self.parent.check_target(self, new_target)
        else:
            return True
class SpatialReference(Reference):
    """Reference whose target must be a GL object positioned by a Translation.

    It subscribes to signals on every node returned by analysis.bridge()
    between itself and its target, so the parent gets target_moved
    notifications whenever anything on that path moves or is
    re-transformed.
    """
    #
    # State
    #
    def __init__(self, prefix):
        Reference.__init__(self, prefix)
        # (model_object, handler_id) pairs for every signal connected below.
        self.bridge_handlers = []
    #
    # Tree
    #
    def set_model(self, model, parent, index):
        Reference.set_model(self, model, parent, index)
        if self.target is not None: self.connect_bridge()
    def unset_model(self):
        Reference.unset_model(self)
        if self.target is not None: self.disconnect_bridge()
    #
    # Targets
    #
    def define_target(self, new_target):
        Reference.define_target(self, new_target)
        if self.model is not None: self.connect_bridge()
    def undefine_target(self):
        # Disconnect first: Reference.undefine_target clears self.target.
        if self.model is not None: self.disconnect_bridge()
        Reference.undefine_target(self)
    def check_target(self, new_target):
        # Only GL objects whose transformation is a pure Translation qualify.
        if not isinstance(new_target, GLTransformationMixin) or not isinstance(new_target.transformation, Translation): return False
        return Reference.check_target(self, new_target)
    def on_target_move(self, model_object):
        # The tree topology may have changed; rebuild the bridge connections.
        self.disconnect_bridge()
        self.connect_bridge()
        self.parent.target_moved(self, model_object)
    def on_target_transformed(self, model_object):
        self.parent.target_moved(self, model_object)
    def connect_bridge(self):
        """Subscribe to move/transform signals on the path to the target."""
        bridge = tree_bridge(self, self.target)
        for model_object in bridge:
            self.bridge_handlers.append((model_object, model_object.connect("on-move", self.on_target_move)))
            if isinstance(model_object, GLTransformationMixin):
                self.bridge_handlers.append((model_object, model_object.connect("on-transformation-list-invalidated", self.on_target_transformed)))
    def disconnect_bridge(self):
        """Drop every signal connection made by connect_bridge()."""
        for model_object, connection_identifier in self.bridge_handlers:
            model_object.disconnect(connection_identifier)
        self.bridge_handlers = []
    #
    # About translation
    #
    def translation_relative_to(self, other):
        """Return the target's translation relative to *other*, or None if unset."""
        if self.target is not None:
            return self.target.get_frame_relative_to(other).t
        else:
            return None
|
alberthdev/pyradmon | pyradmon/args.py | Python | apache-2.0 | 68,354 | 0.009144 | #!/usr/bin/env python
# PyRadmon - Python Radiance Monitoring Tool
# Copyright 2014 Albert Huang.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Argument Parsing Library -
# library for parsing command line arguments
#
import argparse
import textwrap
import os
import sys
from _version import __version__
from core import *
import config
import config_printer
from config import SPECIAL_FIELDS
import log
import logging
# OrderedDict lives in the stdlib on 2.7+/3.x; fall back to the
# 'ordereddict' backport on older interpreters.
try:
    from collections import OrderedDict
except ImportError:
    try:
        from ordereddict import OrderedDict
    except ImportError:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt);
        # parenthesized print works on both Python 2 and 3, unlike the original
        # py2-only print statement.
        print("ERROR: OrderedDict not found! It is required to run this script.")
        sys.exit(1)
def add_args(parser, inherit, opts):
    """Register every option spec in *opts* on *parser*.

    When *inherit* is true, help text and defaults are replaced with
    argparse.SUPPRESS so a child parser inherits the options silently.
    Note: the spec dicts inside *opts* are mutated in that case, matching
    the original behaviour.
    """
    for flag, spec in opts.items():
        if inherit:
            spec['help'] = argparse.SUPPRESS
            spec['default'] = argparse.SUPPRESS
        parser.add_argument(flag, **dict(spec))
def add_list_args(parser, inherit = False):
    """Attach the data-selection options (dates, paths, experiment, step)
    to *parser*.  With inherit=True the options are registered hidden via
    add_args (help/default suppressed) so a subcommand can inherit them.
    """
    # Ordered so --help lists the options in a logical sequence.
    opts = OrderedDict()
    opts['--data-single-date'] = \
        {
            'action' : 'store',
            'metavar' : 'DATE',
            'dest' : 'data_single_date',
            'help' : 'Use single date. Format should be "YYYY-MM-DD HHz". Negates the options below specifying dates and times.',
        }
    opts['--data-path-format'] = \
        {
            'action' : 'store',
            'metavar' : 'PATH_FORMAT',
            'dest' : 'data_path_format',
            'help' : 'Specify the path format for data.',
        }
    opts['--data-experiment-id'] = \
        {
            'action' : 'store',
            'metavar' : 'EXPERIMENT_ID',
            'dest' : 'data_experiment_id',
            'help' : 'Specify the experiment ID for data.',
        }
    opts['--data-start-date'] = \
        {
            'action' : 'store',
            'metavar' : 'DATE',
            'dest' : 'data_start_date',
            'help' : 'Specify the start date for data. Format should be "YYYY-MM-DD HHz".',
        }
    opts['--data-end-date'] = \
        {
            'action' : 'store',
            'metavar' : 'DATE',
            'dest' : 'data_end_date',
            'help' : 'Specify the end date for data. Format should be "YYYY-MM-DD HHz".',
        }
    opts['--data-instrument-sat'] = \
        {
            'action' : 'store',
            'metavar' : 'INSTRUMENT_SAT',
            'dest' : 'data_instrument_sat',
            'help' : 'Specify the instrument and satellite ID for data.',
        }
    opts['--data-step'] = \
        {
            'action' : 'store',
            'metavar' : 'STEP_TYPE',
            'dest' : 'data_step',
            'help' : 'Specify the step/type for the data. "anl" and "ges" are allowed. If you wish to specify more than one, use a pipe to seperate them, e.g. "anl|ges".',
        }
    opts['--data-time-delta'] = \
        {
            'action' : 'store',
            'metavar' : 'TIME_DELTA',
            'dest' : 'data_time_delta',
            'help' : """Specify the time interval for data. The time format is
                expressed using the sleep command's format, "#U", where # is
                a number and U is a letter representing a unit of time.""",
        }
    add_args(parser, inherit, opts)
def add_dump_args(parser, inherit = False):
    """Attach the dump options (columns, channels, assimilation filter,
    warning suppression) to *parser*; inherit=True registers them hidden.
    """
    opts = OrderedDict()
    opts['--dump-columns'] = \
        {
            'action' : 'store',
            'metavar' : 'COLUMNS',
            'dest' : 'dump_columns',
            'help' : 'Specify the columns to dump/use, separated by commas.',
        }
    opts['--dump-all-channels'] = \
        {
            'action' : 'store_true',
            'dest' : 'dump_all_channels',
            'help' : 'Specify to dump all channels. Negates the option below specifying channels to use.',
        }
    opts['--dump-channels'] = \
        {
            'action' : 'store',
            'metavar' : 'CHANNELS',
            'dest' : 'dump_channels',
            'help' : 'Specify the channels to dump/use, separated by commas. Ranges are also acceptable.',
        }
    opts['--dump-assim-only'] = \
        {
            'action' : 'store_true',
            'dest' : 'dump_assim_only',
            'help' : 'Specify to use only assimilated data (iuse = 1).',
        }
    opts['--dump-suppress-warnings'] = \
        {
            'action' : 'store_true',
            'dest' : 'dump_suppress_warnings',
            'help' : 'Specify whether to suppress data warnings or not. This will hide important warnings about data inconsistencies, so only enable if you are 100%% sure that your data is valid!',
        }
    add_args(parser, inherit, opts)
def add_plot_args(parser, inherit = False):
opts = OrderedDict()
opts['--plot-define-plots'] = \
{
'action' : 'append',
'metavar' : 'PLOTS',
'dest' : 'plot_define_plots',
'help' : 'Define plots. Uses the value list system, specified by "plot1,plot2,plot3,...".',
}
opts['--plot-define-subplots'] = \
{
'action' : 'append',
'metavar' : 'SUBPLOTS',
'dest' : 'plot_define_subplots',
'help' : 'Define subplots. Uses the key-value pair system, specified by "plot1:subplot1,subplotid2,...;".',
}
opts['--plot-define-axes'] = \
{
'action' : 'append',
'metavar' : 'AXES',
'dest' : 'plot_define_axes',
'help' : 'Define axes for the subplot. Uses the key-value pair system, specified by "plot1|subplot1|y:ticks=5,label="test";...".',
}
opts['--plot-define-data'] = \
{
'action' : 'append',
'metavar' : 'DATA',
'dest' : 'plot_define_data',
'help' : 'Define data to be plotted in the subplot. Uses the key-value pair system, specified by "plot1|subplot1|x:data_field_1;plot1|subplot1|y:...".',
}
opts['--plot-define-title'] = \
{
'action' : 'append',
'metavar' : 'TITLE',
'dest' : 'plot_define_title',
'help' : 'Define the title for the plot, and optionally, subplot and legend. Uses the key-value pair system, specified by "plot1:title;plot1|subplot1:title;plot1|subplot1|legend:title;...".',
}
opts['--plot-define-output'] = \
{
'action' : 'append',
'metavar' : 'OUTPUT_FILE',
'dest' : 'plot_define_output',
'help' : 'Define the output file for the plot. Uses the key-value pair system, specified by "plot1:output_file.png;...".',
}
opts['--plot-define-settings'] = \
{
'action' : 'append',
'metavar' : 'SETTINGS',
'dest' : 'plot_define_settings',
'help' : 'Define the settings for the plot. Uses the key-value pair system, specified by "plot1:target_size=595x700,dpi=50;...".',
}
opts['--plot-define-custom-vars'] = \
{
'action' : 'append',
'metavar' : 'CUSTOM_VARS',
'dest' : 'plot_define_custom_vars',
'help' : 'Define the custom variables for use in the output file and title. Uses the key-value pair system, specified by "myvar:123,myvar2:abc,...".',
}
opts['--plot-make-dirs'] = \
{
'action' : 'store_true',
'dest' : 'plot_m |
biocore/pyqi | pyqi/interfaces/optparse/input_handler.py | Python | bsd-3-clause | 561 | 0.005348 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import division
__credits__ = ["Greg Caporaso", "Daniel McDonald", "Doug Wendel",
"Jai Ram Rideout"]
## Store project/inteface specific input handlers here
|
yland/mailman3 | src/mailman/interfaces/pipeline.py | Python | gpl-2.0 | 1,138 | 0.000879 | # Copyright (C) 2008-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman.  If not, see <http://www.gnu.org/licenses/>.
"""Interface for describing pipelines."""
__all__ = [
'IPipeline',
]
from zope.interface import Interface, Attribute
class IPipeline(Interface):
    """A pipeline of handlers.

    A named, ordered sequence of message handlers; iterating a pipeline
    yields its handlers in processing order.
    """

    name = Attribute('Pipeline name; must be unique.')
    description = Attribute('A brief description of this pipeline.')

    def __iter__():
        """Iterate over all the handlers in this pipeline."""
|
meteorfox/PerfKitBenchmarker | perfkitbenchmarker/linux_benchmarks/oldisim_benchmark.py | Python | apache-2.0 | 8,628 | 0.008693 | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs oldisim.
oldisim is a framework to support benchmarks that emulate Online Data-Intensive
(OLDI) workloads, such as web search and social networking. oldisim includes
sample workloads built on top of this framework.
With its default config, oldisim models an example search topology. A user query
is first processed by a front-end server, which then eventually fans out the
query to a large number of leaf nodes. The latency is measured at the root of
the tree, and often increases with the increase of fan-out. oldisim reports a
scaling efficiency for a given topology. The scaling efficiency is defined
as queries per second (QPS) at the current fan-out normalized to QPS at fan-out
1 with ISO root latency.
Sample command line:
./pkb.py --benchmarks=oldisim --project='YOUR_PROJECT' --oldisim_num_leaves=4
--oldisim_fanout=1,2,3,4 --oldisim_latency_target=40
--oldisim_latency_metric=avg
The above command will build a tree with one root node and four leaf nodes. The
average latency target is 40ms. The root node will vary the fanout from 1 to 4
and measure the scaling efficiency.
"""
import logging
import re
import time
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import oldisim_dependencies
FLAGS = flags.FLAGS
flags.DEFINE_integer('oldisim_num_leaves', 4, 'number of leaf nodes',
lower_bound=1, upper_bound=64)
flags.DEFINE_list('oldisim_fanout', [],
'a list of fanouts to be tested. '
'a root can connect to a subset of leaf nodes (fanout). '
'the value of fanout has to be smaller than num_leaves.')
flags.DEFINE_enum('oldisim_latency_metric', 'avg',
['avg', '50p', '90p', '95p', '99p', '99.9p'],
'Allowable metrics for end-to-end latency')
flags.DEFINE_float('oldisim_latency_target', '30', 'latency target in ms')
NUM_DRIVERS = 1
NUM_ROOTS = 1
BENCHMARK_NAME = 'oldisim'
BENCHMARK_CONFIG = """
oldisim:
description: >
Run oldisim. Specify the number of leaf
nodes with --oldisim_num_leaves
vm_groups:
default:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
  """Decide number of vms needed to run oldisim."""
  config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  # One VM per leaf node plus the fixed driver and root nodes.
  config['vm_groups']['default']['vm_count'] = (FLAGS.oldisim_num_leaves
                                                + NUM_DRIVERS + NUM_ROOTS)
  return config
def InstallAndBuild(vm):
  """Install and build oldisim on the target vm.

  Args:
    vm: A vm instance that runs oldisim.
  """
  logging.info('prepare oldisim on %s', vm)
  # 'oldisim_dependencies' is the linux_packages module imported above.
  vm.Install('oldisim_dependencies')
def Prepare(benchmark_spec):
  """Install and build oldisim on the target vm.

  Also starts a LeafNode server (one thread per vCPU) on every leaf VM.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vms = benchmark_spec.vms
  # VM layout is [drivers][roots][leaves]; leaves start after the first
  # NUM_DRIVERS + NUM_ROOTS entries.
  leaf_vms = [vm for vm_idx, vm in enumerate(vms)
              if vm_idx >= (NUM_DRIVERS + NUM_ROOTS)]
  if vms:
    vm_util.RunThreaded(InstallAndBuild, vms)
  # Launch job on the leaf nodes.
  leaf_server_bin = oldisim_dependencies.BinaryPath('LeafNode')
  for vm in leaf_vms:
    leaf_cmd = '%s --threads=%s' % (leaf_server_bin, vm.num_cpus)
    # Detach so Prepare returns while the server keeps running.
    vm.RemoteCommand('%s &> /dev/null &' % leaf_cmd)
def SetupRoot(root_vm, leaf_vms):
  """Connect a root node to a list of leaf nodes.

  Starts a detached ParentNode server on root_vm with one --leaf flag per
  leaf VM (addressed by internal IP).

  Args:
    root_vm: A root vm instance.
    leaf_vms: A list of leaf vm instances.
  """
  fanout_args = ' '.join(['--leaf=%s' % i.internal_ip
                          for i in leaf_vms])
  root_server_bin = oldisim_dependencies.BinaryPath('ParentNode')
  root_cmd = '%s --threads=%s %s' % (root_server_bin, root_vm.num_cpus,
                                     fanout_args)
  logging.info('Root cmdline: %s', root_cmd)
  root_vm.RemoteCommand('%s &> /dev/null &' % root_cmd)
def ParseOutput(oldisim_output):
  """Parses the output from oldisim.

  Scans the text line by line; a 'peak qps' line sets the peak numbers and
  also initializes the target numbers, while a later 'measured_qps' line
  overrides the target numbers.

  Args:
    oldisim_output: A string containing the text of oldisim output.

  Returns:
    A tuple of (peak_qps, peak_lat, target_qps, target_lat).
  """
  peak_pattern = re.compile(r'peak qps = (?P<qps>\S+), latency = (?P<lat>\S+)')
  target_pattern = re.compile(
      r'measured_qps = (?P<qps>\S+), latency = (?P<lat>\S+)')
  for line in oldisim_output.splitlines():
    peak_found = peak_pattern.search(line)
    if peak_found:
      peak_qps = float(peak_found.group('qps'))
      peak_lat = float(peak_found.group('lat'))
      target_qps, target_lat = peak_qps, peak_lat
      continue
    target_found = target_pattern.search(line)
    if target_found:
      target_qps = float(target_found.group('qps'))
      target_lat = float(target_found.group('lat'))
  return peak_qps, peak_lat, target_qps, target_lat
def RunLoadTest(benchmark_spec, fanout):
  """Run Loadtest for a given topology.

  Heals two extraction-garbled lines from the original
  ("elif vm_index | < ..." and the "server is u | p" comment).

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
    fanout: Request is first processed by a root node, which then
        fans out to a subset of leaf nodes.

  Returns:
    A tuple of (peak_qps, peak_lat, target_qps, target_lat).
  """
  assert fanout <= FLAGS.oldisim_num_leaves, (
      'The number of leaf nodes a root node connected to is defined by the '
      'flag fanout. Its current value %s is bigger than the total number of '
      'leaves %s.' % (fanout, FLAGS.oldisim_num_leaves))

  vms = benchmark_spec.vms
  # Partition the VM list into [drivers][roots][leaves].
  driver_vms = []
  root_vms = []
  leaf_vms = []
  for vm_index, vm in enumerate(vms):
    if vm_index < NUM_DRIVERS:
      driver_vms.append(vm)
    elif vm_index < (NUM_DRIVERS + NUM_ROOTS):
      root_vms.append(vm)
    else:
      leaf_vms.append(vm)
  # Only the first *fanout* leaves take part in this round.
  leaf_vms = leaf_vms[:fanout]

  for root_vm in root_vms:
    SetupRoot(root_vm, leaf_vms)

  driver_vm = driver_vms[0]
  driver_binary = oldisim_dependencies.BinaryPath('DriverNode')
  launch_script = oldisim_dependencies.Path('workloads/search/search_qps.sh')
  driver_args = ' '.join(['--server=%s' % i.internal_ip
                          for i in root_vms])
  # Make sure server is up.
  time.sleep(5)
  driver_cmd = '%s -s %s:%s -t 30 -- %s %s --threads=%s --depth=16' % (
      launch_script, FLAGS.oldisim_latency_metric, FLAGS.oldisim_latency_target,
      driver_binary, driver_args, driver_vm.num_cpus)
  logging.info('Driver cmdline: %s', driver_cmd)
  stdout, _ = driver_vm.RemoteCommand(driver_cmd, should_log=True)
  return ParseOutput(stdout)
def Run(benchmark_spec):
  """Run oldisim on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  results = []
  qps_dict = dict()
  vms = benchmark_spec.vms
  vm = vms[0]
  # Always measure fanout 1 (the baseline) and the full fan-out, plus any
  # requested intermediate values.
  fanout_list = set([1, FLAGS.oldisim_num_leaves])
  for fanout in map(int, FLAGS.oldisim_fanout):
    if fanout > 1 and fanout < FLAGS.oldisim_num_leaves:
      fanout_list.add(fanout)
  metadata = {'num_cpus': vm.num_cpus}
  metadata.update(vm.GetMachineTypeDict())
  # sorted() guarantees fanout 1 runs first, so base_qps is always set
  # before it is used below.
  for fanout in sorted(fanout_list):
    qps = RunLoadTest(benchmark_spec, fanout)[2]
    qps_dict[fanout] = qps
    if fanout == 1:
      base_qps = qps
    name = 'Scaling efficiency of %s leaves' % fanout
    scaling_efficiency = round(min(qps_dict[fanout] / base_qps, 1), 2)
    results.append(sample.Sample(name, scaling_efficiency, '', metadata))
  return results
def Cleanup(benchmark_spec): # pylint: disable=unused-argument
|
Swetabhsuman8/Tic-Toe-game-in-python | TicToe.py | Python | mit | 2,330 | 0.060944 | def print_board(board):
print "Tic Toe Game Developed by Swetabh Suman \n"
print "Gmail- swetabhsuman8@gmail.com/Twitter- swetabhsuman8"
print "The board look like this: \n"
for i in range(3):
print " ",
for j in range(3):
if board[i*3+j] == 1:
print 'X',
elif board[i*3+j] == 0:
print 'O',
elif board[i*3+j] != -1:
print board[i*3+j]-1,
else:
print ' ',
if j != 2:
print " | ",
print
if i != 2:
print "-----------------"
else:
print
def print_instruction():
    """Show which number maps to which cell by printing a numbered board."""
    print "Please use the following cell numbers to make your move"
    # Values 2..10 print as 1..9 because print_board renders v-1.
    print_board([2,3,4,5,6,7,8,9,10])
def get_input(turn):
    """Prompt until the user enters a cell number 1-9; return it 0-based.

    *turn* is the mark ('X' or 'O') shown in the prompt.  When int()
    fails, *user* is still the raw string, so the concatenation in the
    handler is safe.
    """
    valid = False
    while not valid:
        try:
            user = raw_input("Where would you like to place " + turn + " (1-9)? ")
            user = int(user)
            if user >= 1 and user <= 9:
                return user-1
            else:
                print "That is not a valid move! Please try again.\n"
                print_instruction()
        except Exception as e:
            print user + " is not a valid move! Please try again.\n"
def check_win(board):
    """Return the value filling a complete line, or -1 when no line is full.

    With the 1/0/-1 cell encoding this yields the winner's mark; note that
    a line of three empty cells (-1) also returns -1, which callers read
    as "no winner".  Short boards are tolerated via the except clause.
    """
    winning_lines = ((1,2,3),(4,5,6),(7,8,9),(1,4,7),(2,5,8),(3,6,9),(1,5,9),(3,5,7))
    for line in winning_lines:
        try:
            a, b, c = (board[cell-1] for cell in line)
            if a == b == c:
                return a
        except:
            pass
    return -1
def quit_game(board,msg):
    """Show the final board, print *msg* and terminate the interpreter."""
    print_board(board)
    print msg
    quit()
def main():
# setup game
# alternate turns
# check if win or end
# quit and show the board
print_instruction()
board = []
for i in range(9):
board.append(-1)
win = False
move = 0
while not win:
# print board
print_board(board)
print "Turn number " + str(move+1)
if move % 2 == 0:
turn = 'X'
else:
turn = 'O'
# get user input
user = get_input(turn)
while board[user] != -1:
print "Invalid move! Cell already taken. Please try again.\n"
user = get_input(turn)
board[user] = 1 if turn == 'X' else 0
# advance move and check for end game
move += 1
if move | > 4:
winner = check_win(board)
if winner != -1:
out = "The winner is "
out += "X" if winn | er == 1 else "O"
out += " :)"
quit_game(board,out)
elif move == 9:
quit_game(board,"No winner :(")
if __name__ == "__main__":
main()
|
PetroDE/control | control/substitution.py | Python | mit | 9,525 | 0.002835 | """
While a Controlfile is being read in MetaServices allow for substitutions to be
made. All of the code is here instead of living in controlfile.py so you don't
have to scroll past the Controlfile class
"""
from enum import Enum
from random import randint
import logging
module_logger = logging.getLogger('control.substitution') # pylint: disable=invalid-name
class Kind(Enum):
    """Enum for things that don't fall into the type system"""
    # Values are ordered by generality: _pick_most_generic keeps the
    # highest value, so dict > list > singular > none.
    none = 0
    singular = 1
    list = 2
    dict = 3
# Maps a Kind to the constructor of its natural empty Python value.
# Kind.none has no entry; callers only look up singular/list/dict.
DEFAULT_KIND_MAPPING = {
    Kind.singular: str,
    Kind.list: list,
    Kind.dict: dict,
}
def _get_default_of_kind(val):
    # Fresh empty value ("", [] or {}) of the same kind as *val*.
    return DEFAULT_KIND_MAPPING[_determine_kind(val)]()
def _pick_most_generic(left, right):
    # Fresh empty value of the more generic of the two kinds
    # (dict > list > singular, per Kind value ordering).
    return DEFAULT_KIND_MAPPING[
        sorted([
            _determine_kind(left),
            _determine_kind(right)
        ], key=lambda x: x.value)[-1]
    ]()
# Make sure to call the constructor so you get a new object of that type
# instead of something else
def _determine_kind(config_option):
    """Classify *config_option* into a Kind bucket (dict, list, none,
    or singular for everything else)."""
    if isinstance(config_option, dict):
        return Kind.dict
    if isinstance(config_option, list):
        return Kind.list
    if config_option is None:
        return Kind.none
    return Kind.singular
def _build_values_for_key(k, op, x, y): # pylint: disable=invalid-name
    """Combine x[k] and y[k] with *op*, defaulting a missing side to the
    most generic empty value of the pair (so a dict on either side forces
    a dict-shaped merge)."""
    default = _pick_most_generic(x.get(k, None), y.get(k, None))
    return operations[
        (
            _determine_kind(x.get(k, default)),
            _determine_kind(y.get(k, default)),
            op
        )
    ](x.get(k, default), y.get(k, default))
# Dispatch table for config-merge operations, keyed either by operation
# name (which re-dispatches on the runtime kinds of both operands) or by
# a (left kind, right kind, operation) triple.  Heals two extraction-
# garbled entries from the original ("x | + [y]" and "for k | in").
operations = {
    # pylint: disable=unnecessary-lambda
    # aliases into the workings of this dict
    'suffix': lambda x, y: operations[(_determine_kind(x), _determine_kind(y), 'suffix')](x, y),
    'prefix': lambda x, y: operations[(_determine_kind(x), _determine_kind(y), 'prefix')](x, y),
    'union': lambda x, y: operations[(_determine_kind(x), _determine_kind(y), 'union')](x, y),
    'replace': lambda x, y: y if y else x,
    # Union ops
    (Kind.singular, Kind.singular, 'union'): lambda x, y: [i for i in [x, y] if i] if x != y else ([x] if x else []),
    (Kind.singular, Kind.list, 'union'): lambda x, yy: [x] + [y for y in yy if y != x],
    (Kind.singular, Kind.dict, 'union'): lambda x, y: {
        k: _build_values_for_key(k, 'union', {'shared': [x]}, y) for k in y.keys() | {'shared'}
    } if x else {k: (v if isinstance(v, list) else [v]) for k, v in y.items()},
    (Kind.list, Kind.singular, 'union'): lambda xx, y: xx + [y] if y not in xx else xx,
    (Kind.list, Kind.list, 'union'): lambda xx, yy: xx + [y for y in yy if y not in xx],
    (Kind.list, Kind.dict, 'union'): lambda xx, y: {
        k: _build_values_for_key(k, 'union', {'shared': xx}, y) for k in y.keys() | {'shared'}
    } if xx else y,
    (Kind.dict, Kind.singular, 'union'): lambda x, y: {
        k: _build_values_for_key(k, 'union', x, {'shared': [y]}) for k in x.keys() | {'shared'}
    },
    (Kind.dict, Kind.list, 'union'): lambda x, yy: {
        k: _build_values_for_key(k, 'union', x, {'shared': yy}) for k in x.keys() | {'shared'}
    } if yy else x,
    (Kind.dict, Kind.dict, 'union'): lambda x, y: {
        k: _build_values_for_key(k, 'union', x, y) for k in x.keys() | y.keys()
    },
    # Suffix Ops
    (Kind.singular, Kind.singular, 'suffix'): '{0}{1}'.format,
    (Kind.singular, Kind.list, 'suffix'): lambda x, y: [x] + y,
    (Kind.list, Kind.singular, 'suffix'): lambda x, y: x + [y],
    (Kind.list, Kind.list, 'suffix'): lambda x, y: x + y,
    (Kind.list, Kind.dict, 'suffix'): lambda x, y: {
        k: _build_values_for_key(k, 'suffix', {'shared': x}, y) for k in y.keys() | {'shared'}
    },
    (Kind.singular, Kind.dict, 'suffix'): lambda x, y: {
        k: _build_values_for_key(k, 'suffix', {'shared': x}, y) for k in y.keys() | {'shared'}
    },
    (Kind.dict, Kind.singular, 'suffix'): lambda x, y: {
        k: _build_values_for_key(k, 'suffix', x, {'shared': y}) for k in x.keys() | {'shared'}
    },
    (Kind.dict, Kind.list, 'suffix'): lambda x, y: {
        k: _build_values_for_key(k, 'suffix', x, {'shared': y}) for k in x.keys() | {'shared'}
    },
    (Kind.dict, Kind.dict, 'suffix'): lambda x, y: {
        k: _build_values_for_key(k, 'suffix', x, y) for k in x.keys() | y.keys()
    },
    # Prefix Ops
    (Kind.singular, Kind.singular, 'prefix'): '{1}{0}'.format,
    (Kind.singular, Kind.list, 'prefix'): lambda x, y: y + [x],
    (Kind.singular, Kind.dict, 'prefix'): lambda x, y: {
        k: _build_values_for_key(k, 'prefix', {'shared': x}, y) for k in y.keys() | {'shared'}
    },
    (Kind.list, Kind.singular, 'prefix'): lambda x, y: [y] + x,
    (Kind.list, Kind.list, 'prefix'): lambda x, y: y + x,
    (Kind.list, Kind.dict, 'prefix'): lambda x, y: {
        k: _build_values_for_key(k, 'prefix', {'shared': x}, y) for k in y.keys() | {'shared'}
    },
    (Kind.dict, Kind.singular, 'prefix'): lambda x, y: {
        k: _build_values_for_key(k, 'prefix', x, {'shared': y}) for k in x.keys() | {'shared'}
    },
    (Kind.dict, Kind.list, 'prefix'): lambda x, y: {
        k: _build_values_for_key(k, 'prefix', x, {'shared': y}) for k in x.keys() | {'shared'}
    },
    (Kind.dict, Kind.dict, 'prefix'): lambda x, y: {
        k: _build_values_for_key(k, 'prefix', x, y) for k in x.keys() | y.keys()
    },
}
def normalize_service(service, opers, variables):
    """
    Takes a service, and options and applies the transforms to the service.

    Allowed args:
    - service: must be service object that was created before hand
    - options: a dict of options that define transforms to a service.
      The format must conform to a Controlfile metaservice options
      definition
    Returns: a service with all the transforms applied and all the variables
    substituted in.
    """
    # We check that the Controlfile only specifies operations we support,
    # that way we aren't trusting a random user to accidentally get a
    # random string eval'd.
    for key, op, val in (
            (key, op, val)
            for key, ops in opers.items()
            for op, val in ops.items() if (op in operations and
                                           key in service.all_options)):
        module_logger.log(11, "service '%s' %sing %s with '%s'.",
                          service.service, op, key, val)
        try:
            replacement = operations[op](service[key], val)
        except KeyError as e:
            module_logger.debug(e)
            module_logger.log(11, "service '%s' missing key '%s'",
                              service.service, key)
            module_logger.log(11, service.__dict__)
            # Key absent on the service: combine against an empty value of
            # val's kind instead.
            replacement = operations[op](_get_default_of_kind(val), val)
        finally:
            service[key] = replacement
    # Second pass: substitute {variables} into every remaining value.
    for key in service.keys():
        try:
            module_logger.debug('now at %s, passing in %i vars', key, len(variables))
            service[key] = _substitute_vars(service[key], variables)
        except KeyError:
            continue
    return service['service'], service
# used exclusively by visit_every_leaf, but defined outside it so it's only compiled once
# Handlers are keyed on (is_dict, is_list, is_str) of the visited node;
# RANDOM is regenerated for every .format call so repeated uses differ.
substitute_vars_decision_dict = {
    # dict, list, str
    (True, False, False): lambda d, vd: {k: _substitute_vars(v, vd) for k, v in d.items()},
    (False, True, False): lambda d, vd: [x.format(**_merge_dicts(
        vd,
        {'RANDOM': str(randint(0, 10000))}
    )) for x in d],
    (False, False, True): lambda d, vd: d.format(**_merge_dicts(
        vd,
        {'RANDOM': str(randint(0, 10000))}
    )),
    (False, False, False): lambda d, vd: d
}
def _merge_dicts(*args):
"""
Before python 3.5 you can't do foo(**dict_one, **dict_two)
so, this function exists.
"""
if len(args) < 1:
return {}
ret = args[0].copy()
for d in args[1:]:
ret.update(d)
return ret
def _substitute_vars(d, var_dict): # pylint: disable=invalid-name
"""
Visit every leaf and substitute any variables that are found. This function
is named poorly, it sounds like it should generically visit every and allow
a function to be |
openqt/algorithms | leetcode/python/lc669-trim-a-binary-search-tree.py | Python | gpl-3.0 | 1,292 | 0.017802 | # coding=utf-8
import unittest
"""669. Trim a Binary Search Tree
https://leetcode.com/problems/trim-a-binary-search-tree/description/
Given a binary search tree and the lowest and highest boundaries as `L` and
`R`, trim the tree so that all its elements lies | in `[L, R]` (R >= L). You
might need to change the root of the tree, so the result should return the new
root of the trimmed binary searc | h tree.
**Example 1:**
**Input:**
1
/ \
0 2
L = 1
R = 2
**Output:**
1
\
2
**Example 2:**
**Input:**
3
/ \
0 4
\
2
/
1
L = 1
R = 3
**Output:**
3
/
2
/
1
Similar Questions:
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def trimBST(self, root, L, R):
"""
:type root: TreeNode
:type L: int
:type R: int
:rtype: TreeNode
"""
def test(self):
pass
if __name__ == "__main__":
unittest.main()
|
daspecster/google-cloud-python | datastore/google/cloud/datastore/query.py | Python | apache-2.0 | 19,705 | 0 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google Cloud Datastore queries."""
import base64
from google.cloud._helpers import _ensure_tuple_or_list
from google.cloud.iterator import Iterator as BaseIterator
from google.cloud.iterator import Page
from google.cloud.proto.datastore.v1 import datastore_pb2 as _datastore_pb2
from google.cloud.proto.datastore.v1 import entity_pb2 as _entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2 as _query_pb2
from google.cloud.datastore import helpers
from google.cloud.datastore.key import Key
_NOT_FINISHED = _query_pb2.QueryResultBatch.NOT_FINISHED
_FINISHED = (
_query_pb2.QueryResultBatch.NO_MORE_RESULTS,
_query_pb2.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT,
_query_pb2.QueryResultBatch.MORE_RESULTS_AFTER_CURSOR,
)
class Query(object):
"""A Query against the Cloud Datastore.
This class serves as an abstraction for creating a query over data
stored in the Cloud Datastore.
:type client: :class:`google.cloud.datastore.client.Client`
:param client: The client used to connect to Datastore.
:type kind: str
:param kind: The kind to query.
:type project: str
:param project:
(Optional) The project associated with the query. If not passed, uses
the client's value.
:type namespace: str
:param namespace:
(Optional) The namespace to which to restrict results. If not passed,
uses the client's value.
:type ancestor: :class:`google.cloud.datastore.key.Key`
:param ancestor:
(Optional) key of the ancestor to which this query's results are
restricted.
:type filters: sequence of (property_name, operator, value) tuples
:param filters: property filters applied by this query.
:type projection: sequence of string
:param projection: fields returned as part of query results.
:type order: sequence of string
:param order: field names used to order query results. Prepend '-'
to a field name to sort it in descending order.
:type distinct_on: sequence of string
:param distinct_on: field names used to group query results.
:raises: ValueError if ``project`` is not passed and no implicit
default is set.
"""
OPERATORS = {
'<=': _query_pb2.PropertyFilter.LESS_THAN_OR_EQUAL,
'>=': _query_pb2.PropertyFilter.GREATER_THAN_OR_EQUAL,
'<': _query_pb2.PropertyFilter.LESS_THAN,
'>': _query_pb2.PropertyFilter.GREATER_THAN,
'=': _query_pb2.PropertyFilter.EQUAL,
}
"""Mapping of operator strings and their protobuf equivalents."""
def __init__(self,
client,
kind=None,
project=None,
namespace=None,
ancestor=None,
filters=(),
projection=(),
order=(),
distinct_on=()):
self._client = client
self._kind = kind
self._project = project or client.project
self._namespace = namespace or client.namespace
self._ancestor = ancestor
self._filters = []
# Verify filters passed in.
for property_name, operator, value in filters:
self.add_filter(property_name, operator, value)
self._projection = _ensure_tuple_or_list('projection', projection)
self._order = _ensure_tuple_or_list('order', order)
self._distinct_on = _ensure_tuple_or_list('distinct_on', distinct_on)
@property
def project(self):
"""Get the project for this Query.
:rtype: str
:returns: The project for the query.
"""
return self._project or self._client.project
@property
def namespace(self):
"""This query's namespace
:rtype: str or None
:returns: the namespace assigned to this query
"""
return self._namespace or self._client.namespace
@namespace.setter
def namespace(self, value):
"""Update the query's namespace.
:type value: str
"""
if not isinstance(value, str):
raise ValueError("Namespace must be a string")
self._namespace = value
@property
def kind(self):
"""Get the Kind of the Query.
:rtype: str
:returns: The kind for the query.
"""
return self._kind
@kind.setter
def kind(self, value):
"""Update the Kind of the Query.
:type value: str
:param value: updated kind for the query.
.. note::
The protobuf specification allows for ``kind`` to be repeated,
but the current implementation returns an error if more than
one value is passed. If the back-end changes in the future to
allow multiple values, this method will be updated to allow passing
either a string or a sequence of strings.
"""
if not isinstance(value, str):
raise TypeError("Kind must be a string")
self._kind = value
@property
def ancestor(self):
"""The ancestor key for the query.
:rtype: Key or None
:returns: The ancestor for the query.
"""
return self._ancestor
@ancestor.setter
def ancestor(self, value):
"""Set the ancestor for the query
:type value: Key
:param value: the new ancestor key
"""
if not isinstance(value, Key):
raise TypeError("Ancestor must be a Key")
self._ancestor = value
@ancestor.deleter
def ancestor(self):
"""Remove the ancestor for the query."""
self._ancestor = None
@property
def filters(self):
"""Filters set on the query | .
:rtype: sequence of (property_name, operator, value) tuples.
| :returns: The filters set on the query.
"""
return self._filters[:]
def add_filter(self, property_name, operator, value):
"""Filter the query based on a property name, operator and a value.
Expressions take the form of::
.add_filter('<property>', '<operator>', <value>)
where property is a property stored on the entity in the datastore
and operator is one of ``OPERATORS``
(ie, ``=``, ``<``, ``<=``, ``>``, ``>=``)::
>>> from google.cloud import datastore
>>> client = datastore.Client()
>>> query = client.query(kind='Person')
>>> query.add_filter('name', '=', 'James')
>>> query.add_filter('age', '>', 50)
:type property_name: str
:param property_name: A property name.
:type operator: str
:param operator: One of ``=``, ``<``, ``<=``, ``>``, ``>=``.
:type value: :class:`int`, :class:`str`, :class:`bool`,
:class:`float`, :class:`NoneType`,
:class:`datetime.datetime`,
:class:`google.cloud.datastore.key.Key`
:param value: The value to filter on.
:raises: :class:`ValueError` if ``operation`` is not one of the
specified values, or if a filter names ``'__key__'`` but
passes an invalid value (a key is required).
"""
if self.OPERATORS.get(operator) is None:
error_message = 'Invalid expression: "%s"' % (operator,)
choices_message = 'Please use one of: =, <, <=, >, >=.'
raise ValueError(error_message, choices_message)
if property_name == '__key__' and not isinstance(value, Key):
raise ValueError('Invalid key: "%s"' % value)
|
eunchong/build | scripts/slave/recipes/chromium.gpu.recipe_autogen.py | Python | bsd-3-clause | 6,588 | 0.019126 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'depot_tools/bot_update',
'chromium',
'depot_tools/gclient',
'recipe_engine/json',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
]
def Android_Debug__Nexus_9__steps(api):
# update scripts step; implicitly run by recipe engine.
# bot_update step
src_cfg = api.gclient.make_config(GIT_MODE=True)
soln = src_cfg.solutions.add()
soln.name = "src"
soln.url = "https://chromium.googlesource.com/chromium/src.git"
soln.custom_deps = {'src/third_party/WebKit/LayoutTests': None}
soln.custom_vars = {'webkit_trunk': 'http://src.chromium.org/blink/trunk',
'googlecode_url': 'http://%s.googlecode.com/svn',
'nacl_trunk': 'http://src.chromium.org/native_client/trunk',
'sourceforge_url': 'https://svn.code.sf.net/p/%(repo)s/code',
'llvm_url': 'http://llvm.org/svn/llvm-project'}
src_cfg.target_os = set(['android'])
src_cfg.got_revision_mapping.update({'src': 'got_revision',
'src/third_party/WebKit': 'got_webkit_revision',
'src/tools/swarming_client': 'got_swarming_client_revision',
'src/v8': 'got_v8_revision'})
api.gclient.c = src_cfg
api.bot_update.ensure_checkout(force=True)
# gclient revert step
api.gclient.revert()
# cleanup_temp step
api.chromium.cleanup_temp()
# slave_steps step
api.python("slave_steps", "src/build/android/buildbot/bb_run_bot.py",
args=['--build-properties=%s' % api.json.dumps(api.properties.legacy(),
separators=(',', ':')),
'--factory-properties={"GYP_DEFINES":" component=shared_library",'+\
'"android_bot_id":"gpu-builder-tests-dbg","clobber":false,'+\
'"gclient_env":{},"gclient_timeout":3600,"target":"Debug",'+\
'"target_os":"android"}'], allow_subannotations=True)
def Android_Debug__Nexus_5__steps(api):
# update scripts step; implicitly run by recipe engine.
# bot_update step
src_cfg = api.gclient.make_config(GIT_MODE=True)
soln = src_cfg.solutions.add()
soln.name = "src"
soln.url = "https://chromium.googlesource.com/chromium/src.git"
soln.custom_deps = {'src/third_party/WebKit/LayoutTests': None}
soln.custom_vars = {'webkit_trunk': 'http://src.chromium.org/blink/trunk',
'googlecode_url': 'http://%s.googlecode.com/svn',
'nacl_trunk': 'http://src.chromium.org/native_client/trunk',
'sourceforge_url': 'https://svn.code.sf.net/p/%(repo)s/code',
'llvm_url': 'http://llvm.org/svn/llvm-project'}
src_cfg.target_os = set(['android'])
src_cfg.got_revision_mapping.update({'src': 'got_revision',
'src/third_party/WebKit': 'got_webkit_revision',
'src/tools/swarming_client': 'got_swarming_client_revision',
'src/v8': 'got_v8_revision'})
api.gclient.c = src_cfg
api.bot_update.ensure_checkout(force=True)
# gclient revert step
api.gclient.revert()
# cleanup_temp step
api.chromium.cleanup_temp()
# slave_steps step
api.python("slave_steps", "src/build/android/buildbot/bb_run_bot.py",
args=['--build-properties=%s' % api.json.dumps(api.properties.legacy(),
separators=(',', ':')),
'--factory-properties={"GYP_DEFINES":" component=shared_library",'+\
'"android_bot_id":"gpu-builder-tests-dbg","clobber":false,'+\
'"gclient_env":{},"gclient_timeout":3600,"target":"Debug",'+\
'"target_os":"android"}'], allow_subannotations=True)
def Android_Debug__Nexus_6__steps(api):
# update scripts step; implicitly run by recipe engine.
# bot_update step
src_cfg = api.gclient.make_config(GIT_MODE=True)
soln = src_cfg.solutions.add()
soln.name = "src"
soln.url = "https://chromium.googlesource.com/chromium/src.git"
soln.custom_deps = {'src/third_party/WebKit/LayoutTests': None}
soln.c | ustom_vars = {'webkit_trunk': 'http://src.chromium.org/blink/trunk',
'googlecode_url': 'http://%s.googlecode.com/svn',
'nacl_trunk': 'http://src.chromium.org/native_client/trunk',
'sourceforge_url': 'https://svn.code.sf.net/p/%(repo)s/code',
'llvm_url': 'http://llvm.org/svn/llvm-project'}
src_cfg.target_os = set(['android'])
src_cfg.got_revision_mapping.update({'src': 'got_revision',
'src/third_party/WebKit': 'got_webkit_revision',
'src/tools/swarming_client': | 'got_swarming_client_revision',
'src/v8': 'got_v8_revision'})
api.gclient.c = src_cfg
api.bot_update.ensure_checkout(force=True)
# gclient revert step
api.gclient.revert()
# cleanup_temp step
api.chromium.cleanup_temp()
# slave_steps step
api.python("slave_steps", "src/build/android/buildbot/bb_run_bot.py",
args=['--build-properties=%s' % api.json.dumps(api.properties.legacy(),
separators=(',', ':')),
'--factory-properties={"GYP_DEFINES":" component=shared_library",'+\
'"android_bot_id":"gpu-builder-tests-dbg","clobber":false,'+\
'"gclient_env":{},"gclient_timeout":3600,"target":"Debug",'+\
'"target_os":"android"}'], allow_subannotations=True)
dispatch_directory = {
'Android Debug (Nexus 9)': Android_Debug__Nexus_9__steps,
'Android Debug (Nexus 5)': Android_Debug__Nexus_5__steps,
'Android Debug (Nexus 6)': Android_Debug__Nexus_6__steps,
}
def RunSteps(api):
if api.properties["buildername"] not in dispatch_directory:
raise api.step.StepFailure("Builder unsupported by recipe.")
else:
dispatch_directory[api.properties["buildername"]](api)
def GenTests(api):
yield (api.test('Android_Debug__Nexus_9_') +
api.properties(mastername='chromium.gpu') +
api.properties(buildername='Android Debug (Nexus 9)') +
api.properties(slavename='TestSlave')
)
yield (api.test('Android_Debug__Nexus_5_') +
api.properties(mastername='chromium.gpu') +
api.properties(buildername='Android Debug (Nexus 5)') +
api.properties(slavename='TestSlave')
)
yield (api.test('Android_Debug__Nexus_6_') +
api.properties(mastername='chromium.gpu') +
api.properties(buildername='Android Debug (Nexus 6)') +
api.properties(slavename='TestSlave')
)
yield (api.test('builder_not_in_dispatch_directory') +
api.properties(mastername='chromium.gpu') +
api.properties(buildername='nonexistent_builder') +
api.properties(slavename='TestSlave')
)
|
edaniszewski/pylint-quotes | tests/test_async_function.py | Python | mit | 8,816 | 0.002949 | """Tests for the string quote checker for function-level docstrings.
"""
import sys
import pytest
from pylint_quotes.checker import StringQuoteChecker
from pylint.testutils import Message, set_config
from utils import TRI_Q_DOUB, TRI_Q_SING, StringQuoteCheckerTestCase
@pytest.mark.skipif(sys.version_info < (3, 5), reason='requires python3.5 or python3.6')
class TestAsyncFunctionStringQuoteChecker(StringQuoteCheckerTestCase):
""" Test case for asynchronous function-level docstrings.
"""
CHECKER_CLASS = StringQuoteChecker
@set_config(docstring_quote='double')
def test_single_line_double_quote_docstring_with_cfg_double(self):
test_str = '''
async def fn(x): #@
"""Function level docstring on a single line."""
pass
'''
self.check_async_function(test_str)
@set_config(docstring_quote='single')
def test_single_line_double_quote_docstring_with_cfg_single(self):
test_str = '''
async def fn(x): #@
"""Function level docstring on a single line."""
pass
'''
msg = Message(msg_id='invalid-docstring-quote', line=3, args=(TRI_Q_DOUB, TRI_Q_SING))
self.check_async_function(test_str, msg)
@set_config(docstring_quote='double')
def test_single_line_single_quote_docstring_with_cfg_double(self):
test_str = """
async def fn(x): #@
'''Function level docstring on a single line.'''
pass
"""
msg = Message(msg_id='invalid-docstring-quote', line=3, args=(TRI_Q_SING, TRI_Q_DOUB))
self.check_async_function(test_str, msg)
@set_config(docstring_quote='single')
def test_single_line_single_quote_docstring_with_cfg_single(self):
test_str = """
async def fn(x): #@
'''Function level docstring on a single line.'''
pass
"""
self.check_async_function(test_str)
@set_config(docstring_quote='double')
def test_single_line_double_quote_docstring_with_cfg_double_multi_row_def(self):
test_str = '''
async def fn( #@
x
):
"""Function level docstring on a single line."""
pass
'''
self.check_async_function(test_str)
@set_config(docstring_quote='single')
def test_single_line_double_quote_docstring_with_cfg_single_multi_row_def(self):
test_str = '''
async def fn( #@
x
):
"""Function level docstring on a single line."""
'''
msg = Message(msg_id='invalid-docstring-quote', line=5, args=(TRI_Q_DOUB, TRI_Q_SING))
self.check_async_function(test_str, msg)
@set_config(docstring_quote='double')
def test_single_line_double_quote_docstring_with_cfg_double_multiple_def(self):
test_str = '''
async def fn1(x):
"""Function docstring"""
async def fn2(x): #@
"""Function level docstring on a single line."""
async def fn3(x):
"""Another function level docstring on single line."""
'''
self.check_async_function(test_str)
@set_config(docstring_quote='single')
def test_single_line_double_quote_docstring_with_cfg_single_multiple_def(self):
test_str = '''
async def fn1(x):
"""Function docstring"""
async def fn2(x): #@
"""Function level docstring on a single line."""
async def fn3(x):
"""Another function level docstring on single line."""
'''
msg = Message(msg_id='invalid-docstring-quote', line=6, args=(TRI_Q_DOUB, TRI_Q_SING))
self.check_async_function(test_str, msg)
@set_config(docstring_quote='double')
def test_multi_line_double_quote_docstring_with_cfg_double(self):
test_str = '''
async def fn(x): #@
"""Function level docstring
on multiple lines.
"""
'''
self.check_async_function(test_str)
@set_config(docstring_quote='single')
def test_multi_line_double_quote_docstring_with_cfg_single(self):
test_str = '''
async def fn(x): #@
"""Function level docstring
on multiple lines.
"""
'''
msg = Message(msg_id='invalid-docstring-quote', line=3, args=(TRI_Q_DOUB, TRI_Q_SING))
self.check_async_function(test_str, msg)
@set_config(docstring_quote='double')
def test_multi_line_single_quote_docstring_with_cfg_double(self):
test_str = """
async def fn(x): #@
'''Function level docstring
on multiple lines.
'''
"""
msg = Message(msg_id='invalid-docstring-quote', line=3, args=(TRI_Q_SING, TRI_Q_DOUB))
self.check_async_function(test_str, msg)
@set_config(docstring_quote='single')
def test_multi_line_single_quote_docstring_with_cfg_single(self):
test_str = """
async def fn(x): #@
'''Function level docstring
on multiple lines.
'''
"""
self.check_async_function(test_str)
@set_config(docstring_quote='double')
def test_multi_line_double_quote_docstring_with_cfg_double_multi_row_def(self):
test_str = '''
async def fn( #@
x
):
"""Function level docstring
on multiple lines.
"""
'''
self.check_async_function(test_str)
@set_config(docstring_quote='single')
def test_multi_line_double_quote_docstring_with_cfg_single_multi_row_def(self):
test_str = '''
async def fn( #@
x
):
"""Function level docstring
on multiple lines.
"""
'''
msg = Message(msg_id='invalid-docstring-quote', line=5, args=(TRI_Q_DOUB, TRI_Q_SING))
self.check_async_function(test_str, msg)
@set_config(docstring_quote='double')
def test_multi_line_double_quote_docstring_with_cfg_double_multiple_def(self):
test_str = '''
async def fn1(x):
"""Function docstring
on multiple lines.
"""
async def fn2(x): #@
"""Function level docstring
on multiple lines.
"""
async def fn3(x):
"""Another function level docstring
on multiple lines.
"""
'''
self.check_async_function(test_str)
@set_config(docstring_quote='single')
def test_multi_line_double_quote_docstring_with_cfg_single_multiple_def(self):
test_str = '''
async def fn1(x):
"""Function docstring
on multiple lines.
"""
async def fn2(x): #@
"""Function level docstring
on multiple lines.
"""
async def fn3(x):
"""Another function level docstring
on multiple lines.
"""
'''
msg = Message(msg_id='invalid-docstring-quote', line=8, args=(TRI_Q_DOUB, TRI_Q_SING))
self.check_async_function(test_str, msg)
@set_config(docstring_quote='double')
def test_single_line_double_quote_docstring_with_cfg_double_def_contents_01(self):
test_str = '''
async def fn(x): #@
"""Function level docstring on a single line."""
return 33
'''
self.check_async_function(test_str)
@set_config(docstring_quote='single')
def test_single_line_double_quote_docstring_with_cfg_single_def_contents_01(self):
test_str = '''
async def fn(x): #@
"""Function level docstring on a single line."""
return 33
'''
msg = Message(msg_id='invalid-docstring-quote', line=3, args=(TRI_Q_DOUB, TRI_Q_SING))
self.check_async_function(test_str, msg)
@set_config(docstring_quote='double')
def test_single_line_double_quote_docstring_with_cfg_double_def_contents_02(self):
test_str = '''
async def fn(x): #@
"""Function level docstring on a single line."""
# this is a dictionary
values = {1:2, 3:4}
return values.get(x)
'''
self.check_async_function(test_str)
@set_config(docstring_quote='single')
def test_single_line_double_quote_docstring_with_cfg_single_def_contents_02(self):
test_str = '''
async def fn(x): #@
"""Function level docstring on a single line."""
# this is a dictionary
values = {1:2, 3:4}
return values.get(x)
'''
msg = Message(msg_id='invalid-docstring-quote', line=3, args=(TRI_Q_DOUB, TRI_Q_SING))
self.check_a | sync_function(test_str, msg)
@set_config(docstring_quote='double')
def test_single_line_double_quote_docstring_with_cfg_double_def_contents_03(self):
test_str = '''
async def fn(x): #@
"""Function level docstring on a single line."""
# define a function in a function
def local_fn(z):
| """Function within a function - incept |
ssanderson/numpy | numpy/linalg/linalg.py | Python | bsd-3-clause | 78,991 | 0.000987 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) | == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimen | sional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
for a in arrays:
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
|
bkolada/koalocleaner | uis/__init__.py | Python | gpl-3.0 | 59 | 0 | # -*- coding: utf-8 -*-
f | rom MainWindow import Control | ler
|
nlhepler/idepi | idepi/util/__init__.py | Python | gpl-3.0 | 13,422 | 0.001416 | #
# idepi :: (IDentify EPItope) python libraries containing | some useful machine
# learning interfaces for regression and discrete analysis (including
# cross-validation, grid-search, and maximum | -relevance/mRMR feature selection)
# and utilities to help identify neutralizing antibody epitopes via machine
# learning.
#
# Copyright (C) 2011 N Lance Hepler <nlhepler@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, print_function
from json import dumps as json_dumps, loads as json_loads
from logging import getLogger
from math import copysign
from os import close, remove
from os.path import exists, splitext
from re import compile as re_compile, I as re_I
from shutil import copyfile
from tempfile import mkstemp
import numpy as np
from Bio import AlignIO, SeqIO
from BioExt.misc import translate
from idepi.constants import AminoAlphabet, DNAAlphabet
from idepi.encoder import DNAEncoder
from idepi.hmmer import HMMER
from idepi.labeledmsa import LabeledMSA
from idepi.logging import IDEPI_LOGGER
from idepi.verifier import VerifyError, Verifier
__all__ = [
'seqfile_format',
'set_util_params',
'is_refseq',
'seqrecord_get_values',
'seqrecord_get_subtype',
'seqrecord_set_values',
'ystoconfusionmatrix',
'reference_index',
'extract_feature_weights_similar',
'extract_feature_weights',
'generate_alignment',
'C_range',
'load_stockholm',
'coefs_ranks'
]
__REFSEQ_IDS = []
def set_util_params(refseq_ids=None):
global __REFSEQ_IDS
if refseq_ids is not None:
if isinstance(refseq_ids, (list, tuple)):
__REFSEQ_IDS = refseq_ids
else:
__REFSEQ_IDS = [refseq_ids]
def is_refseq(seqrecord):
try:
if seqrecord.id.strip() in __REFSEQ_IDS:
return True
except IndexError:
pass
# we don't care about this, do we?
# print >> stderr, "ERROR: malformed ID: %s" % id
return False
def seqrecord_get_values(seqrecord, label='IC50'):
# cap ic50s to 25
try:
values = json_loads(seqrecord.description)['values'][label]
except ValueError:
raise ValueError("Cannot parse `{0}' for {1} value".format(
seqrecord.description,
label
))
except KeyError:
return None
return values
def seqrecord_get_subtype(seqrecord):
try:
subtype = json_loads(seqrecord.description)['subtype']
except ValueError:
raise ValueError("Cannot parse `%s' for HIV subtype".format(
seqrecord.description
))
return subtype
def seqrecord_set_values(seqrecord, label, values):
desc = json_loads(seqrecord.description)
desc['values'][label] = values
seqrecord.description = json_dumps(desc)
return seqrecord
# very heavily based on the design of friedmanchisquare in scipy
try:
    from scipy.special import fdtrc

    # very heavily based on the design of friedmanchisquare in scipy
    def durbin(*args):
        """Durbin rank test across blocks (one sequence per argument).

        Each positional argument is one block; all blocks must have equal
        length and at least three blocks are required.  Ranks are computed
        per block, with non-positive values treated as unranked (rank 0).

        :returns: tuple (T2, p) where T2 is the F-distributed statistic and
            p is its survival-function p-value.
        :raises ValueError: for fewer than 3 blocks or unequal block sizes.
        """

        # taken verbatim from scipy.stats._support.abut
        def _abut(source, *args):
            source = np.asarray(source)
            if len(source.shape) == 1:
                width = 1
                source = np.resize(source, [source.shape[0], width])
            else:
                width = source.shape[1]
            for addon in args:
                if len(addon.shape) == 1:
                    width = 1
                    addon = np.resize(addon, [source.shape[0], width])
                else:
                    width = source.shape[1]
                if len(addon) < len(source):
                    addon = np.resize(addon, [source.shape[0], addon.shape[1]])
                elif len(addon) > len(source):
                    source = np.resize(source, [addon.shape[0], source.shape[1]])
                source = np.concatenate((source, addon), 1)
            return source

        # also taken from scipy.stats, but ignores everything under 0.
        def _rankposdata(a):
            a = np.ravel(a)
            b = np.argsort(a)
            a = a[b]
            n = len(a)
            dupcount = 0
            oldrank = -1
            sumranks = 0
            newarray = np.zeros(n, float)
            for i in range(n):
                if a[i] <= 0.:
                    newarray[b[i]] = 0.
                    continue
                oldrank += 1
                sumranks += oldrank
                dupcount += 1
                # close out a run of tied values once the value changes
                if i == n - 1 or a[i] != a[i + 1]:
                    averrank = float(sumranks) / float(dupcount) + 1
                    for j in range(i - dupcount + 1, i + 1):
                        newarray[b[j]] = averrank
                    sumranks = 0
                    dupcount = 0
            return newarray

        b = len(args)
        if b < 3:
            raise ValueError('Less than 3 levels. Durbin test is not appropriate')
        k = len(args[0])
        for i in range(1, b):
            if len(args[i]) != k:
                raise ValueError('Unequal N in durbin. Aborting.')
        data = _abut(*args)
        data = data.astype(float)
        A = 0.
        t = data.shape[1]
        R = np.zeros(t, float)
        rs = np.zeros(t, int)
        for i in range(len(data)):
            data[i] = _rankposdata(data[i])
            for j in range(len(data[i])):
                A += pow(data[i, j], 2.)
                R[j] += data[i, j]
                if data[i, j] > 0.:
                    rs[j] += 1
        r = np.mean(rs)
        t = float(t)
        b = float(b)
        k = float(k)
        C = b * k * pow(k + 1, 2) / 4
        T1 = (t - 1) * sum(pow(x, 2) - r * C for x in R) / (A - C)
        T2 = (T1 / (t - 1)) / ((b * k - b - T1) / (b * k - b - t + 1))
        # bug fix: removed stray debug print()s of the rank matrix and the
        # intermediate statistics that polluted stdout on every call.
        return T2, fdtrc(k - 1, b * k - b - t + 1, T2)

    __all__ += ['durbin']
except ImportError:
    pass
def ystoconfusionmatrix(truth, preds):
    """Build the 2x2 confusion matrix [[tp, fn], [fp, tn]] from signed labels.

    Anything strictly positive counts as the positive class; zero and
    negative values count as the negative class.
    """
    actual_pos = truth > 0.
    actual_neg = truth <= 0.
    pred_pos = preds > 0.
    pred_neg = preds <= 0.
    tp = np.sum(np.multiply(actual_pos, pred_pos))
    tn = np.sum(np.multiply(actual_neg, pred_neg))
    fp = np.sum(np.multiply(actual_neg, pred_pos))
    fn = np.sum(np.multiply(actual_pos, pred_neg))
    return np.array([[tp, fn], [fp, tn]], dtype=int)
def reference_index(alignment, ref_id_func=None):
    """Return the index of the first record for which `ref_id_func` is truthy.

    Returns None when no predicate is supplied.

    :raises RuntimeError: when a predicate is supplied but matches no record.
    """
    if ref_id_func is None:
        return None
    for idx, record in enumerate(alignment):
        if ref_id_func(record):
            return idx
    raise RuntimeError('ref_id_func provided but no reference found')
def extract_feature_weights_similar(instance, similar=True):
    """Summarize a fitted model as a dict of feature names and weights.

    When `similar` is truthy the selector's related features are included
    under the 'similar' key as well.
    """
    summary = {
        'features': instance.features(),
        'weights': instance.classifier.weights(),
    }
    if similar:
        summary['similar'] = instance.selector.related()
    return summary
def extract_feature_weights(instance):
    """Shortcut for extract_feature_weights_similar() without 'similar'."""
    return extract_feature_weights_similar(instance, similar=False)
def seqfile_format(filename):
    """Guess the Bio.SeqIO format name from a filename extension.

    Extensions beginning with '.sto' (.sto, .stockholm, ...) map to
    'stockholm'; anything else is assumed to be 'fasta'.
    """
    extension = splitext(filename)[1]
    return 'stockholm' if extension.find('sto') == 1 else 'fasta'
def generate_hmm_(opts):
fd, tmphmm = mkstemp(); close(fd)
fd, tmpaln = mkstemp(); close(fd)
is_dna = opts.ENCODER == DNAEncoder
try:
with open(opts.REFMSA) as msa_fh:
with open(tmpaln, 'w') as aln_fh:
msa_fmt = seqfile_format(opts.REFMSA)
source = Verifier(SeqIO.parse(msa_fh, msa_ |
chapmanb/cwltool | tests/test_ext.py | Python | apache-2.0 | 5,706 | 0.00368 | import unittest
import tempfile
import os
import shutil
import cwltool.expression as expr
import cwltool.factory
import cwltool.pathmapper
import cwltool.process
import cwltool.workflow
from .util import get_data
from cwltool.main import main
class TestListing(unittest.TestCase):
    """Directory ``listing`` expansion behavior under the cwltool extensions.

    Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``
    throughout (the alias is removed in newer Python versions).
    """

    def test_missing_enable_ext(self):
        # Require that --enable-ext is provided.
        self.assertEqual(main([get_data('tests/wf/listing_deep.cwl'), get_data('tests/listing-job.yml')]), 1)

    def test_listing_deep(self):
        # Should succeed.
        self.assertEqual(main(["--enable-ext", get_data('tests/wf/listing_deep.cwl'), get_data('tests/listing-job.yml')]), 0)

    def test_listing_shallow(self):
        # This fails on purpose, because it tries to access listing in a subdirectory the same way that listing_deep does,
        # but it shouldn't be expanded.
        self.assertEqual(main(["--enable-ext", get_data('tests/wf/listing_shallow.cwl'), get_data('tests/listing-job.yml')]), 1)

    def test_listing_none(self):
        # This fails on purpose, because it tries to access listing but it shouldn't be there.
        self.assertEqual(main(["--enable-ext", get_data('tests/wf/listing_none.cwl'), get_data('tests/listing-job.yml')]), 1)

    def test_listing_v1_0(self):
        # Default behavior in 1.0 is deep expansion.
        self.assertEqual(main([get_data('tests/wf/listing_v1_0.cwl'), get_data('tests/listing-job.yml')]), 0)

    # def test_listing_v1_1(self):
    #     # Default behavior in 1.1 will be no expansion
    #     self.assertEqual(main([get_data('tests/wf/listing_v1_1.cwl'), get_data('tests/listing-job.yml')]), 1)
class TestInplaceUpdate(unittest.TestCase):
def test_updateval(self):
try:
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "value"), "w") as f:
f.write("1")
out = tempfile.mkdtemp()
self.assertEquals(main(["--outdir", out, get_data('tests/wf/updateval.cwl'), "-r", os.path.join(tmp, "value")]), 0)
with open(os.path.join(tmp, "value"), "r") as f:
self.assertEquals("1", f.read())
with open(os.path.join(out, "value"), "r") as f:
self.assertEquals("2", f.read())
finally:
shutil.rmtree(tmp)
shutil.rmtree(out)
def test_updateval_inplace(self):
try:
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "value"), "w") as f:
f.write("1")
out = tempfile.mkdtemp()
self.assertEquals(main(["--enable-ext", "--leave-outputs", "--outdir", out, get_data('tests/wf/updateval_inplace.cwl'), "-r", os.path.join(tmp, "value")]), 0)
with open(os.path.join(tmp, "value"), "r") as f:
self.assertEquals("2", f.read())
self.assertFalse(os.path.exists(os.path.join(out, "value")))
finally:
shutil.rmtree(tmp)
shutil.rmtree(out)
def test_write_write_conflict(self):
try:
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "value"), "w") as f:
f.write("1")
self.assertEquals(main(["--enable-ext", get_data('tests/wf/mut.cwl'), "-a", os.path.join(tmp, "value")]), 1)
with open(os.path.join(tmp, "value"), "r") as f:
self.assertEquals("2", f.read())
finally:
shutil.rmtree(tmp)
def test_sequencing(self):
try:
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "value"), "w") as f:
f.write("1")
self.assertEquals(main(["--enable-ext", get_data('tests/wf/mut2.cwl'), "-a", os.path.join(tmp, "value")]), 0)
with open(os.path.join(tmp, "value"), "r") as f:
self.assertEquals("3", f.read())
finally:
shutil.rmtree(tmp)
# def test_read_write_conflict(self):
# try:
# tmp = tempfile.mkdtemp()
# with open(os.path.join(tmp, "value"), "w") as f:
# f.write("1")
# self.assertEquals(main(["--enable-ext", get_data('tests/wf/mut3.cwl'), "-a", os.path.join(tmp, "value")]), 0)
# finally:
# shutil.rmtree(tmp)
def test_updatedir(self):
try:
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "value"), "w") as f:
f.write("1")
out = tempfile.mkdtemp()
self.assertFalse(os.path.exists(os.path.join(tmp, "blurb")))
self.assertFalse(os.path.exists(os.path.join(out, "blurb")))
self.assertEquals(main(["--outdir", out, get_data('tests/wf/updatedir.cwl'), "-r", tmp]), 0)
self.assertFalse(os.path.exists(os.path.join(tmp, "blurb")))
self.assertTrue(os.path.exists(os.path.join(out, "inp/blurb")))
finally:
shutil.rmtree(tmp)
shutil.rmtree(out)
def test_updateval_inplace(self):
try:
tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "value"), "w") as f:
f.write("1")
out = tempfile.mkdtemp()
self.assertFalse(os.path.exists(os.path.join(tmp, "blurb")))
self.assertFalse(os.path.exists(os.path.join(out, "blurb")))
self.assertEquals(main(["--enable-ext", "--leave-outputs", "--outdir", out, get_data('tests/wf/updatedir_inplace.cwl'), "-r", tmp]), 0)
self.assertTrue(os.path.exists(os.path.join(tmp, "blurb")))
self.assertFalse(os.path.exists(os.path.join(out, "inp/blurb")))
finally:
shutil.rmtree(tmp)
shutil.rmtree(out)
|
dropbox/changes-lxc-wrapper | changes_lxc_wrapper/snapshot_cache.py | Python | apache-2.0 | 3,494 | 0.000286 | import os.path
import shutil
from datetime import datetime
from uuid import UUID
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
def get_directory_size(path):
    """Return the total size in bytes of all files under `path`, recursively.

    A missing path yields 0 (os.walk produces nothing).
    """
    return sum(
        os.path.getsize(os.path.join(dirpath, name))
        for dirpath, _dirnames, filenames in os.walk(path)
        for name in filenames
    )
def convert_date(value):
    # Parse an ISO-8601-style timestamp with microsecond precision (naive,
    # no timezone) using the module-wide DATETIME_FORMAT.
    return datetime.strptime(value, DATETIME_FORMAT)
class SnapshotImage(object):
    """Metadata about a single snapshot image found on disk.

    The on-disk size is computed eagerly at construction time, so `size`
    is a plain attribute rather than a disk-rescanning property.
    """

    def __init__(self, id, path, date_created=None, is_active=None,
                 is_valid=True, project=None):
        self.id = id
        self.path = path
        self.size = get_directory_size(path)
        self.project = project
        self.date_created = date_created
        self.is_active = is_active
        self.is_valid = is_valid
class SnapshotCache(object):
    """Local on-disk cache of snapshot images.

    Walks `root` for snapshot directories, cross-references them with the
    upstream API, and supports pruning entries.

    Fix: reconstructed two lines that were corrupted by stray '| '
    separator artifacts (the upstream_data 'project' entry and the
    `id_ = UUID(...)` assignment).
    """

    def __init__(self, root, api):
        self.api = api
        self.root = root
        self.snapshots = []

    def initialize(self):
        """Scan the cache directory and annotate entries with upstream metadata."""
        print("==> Initializing snapshot cache")
        # find all valid snapshot paths
        path_list = self._collect_files(self.root)
        upstream_data = {}
        if path_list:
            # get upstream metadata
            print("==> Fetching upstream metadata")
            for snapshot in self.api.list_snapshots():
                for image in snapshot['images']:
                    upstream_data[UUID(image['id'])] = {
                        'project': UUID(snapshot['project']['id']),
                        'date_created': convert_date(snapshot['dateCreated']),
                        'is_active': snapshot['isActive'],
                    }
        # collect size information for each path
        snapshot_list = []
        for path in path_list:
            # The snapshot id is the final path component.
            id_ = UUID(path.rsplit('/', 1)[-1])
            path_data = upstream_data.get(id_, {})
            snapshot_list.append(SnapshotImage(
                id=id_,
                path=path,
                is_active=path_data.get('is_active', False),
                date_created=path_data.get('date_created'),
                # an image unknown upstream is considered invalid
                is_valid=bool(path_data),
                project=path_data.get('project'),
            ))
        self.snapshots = snapshot_list
        print("==> {} items found in cache ({} bytes)".format(len(self.snapshots), self.total_size))

    @property
    def total_size(self):
        # Combined on-disk size of all cached snapshots, in bytes.
        return sum(s.size for s in self.snapshots)

    def remove(self, snapshot, on_disk=True):
        """Drop `snapshot` from the cache (and, by default, from disk).

        Active snapshots must never be removed.
        """
        assert not snapshot.is_active
        print("==> Removing snapshot: {}".format(snapshot.id))
        if on_disk:
            shutil.rmtree(snapshot.path)
        self.snapshots.remove(snapshot)

    def _collect_files(self, root):
        # The root will consist of three subdirs, depicting the dist, release,
        # and arch. i.e. ubuntu/precise/amd64/
        # We need to collect all children that are three levels deep
        if not os.path.exists(root):
            return []

        def _r_collect_files(path, _stack=None, _depth=1):
            if _stack is None:
                _stack = []
            for name in os.listdir(path):
                name_path = os.path.join(path, name)
                if not os.path.isdir(name_path):
                    continue
                if _depth <= 3:
                    _r_collect_files(name_path, _stack, _depth + 1)
                else:
                    _stack.append(name_path)
            return _stack

        return _r_collect_files(root)
|
pfs/CSA14 | python/csa14/QCD_80_120_MuEnriched_pythia8_cfi.py | Python | gpl-2.0 | 5,943 | 0.00875 | import FWCore.ParameterSet.Config as cms
# Process all events by default (-1 = no limit).
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Primary and (optional) secondary input file lists, filled below.
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
# MiniAOD input files for the QCD_Pt-80to120 MuEnriched Pythia8 sample.
# Fix: one filename (48093276-...) had been corrupted by stray ' | '
# separator artifacts spanning two lines; reconstructed from the dataset
# path shared by every sibling entry.
readFiles.extend( [
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/0A2744F9-FA05-E411-BD0C-00259073E36C.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/0E936434-FD05-E411-81BF-F4CE46B27A1A.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/32E07232-FD05-E411-897C-00259073E522.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/3CE2B535-FB05-E411-919A-20CF307C98DC.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/48093276-FC05-E411-9EEE-001F296564C6.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/50B66FF3-FA05-E411-A937-001F296564C6.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/544B2DF7-FA05-E411-B91F-001F2965F296.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/54DB2FF7-FE05-E411-824B-00259073E522.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/56D1BC32-FD05-E411-A512-20CF3027A5EB.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/5AD70432-FC05-E411-906C-20CF3027A5CD.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/5C4FBFF4-FA05-E411-9767-00259073E36C.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/5CF748F8-FC05-E411-814B-20CF3027A5A2.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/7806E24D-FC05-E411-8922-001F2965F296.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/7C16B231-FD05-E411-8E00-20CF3027A5EB.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/802452C1-FC05-E411-A969-00221983E092.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/8217E3BD-FC05-E411-B8C2-0025907277CE.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/8676BEF4-FA05-E411-B26A-00259073E36C.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/8C1741F3-FA05-E411-B5B5-20CF3027A582.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/8C915AB8-FC05-E411-9EAF-F4CE46B27A1A.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/AA0FCBB0-FC05-E411-898D-00259073E36C.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/B49383BA-FC05-E411-9914-F4CE46B27A1A.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/B6DAEFDD-FB05-E411-9851-20CF3027A5CD.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/C6F5C44F-FD05-E411-B86F-D48564592B02.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/C83B6B6C-FC05-E411-BAFD-D48564599CAA.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/CEF64C64-FD05-E411-A799-001F2965648A.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/D6C305FC-FA05-E411-9AF5-00259073E522.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/DE0FC6A4-FC05-E411-A2F9-00259073E36C.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/E2D5AD33-FD05-E411-868A-D48564594F36.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/E63BCC43-FB05-E411-834E-D48564599CEE.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/EAD01F32-FD05-E411-91E4-20CF3027A5F4.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/F0A18D25-FC05-E411-8DFC-20CF3027A582.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/F0B8E6B6-FA05-E411-9DAE-20CF3027A5CD.root',
    '/store/mc/Spring14miniaod/QCD_Pt-80to120_MuEnrichedPt5_Tune4C_13TeV_pythia8/MINIAODSIM/PU20bx25_POSTLS170_V5-v1/00000/F23A21C3-FD05-E411-9E29-A4BADB3D00FF.root' ] );
secFiles.extend( [
] )
|
calico/basenji | bin/sonnet_predict_bed.py | Python | apache-2.0 | 9,710 | 0.016478 | #!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import json
import os
import pdb
import pickle
import sys

import h5py
import numpy as np
# fix: this line was corrupted ('import pandas as | pd') by a stray
# separator artifact; restored to the standard alias.
import pandas as pd
import pysam
import pyBigWig
import tensorflow as tf
if tf.__version__[0] == '1':
  tf.compat.v1.enable_eager_execution()

from basenji import bed
from basenji import dna_io
from basenji import seqnn
from basenji import stream

'''
sonnet_predict_bed.py

Predict sequences from a BED file.
'''
################################################################################
# main
################################################################################
def main():
  """Predict model outputs for BED intervals with a saved Sonnet model.

  Reads intervals from <bed_file>, runs <model_file> over model-length
  sequences centered on each site, and writes per-site predictions to
  <out_dir>/predict.h5 (plus optional per-target BigWigs).
  """
  usage = 'usage: %prog [options] <model_file> <bed_file>'
  parser = OptionParser(usage)
  parser.add_option('-b', dest='bigwig_indexes',
      default=None, help='Comma-separated list of target indexes to write BigWigs')
  parser.add_option('-e', dest='embed_layer',
      default=None, type='int',
      help='Embed sequences using the specified layer index.')
  parser.add_option('-f', dest='genome_fasta',
      default=None,
      help='Genome FASTA for sequences [Default: %default]')
  parser.add_option('-g', dest='genome_file',
      default=None,
      help='Chromosome length information [Default: %default]')
  parser.add_option('-l', dest='site_length',
      default=None, type='int',
      help='Prediction site length. [Default: model seq_length]')
  parser.add_option('-o', dest='out_dir',
      default='pred_out',
      help='Output directory [Default: %default]')
  # parser.add_option('--plots', dest='plots',
  #     default=False, action='store_true',
  #     help='Make heatmap plots [Default: %default]')
  parser.add_option('-p', dest='processes',
      default=None, type='int',
      help='Number of processes, passed by multi script')
  parser.add_option('--rc', dest='rc',
      default=False, action='store_true',
      help='Ensemble forward and reverse complement predictions [Default: %default]')
  parser.add_option('-s', dest='sum',
      default=False, action='store_true',
      help='Sum site predictions [Default: %default]')
  parser.add_option('--shifts', dest='shifts',
      default='0',
      help='Ensemble prediction shifts [Default: %default]')
  parser.add_option('--species', dest='species',
      default='human')
  parser.add_option('-t', dest='targets_file',
      default=None, type='str',
      help='File specifying target indexes and labels in table format')
  (options, args) = parser.parse_args()
  if len(args) == 2:
    model_file = args[0]
    bed_file = args[1]
  elif len(args) == 4:
    # multi worker
    options_pkl_file = args[0]
    model_file = args[1]
    bed_file = args[2]
    worker_index = int(args[3])
    # load options
    options_pkl = open(options_pkl_file, 'rb')
    options = pickle.load(options_pkl)
    options_pkl.close()
    # update output directory
    options.out_dir = '%s/job%d' % (options.out_dir, worker_index)
  else:
    parser.error('Must provide parameter and model files and BED file')
  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)
  options.shifts = [int(shift) for shift in options.shifts.split(',')]
  if options.bigwig_indexes is not None:
    options.bigwig_indexes = [int(bi) for bi in options.bigwig_indexes.split(',')]
  else:
    options.bigwig_indexes = []
  if len(options.bigwig_indexes) > 0:
    bigwig_dir = '%s/bigwig' % options.out_dir
    if not os.path.isdir(bigwig_dir):
      os.mkdir(bigwig_dir)
  #################################################################
  # read parameters and collect target information
  # NOTE(review): target_slice is computed here but never used below --
  # confirm whether target subsetting was meant to be applied to the preds.
  if options.targets_file is None:
    target_slice = None
  else:
    targets_df = pd.read_table(options.targets_file, index_col=0)
    target_slice = targets_df.index
  #################################################################
  # setup model
  seqnn_model = tf.saved_model.load(model_file).model
  # query num model targets by running a null (all-zero) sequence through
  seq_length = seqnn_model.predict_on_batch.input_signature[0].shape[1]
  null_1hot = np.zeros((1,seq_length,4))
  null_preds = seqnn_model.predict_on_batch(null_1hot)
  null_preds = null_preds[options.species].numpy()
  _, preds_length, preds_depth = null_preds.shape
  # hack sizes: assumes a fixed 128 bp prediction bin -- TODO confirm
  preds_window = 128
  seq_crop = (seq_length - preds_length*preds_window) // 2
  #################################################################
  # sequence dataset
  if options.site_length is None:
    options.site_length = preds_window*preds_length
    print('site_length: %d' % options.site_length)
  # construct model sequences
  model_seqs_dna, model_seqs_coords = bed.make_bed_seqs(
    bed_file, options.genome_fasta,
    seq_length, stranded=False)
  # construct site coordinates
  site_seqs_coords = bed.read_bed_coords(bed_file, options.site_length)
  # filter for worker SNPs: each worker takes a contiguous slice
  if options.processes is not None:
    worker_bounds = np.linspace(0, len(model_seqs_dna), options.processes+1, dtype='int')
    model_seqs_dna = model_seqs_dna[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
    model_seqs_coords = model_seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
    site_seqs_coords = site_seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
  num_seqs = len(model_seqs_dna)
  #################################################################
  # setup output
  # the site window is centered within the model's prediction window
  assert(preds_length % 2 == 0)
  preds_mid = preds_length // 2
  assert(options.site_length % preds_window == 0)
  site_preds_length = options.site_length // preds_window
  assert(site_preds_length % 2 == 0)
  site_preds_start = preds_mid - site_preds_length//2
  site_preds_end = site_preds_start + site_preds_length
  # initialize HDF5
  out_h5_file = '%s/predict.h5' % options.out_dir
  if os.path.isfile(out_h5_file):
    os.remove(out_h5_file)
  out_h5 = h5py.File(out_h5_file, 'w')
  # create predictions
  if options.sum:
    out_h5.create_dataset('preds', shape=(num_seqs, preds_depth), dtype='float16')
  else:
    out_h5.create_dataset('preds', shape=(num_seqs, site_preds_length, preds_depth), dtype='float16')
  # store site coordinates
  site_seqs_chr, site_seqs_start, site_seqs_end = zip(*site_seqs_coords)
  site_seqs_chr = np.array(site_seqs_chr, dtype='S')
  site_seqs_start = np.array(site_seqs_start)
  site_seqs_end = np.array(site_seqs_end)
  out_h5.create_dataset('chrom', data=site_seqs_chr)
  out_h5.create_dataset('start', data=site_seqs_start)
  out_h5.create_dataset('end', data=site_seqs_end)
  #################################################################
  # predict scores, write output
  # define sequence generator
  def seqs_gen():
    for seq_dna in model_seqs_dna:
      yield dna_io.dna_1hot(seq_dna)
  # initialize predictions stream
  preds_stream = stream.PredStreamSonnet(seqnn_model, seqs_gen(),
    rc=options.rc, shifts=options.shifts, species=options.species)
  for si in range(num_seqs):
    preds_seq = preds_stream[si]
    # slice site
    preds_site = preds_seq[site_preds_start:site_preds_end,:]
    # write
    if options.sum:
      out_h5['preds'][si] = preds_site.sum(axis=0)
    else:
      out_h5['preds'][si] = preds_site
    # write bigwig (bigwig_dir exists whenever bigwig_indexes is non-empty)
    for ti in options.bigwig_indexes:
      bw_file = '%s/s%d_t%d.bw' % (bigwig_dir, si, ti)
      bigwig_write(preds_seq[:,ti], model_seqs_coords[si], bw_file,
          options.genome_file, seq_crop)
  # close output HDF5
  out_h5.close()
def bigwig_open(bw_file, genome_file):
"" |
MrSenko/Nitrate | tcms/testplans/migrations/0002_add_initial_data.py | Python | gpl-2.0 | 929 | 0.001076 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
# Default test-plan type names seeded into a fresh installation by the
# data migration below.
plan_types = [
    'Unit',
    'Integration',
    'Function',
    'System',
    'Acceptance',
    'Installation',
    'Performance',
    'Product',
    'Interoperability',
    'Smoke',
    'Regression',
]
def forwards_add_initial_data(apps, schema_editor):
    """Seed the historical TestPlanType model with the default plan types."""
    TestPlanType = apps.get_model('testplans', 'TestPlanType')
    new_types = [TestPlanType(name=name, description='') for name in plan_types]
    TestPlanType.objects.bulk_create(new_types)
def reverse_add_initial_data(apps, schema_editor):
    """Remove the seeded plan types (reverse of forwards_add_initial_data).

    Fix: the app label string had been corrupted to 'tes | tplans' by a
    stray separator artifact; restored to 'testplans', matching the app
    label used by the forwards migration.
    """
    TestPlanType = apps.get_model('testplans', 'TestPlanType')
    TestPlanType.objects.filter(name__in=plan_types).delete()
class Migration(migrations.Migration):
    """Data migration seeding the default TestPlanType rows.

    Fix: the ``dependencies`` line had been corrupted by a stray '| '
    separator artifact; restored.
    """

    dependencies = [
        ('testplans', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(forwards_add_initial_data, reverse_add_initial_data)
    ]
|
google/loaner | loaner/web_app/backend/handlers/cron/run_shelf_audit_events_test.py | Python | apache-2.0 | 6,937 | 0.002451 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.handlers.cron.run_shelf_audit_events."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import logging
import mock
from loaner.web_app.backend.clients import directory # pylint: disable=unused-import
from loaner.web_app.backend.lib import events # pylint: disable=unused-import
from loaner.web_app.backend.models import config_model
from loaner.web_app.backend.models import event_models
from loaner.web_app.backend.models import shelf_model
from loaner.web_app.backend.testing import handlertest
from loaner.web_app.backend.testing import loanertest
_NOW = datetime.datetime.utcnow()
class RunShelfAuditEventsHandlerTest(handlertest.HandlerTestCase):
"""Test the RunShelfAuditEventsHandler."""
def setUp(self, *args, **kwargs):
super(RunShelfAuditEventsHandlerTest, self).setUp(*args, **kwargs)
event_models.ShelfAuditEvent.create()
def setup_shelves(self):
self.shelf1 = shelf_model.Shelf.enroll(
'test@{}'.format(loanertest.USER_DOMAIN), 'US-AUS', 16, 'Alamo',
40.6892534, -74.0466891, 1.0, loanertest.USER_EMAIL)
self.shelf2 = shelf_model.Shelf.enroll(
'test@{}'.format(loanertest.USER_DOMAIN), 'US-BLD', 24,
'Overlook Hotel', 40.6892534, -74.0466891, 1.0, loanertest.USER_EMAIL)
self.shelf3 = shelf_model.Shelf.enroll(
'test@{}'.format(loanertest.USER_DOMAIN), 'US-CAM', 24, 'Freedom Trail',
40.6892534, -74.0466891, 1.0, loanertest.USER_EMAIL)
self.shelf4 = shelf_model.Shelf.enroll(
'test@{}'.format(loanertest.USER_DOMAIN), 'US-NYC', 24,
'Statue of Liberty', 40.6892534, -74.0466891, 1.0,
loanertest.USER_EMAIL)
self.shelf_audit_interval = 24
config_model.Config.set('shelf_audit_interval', self.shelf_audit_interval)
def setup_shelf_not_audited(self):
self.shelf2.audit_notification_enabled = True
self.shelf2.audit_requested = False
self.shelf2.last_audit_time = _NOW - datetime.timedelta(
hours=self.shelf_audit_interval + 1)
self.shelf2.put()
@mock.patch.object(config_model.Config, 'get', return_value=True)
@mock.patch.object(logging, 'warning')
def test_no_shelves(self, mock_warning, mock_config_get):
"""Tests with no entities in datastore."""
response = self.testapp.get(r'/_cron/run_shelf_audit_events')
self.assertEqual(response.status_int, 200)
self.assertFalse(self.testbed.mock_raiseevent.called)
self.assertEqual(mock_warning.call_count, 0)
def test_shelves(self):
"""Tests with two shelves, and only one raises the event."""
self.setup_shelves()
# Shelf ready for notifications but recently audited.
self.shelf1.audit_notification_enabled = True
self.shelf1.audit_requested = False
self.shelf1.last_audit_time = _NOW - datetime.timedelta(
hours=self.shelf_audit_interval - 1)
self.shelf1.put()
# Shelf ready for notifications and not audited in a while.
self.setup_shelf_not_audited()
# Shelf disabled for notifications
self.shelf3.audit_notification_enabled = False
self.shelf3.audit_requested = False
self.shelf3.put()
# Shelf already notified.
self.shelf4.audit_notification_enabled = True
self.shelf4.audit_requested = True
self.shelf4.put()
# Shelf ready for notifications but has not been audited ever.
shelf5 = shelf_model.Shelf.enroll(
'test@{}'.format(loanertest.USER_DOMAIN), 'US-WAS', 24,
'Washington', 40.6892534, -74.0466891, 1.0,
loanertest.USER_EMAIL)
shelf5.audit_notification_enabled = True
shelf5.audit_requested = False
shelf5.last_audit_time = None
self.testbed.mock_raiseevent.reset_mock()
response = self.testapp.get(r'/_cron/run_shelf_audit_events')
self.assertEqual(response.status_int, 200)
self.assertTrue(self.testbed.mock_raiseevent.called)
expected_calls = [
mock.call(event_name='shelf_needs_audit', shelf=self.shelf2)
]
self.assertListEqual(
self.testbed.mock_raiseevent.mock_calls, expected_calls)
def test_shelves_with_overrides(self):
"""Tests with two shelves, both have overrides, reversing results."""
self.setup_shelves()
# Shelf ready for notifications but recently audited.
self.shelf1.audit_notification_enabled = True
self.shelf1.audit_requested = False
self.shelf1.last_audit_time = _NOW - datetime.timedelta(
hours=self.shelf_audit_interval - 1)
self.shelf1.audit_interval_override = 22 # Override makes it more strict.
self.shelf1.put()
# Shelf ready for notifications and not audited in a while.
self.shelf2.audit_notification_enabled = True
self.shelf2.audit_requested = False
self.shelf2.last_known_healthy = _NOW - datetime.timedelta(
hours=self.shelf_audit_interval + 1)
self.shelf2.audit_interval_override = 26 # Override make | s it more lenient.
self.shelf2.put()
# Shelf disabled for notifications
self.shelf3.audit_notification_enabled = False
self.shelf3.audit_requested = False
self.shelf3.put()
# Shelf already notified.
self.shel | f4.audit_notification_enabled = True
self.shelf4.audit_requested = True
self.shelf4.put()
self.testbed.mock_raiseevent.reset_mock()
response = self.testapp.get(r'/_cron/run_shelf_audit_events')
self.assertEqual(response.status_int, 200)
self.assertTrue(self.testbed.mock_raiseevent.called)
expected_calls = [
mock.call(event_name='shelf_needs_audit', shelf=self.shelf1)
]
self.assertListEqual(
self.testbed.mock_raiseevent.mock_calls, expected_calls)
@mock.patch.object(logging, 'error', autospec=True)
def test_event_error(self, mock_logging):
self.setup_shelves()
self.setup_shelf_not_audited()
self.testbed.mock_raiseevent.side_effect = events.EventActionsError
self.testapp.get(r'/_cron/run_shelf_audit_events')
self.assertEqual(mock_logging.call_count, 1)
@mock.patch.object(config_model.Config, 'get', return_value=False)
@mock.patch.object(logging, 'warning')
def test_get_audits_disabled(self, mock_warning, mock_config_get):
self.testapp.get(r'/_cron/run_shelf_audit_events')
self.assertEqual(mock_warning.call_count, 1)
if __name__ == '__main__':
handlertest.main()
|
microcom/partner-contact | partner_multi_relation/models/res_partner_relation.py | Python | agpl-3.0 | 5,718 | 0 | # Copyright 2013-2017 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=api-one-deprecated
"""Store relations (connections) between partners."""
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ResPartnerRelation(models.Model):
"""Model res.partner.relation is used to describe all links or relations
between partners in the database.
This model is actually only used to store the data. The model
res.partner.relation.all, based on a view that contains each record
two times, once for the normal relation, once for the inverse relation,
will be used to maintain the data.
"""
_name = 'res.partner.relation'
_description = 'Partner relation'
left_partner_id = fields.Many2one(
comodel_name='res.partner',
string='Source Partner',
required=True,
auto_join=True,
ondelete='cascade',
)
right_partner_id = fields.Many2one(
comodel_name='res.partner',
string='Destination Partner',
required=True,
auto_join=True,
ondelete='cascade',
)
type_id = fields.Many2one(
comodel_name='res.partner.relation.type',
string='Type',
required=True,
auto_join=True,
)
date_start = fields.Date('Starting date')
date_end = fields.Date('Ending date')
@api.model
def create(self, vals):
"""Override create to correct values, before being stored."""
context = self.env.context
if 'left_partner_id' not in vals and context.get('active_id'):
vals['left_partner_id'] = context.get('active_id')
return super(ResPartnerRelation, self).create(vals)
@api.constrains('date_start', 'date_end')
def _check_dates(self):
"""End date should not be before start date, if not filled
:raises ValidationError: When constraint is violated
"""
for record in self:
if (record.date_start and record.date_end and
record.date_start > record.date_end):
raise ValidationError(
_('The starting date cannot be after the ending date.')
)
@api.constrains('left_partner_id', 'type_id')
def _check_partner_left(self):
"""Check left partner for required company or person
:raises ValidationError: When constraint is violated
"""
self._check_partner("left")
@api.constrains('right_partner_id', 'type_id')
def _check_partner_right(self):
"""Check right partner for required company or person
:raises ValidationError: When constraint is violated
"""
self._check_partner("right")
@api.multi
def _check_partner(self, side):
"""Check partner for required company or person, and for category
:param str side: left or right
:raises ValidationError: When constraint is violated
"""
for record in self:
assert side in ['left', 'right']
ptype = getattr(record.type_id, "contact_type_%s" % side)
partner = getattr(record, '%s_partner_id' % side)
if ((ptype == 'c' and not partner.is_company) or
(ptype == 'p' and partner.is_company)):
raise ValidationError(
_('The %s partner is not applicable for this '
'relation type.') % side
)
category = getattr(record.type_id, "partner_category_%s" % side)
if category and category.id not in partner.category_id.ids:
raise ValidationError(
_('The %s partner does not have category %s.') %
(side, category.name)
)
@api.constrains('left_partner_id', 'right_partner_id')
def _check_not_with_self(self):
"""Not allowed to link partner to same partner
:raises ValidationError: When constraint is violated
"""
for recor | d in self:
if record.left_partner_id == record.right_partner_id:
if not (record.type_id and record.type_id.allow_self):
raise ValidationError(
_('Partners cannot have a relation with themselves.')
)
@api.constrains(
'left_partner_id',
'type_id',
'right_partner_id',
'date_ | start',
'date_end',
)
def _check_relation_uniqueness(self):
"""Forbid multiple active relations of the same type between the same
partners
:raises ValidationError: When constraint is violated
"""
# pylint: disable=no-member
# pylint: disable=no-value-for-parameter
for record in self:
domain = [
('type_id', '=', record.type_id.id),
('id', '!=', record.id),
('left_partner_id', '=', record.left_partner_id.id),
('right_partner_id', '=', record.right_partner_id.id),
]
if record.date_start:
domain += [
'|',
('date_end', '=', False),
('date_end', '>=', record.date_start),
]
if record.date_end:
domain += [
'|',
('date_start', '=', False),
('date_start', '<=', record.date_end),
]
if record.search(domain):
raise ValidationError(
_('There is already a similar relation with '
'overlapping dates')
)
|
sysadminmatmoz/pmis | issue_change_request/__manifest__.py | Python | agpl-3.0 | 609 | 0 | # -*- coding: utf-8 -*-
{
'name': 'Project Issue to Change Request',
'v | ersion': '8.0.1.0.3',
'summary': 'Create Change Requests from Project Issues',
'sequence': '19',
'complexity': 'ea | sy',
'author': 'Matmoz d.o.o., '
'Project Expert Team',
'contributors': [
'Matjaž Mozetič <m.mozetic@matmoz.si>',
],
'website': 'http://project.expert',
'category': 'Project Management',
'license': 'AGPL-3',
'data': [
'views/change_request_view.xml'
],
'depends': ['project_issue', 'change_management'],
'installable': False,
}
|
Jdash99/des-inventory-simulation | psim/psim_control.py | Python | mit | 4,034 | 0.000248 | import sys
from PySide.QtGui import QPushButton, QWidget, QVBoxLayout, \
QHBoxLayout, QFormLayout, QComboBox, \
QLineEdit, QLabel, QIntValidator, QIcon
class ControlLayout(QWidget):
"""Widget that stores the controls"""
def __init__(self):
QWidget.__init__(self)
self.layout = QVBoxLayout()
self.form_layout = QFormLayout()
# The products that we want to make available
self.products = ['Producto A']
# Create and fill the combo box to choose the product
self.product = QComboBox(self)
self.product.addItems(self.products)
# Add it to the form layout with a label
self.form_layout.addRow('Producto:', self.product)
# Add policies label and combobox
self.policies = ['Qs', 'Ss', 'RS', 'RSs']
self.policy = QCo | mboBox(self)
self.policy.addItems(self.policies)
self.form_layout.addRow('Politica', self.policy)
# Connect policy button to hide 3rd parameter
self.policy.activated.connect(self.changed_policy)
# Add Parameters
self.parameters = QLabel('', self)
self.form_layout.addRow("&Parametros", self.parameters)
# Parameter 1
self.p1 = QLineEdit(self, QWidget) |
self.p1_label = QLabel("Q")
self.pam1_box = QHBoxLayout()
self.pam1_box.addWidget(self.p1_label)
self.pam1_box.addStretch(1)
self.pam1_box.addWidget(self.p1)
self.form_layout.addRow(self.pam1_box)
# Parameter 2
self.p2 = QLineEdit(self, QWidget)
self.p2_label = QLabel("s")
self.pam2_box = QHBoxLayout()
self.pam2_box.addWidget(self.p2_label)
self.pam2_box.addStretch(1)
self.pam2_box.addWidget(self.p2)
self.form_layout.addRow(self.pam2_box)
# Parameter 3
self.p3 = QLineEdit(self, QWidget)
self.p3.hide()
self.p3_label = QLabel("")
self.pam3_box = QHBoxLayout()
self.pam3_box.addWidget(self.p3_label)
self.pam3_box.addStretch(1)
self.pam3_box.addWidget(self.p3)
self.form_layout.addRow(self.pam3_box)
# Add Periods
self.periods = QLineEdit(self, QWidget)
self.periods_label = QLabel("Periodos")
self.periods_box = QHBoxLayout()
self.periods_box.addWidget(self.periods_label)
self.periods_box.addStretch(1)
self.periods_box.addWidget(self.periods)
self.form_layout.addRow(self.periods_box)
# Add form layout to main layout
self.layout.addLayout(self.form_layout)
# Add stretch to separate the form layout from the button
self.layout.addStretch(1)
# Create a horizontal box layout to hold the button
self.button_box = QHBoxLayout()
# Add stretch to push the button to the far right
#self.button_box.addStretch(1)
# Create the sim button with its caption
self.sim_button = QPushButton('Simular', self)
# Add it to the button box
self.button_box.addWidget(self.sim_button)
# Add the button box to the bottom of the main VBox layout
self.layout.addLayout(self.button_box)
self.setLayout(self.layout)
def changed_policy(self):
"""Changes labels if the policy is changed"""
pol = self.policy.currentText()
if pol in ['Qs', 'Ss', 'RS']:
self.p3_label.setText('')
self.p3.hide()
if pol == 'Qs':
self.p1_label.setText('Q')
self.p2_label.setText('s')
elif pol == 'Ss':
self.p1_label.setText('S')
self.p2_label.setText('s')
elif pol == 'RS':
self.p1_label.setText('R')
self.p2_label.setText('S')
elif pol == 'RSs':
self.p1_label.setText('R')
self.p2_label.setText('S')
self.p3_label.setText('s')
self.p3.show()
|
googleinterns/contextual-adjectives | categorize_adjectives/libraries/bert_embeddings.py | Python | apache-2.0 | 5,343 | 0.003556 | """Contains function for calculating BERT embeddings"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import re
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel
from scipy.spatial.distance import cosine, euclidean
class BertEmbedding(object):
"""Class for calculating embeddings between two texts"""
def __init__(self, bert_model='bert-base-uncased', max_seq_length=50, device='cpu'):
"""Initializing the BERT model"""
self.bert_model = bert_model
self.max_seq_length = max_seq_length
self.device = torch.device("cpu" if device=='cpu' or not torch.cuda.is_available() else "cuda")
n_gpu = torch.cuda.device_count()
self.tokenizer = BertTokenizer.from_pretrained(self.bert_model, do_lower_case=True)
self.model = BertModel.from_pretrained(self.bert_model)
self.model.to(self.device)
if n_gpu > 1:
self.model = torch.nn.DataParallel(self.model)
self.model.eval()
def get_embeddings(self, sentences, layer=-1):
"""Returns embeddings of words/sentences"""
assert isinstance(sentences, list)
for pair in sentences:
assert len(pair) == 1
examples = self._read_examples(sentences)
features = self._convert_examples_to_features(
examples=examples)
unique_id_to_feature = {}
for feature in features:
unique_id_to_feature[feature.unique_id] = feature
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=16)
out_features = []
for input_ids, input_mask, example_indices in eval_dataloader:
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
all_encoder_layers, _ = self.model(input_ids, token_type_ids=None, attention_mask=input_mask)
all_encoder_layers = all_encoder_layers
values = torch.mean(all_encoder_layers[layer], 1)
out_features.append(values.detach().cpu().numpy())
flat_list = [item for sublist in out_features for item in sublist]
return flat_list
def _convert_examples_to_features(self, examples):
"""Generate features of examples"""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = self.tokenizer.tokenize(example.text)
if len(tokens_a) > self.max_seq_length - 2:
tokens_a = tokens_a[0:(self.max_seq_length - 2)]
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self.max_seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == self.max_seq_length
assert len(input_mask) == self.max_seq_length
assert len(input_type_ids) == self.max_seq_length
features.append(
InputFeatures(
unique_id=example.unique_id,
| tokens=tokens,
| input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
def _read_examples(self, inp):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
for a, in inp:
line_a = a.strip()
examples.append(
InputExample(unique_id=unique_id, text=line_a))
unique_id += 1
return examples
class InputExample(object):
"""Input an example"""
def __init__(self, unique_id, text):
self.unique_id = unique_id
self.text = text
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
|
ESOedX/edx-platform | openedx/core/djangoapps/api_admin/widgets.py | Python | agpl-3.0 | 1,985 | 0.003526 | """ Form widget classes """
from __future__ import absolute_import
from django.conf import settings
from django.forms.utils import flatatt
from django.forms.widgets import CheckboxInput
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.markup import HTML, Text
class TermsOfServiceCheckboxInput(CheckboxInput):
""" Renders a checkbox with a label linking to the terms of service. """
def render(self, name, value, attrs=None):
extra_attrs = attrs.copy()
extra_attrs.update({'type': 'checkbox', 'name': name})
final_attrs = self.build_attrs(self.attrs, extra_attrs=extra_attrs)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only | add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
# Translators: link_start and link_end are HTML tags for a link to the terms of service.
# platform_name is the name of this Open edX installation.
label = Text(_(
u'I, and my organization, accept t | he {link_start}{platform_name} API Terms of Service{link_end}.'
)).format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
link_start=HTML(u'<a href="{url}" rel="noopener" target="_blank">').format(
url=reverse('api_admin:api-tos')
),
link_end=HTML('</a>'),
)
html = HTML(u'<input{{}} /> <label class="tos-checkbox-label" for="{id}">{label}</label>').format(
id=final_attrs['id'],
label=label
)
return format_html(html, flatatt(final_attrs))
|
vied12/superdesk | server/superdesk/services.py | Python | agpl-3.0 | 4,269 | 0.001405 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from flask import current_app as app
from eve.defaults import resolve_default_values
from eve.utils import ParsedRequest, config
from eve.methods.common import resolve_document_etag
log = logging.getLogger(__name__)
class BaseService():
'''
Base service for all endpoints, defines the basic implementation
for CRUD datalayer functionality.
'''
datasource = None
def __init__(self, datasource=None, backend=None):
self.backend = backend
self.datasource = datasource
def on_create(self, docs):
pass
def on_created(self, docs):
pass
def on_update(self, updates, original):
pass
def on_updated(self, updates, original):
pass
def on_replace(self, document, original):
pass
def on_replaced(self, document, original):
pass
def on_delete(self, doc):
pass
def on_deleted(self, doc):
pass
def on_fetched(self, doc):
pass
def on_fetched_item(self, doc):
pass
def create(self, docs, **kwargs):
ids = self.backend.create(self.datasource, docs, **kwargs)
return ids
def update(self, id, updates, original):
return self.backend.update(self.datasource, id, updates, original)
def system_update(self, id, updates, original):
return self.backend.system_update(self.datasource, id, updates, original)
def replace(self, id, document, original):
res = self.backend.replace(self.datasource, id, document, original)
return res
def delete(self, lookup):
res = self.backend.delete(self.datasource, lookup)
return res
def find_one(self, req, **lookup):
res = self.backend.find_one(self.datasource, req=req, **lookup)
return res
def get(self, req, lookup):
if req is None:
req = ParsedRequest()
return self.backend.get(self.datasource, req=req, lookup=lookup)
def get_from_mongo(self, req, lookup):
if req is None:
req = ParsedRequest()
return self.backend.get_from_mongo(self.datasource, req=req, lookup=lookup)
def post(self, docs, **kwargs):
for doc in docs:
resolve_default_values(doc, app.config['DOMAIN'][self.datasource]['defaults'])
self.on_create(docs)
resolve_document_etag(docs, self.datasource)
ids = self.create(docs, **kwargs)
self.on_created(docs)
return ids
def patch(self, id, updates):
original = self.find_one(req=None, _id=id)
updated = original.copy()
self.on_update(updates, original)
updated.update(updates)
if config.IF_MATCH:
resolve_document_etag(updated, self.datasource)
updates[config.ETAG] = updated[config.ETAG]
res = self.update(id, updates, original)
self.on_updated(updates, original)
return res
def put(self, id, document):
resolve_default_values(document, app.config['DOMAIN'][self.datasource]['defaults'])
original = self.find_one(req=None, _id=id)
self.on_replace(document, original)
resolve_document_etag(document, self.datasource)
res = self.replace(id, document, original)
self.on_replaced(document, original)
| return res
def delete_action(self, lookup=None): |
if lookup is None:
lookup = {}
if lookup:
doc = self.find_one(req=None, **lookup)
self.on_delete(doc)
res = self.delete(lookup)
if lookup and doc:
self.on_deleted(doc)
return res
def is_authorized(self, **kwargs):
"""
Subclass should override if the resource handled by the service has intrinsic privileges.
:param kwargs: should have properties which help in authorizing the request
:return: False if unauthorized and True if authorized
"""
return True
|
1032231418/python | lesson10/apps/books/publish/__init__.py | Python | apache-2.0 | 3,345 | 0.00129 | # coding=utf8
from django.views.generic import ListView, DetailView, CreateView
from django.db.models import Q
from django.http import Jso | nResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render
from pure_pagination.mixins import PaginationMixin
from django.contrib.auth.mixins import LoginRequiredMixin
from django.conf import | settings
from books.models import Publish, Author, Book
from books.forms import PublishForm
import json
import logging
logger = logging.getLogger('opsweb')
class PublishListView(LoginRequiredMixin, PaginationMixin, ListView):
'''
动作:getlist, create
'''
model = Publish
template_name = "books/publish_list.html"
context_object_name = "publish_list"
paginate_by = 5
keyword = ''
def get_queryset(self):
queryset = super(PublishListView, self).get_queryset()
self.keyword = self.request.GET.get('keyword', '').strip()
if self.keyword:
queryset = queryset.filter(Q(name__icontains=self.keyword) |
Q(address__icontains=self.keyword) |
Q(city__icontains=self.keyword))
return queryset
def get_context_data(self, **kwargs):
context = super(PublishListView, self).get_context_data(**kwargs)
context['keyword'] = self.keyword
return context
def post(self, request):
form = PublishForm(request.POST)
if form.is_valid():
form.save()
res = {'code': 0, 'result': '添加出版商成功'}
else:
# form.errors会把验证不通过的信息以对象的形式传到前端,前端直接渲染即可
res = {'code': 1, 'errmsg': form.errors}
print form.errors
return JsonResponse(res, safe=True)
class PublishDetailView(LoginRequiredMixin, DetailView):
'''
动作:getone, update, delete
'''
model = Publish
template_name = "books/publish_detail.html"
context_object_name = 'publish'
next_url = '/books/publishlist/'
def post(self, request, *args, **kwargs):
pk = kwargs.get('pk')
p = self.model.objects.get(pk=pk)
form = PublishForm(request.POST, instance=p)
if form.is_valid():
form.save()
res = {"code": 0, "result": "更新出版商成功", 'next_url': self.next_url}
else:
res = {"code": 1, "errmsg": form.errors, 'next_url': self.next_url}
return render(request, settings.JUMP_PAGE, res)
# return HttpResponseRedirect(reverse('books:publish_detail',args=[pk]))
def delete(self, request, *args, **kwargs):
pk = kwargs.get('pk')
# 通过出版社对象查所在该出版社的书籍,如果有关联书籍不可以删除,没有关联书籍可以删除
try:
obj = self.model.objects.get(pk=pk)
if not obj.book_set.all():
self.model.objects.filter(pk=pk).delete()
res = {"code": 0, "result": "删除出版商成功"}
else:
res = {"code": 1, "errmsg": "该出版社有关联书籍,请联系管理员"}
except:
res = {"code": 1, "errmsg": "删除错误请联系管理员"}
return JsonResponse(res, safe=True)
|
AndreySibiryakov/coding | py/check_errors_xlsx.py | Python | gpl-3.0 | 3,308 | 0.031741 | from openpyxl import load_workbook
import os
# list of text to search for
keyWordList = ['Resume', 'Label', 'Description', 'ClueText', 'Title', 'QTEtitle']
# default path for docs on my PC for sh8 game xlsx documents
#docDir = "d:/svn/ue3/SH8Game/Production/Dialogs/"
docDir = "d:/svn/ue3/SH8Game/Production/Data/"
#docDir = "d:/sh8/xlsx_python_tests/"
# output for the log file
logFile = 'd:/sh8/xlsx_python_tests/genlog.txt'
# searching for INT column ID
# returns column serial nubmer
def FindBase(sheetName, keyWord):
for col in range(1,50):
findSpokenCoord = sheetName.cell(row = 1, column = col)
findSpokenVal = findSpokenCoord.value
if findSpokenVal == keyWord:
return col
# searching for all localization columns that is present
# returns list of columns serial number
def FindLoc(sheetName, keyWord):
TextColList = []
for col in range(1,100):
findSpokenCoord = sheetName.cell(row = 1, column = col)
findSpokenVal = findSpokenCoord.value
#print findSpokenVal
if findSpokenVal:
if ('.' + keyWord) in findSpokenVal:
TextColList.append(col)
return TextColList
# comparing INT cell content with localization content
# returns string if INT and LOC cell are indentical
# returns string if LOC is empty while INT is not
def FindAndLog(docPath, keyWordList):
# declaring var for storing log
logVal = ''
workBook = load_workbook(docPath)
# for test purposes
print docPath
# obtaining list of all sheets in document
sheetList = workBook.get_sheet_names()
# adding path to log
logVal += docPath + '\n'
# iterating through key words
for keyWord in keyWordList:
# iterating through sheets in document
for sheet in sheetList:
sheetName = workBook[sheet]
intColNum = FindBase(sheetName, keyWord)
locColNumList = FindLoc(sheetName, keyWord)
# checking if INT keyword is present in document
if intColNum:
# even for comments it is enough length
for row in range(4,200):
intRowCoord = sheetName.cell(row = row, column = intColNum)
# obtaining INT cell value
intRowVal = intRowCoord.value
# checking if INT cell is not empty
if intRowVal:
# iterating through LOC columns in list
for col in locColNumList:
locRowCoord = sheetName.cell(row = row, column = col)
# obtaining LOC cell value
locRowVal = locRowCoord.value
# checking whether LOC cell is duplicate of INT
if intRowVal == locRowVal:
#convering non ASCII characters
#locASCII = str(intRowVal).encode('ascii', 'ignore').decode('ascii')
#print intRowVal
logVal += str(locRowCoord) + str(intRowVal) + '\n'
# checking if LOC cell is empty while INT cell is not
elif locRowVal == None:
logVal += str(locRowCoord) + ' is empty\n'
return | logVal
# collecting all .xlsxs from supplied path
genLog = ''
for path, dirs, fileNames in os.walk(docDir):
for file | Name in fileNames:
docPath = os.path.join(path, fileName)
# filtering files except .xlsx
if '.xlsx' in docPath:
# filling log
genLog += FindAndLog(docPath, keyWordList)
# writing and saving the log file
filePath = open(logFile, 'wb')
filePath.write(genLog)
filePath.close()
|
wdingx/pan-genome-analysis | panX.py | Python | gpl-3.0 | 17,982 | 0.019686 | #!/usr/bin/env python
import argparse
import os, sys, time
panX_script_dir= os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0,panX_script_dir+'/scripts/')
from pangenome_computation import pangenome
from sf_miscellaneous import times, check_dependency
'''
panX clusters genes from a set of microbial genomes into orthologous clusters
and exports alignments, phylogenies, and meta data for interactive web visualization
and powerful pan-genomic data exploration.
USAGE EXAMPLE: ./panX -fn data/TestSet -sl TestSet-RefSeq.txt 1>TestSet.log 2>TestSet.err
For help, type:
./panX -h
'''
parser = argparse.ArgumentParser(description=\
'panX: Software for computing core and pan-genome from a set of genome sequences.'
' The results will be exported as json files for visualization in the browser.',
usage='./%(prog)s'+' -h (help)')
parser.add_argument('-fn', '--folder_name', type = str, required=True,
help='the absolute path for project folder ', metavar='')
parser.add_argument('-sl', '--species_name', type = str, required=True,
help='species name as prefix for some temporary folders (e.g.: P_aeruginosa)', metavar='')
parser.add_argument('-ngbk', '--gbk_present', action='store_false',
help='use nucleotide/amino acid sequence files (fna/faa) when no genBank files given (this option does not consider annotations)')
parser.add_argument('-st', '--steps', nargs='+', type = int, default = ['all'],
help='run specific steps or run all steps by default', metavar='')
parser.add_argument('-mo', '--metainfo_organism', action='store_true',
help='add organism information in metadata table.')
parser.add_argument('-mr', '--metainfo_reconcile', action='store_true',
help='use reconciled metadata (redundancy removed) instead of original metadata.')
parser.add_argument('-rt', '--raxml_max_time', type = int, default = 30,
help='RAxML tree optimization: maximal runing time (minutes, default:30min)' , metavar='')
parser.add_argument('-t', '--threads', type = int, default = 1,
help='number of threads', metavar='')
parser.add_argument('-v', '--version', action='version', version='panX analysis v1.5.1')
#/*==================================
# clustering
#==================================*/
parser.add_argument('-bp', '--blast_file_path', type = str, default = 'none',
help='the absolute path for blast result (e.g.: /path/blast.out)' , metavar='')
parser.add_argument('-rp', '--roary_file_path', type = str, default = 'none',
help='the absolute path for roary result (e.g.: /path/roary.out)' , metavar='')
parser.add_argument('-op', '--orthofinder_file_path', type = str, default = 'none',
help='the absolute path for orthofinder result (e.g.: /path/orthofinder.out)' , metavar='')
parser.add_argument('-otp', '--other_tool_fpath', type = str, default = 'none',
help='the absolute path for result from other orthology inference t | ool (e.g.: /path/other_tool.out)' , metavar='')
parser.add_argument('-mi', '--metainfo_fpath', type = str, default = 'none',
help='the absolute path for meta_information file (e.g.: /path/meta.out)' , metavar='')
p | arser.add_argument('-dmp', '--diamond_path', type = str, default = '',
help='alternative diamond path provided by user', metavar='')
parser.add_argument('-dme', '--diamond_evalue', type = str, default = '0.001',
help='default: e-value threshold below 0.001', metavar='')
parser.add_argument('-dmt', '--diamond_max_target_seqs', type = str, default = '600',
help='Diamond: maximum number of target sequences per query\
Estimation: #strain * #max_duplication (50*10=500)', metavar='')
parser.add_argument('-dmi', '--diamond_identity', type = str, default = '0',
help='Diamond: sequence identity threshold to report an alignment. Default: no restriction (0)', metavar='')
parser.add_argument('-dmqc', '--diamond_query_cover', type = str, default = '0',
help='Diamond: query sequence coverage threshold to report an alignment. Default: no restriction (0)', metavar='')
parser.add_argument('-dmsc', '--diamond_subject_cover', type = str, default = '0',
help='Diamond: subject sequence coverage threshold to report an alignment. Default: no restriction (0)', metavar='')
parser.add_argument('-dmdc', '--diamond_divide_conquer', action='store_true',
help='running diamond alignment in divide-and-conquer(DC) algorithm for large dataset')
parser.add_argument('-dcs', '--subset_size', type = int, default = 50,
help='subset_size (number of strains in a subset) for divide-and-conquer(DC) algorithm. Default:50',\
metavar='')
parser.add_argument('-dmsi', '--diamond_identity_subproblem', type = str, default = '90',
help='Diamond divide-and-conquer subproblem: sequence identity threshold to report an alignment.', metavar='')
parser.add_argument('-dmsqc', '--diamond_query_cover_subproblem', type = str, default = '90',
help='Diamond divide-and-conquer subproblem: query sequence coverage threshold to report an alignment', metavar='')
parser.add_argument('-dmssc', '--diamond_subject_cover_subproblem', type = str, default = '90',
help='Diamond divide-and-conquer subproblem: subject sequence coverage threshold to report an alignment', metavar='')
parser.add_argument('-imcl', '--mcl_inflation', type = float, default = 1.5,
help='MCL: inflation parameter (this parameter affects granularity) ', metavar='')
parser.add_argument('-bmt', '--blastn_RNA_max_target_seqs', type = str, default = '1000',
help='Blastn on RNAs: the maximum number of target sequences per query\
Estimation: #strain * #max_duplication', metavar='')
#/*=======================================
# post-processing
#=======================================*/
parser.add_argument('-np', '--disable_cluster_postprocessing', action='store_true',
help='disable postprocessing (split overclustered genes and paralogs, and cluster unclustered genes)')
parser.add_argument('-nsl', '--disable_long_branch_splitting', action='store_true',
help='disable splitting long branch')
parser.add_argument('-rna', '--enable_RNA_clustering', action='store_true',
help='cluster rRNAs')
## split tree via breaking up long branches (resolving over-clustering)
#parser.add_argument('-sf', '--split_long_branch_factor', type = float, default = 3.0,
# help='use (0.1+3.0*core_diversity)/(1+3.0*core_diversity) to decide split_long_branch_cutoff',metavar='')
parser.add_argument('-fcd', '--factor_core_diversity', type = float, default = 2.0,
help='default: factor used to refine raw core genome diversity, \
apply (0.1+2.0*core_diversity)/(1+2.0*core_diversity) to decide split_long_branch_cutoff', metavar='')
parser.add_argument('-slb', '--split_long_branch_cutoff', type = float, default = 0.0,
help='split long branch cutoff provided by user (by default: 0.0 as not given):',metavar='')
## split paralogy
parser.add_argument('-pep', '--explore_paralog_plot', action='store_true',
help='default: not plot paralog statistics')
parser.add_argument('-pfc', '--paralog_frac_cutoff', type = float, default = 0.33,
help='fraction of strains required for splitting paralogy. Default: 0.33', metavar='')
parser.add_argument('-pbc', '--paralog_branch_cutoff', type = float, default = 0.0,
help='branch_length cutoff used in paralogy splitting', metavar='')
## resolve peaks (unclustered records)
parser.add_argument('-ws', '--window_size_smoothed', type = int, default = 5,
help='postprocess_unclustered_genes: window size for smoothed cluster length distribution',
metavar='')
parser.add_argument('-spr', '--strain_proportion', type = float, default = 0.3,
help='postprocess_unclustered_genes: strain proportion', metavar='')
parser.add_argument('-ss', '--sigma_scale', type = int, default = 3,
help='postprocess_unclustered_genes: sigma scale', metavar='')
## core genome cutoff
parser.add_argument('-cg', '--core_genome_threshold', type = float, default = 1.0,
help='percentage of strains used to decide whether a gene is core.\
Default: 1.0 for strictly core gene; < 1.0 for soft core genes',
metavar='')
## core gene strain constraint
parser.add_argument('-csf', '-- |
SeleniumHQ/selenium | py/test/selenium/webdriver/chrome/chrome_network_emulation_tests.py | Python | apache-2.0 | 1,447 | 0 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import WebDriverException
from selenium.webdriver import Chrome
def test_network_conditions_emulation():
    """End-to-end check of ChromeDriver's network-conditions emulation API.

    Sets throttled conditions, reads them back, then verifies that deleting
    them makes a subsequent read raise. Requires a local Chrome binary.
    """
    driver = Chrome()
    driver.set_network_conditions(
        offline=False,
        latency=56,  # additional latency (ms)
        throughput=789)
    conditions = driver.get_network_conditions()
    assert conditions['offline'] is False
    assert conditions['latency'] == 56
    # A single `throughput` value is applied to both directions.
    assert conditions['download_throughput'] == 789
    assert conditions['upload_throughput'] == 789
    driver.delete_network_conditions()
    # Reading conditions after deleting them is an error.
    with pytest.raises(WebDriverException):
        driver.get_network_conditions()
|
prasannav7/ggrc-core | src/ggrc/migrations/versions/20160412134448_11cee57a4149_add_assessor_reminder_notification.py | Python | apache-2.0 | 1,996 | 0.002505 | # Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add assessors reminder notification
Create Date: 2016-04-12 13:44:48.265193
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.sql import select
from alembic import op
# revision identifiers, used by Alembic.
revision = '11cee57a4149'
down_revision = '50c374901d42'
# Lightweight table stubs (alembic convention): only the columns this
# migration touches are declared.
notifications_table = table(
    'notifications',
    column('id', sa.Integer),
    column('notification_type_id', sa.Integer),
)

notification_types_table = table(
    'notification_types',
    column('id', sa.Integer),
    column('name', sa.String),
    column('description', sa.Text),
    column('template', sa.String),
    column('instant', sa.Boolean),
    column('advance_notice', sa.Integer)
)
def upgrade():
    """Inserts new notification type"""
    assessor_reminder = {
        "name": "assessment_assessor_reminder",
        "description": ("Notify all Assessors that they should take a look "
                        "at the assessment."),
        "template": "",
        "advance_notice": 0,
        "instant": False,
    }
    op.bulk_insert(notification_types_table, [assessor_reminder])
def downgrade():
    """First removes notifications and then removes notification type"""
    # Subquery selecting the id of the type about to be deleted.
    reminder_type_id = select([notification_types_table.c.id]).where(
        notification_types_table.c.name == "assessment_assessor_reminder")
    # Delete dependent notifications first to preserve referential integrity.
    op.execute(
        notifications_table.delete().where(
            notifications_table.c.notification_type_id == reminder_type_id))
    op.execute(
        notification_types_table.delete().where(
            notification_types_table.c.name == "assessment_assessor_reminder"))
|
hail-is/hail | hail/python/hailtop/hailctl/dataproc/describe.py | Python | mit | 281 | 0.003559 | from .. import describe
import sys
# Re-export the shared argument parser so existing CLI wiring keeps working.
init_parser = describe.init_parser
async def main(*args, **kwargs):
    # Deprecated alias: forward everything to the shared describe
    # implementation unchanged.
    await describe.main_after_parsing(*args, **kwargs)
# Emitted at import time so every use of the deprecated entry point warns.
print('!!! `hailctl dataproc describe` is DEPRECATED. Please use `hailctl describe` instead. !!!', file=sys.stderr)
CCI-MOC/GUI-Backend | core/migrations/0007_create_allocation_strategy_and_behaviors.py | Python | apache-2.0 | 5,962 | 0.004193 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def do_nothing(apps, schema_editor):
    """Reverse no-op used as the backward step of the data migration."""
def create_new_behaviors_and_strategies(apps, schema_editor):
    """Seed the default allocation behaviors and attach a strategy to every
    provider."""
    CountingBehavior = apps.get_model("core", "CountingBehavior")
    RefreshBehavior = apps.get_model("core", "RefreshBehavior")
    RulesBehavior = apps.get_model("core", "RulesBehavior")
    Provider = apps.get_model("core", "Provider")
    AllocationStrategy = apps.get_model("core", "AllocationStrategy")

    # Strategy #1 - count from the first of the month, refresh on the first.
    default_counting, _ = CountingBehavior.objects.get_or_create(
        name="1 Month - Calendar Window")
    default_refresh, _ = RefreshBehavior.objects.get_or_create(
        name="First of the Month")

    # Strategy #2 - count for one month starting at (and refreshing on) the
    # anniversary date. Created so the rows exist; not attached by default.
    CountingBehavior.objects.get_or_create(
        name="1 Month - Calendar Window - Anniversary")
    RefreshBehavior.objects.get_or_create(name="Anniversary Date")

    # Rules applied to every provider's default strategy.
    default_rules = [
        RulesBehavior.objects.get_or_create(name=rule_name)[0]
        for rule_name in ("Ignore non-active status", "Multiply by Size CPU")
    ]

    for provider in Provider.objects.all():
        strategy, _ = AllocationStrategy.objects.get_or_create(
            provider=provider, counting_behavior=default_counting)
        strategy.refresh_behaviors.add(default_refresh)
        for rule in default_rules:
            strategy.rules_behaviors.add(rule)
    return
class Migration(migrations.Migration):
    """Create the allocation-strategy tables and seed the default behaviors."""

    dependencies = [
        ('core', '0006_change_fields_as_not_null'),
    ]

    operations = [
        migrations.CreateModel(
            name='AllocationStrategy',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
            ],
            options={'db_table': 'allocation_strategy'},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CountingBehavior',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={'db_table': 'counting_behavior'},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RefreshBehavior',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={'db_table': 'refresh_behavior'},
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RulesBehavior',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
            ],
            options={'db_table': 'rules_behavior'},
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='allocationstrategy',
            name='counting_behavior',
            field=models.ForeignKey(to='core.CountingBehavior'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='allocationstrategy',
            name='provider',
            field=models.OneToOneField(to='core.Provider'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='allocationstrategy',
            name='refresh_behaviors',
            field=models.ManyToManyField(to='core.RefreshBehavior',
                                         null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='allocationstrategy',
            name='rules_behaviors',
            field=models.ManyToManyField(to='core.RulesBehavior',
                                         null=True, blank=True),
            preserve_default=True,
        ),
        # Populate the new tables (reverse step is a no-op).
        migrations.RunPython(create_new_behaviors_and_strategies, do_nothing),
    ]
|
simontakite/sysadmin | pythonscripts/programmingpython/Internet/Email/PyMailGui/wraplines.py | Python | gpl-2.0 | 4,750 | 0.004842 | """
###############################################################################
split lines on fixed columns or at delimiters before a column;
see also: related but different textwrap standard library module (2.3+);
4E caveat: this assumes str; supporting bytes might help avoid some decodes;
###############################################################################
"""
defaultsize = 80


def wrapLinesSimple(lineslist, size=defaultsize):
    """Split every line at fixed column ``size``; no delimiter analysis."""
    wrapped = []
    for text in lineslist:
        # Slice into fixed-width chunks. An empty line still contributes
        # one (empty) chunk, matching the original do-while behavior.
        chunks = [text[pos:pos + size] for pos in range(0, len(text), size)]
        wrapped.extend(chunks or [''])
    return wrapped
def wrapLinesSmart(lineslist, size=defaultsize, delimiters='.,:\t '):
    """Wrap each line at the rightmost delimiter left of column ``size``.

    Searches back from ``size`` toward ``size // 2`` for a delimiter; if
    none is found there, falls back to a hard split at ``size``.
    """
    wraplines = []
    for line in lineslist:
        while True:
            if len(line) <= size:
                wraplines += [line]
                break
            else:
                # Look for a delimiter in the right half of the window.
                for look in range(size-1, size // 2, -1):
                    if line[look] in delimiters:
                        # Keep the delimiter with the leading fragment.
                        front, line = line[:look+1], line[look+1:]
                        break
                else:
                    # No delimiter found: hard split at the column.
                    front, line = line[:size], line[size:]
                wraplines += [front]
    return wraplines
###############################################################################
# common use case utilities
###############################################################################
def wrapText1(text, size=defaultsize):
    """Wrap text read all at once, keeping the original line-break
    structure (better for line-based text such as mail)."""
    # Wrap each original line on delimiters, then reassemble.
    return '\n'.join(wrapLinesSmart(text.split('\n'), size))
def wrapText2(text, size=defaultsize):
    """Treat the whole text as one long line and wrap it on delimiters.

    More uniform across lines, but loses the original line structure.
    Returns a list of lines; the caller reassembles them.
    """
    flattened = text.replace('\n', ' ')
    return wrapLinesSmart([flattened], size)
def wrapText3(text, size=defaultsize):
    """Like wrapText2, but rejoin into one newline-terminated string."""
    return '\n'.join(wrapText2(text, size)) + '\n'
def wrapLines1(lines, size=defaultsize):
    """Wrap lines that still carry their trailing newline characters."""
    stripped = [ln[:-1] for ln in lines]            # drop trailing '\n'
    wrapped = wrapLinesSmart(stripped, size)        # wrap on delimiters
    return [ln + '\n' for ln in wrapped]            # restore newlines
def wrapLines2(lines, size=defaultsize):
    """Concatenate ``lines`` and wrap the result as one long line.

    More uniform across lines, but loses the original line structure.
    """
    text = ''.join(lines)
    # Bug fix: ``size`` was previously dropped here (wrapText2(text)),
    # so callers passing a custom width were silently ignored.
    wrapped = wrapText2(text, size)
    return [(line + '\n') for line in wrapped]
###############################################################################
# self-test
###############################################################################
if __name__ == '__main__':
    # Self-test: exercise the wrappers on pathological inputs — very long
    # delimiter-free lines, empty lines, lone spaces, and short text.
    lines = ['spam ham ' * 20 + 'spam,ni' * 20,
             'spam ham ' * 20,
             'spam,ni' * 20,
             'spam ham.ni' * 20,
             '',
             'spam'*80,
             ' ',
             'spam ham eggs']
    sep = '-' * 30
    print('all', sep)
    for line in lines: print(repr(line))
    print('simple', sep)
    for line in wrapLinesSimple(lines): print(repr(line))
    print('smart', sep)
    for line in wrapLinesSmart(lines): print(repr(line))
    print('single1', sep)
    for line in wrapLinesSimple([lines[0]], 60): print(repr(line))
    print('single2', sep)
    for line in wrapLinesSmart([lines[0]], 60): print(repr(line))
    print('combined text', sep)
    for line in wrapLines2(lines): print(repr(line))
    print('combined lines', sep)
    print(wrapText1('\n'.join(lines)))
    # Wrapping must never add or drop characters.
    assert ''.join(lines) == ''.join(wrapLinesSimple(lines, 60))
    assert ''.join(lines) == ''.join(wrapLinesSmart(lines, 60))
    print(len(''.join(lines)), end=' ')
    print(len(''.join(wrapLinesSimple(lines))), end=' ')
    print(len(''.join(wrapLinesSmart(lines))), end=' ')
    print(len(''.join(wrapLinesSmart(lines, 60))), end=' ')
    input('Press enter')  # pause if clicked
|
chrisfroe/readdy | wrappers/python/src/python/readdy/examples/chain_decay.py | Python | lgpl-3.0 | 4,956 | 0.002422 | # coding=utf-8
# Copyright © 2016 Computational Molecular Biology Group,
# Freie Universität Berlin (GER)
#
# This file is part of ReaDDy.
#
# ReaDDy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
"""
Created on 21.06.17
@author: clonker
"""
from __future__ import print_function
from contextlib import closing
import numpy as np
import readdy._internal.readdybinding.api.top as top
import readdy._internal.readdybinding.common.io as io
import readdy._internal.readdybinding.common as common
from readdy._internal.readdybinding.api import KernelProvider
from readdy._internal.readdybinding.api import ParticleTypeFlavor
from readdy._internal.readdybinding.api import Simulation
from readdy.util import platform_utils
class ChainDecay(object):
    """ReaDDy demo: a topology chain that randomly splits at its edges and
    whose single-particle fragments decay into plain "B" particles."""

    def __init__(self, kernel, time_step):
        # Load the ReaDDy kernel plugins before the simulation is created.
        self.kernel_provider = KernelProvider.get()
        self.kernel_provider.load_from_dir(platform_utils.get_readdy_plugin_dir())
        self.kernel = kernel
        self.time_step = time_step

    def _get_split_reaction(self):
        """Topology reaction removing a random edge (chain fission)."""
        def reaction_function(topology):
            recipe = top.Recipe(topology)
            if topology.get_n_particles() > 1:
                edge = np.random.randint(0, topology.get_n_particles() - 1)
                recipe.remove_edge(edge, edge + 1)
            return recipe

        def rate_function(topology):
            # Rate grows with chain length; singletons never split.
            if topology.get_n_particles() > 1:
                return float(topology.get_n_particles()) / 5.
            else:
                return .0

        fun1 = top.ReactionFunction(reaction_function)
        fun2 = top.RateFunction(rate_function)
        reaction = top.TopologyReaction(fun1, fun2)
        reaction.roll_back_if_invalid()
        reaction.create_child_topologies_after_reaction()
        return reaction

    def _get_decay_reaction(self, typeidb):
        """Topology reaction turning a lone topology particle into type B."""
        def reaction_function(topology):
            recipe = top.Recipe(topology)
            if topology.get_n_particles() == 1:
                recipe.change_particle_type(0, typeidb)
            return recipe

        def rate_function(topology):
            # Decay effectively happens within one integration step.
            return 1./self.time_step if topology.get_n_particles() == 1 else 0

        fun1, fun2 = top.ReactionFunction(reaction_function), top.RateFunction(rate_function)
        reaction = top.TopologyReaction(fun1, fun2)
        reaction.raise_if_invalid()
        reaction.create_child_topologies_after_reaction()
        return reaction

    def run(self, time_steps, out_file):
        """Build the 50-bead chain, attach the reactions, and simulate,
        writing the flat trajectory to ``out_file``."""
        sim = Simulation()
        sim.set_kernel(self.kernel)
        sim.box_size = common.Vec(60, 20, 20)
        sim.periodic_boundary = [True, True, True]

        typeid_b = sim.register_particle_type("B", 1.0, 1.0, ParticleTypeFlavor.NORMAL)
        sim.register_particle_type("Topology A", .5, .5, ParticleTypeFlavor.TOPOLOGY)
        sim.register_potential_harmonic_repulsion("Topology A", "Topology A", 10)
        sim.register_potential_harmonic_repulsion("Topology A", "B", 10)
        sim.register_potential_harmonic_repulsion("B", "B", 10)
        sim.configure_topology_bond_potential("Topology A", "Topology A", 10, 1.)
        sim.configure_topology_angle_potential("Topology A", "Topology A", "Topology A", 10, np.pi)
        # sim.configure_topology_dihedral_potential("Topology A", "Topology A", "Topology A", "Topology A", 1, 1, -np.pi)

        # Straight chain of 50 beads spaced 1 apart, centered in the box.
        n_elements = 50.
        particles = [sim.create_topology_particle("Topology A", common.Vec(-25. + i, 0, 0))
                     for i in range(int(n_elements))]
        topology = sim.add_topology(particles)
        for i in range(int(n_elements - 1)):
            topology.get_graph().add_edge(i, i + 1)
        topology.add_reaction(self._get_decay_reaction(typeid_b))
        topology.add_reaction(self._get_split_reaction())

        traj_handle = sim.register_observable_flat_trajectory(1)
        with closing(io.File(out_file, io.FileAction.CREATE, io.FileFlag.OVERWRITE)) as f:
            traj_handle.enable_write_to_file(f, u"", 50)
            sim.run_scheme_readdy(True)\
                .evaluate_topology_reactions()\
                .write_config_to_file(f)\
                .configure_and_run(time_steps, self.time_step)

        print("currently %s topologies" % len(sim.current_topologies()))
if __name__ == '__main__':
    # 10k steps of 1 ms each; trajectory written to out.h5.
    simulation = ChainDecay("SingleCPU", .001)
    simulation.run(10000, "out.h5")
axinging/chromium-crosswalk | third_party/WebKit/LayoutTests/http/tests/websocket/duplicated-headers_wsh.py | Python | bsd-3-clause | 754 | 0.001326 | from mod_pywebsocket import handshake
from mod_pywebsocket.handshake.hybi import compute_accept
def web_socket_do_extra_handshake(request):
    """Send a hand-rolled handshake response containing duplicated and
    case-varying headers, so the client's header handling can be tested."""
    message = 'HTTP/1.1 101 Switching Protocols\r\n'
    message += 'Upgrade: websocket\r\n'
    message += 'Connection: Upgrade\r\n'
    message += 'Sec-WebSocket-Accept: %s\r\n' % compute_accept(request.headers_in['Sec-WebSocket-Key'])[0]
    # Deliberately duplicated header names (including a case variant).
    message += 'foo: bar, baz\r\n'
    message += 'foo: hoge\r\n'
    message += 'FOO: FUGA\r\n'
    message += 'xxx: yyy\r\n'
    message += '\r\n'
    request.connection.write(message)
    # Prevents pywebsocket from sending its own handshake message.
    raise handshake.AbortedByUserException('Abort the connection')
def web_socket_transfer_data(request):
    """No data phase: the handshake above always aborts the connection."""
|
steakwipe/ytdl-namer | mp3gen.py | Python | gpl-3.0 | 1,558 | 0.005777 | #!/usr/bin/python
# -*- coding: utf-8 -*- #
# This would be a lot easier with a shellscript.
# Will probably just do this in bash, once I make this work.
# In this case we're doing it for educational purposes.
# Expect future revisions to be faster and more efficient.
# Started with working code, broke it to fit my needs.
# Makes me very sa | d, and way more work to figure out than ls *.mp3
# but at least it's not full on regex? :p
# by steakwipe with way too much help from beatsteak
#importing old soup for good luck
import os
#yt variable needed later
# Base URL for rebuilding video links (was an unquoted bare URL: SyntaxError).
yt = 'https://youtu.be/'
mp3list = '/home/steakwipe/git/ytdl-namer'  # i'd like this to be a runtime option later
# let's look around here for some mp3s
def mp3gen():
    """Yield the path of every .mp3 file under the current directory."""
    for dirpath, _dirnames, filenames in os.walk('.'):
        for name in filenames:
            if os.path.splitext(name)[1] == ".mp3":
                yield os.path.join(dirpath, name)
# next we are attempting to process all.mp3 files in the dir
# to isolate the part of teh filename that is for YT.
# pretty much dies right away, but i basically did this
# in a python console with a single file.
# splitext, grab the first piece, then trim off the last
# 11 characters. Should result in NVEzFqKGrXY or something.
# broke as fuck. hopefully i've at least got the right idea.
# this'll need to chew thru hundreds of mp3s at a time
# and pushing the output youtube url's back in as id3 tags.
# Print a YouTube URL for each mp3 whose filename ends in an 11-character
# video id, e.g. "My Song-NVEzFqKGrXY.mp3" -> https://youtu.be/NVEzFqKGrXY
# (the original loop split the literal string 'mp3file' and sliced the
# return value of print(), which is None — it could never run).
for mp3file in mp3gen():
    stem = os.path.splitext(os.path.basename(mp3file))[0]
    video_id = stem[-11:]
    print(yt + video_id)
|
manthey/girder | girder/utility/config.py | Python | apache-2.0 | 2,764 | 0.000724 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os

import cherrypy
import six

from girder.constants import PACKAGE_DIR
def _mergeConfig(filename):
    """
    Load `filename` into the cherrypy config.
    Also, handle global options by putting them in the root.
    """
    cherrypy._cpconfig.merge(cherrypy.config, filename)
    # When in Sphinx, cherrypy may be mocked and returning None
    global_config = cherrypy.config.pop('global', {}) or {}
    for option, value in six.viewitems(global_config):
        cherrypy.config[option] = value
def _loadConfigsByPrecedent():
    """
    Load configuration in reverse order of precedent.
    """
    candidates = [os.path.join(PACKAGE_DIR, 'conf', 'girder.dist.cfg')]
    if 'GIRDER_TEST_DB' not in os.environ:
        # Skip the local config while running the test suite.
        candidates.append(os.path.join(PACKAGE_DIR, 'conf', 'girder.local.cfg'))
    candidates.append(os.path.join('/etc', 'girder.cfg'))
    candidates.append(os.path.join(os.path.expanduser('~'), '.girder', 'girder.cfg'))
    if 'GIRDER_CONFIG' in os.environ:
        candidates.append(os.environ['GIRDER_CONFIG'])
    # Later files override earlier ones when merged.
    for path in candidates:
        if os.path.exists(path):
            _mergeConfig(path)
def loadConfig():
    """Load configuration files, then apply environment-variable overrides."""
    _loadConfigsByPrecedent()

    env = os.environ
    if 'GIRDER_PORT' in env:
        cherrypy.config['server.socket_port'] = int(env['GIRDER_PORT'])
    if 'GIRDER_MONGO_URI' in env:
        if 'database' not in cherrypy.config:
            cherrypy.config['database'] = {}
        cherrypy.config['database']['uri'] = env.get('GIRDER_MONGO_URI')
    if 'GIRDER_TEST_DB' in env:
        # Dots are not valid in Mongo database names used for testing.
        cherrypy.config['database']['uri'] = (
            env['GIRDER_TEST_DB'].replace('.', '_'))
def getConfig():
    """Return the global configuration, loading it on first access."""
    if 'database' not in cherrypy.config:
        loadConfig()
    # cherrypy may be mocked (e.g. under Sphinx) and return None here.
    return cherrypy.config or {}
|
t-brandt/acorns-adi | centroid/finecenter.py | Python | bsd-2-clause | 4,176 | 0.00431 | #!/usr/bin/env python
#
# Original filename: finecenter.py
#
# Author: Tim Brandt
# Email: tbrandt@astro.princeton.edu
# Date: May 2012
#
# Summary: Interactively refine the centroid of an image sequence
#
#from speckle_centroid import speckle_centroid
from easygui import *
from pylab import *
import pyfits as pyf
def finecenter(flux, objname, output_dir):
    """
    function finecenter interactively refines the centroid of an image
    sequence.  The routine averages a sequence of frames, plots the
    central 80x80 pixels (0.8x0.8 arcseconds for HiCIAO) and labels
    the center.  The user then inputs the offset, and, when satisfied,
    clicks 'OK'.  The final image is saved in the output directory.

    The function takes three inputs:
    1. A 3D flux array; the first index should run over frame number
    2. The object name, for naming the output file.  This should be
       a string.
    3. The output directory.  This should be a string, and the directory
       should exist and have write permissions.

    The function returns the user-determined offset in the centroid as
    [y_offset, x_offset].
    """

    ################################################################
    # Extract, plot the central portion of the flux array.  Try to
    # centroid between pairs of speckles (local maxima in intensity)
    # to form a guess as to the absolute centroid.  This doesn't
    # always improve things; the user will soon check interactively.
    ################################################################

    # NOTE(review): despite the docstring, `flux` is indexed as a 2D
    # image below (flux.shape -> dimy, dimx) — confirm against callers.
    dimy, dimx = flux.shape
    #y, x = speckle_centroid('', flux, center=[dimy // 2, dimx // 2])
    y, x = [dimy // 2, dimx // 2]
    dimy, dimx = flux.shape
    # Half-width of the displayed cutout (at most 40 pixels).
    di = min(40, dimy // 2 - 1, dimx // 2 - 1)
    subarr = flux[dimy // 2 - di:dimy // 2 + di + 1,
                  dimx // 2 - di:dimx // 2 + di + 1]
    grid = np.arange(di * 2 + 1)
    grid_y, grid_x = np.meshgrid(grid, grid)
    # Accumulated user offset, refined each pass through the loop.
    yc, xc = [0., 0.]

    while 1:

        ################################################################
        # Plot the central part of the image with bullseye-type
        # annotations until the user indicates he/she is satisfied.
        ################################################################

        # Radial distance from the current center guess, for the rings.
        r = np.sqrt((grid_y - di - y + 100 - yc)**2 +
                    (grid_x - di - x + 100 - xc)**2)
        figure(figsize=(8,8))
        # sqrt stretch to bring out faint structure.
        imshow(np.sqrt(subarr), interpolation='bilinear', origin='lower')
        contour(grid_x, grid_y, r, [4, 4, 15, 25, 35],
                linewidths=(4, 1, 3, 3, 3,),
                colors=('k', 'm', 'k', 'k', 'k'),
                linestyles=('solid', 'solid', 'dashed', 'dashed', 'dashed'))
        plot([di + xc + x - 100], [di + y + yc - 100],
             color='y', marker='+', markersize=8, mew=2)
        axis('off')
        savefig(output_dir + '/' + objname + '_center_verify.png')
        show(block=False)
        clf()

        shift = enterbox(msg='Check/refine the pipeline absolute center.\n' +
                         'Enter an offset to apply in the format dx, dy.\n' +
                         'Graphic has dimensions ' + str(subarr.shape[0]) +
                         ' x ' + str(subarr.shape[1]) + '.\n' +
                         'Click OK to keep the current center.',
                         title='Fine Centroiding',
                         default=str(xc) + ',' + str(yc))
        try:
            yc_tmp = float(shift.split(',')[1])
            xc_tmp = float(shift.split(',')[0])
            if yc_tmp == yc and xc_tmp == xc:
                # Unchanged offset: ask for final confirmation.
                if ynbox(msg='Keep the center shown?',
                         title='Fine Centroiding'):
                    break
            else:
                yc, xc = [yc_tmp, xc_tmp]
        except:
            # Unparseable input (or Cancel): prompt again.
            msgbox(msg='Invalid format, please try again.',
                   title='Fine Centroiding')

    ################################################################
    # Return the user-determined offset.
    ################################################################

    return [yc + y - 100, xc + x - 100]
|
ichuang/sympy | sympy/printing/pretty/stringpict.py | Python | bsd-3-clause | 17,143 | 0.003733 | """Prettyprinter by Jurjen Bos.
(I hate spammers: mail me at pietjepuk314 at the reverse of ku.oc.oohay).
All objects have a method that create a "stringPict",
that can be used in the str method for pretty printing.
Updates by Jason gedge (email <my last name> at cs mun ca)
- terminal_string() method
- minor fixes and changes (mostly to prettyForm)
TODO:
- Allow left/center/right alignment options for above/below and
top/center/bottom alignment options for left/right
"""
from pretty_symbology import hobj, vobj, xsym, pretty_use_unicode
class stringPict(object):
"""An ASCII picture.
The pictures are represented as a list of equal length strings.
"""
#special value for stringPict.below
LINE = 'line'
def __init__(self, s, baseline=0):
"""Initialize from string.
Multiline strings are centered.
"""
#picture is a string that just can be printed
self.picture = stringPict.equalLengths(s.splitlines())
#baseline is the line number of the "base line"
self.baseline = baseline
self.binding = None
@staticmethod
def equalLengths(lines):
# empty lines
if not lines:
return ['']
width = max(len(line) for line in lines)
return [line.center(width) for line in lines]
    def height(self):
        """The height of the picture in characters (number of rows)."""
        return len(self.picture)
    def width(self):
        """The width of the picture in characters (all rows are equal)."""
        return len(self.picture[0])
    @staticmethod
    def next(*args):
        """Put a string of stringPicts next to each other.
        Returns string, baseline arguments for stringPict.
        """
        #convert everything to stringPicts
        # NOTE: basestring is Python 2 only; plain strings get baseline 0.
        objects = []
        for arg in args:
            if isinstance(arg, basestring): arg = stringPict(arg)
            objects.append(arg)

        #make a list of pictures, with equal height and baseline
        newBaseline = max(obj.baseline for obj in objects)
        # Deepest extent hanging below any picture's baseline.
        newHeightBelowBaseline = max(
            obj.height()-obj.baseline
            for obj in objects)
        newHeight = newBaseline + newHeightBelowBaseline

        pictures = []
        for obj in objects:
            oneEmptyLine = [' '*obj.width()]
            # Pad above so the baselines align, and below so all
            # pictures reach the same total height.
            basePadding = newBaseline-obj.baseline
            totalPadding = newHeight-obj.height()
            pictures.append(
                oneEmptyLine * basePadding +
                obj.picture +
                oneEmptyLine * (totalPadding-basePadding))
        # Concatenate the pictures row by row.
        result = [''.join(lines) for lines in zip(*pictures)]
        return '\n'.join(result), newBaseline
    def right(self, *args):
        r"""Put pictures next to this one.
        Returns string, baseline arguments for stringPict.
        (Multiline) strings are allowed, and are given a baseline of 0.

        Examples
        ========

        >>> from sympy.printing.pretty.stringpict import stringPict
        >>> print stringPict("10").right(" + ",stringPict("1\r-\r2",1))[0]
             1
        10 + -
             2

        """
        # Delegates to next() with self as the leftmost picture.
        return stringPict.next(self, *args)
    def left(self, *args):
        """Put pictures (left to right) at left.
        Returns string, baseline arguments for stringPict.
        """
        # Same as next(), with self appended as the rightmost picture.
        return stringPict.next(*(args+(self,)))
    @staticmethod
    def stack(*args):
        """Put pictures on top of each other, from top to bottom.
        Returns string, baseline arguments for stringPict.
        The baseline is the baseline of the second picture.
        Everything is centered.
        Strings are allowed.
        The special value stringPict.LINE is a row of '-' extended to the width.
        """
        #convert everything to stringPicts; keep LINE
        # NOTE: basestring is Python 2 only.
        objects = []
        for arg in args:
            if arg is not stringPict.LINE and isinstance(arg, basestring):
                arg = stringPict(arg)
            objects.append(arg)

        #compute new width (LINE placeholders adapt to it, so skip them)
        newWidth = max(
            obj.width()
            for obj in objects
            if obj is not stringPict.LINE)

        lineObj = stringPict(hobj('-', newWidth))

        #replace LINE with proper lines
        for i, obj in enumerate(objects):
            if obj is stringPict.LINE:
                objects[i] = lineObj

        #stack the pictures, and center the result
        newPicture = []
        for obj in objects:
            newPicture.extend(obj.picture)
        newPicture = [line.center(newWidth) for line in newPicture]
        # Baseline of the result = height of the first picture plus the
        # second picture's own baseline.
        newBaseline = objects[0].height()+objects[1].baseline
        return '\n'.join(newPicture), newBaseline
def below(self, *args):
"""Put pictures under this picture.
Returns string, baseline arguments for stringPict.
Baseline is baseline of top picture
Examples
========
>>> from sympy.printing.pretty.stringpict import stringPict
>>> print stringPict("x+3").below(stringPict.LINE, '3')[0] #doctest: +NORMALIZE_WHITESPACE
x+3
---
3
"""
s, baseline = stringPict.stack(self, *args)
return s, self.baseline
def above(self, *args):
"""Put pictures above this picture.
Returns string, baseline arguments for stringPict.
Baseline is baseline of bottom picture.
"""
string, baseline = stringPict.stack(*(args+(self,)))
baseline = len(string.splitlines())-self.height()+self.baseline
return string, baseline
def parens(self, left='(', right=')', ifascii_nougly=False):
"""Put parentheses around self.
Returns string, baseline arguments for stringPict.
left or right can be None or empty string which means 'no paren from
that side'
"""
h = self.height()
b = self.baseline
# XXX this is a hack -- ascii parens are ugly!
if ifascii_nougly and not pretty_use_unicode():
h = 1
b = 0
res = self
if left:
lparen = stringPict(vobj(left, h), baseline=b)
res = stringPict(*lparen.right(self))
if right:
rparen = stringPict(vobj(right, h), baseline=b)
res = stringPict(*res.right(rparen))
return ('\n'.join(res.picture), res.baseline)
def leftslash(self):
"""Precede ob | ject by a slash of the proper size.
"""
# XXX not used anywhere ?
height = max(
self.baseline,
self.height()-1-self.baseline)*2 + 1
slash = '\n'.join(
' '*(height-i-1)+xobj('/',1)+' '*i
for i in range(height)
)
return self.left(stringPict(slash, height//2))
    def root(self, n=None):
        """Produce a nice root symbol.
        Produces ugly results for big n inserts.
        """
        # XXX not used anywhere
        # XXX duplicate of root drawing in pretty.py
        #put line over expression
        result = self.above('_'*self.width())
        #construct right half of root symbol
        height = self.height()
        slash = '\n'.join(
            ' ' * (height-i-1) + '/' + ' ' * i
            for i in range(height)
            )
        slash = stringPict(slash, height-1)
        #left half of root symbol
        if height > 2:
            downline = stringPict('\\ \n \\',1)
        else:
            downline = stringPict('\\')
        #put n on top, as low as possible
        # NOTE(review): n is expected to be a stringPict (it must provide
        # width()) — confirm against callers.
        if n is not None and n.width()>downline.width():
            downline = downline.left(' '*(n.width()-downline.width()))
            downline = downline.above(n)
        #build root symbol
        root = downline.right(slash)
        #glue it on at the proper height
        #normally, the root symbol is as high as self
        #which is one less than result
        #this moves the root symbol one down
        #if the root became higher, the baseline has to grow too
        root.baseline = result.baseline-result.height()+root.height()
        return result.left(root)
def render(self, * args, **kwargs):
"""Re |
QRAAT/QRAAT | node/python/qa_rmg.py | Python | gpl-3.0 | 1,878 | 0.019702 | #!/usr/bin/env python2
#
# Copyright 2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#editted by Todd Borrowman ECE-UIUC 01/18/08~02/2010
from gnuradio import gr, gr_unittest, blks2
import rmg, sys
# Transmitter name passed to the detector and the recorded input file.
tx_name = "test"
test_data = "unit_test.tdat"
class qa_rmg (gr_unittest.TestCase):
    """GNU Radio flowgraph test: feed recorded 4-channel data through the
    rmg pulse detector."""

    def setUp (self):
        self.fg = gr.top_block ()

    def tearDown (self):
        self.fg = None

    def test_001_detect (self):
        src_file = gr.file_source(gr.sizeof_gr_complex, test_data)
        #src_file = gr.file_source(gr.sizeof_gr_complex, "data_backup20070423/20070208164806.tdat")
        #src_file = gr.file_source(gr.sizeof_gr_complex, "20080911161545.tdat")
        # Split the interleaved recording back into 4 channels.
        di = gr.deinterleave(gr.sizeof_gr_complex)
        self.fg.connect(src_file, di)
        pd = rmg.detect(4,8000,160,480,1.1,"results", tx_name, 1.1,1.5)
        pd.enable()
        self.fg.connect((di,0),(pd,0))
        self.fg.connect((di,1),(pd,1))
        self.fg.connect((di,2),(pd,2))
        self.fg.connect((di,3),(pd,3))
        self.fg.run();
        #self.assertFloatTuplesAlmostEqual ([1],[1],1)
if __name__ == '__main__':
    # Run through the GNU Radio unittest harness.
    gr_unittest.main ()
|
kgilmo/penning_artiq | artiq/master/worker_db.py | Python | gpl-3.0 | 6,331 | 0.004265 | from collections import OrderedDict
import importlib
import logging
import os
import time
import re
import numpy as np
import h5py
from artiq.protocols.sync_struct import Notifier
from artiq.protocols.pc_rpc import AutoTarget, Client, BestEffortClient
logger = logging.getLogger(__name__)
def _create_device(desc, device_mgr):
ty = desc["type"]
if ty == "local":
module = importlib.import_module(desc["module"])
device_class = getattr(module, desc["class"])
return device_class(device_mgr, **desc["arguments"])
elif ty == "controller":
if desc.get("best_effort", False):
cls = BestEffortClient
else:
cls = Client
| # Automatic target can be specified either by the absence of
# the target_name parameter, or a None value.
target_name = desc.get("target_name", None)
if target_name is None:
target_name = AutoTarget
return cls | (desc["host"], desc["port"], target_name)
else:
raise ValueError("Unsupported type in device DB: " + ty)
class DeviceManager:
"""Handles creation and destruction of local device drivers and controller
RPC clients."""
def __init__(self, ddb, virtual_devices=dict()):
self.ddb = ddb
self.virtual_devices = virtual_devices
self.active_devices = OrderedDict()
def get_device_db(self):
"""Returns the full contents of the device database."""
return self.ddb.get_device_db()
def get(self, name):
"""Get the device driver or controller client corresponding to a
device database entry."""
if name in self.virtual_devices:
return self.virtual_devices[name]
if name in self.active_devices:
return self.active_devices[name]
else:
desc = self.ddb.get(name)
while isinstance(desc, str):
# alias
desc = self.ddb.get(desc)
dev = _create_device(desc, self)
self.active_devices[name] = dev
return dev
def close_devices(self):
"""Closes all active devices, in the opposite order as they were
requested."""
for dev in reversed(list(self.active_devices.values())):
try:
if isinstance(dev, (Client, BestEffortClient)):
dev.close_rpc()
elif hasattr(dev, "close"):
dev.close()
except Exception as e:
logger.warning("Exception %r when closing device %r", e, dev)
self.active_devices.clear()
def get_hdf5_output(start_time, rid, name):
dirname = os.path.join("results",
time.strftime("%Y-%m-%d", start_time),
time.strftime("%H-%M", start_time))
filename = "{:09}-{}.h5".format(rid, name)
os.makedirs(dirname, exist_ok=True)
return h5py.File(os.path.join(dirname, filename), "w")
def get_last_rid():
r = -1
try:
day_folders = os.listdir("results")
except:
return r
day_folders = filter(lambda x: re.fullmatch('\d\d\d\d-\d\d-\d\d', x),
day_folders)
for df in day_folders:
day_path = os.path.join("results", df)
try:
minute_folders = os.listdir(day_path)
except:
continue
minute_folders = filter(lambda x: re.fullmatch('\d\d-\d\d', x),
minute_folders)
for mf in minute_folders:
minute_path = os.path.join(day_path, mf)
try:
h5files = os.listdir(minute_path)
except:
continue
for x in h5files:
m = re.fullmatch('(\d\d\d\d\d\d\d\d\d)-.*\.h5', x)
rid = int(m.group(1))
if rid > r:
r = rid
return r
_type_to_hdf5 = {
int: h5py.h5t.STD_I64BE,
float: h5py.h5t.IEEE_F64BE,
np.int8: h5py.h5t.STD_I8BE,
np.int16: h5py.h5t.STD_I16BE,
np.int32: h5py.h5t.STD_I32BE,
np.int64: h5py.h5t.STD_I64BE,
np.uint8: h5py.h5t.STD_U8BE,
np.uint16: h5py.h5t.STD_U16BE,
np.uint32: h5py.h5t.STD_U32BE,
np.uint64: h5py.h5t.STD_U64BE,
np.float16: h5py.h5t.IEEE_F16BE,
np.float32: h5py.h5t.IEEE_F32BE,
np.float64: h5py.h5t.IEEE_F64BE
}
def result_dict_to_hdf5(f, rd):
for name, data in rd.items():
if isinstance(data, list):
el_ty = type(data[0])
for d in data:
if type(d) != el_ty:
raise TypeError("All list elements must have the same"
" type for HDF5 output")
try:
el_ty_h5 = _type_to_hdf5[el_ty]
except KeyError:
raise TypeError("List element type {} is not supported for"
" HDF5 output".format(el_ty))
dataset = f.create_dataset(name, (len(data), ), el_ty_h5)
dataset[:] = data
elif isinstance(data, np.ndarray):
f.create_dataset(name, data=data)
else:
ty = type(data)
if ty is str:
ty_h5 = "S{}".format(len(data))
data = data.encode()
else:
try:
ty_h5 = _type_to_hdf5[ty]
except KeyError:
raise TypeError("Type {} is not supported for HDF5 output"
.format(ty))
dataset = f.create_dataset(name, (), ty_h5)
dataset[()] = data
class DatasetManager:
def __init__(self, ddb):
self.broadcast = Notifier(dict())
self.local = dict()
self.ddb = ddb
self.broadcast.publish = ddb.update
def set(self, key, value, broadcast=False, persist=False, save=True):
if persist:
broadcast = True
r = None
if broadcast:
self.broadcast[key] = (persist, value)
r = self.broadcast[key][1]
if save:
self.local[key] = value
return r
def get(self, key):
try:
return self.local[key]
except KeyError:
return self.ddb.get(key)
def write_hdf5(self, f):
result_dict_to_hdf5(f, self.local)
|
wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/click/formatting.py | Python | gpl-3.0 | 8,889 | 0 | from contextlib import contextmanager
from .termui import get_terminal_size
from .parser import split_opt
from ._compat import term_len
# Can force a width. This is used by the test system
FORCED_WIDTH = None
def measure_table(rows):
widths = {}
for row in rows:
for idx, col in enumerate(row):
widths[idx] = max(widths.get(idx, 0), term_len(col))
return tuple(y for x, y in sorted(widths.items()))
def iter_rows(rows, col_count):
for row in rows:
row = tuple(row)
yield row + ('',) * (col_count - len(row))
def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
preserve_paragraphs=False):
"""A helper function that intelligently wraps text. By default, it
assumes that it operates on a single paragraph of text but if the
`preserve_paragraphs` parameter is provided it will intelligently
handle paragraphs (defined by two empty lines).
If paragraphs are handled, a paragraph can be prefixed with an empty
line containing the ``\\b`` character (``\\x08``) to indicate that
no rewrapping should happen in that block.
:param text: the text that should be rewrapped.
:param width: the maximum width for the text.
:param initial_indent: the initial indent that should be placed on the
first line as a string.
:param subsequent_indent: the indent string that should be placed on
each consecutive line.
:param preserve_paragraphs: if this flag is set then the wrapping will
intelligently handle paragraphs.
"""
from ._textwrap import TextWrapper
text = text.expandtabs()
wrapper = TextWrapper(width, initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
replace_whitespace=False)
if not preserve_paragraphs:
return wrapper.fill(text)
p = []
buf = []
indent = None
def _flush_par():
if not buf:
return
if buf[0].strip() == '\b':
p.append((indent or 0, True, '\n'.join(buf[1:])))
else:
p.append((indent or 0, False, ' '.join(buf)))
del buf[:]
for line in text.splitlines():
if not line:
_flush_par()
indent = None
else:
if indent is None:
orig_len = term_len(line)
line = line.lstrip()
indent = orig_len - term_len(line)
buf.append(line)
_flush_par()
rv = []
for indent, raw, text in p:
with wrapper.extra_indent(' ' * indent):
if raw:
rv.append(wrapper.indent_only(text))
else:
rv.append(wrapper.fill(text))
return '\n\n'.join(rv)
class HelpFormatter(object):
"""This class helps with formatting text-based help pages. It's
usually just needed for very special internal cases, but it's also
exposed so that developers can write their own fancy outputs.
At present, it always writes into memory.
:param indent_increment: the additional increment for each level.
:param width: the width for the text. This defaults to the terminal
width clamped to a maximum of 78.
"""
def __init__(self, indent_increment=2, width=None, max_width=None):
self.indent_increment = indent_increment
if max_width is None:
max_width = 80
if width is None:
width = FORCED_WIDTH
if width is None:
width = max(min(get_terminal_size()[0], max_width) - 2, 50)
self.width = width
self.current_indent = 0
self.buffer = []
def write(self, string):
"""Writes a unicode string into the internal buffer."""
self.buffer.append(string)
def indent(self):
"""Increases the indentation."""
self.current_indent += self.indent_increment
def dedent(self):
"""Decr | eases the indentation."""
self.current_indent -= self.indent_increment
def write_usage(self, prog, args='', prefix='Usage: '):
"""Writes a usage line into the buffer.
:param prog: the program name.
:param args: whitespace separated list of arguments.
:param prefix: the prefix for the first line.
"""
usag | e_prefix = '%*s%s ' % (self.current_indent, prefix, prog)
text_width = self.width - self.current_indent
if text_width >= (term_len(usage_prefix) + 20):
# The arguments will fit to the right of the prefix.
indent = ' ' * term_len(usage_prefix)
self.write(wrap_text(args, text_width,
initial_indent=usage_prefix,
subsequent_indent=indent))
else:
# The prefix is too long, put the arguments on the next line.
self.write(usage_prefix)
self.write('\n')
indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4)
self.write(wrap_text(args, text_width,
initial_indent=indent,
subsequent_indent=indent))
self.write('\n')
def write_heading(self, heading):
"""Writes a heading into the buffer."""
self.write('%*s%s:\n' % (self.current_indent, '', heading))
def write_paragraph(self):
"""Writes a paragraph into the buffer."""
if self.buffer:
self.write('\n')
def write_text(self, text):
"""Writes re-indented text into the buffer. This rewraps and
preserves paragraphs.
"""
text_width = max(self.width - self.current_indent, 11)
indent = ' ' * self.current_indent
self.write(wrap_text(text, text_width,
initial_indent=indent,
subsequent_indent=indent,
preserve_paragraphs=True))
self.write('\n')
def write_dl(self, rows, col_max=30, col_spacing=2):
"""Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
"""
rows = list(rows)
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError('Expected two columns for definition list')
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write('%*s%s' % (self.current_indent, '', first))
if not second:
self.write('\n')
continue
if term_len(first) <= first_col - col_spacing:
self.write(' ' * (first_col - term_len(first)))
else:
self.write('\n')
self.write(' ' * (first_col + self.current_indent))
text_width = max(self.width - first_col - 2, 10)
lines = iter(wrap_text(second, text_width).splitlines())
if lines:
self.write(next(lines) + '\n')
for line in lines:
self.write('%*s%s\n' % (
first_col + self.current_indent, '', line))
else:
self.write('\n')
@contextmanager
def section(self, name):
"""Helpful context manager that writes a paragraph, a heading,
and the indents.
:param name: the section name that is written as heading.
"""
self.write_paragraph()
self.write_heading(name)
self.indent()
try:
yield
finally:
self.dedent()
@contextmanager
def indentation(self):
"""A context manager that increases the indentation."""
self.indent()
try:
yield
finally:
self.dedent()
def getva |
hellais/nemesys-qos | nemesys/timeNtp.py | Python | gpl-3.0 | 1,035 | 0.007729 | # timeNtp.py
# -*- coding: utf-8 -*-
# Copyright (c) 2010 Fondazione Ugo Bordoni.
#
# This program is free software: you can redistribute it and/or mod | ify
# it under the terms of the GNU General | Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ntplib
import time
SERVERNTP = "tempo.cstv.to.cnr.it"
def timestampNtp():
x = ntplib.NTPClient()
try:
TimeRX = x.request(SERVERNTP, version=3)
timestamp = TimeRX.tx_time
except Exception:
timestamp = time.time()
return timestamp
if __name__ == '__main__':
n = timestampNtp()
print n
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractNotsogoodtranslatorWordpressCom.py | Python | bsd-3-clause | 578 | 0.032872 |
def extra | ctNotsogoodtranslatorWordpressCom(item):
'''
Parser for 'notsogoodtranslator.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in it | em['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
chumsley/gambit | contrib/scripts/enumpoly/setup.py | Python | gpl-2.0 | 241 | 0.008299 | # Th | is is a distutils/py2exe script to build the Windows binary version
# of gambit-enumphc
from distutils.core import setup
import py2exe
setup(console=["enumphc.py"],
data_files=[(".",
[ | "phc.exe", "README" ])])
|
0/SpanishAcquisition | spacq/interface/pulse/tool/box.py | Python | bsd-2-clause | 1,290 | 0.031783 | import csv
from spacq.tool.box import flatten
"""
Pulse program helpers.
"""
def find_location(s, loc):
"""
Find where loc (linear index) in s.
Returns:
line number
column number
line itself
Note: Tabs are not handled specially.
"""
if loc < 0 or loc > len(s):
raise ValueError('Location given ({0}) is outside string'.format(loc))
count = 0
lines = s.splitlines()
with_ends = s.splitlines(True)
for i, (line, wit | h_end) in enumerate(zip(lines, with_ends), 1):
if count + len(line) < loc:
count += len(with_end)
else:
col = loc - count + 1
return i, col, line
def format_error(msg, row=None, col=None, line=None):
"""
Format the error for human consumption.
"""
if row is None or col is None or line is No | ne:
return 'error: {0}'.format(msg)
else:
return 'error: {0} at column {1} on line {2}:\n{3}{4}\n{5}^'.format(msg,
col, row, ' ' * 2, line, ' ' * (col + 1))
def load_values(f):
"""
Load data points from a file.
The values in the file must either be comma separated, line-wise, or a combination of the two.
For example:
1.0,2.0,3.0
4.0,5.0
6.0
would be interpreted as [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
"""
reader = csv.reader(f)
# Ignore blank lines.
return [float(x) for x in flatten(reader) if not x.isspace()]
|
googleads/google-ads-python | google/ads/googleads/v9/enums/types/click_type.py | Python | apache-2.0 | 2,994 | 0.000334 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"ClickTypeEnum",},
)
class ClickTypeEnum(proto.Message):
r"""Container for enumerati | on of Google Ads click types.
"""
class ClickType(proto.Enum):
r"""Enumerates Google Ads click types."""
UNSPECIFIED = 0
UNKNOWN = 1
APP_DEEPLINK = 2
BREADCRUMBS = 3
BROADBAND_PLAN = 4
CALL_TRACKING = 5
CALLS = 6
| CLICK_ON_ENGAGEMENT_AD = 7
GET_DIRECTIONS = 8
LOCATION_EXPANSION = 9
LOCATION_FORMAT_CALL = 10
LOCATION_FORMAT_DIRECTIONS = 11
LOCATION_FORMAT_IMAGE = 12
LOCATION_FORMAT_LANDING_PAGE = 13
LOCATION_FORMAT_MAP = 14
LOCATION_FORMAT_STORE_INFO = 15
LOCATION_FORMAT_TEXT = 16
MOBILE_CALL_TRACKING = 17
OFFER_PRINTS = 18
OTHER = 19
PRODUCT_EXTENSION_CLICKS = 20
PRODUCT_LISTING_AD_CLICKS = 21
SITELINKS = 22
STORE_LOCATOR = 23
URL_CLICKS = 25
VIDEO_APP_STORE_CLICKS = 26
VIDEO_CALL_TO_ACTION_CLICKS = 27
VIDEO_CARD_ACTION_HEADLINE_CLICKS = 28
VIDEO_END_CAP_CLICKS = 29
VIDEO_WEBSITE_CLICKS = 30
VISUAL_SITELINKS = 31
WIRELESS_PLAN = 32
PRODUCT_LISTING_AD_LOCAL = 33
PRODUCT_LISTING_AD_MULTICHANNEL_LOCAL = 34
PRODUCT_LISTING_AD_MULTICHANNEL_ONLINE = 35
PRODUCT_LISTING_ADS_COUPON = 36
PRODUCT_LISTING_AD_TRANSACTABLE = 37
PRODUCT_AD_APP_DEEPLINK = 38
SHOWCASE_AD_CATEGORY_LINK = 39
SHOWCASE_AD_LOCAL_STOREFRONT_LINK = 40
SHOWCASE_AD_ONLINE_PRODUCT_LINK = 42
SHOWCASE_AD_LOCAL_PRODUCT_LINK = 43
PROMOTION_EXTENSION = 44
SWIPEABLE_GALLERY_AD_HEADLINE = 45
SWIPEABLE_GALLERY_AD_SWIPES = 46
SWIPEABLE_GALLERY_AD_SEE_MORE = 47
SWIPEABLE_GALLERY_AD_SITELINK_ONE = 48
SWIPEABLE_GALLERY_AD_SITELINK_TWO = 49
SWIPEABLE_GALLERY_AD_SITELINK_THREE = 50
SWIPEABLE_GALLERY_AD_SITELINK_FOUR = 51
SWIPEABLE_GALLERY_AD_SITELINK_FIVE = 52
HOTEL_PRICE = 53
PRICE_EXTENSION = 54
HOTEL_BOOK_ON_GOOGLE_ROOM_SELECTION = 55
SHOPPING_COMPARISON_LISTING = 56
__all__ = tuple(sorted(__protobuf__.manifest))
|
egbertbouman/tribler-g | Tribler/Test/test_channelcast.py | Python | lgpl-2.1 | 15,423 | 0.015496 | # Written by Arno Bakker
# see LICENSE.txt for license information
# TODO: let one hit to SIMPLE+METADATA be P2PURL
import unittest
import os
import sys
import time
import tempfile
import shutil
from Tribler.Core.Utilities.Crypto import sha
from types import StringType, DictType, IntType
from M2Crypto import EC
from copy import deepcopy
from Tribler.Test.test_as_server import TestAsServer
from olconn import OLConnection
from Tribler.Core.API import *
from Tribler.Core.BitTornado.bencode import bencode,bdecode
from Tribler.Core.BitTornado.BT1.MessageID import *
from Tribler.Core.BuddyCast.moderationcast_util import validChannelCastMsg, validVoteCastMsg
from Tribler.Core.BuddyCast.channelcast import ChannelCastCore
from Tribler.Core.BuddyCast.buddycast import BuddyCastCore
from Tribler.Core.BuddyCast.votecast import VoteCastCore
from Tribler.Core.CacheDB.sqlitecachedb import str2bin,bin2str
DEBUG=True
class TestChannels(TestAsServer):
"""
Testing QUERY message of Social Network extension V1
"""
def setUpPreSession(self):
""" override TestAsServer """
TestAsServer.setUpPreSession(self)
self.config.set_buddycast(True)
BuddyCastCore.TESTASSERVER = True
ChannelCastCore.TESTASSERVER = True
VoteCastCore.TESTASSERVER = True
self.config.set_start_recommender(True)
self.config.set_bartercast(True)
self.config.set_remote_query(True)
self.config.set_crawler(False)
self.config.set_torrent_collecting_dir(os.path.join(self.config_path, "tmp_torrent_collecting"))
# Write superpeers.txt and DB schema
self.install_path = tempfile.mkdtemp()
spdir = os.path.join(self.install_path, LIBRARYNAME, 'Core')
os.makedirs(spdir)
statsdir = os.path.join(self.install_path, LIBRARYNAME, 'Core', 'Statistics')
os.makedirs(statsdir)
superpeerfilename = os.path.join(spdir, 'superpeer.txt')
print >> sys.stderr,"test: writing empty superpeers to",superpeerfilename
f = open(superpeerfilename, "w")
f.write('# Leeg')
f.close()
self.config.set_install_dir(self.install_path)
srcfiles = []
srcfiles.append(os.path.join(LIBRARYNAME,"schema_sdb_v5.sql"))
for srcfile in srcfiles:
sfn = os.path.join('..','..',srcfile)
dfn = os.path.join(self.install_path,srcfile)
print >>sys.stderr,"test: copying",sfn,dfn
shutil.copyfile(sfn,dfn)
def setUpPostSession(self):
""" override TestAsServer """
TestAsServer.setUpPostSession(self)
self.mypermid = str(self.my_keypair.pub().get_der())
self.hispermid = str(self.his_keypair.pub().get_der())
def setupDB(self,nickname):
# Change at runtime. Must be set before DB inserts
self.session.set_nickname(nickname)
self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS)
self.channelcast_db = self.session.open_dbhandler(NTFY_CHANNELCAST)
self.votecast_db = self.session.open_dbhandler(NTFY_VOTECAST)
try:
# Add some torrents belonging to own channel
tdef1, self.bmetainfo1 = self.get_default_torrent('sumfilename1','Hallo S01E10')
dbrec= self.torrent_db.addExternalTorrent(tdef1, extra_info={"filename":"sumfilename1"})
self.infohash1 = tdef1.get_infohash()
self.channelcast_db.addOwnTorrent(tdef1)
tdef2, self.bmetainfo2 = self.get_default_torrent('sumfilename2','Hallo S02E01')
dbrec = self.torrent_db.addExternalTorrent(tdef2, extra_info={"filename":"sumfilename2"})
self.infohash2 = tdef2.get_infohash()
self.torrenthash2 = sha(self.bmetainfo2).digest()
self.channelcast_db.addOwnTorrent(tdef2)
tdef3, self.bmetainfo3 = self.get_default_torrent('sumfilename3','Halo Demo')
self.torrent_db.addExternalTorrent(tdef3, extra_info={"filename":"sumfilename3"})
self.infohash3 = tdef3.get_infohash()
self.torrenthash3 = sha(self.bmetainfo3).digest()
self.channelcast_db.addOwnTorrent(tdef3)
# Now, add some votes
self.votecast_db.subscribe("MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU")
self.votecast_db.spam("MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTX")
vote = {'mod_id':"MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTU", 'voter_id':"MFIwEAYHKoZIzj0CAQYFK4EEABoDPgAEAIV8h+eS+vQ+0uqZNv3MYYTLo5s0JP+cmkvJ7U4JAHhfRv1wCqZSKIuY7Q+3ESezhRnnmmX4pbOVhKTX",'vote':1, 'time_stamp':132314}
self.votecast_db.addVote(vote)
except:
print_exc()
def tearDown(self):
TestAsServer.tearDown(self)
self.session.close_dbhandler(self.torrent_db)
def get_default_torrent(self,filename,title,paths=None):
metainfo = {}
metainfo['announce'] = 'http://localhost:0/announce'
metainfo['announce-list'] = []
metainfo['creation date'] = int(time.time())
metainfo['encoding'] = 'UTF-8'
info = {}
info['name'] = title.encode("UTF-8")
info['piece length'] = 2 ** 16
info['pieces'] = '*' * 20
if paths is None:
info['length'] = 481
else:
d1 = {}
d1['path'] = [paths[0].encode("UTF-8")]
d1['length'] = 201
d2 = {}
d2['path'] = [paths[1].encode("UTF-8")]
d2['length'] = 280
info['files'] = [d1,d2]
metainfo['info'] = info
path = os.path.join(self.config.get_torrent_collecting_dir(),filename)
tdef = TorrentDef.load_from_dict(metainfo)
tdef.save(path)
return tdef, bencode(metainfo)
def singtest_plain_nickname(self):
self._test_all("nick")
def singtest_unicode_nickname(self):
self._test_all(u"nick\u00f3")
def _test_all(self,nickname):
"""
I want to start a Tribler client once and then connect to
it many times. So there must be only one test method
to prevent setUp() from creating a new client every time.
The code is constructed so unittest will show the name of the
(sub)test where the error occured in the traceback it prints.
"""
self.setupDB(nickname)
# test ChannelCast
self.subtest_channelcast()
# test VoteCast
self.subtest_votecast()
# test ChannelQuery-keyword
self.subtest_channel_keyword_query(nickname)
# test ChannelQuery-permid
self.subtest_channel_permid_query(nickname)
#test voting
self.subtest_voting()
def subtest_voting(self):
self.votecast_db.unsubscribe(bin2str(self.mypermid))
self.assertEqual(self.vot | ecast_db.getVote(bin2str(self.mypermid),bin2str(self.hispermid)),None)
#print >> sys.stderr, self.votecast_db.getAll()
self.votecast_db.spam(bin2str(self.mypermid))
self.assertEqual(self.votecast_db.getVote(bin2str(self.mypermid),bin2str(self.hispermid)),-1)
#print >> sys.stderr, self.votec | ast_db.getAll()
self.votecast_db.subscribe(bin2str(self.mypermid))
self.assertEqual(self.votecast_db.getVote(bin2str(self.mypermid),bin2str(self.hispermid)),2)
#print >> sys.stderr, self.votecast_db.getAll()
self.votecast_db.unsubscribe(bin2str(self.mypermid))
self.assertEqual(self.votecast_db.getVote(bin2str(self.mypermid),bin2str(self.hispermid)),None)
#print >> sys.stderr, self.voteca |
shaded-enmity/dnf | dnf/base.py | Python | gpl-2.0 | 72,096 | 0.000222 | # Copyright 2005 Duke University
# Copyright (C) 2012-2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Supplies the Base class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from dnf.comps import CompsQuery
from dnf.i18n import _, P_, ucd
from dnf.util import first
from dnf.yum import history
from dnf.yum import misc
from dnf.yum import rpmsack
from functools import reduce
import collections
import dnf.callback
import dnf.comps
import dnf.conf
import dnf.conf.read
import dnf.crypto
import dnf.drpm
import dnf.exceptions
import dnf.goal
import dnf.history
import dnf.lock
import dnf.logging
import dnf.persistor
import dnf.plugin
import dnf.query
import dnf.repo
import dnf.repodict
import dnf.rpm.connection
import dnf.rpm.miscutils
import dnf.rpm.transaction
import dnf.sack
import dnf.subject
import dnf.transaction
import dnf.util
import dnf.yum.config
import dnf.yum.rpmtrans
import functools
import hawkey
import itertools
import logging
import os
import operator
import re
import rpm
import time
logger = logging.getLogger("dnf")
class Base(object):
def __init__(self, conf=None):
# :api
self._closed = False
self._conf = conf or self._setup_default_conf()
self._goal = None
self.repo_persistor = None
self._sack = None
self._transaction = None
self._ts = None
self._comps = None
self._history = None
self._tempfiles = set()
self.ds_callback = dnf.callback.Depsolve()
self.group_persistor = None
self.logging = dnf.logging.Logging()
self._repos = dnf.repodict.RepoDict()
self.rpm_probfilter = set([rpm.RPMPROB_FILTER_OLDPACKAGE])
self.plugins = dnf.plugin.Plugins()
self.clean_tempfiles = False
self._tempfile_persistor = None
def __enter__(self):
return self
def __exit__(self, *exc_args):
self.close()
def _add_repo_to_sack(self, name):
repo = self.repos[name]
try:
repo.load()
except dnf.exceptions.RepoError as e:
if repo.skip_if_unavailable is False:
raise
logger.warning(_("%s, disabling."), e)
repo.disable()
return
hrepo = repo.hawkey_repo
hrepo.repomd_fn = repo.repomd_fn
hrepo.primary_fn = repo.primary_fn
hrepo.filelists_fn = repo.filelists_fn
hrepo.cost = repo.cost
if repo.presto_fn:
hrepo.presto_fn = repo.presto_fn
else:
logger.debug("not found deltainfo for: %s" % repo.name)
if repo.updateinfo_fn:
hrepo.updateinfo_fn = repo.updateinfo_fn
else:
logger.debug("not found updateinfo for: %s" % repo.name)
self._sack.load_repo(hrepo, build_cache=True, load_filelists=True,
load_presto=repo.deltarpm,
load_updateinfo=True)
@staticmethod
def _setup_default_conf():
conf = dnf.conf.Conf()
subst = conf.substitutions
if 'releasever' not in subst:
subst['releasever'] = \
dnf.rpm.detect_releasever(conf.installroot)
cache_dirs = dnf.conf.CliCache(conf.cachedir)
conf.cachedir = cache_dirs.cachedir
return conf
def _setup_excludes_includes(self):
disabled = set(self.conf.disable_excludes)
if 'all' in disabled:
return
if 'main' not in disabled:
for excl in self.conf.exclude:
subj = dnf.subject.Subject(excl)
pkgs = subj.get_best_query(self.sack)
self.sack.add_excludes(pkgs)
for incl in self.conf.include:
subj = dnf.subject.Subject(incl)
pkgs = subj.get_best_query(self.sack)
self.sack.add_includes(pkgs)
for r in self.repos.iter_enabled():
if r.id in disabled:
continue
for excl in r.exclude:
pkgs = self.sack.query().filter(reponame=r.id).\
filter_autoglob(name=excl)
self.sack.add_excludes(pkgs)
for incl in r.include:
pkgs = self.sack.query().filter(reponame=r.id).\
filter_autoglob(name=incl)
self.sack.add_includes(pkgs)
def _store_persistent_data(self):
def check_expired(repo):
try:
exp_remaining = repo.metadata_expire_in()[1]
return False if exp_remaining is None else exp_remaining <= 0
except dnf.exceptions.MetadataError:
return False
if self.repo_persistor:
expired = [r.id for r in self.repos.iter_enabled()
if check_expired(r)]
self.repo_persistor.set_expired_repos(expired)
if self.group_persistor:
self.group_persistor.save()
if self._tempfile_persistor:
self._tempfile_persistor.save()
@property
def comps(self):
# :api
return self._comps
@property
def conf(self):
# :api
return self._conf
@property
def goal(self):
return self._goal
@property
def repos(self):
return self._repos
@repos.deleter
def repos(self):
self._repos = None
@property
@dnf.util.lazyattr("_rpmconn")
def rpmconn(self):
return dnf.rpm.connection.RpmConnection(self.conf.installroot)
@property
def sack(self):
# :api
return self._sack
@property
def transaction(self):
# :api
return self._transaction
@transaction.setter
def transaction(self, value):
if self._transaction:
raise ValueError('transaction already set')
self._transaction = value
def activate_persistor(self):
self.repo_persistor = dnf.persistor.RepoPersistor(self.conf.cachedir)
def fill_sack(self, load_system_repo=True, load_available_repos=True):
"""Prepare the Sack and the Goal objects. :api."""
timer = dnf.logging.Timer('sack setup')
self._sack = dnf.sack.build_sack(self)
lock = dnf.lock.build_metadata_lock(self.conf.cachedir)
with lock:
if load_system_repo is not False:
try:
self._sack.load_system_repo(build_cache=True)
except IOError:
if load_system_repo != 'auto':
raise
if load_available_repos:
for r in self.repos.iter_enabled():
| self._add_repo_to_sack(r.id)
conf = self.conf
self._sack.configure(conf.installonlypkgs, conf | .installonly_limit)
self._setup_excludes_includes()
timer()
self._goal = dnf.goal.Goal(self._sack)
return self._sack
@property
@dnf.util.lazyattr("_yumdb")
def yumdb(self):
db_path = os.path.normpath(self.conf.persistdir + '/yumdb')
return rpmsack.AdditionalPkgDB(db_path)
def close(self):
"""Close all potential handles and clean cache. :api
Typically the handles are to data sources and sinks.
"""
if self._closed:
return
logger.log(dnf.logging.DDEBUG, 'Cleaning up.')
self._closed = True
self._tempfile_persistor = dnf.p |
ntt-sic/neutron | neutron/openstack/common/rpc/amqp.py | Python | apache-2.0 | 22,783 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared code between AMQP based openstack.common.rpc implementations.
The code in this module is shared between the rpc implemenations based on AMQP.
Specifically, this includes impl_ | kombu and impl_qpid. impl_carrot also uses
AMQP, but is deprecated and predates this code.
"""
import collections
import inspect
import sys
import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import queue
from eventlet import semaphore
from oslo.config import cfg
from neutron.openstack.common impo | rt excutils
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import local
from neutron.openstack.common import log as logging
from neutron.openstack.common.rpc import common as rpc_common
amqp_opts = [
cfg.BoolOpt('amqp_durable_queues',
default=False,
deprecated_name='rabbit_durable_queues',
deprecated_group='DEFAULT',
help='Use durable queues in amqp.'),
cfg.BoolOpt('amqp_auto_delete',
default=False,
help='Auto-delete queues in amqp.'),
]
cfg.CONF.register_opts(amqp_opts)
UNIQUE_ID = '_unique_id'
LOG = logging.getLogger(__name__)
class Pool(pools.Pool):
"""Class that implements a Pool of Connections."""
def __init__(self, conf, connection_cls, *args, **kwargs):
self.connection_cls = connection_cls
self.conf = conf
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
LOG.debug(_('Pool creating new connection'))
return self.connection_cls(self.conf)
def empty(self):
while self.free_items:
self.get().close()
# Force a new connection pool to be created.
# Note that this was added due to failing unit test cases. The issue
# is the above "while loop" gets all the cached connections from the
# pool and closes them, but never returns them to the pool, a pool
# leak. The unit tests hang waiting for an item to be returned to the
# pool. The unit tests get here via the tearDown() method. In the run
# time code, it gets here via cleanup() and only appears in service.py
# just before doing a sys.exit(), so cleanup() only happens once and
# the leakage is not a problem.
self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
def get_connection_pool(conf, connection_cls):
with _pool_create_sem:
# Make sure only one thread tries to create the connection pool.
if not connection_cls.pool:
connection_cls.pool = Pool(conf, connection_cls)
return connection_cls.pool
class ConnectionContext(rpc_common.Connection):
"""The class that is actually returned to the create_connection() caller.
This is essentially a wrapper around Connection that supports 'with'.
It can also return a new Connection, or one from a pool.
The function will also catch when an instance of this class is to be
deleted. With that we can return Connections to the pool on exceptions
and so forth without making the caller be responsible for catching them.
If possible the function makes sure to return a connection to the pool.
"""
def __init__(self, conf, connection_pool, pooled=True, server_params=None):
"""Create a new connection, or get one from the pool."""
self.connection = None
self.conf = conf
self.connection_pool = connection_pool
if pooled:
self.connection = connection_pool.get()
else:
self.connection = connection_pool.connection_cls(
conf,
server_params=server_params)
self.pooled = pooled
def __enter__(self):
"""When with ConnectionContext() is used, return self."""
return self
def _done(self):
"""If the connection came from a pool, clean it up and put it back.
If it did not come from a pool, close it.
"""
if self.connection:
if self.pooled:
# Reset the connection so it's ready for the next caller
# to grab from the pool
self.connection.reset()
self.connection_pool.put(self.connection)
else:
try:
self.connection.close()
except Exception:
pass
self.connection = None
def __exit__(self, exc_type, exc_value, tb):
"""End of 'with' statement. We're done here."""
self._done()
def __del__(self):
"""Caller is done with this connection. Make sure we cleaned up."""
self._done()
def close(self):
"""Caller is done with this connection."""
self._done()
def create_consumer(self, topic, proxy, fanout=False):
self.connection.create_consumer(topic, proxy, fanout)
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
self.connection.join_consumer_pool(callback,
pool_name,
topic,
exchange_name)
def consume_in_thread(self):
self.connection.consume_in_thread()
def __getattr__(self, key):
"""Proxy all other calls to the Connection instance."""
if self.connection:
return getattr(self.connection, key)
else:
raise rpc_common.InvalidRPCConnectionReuse()
class ReplyProxy(ConnectionContext):
"""Connection class for RPC replies / callbacks."""
def __init__(self, conf, connection_pool):
self._call_waiters = {}
self._num_call_waiters = 0
self._num_call_waiters_wrn_threshhold = 10
self._reply_q = 'reply_' + uuid.uuid4().hex
super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
self.declare_direct_consumer(self._reply_q, self._process_data)
self.consume_in_thread()
def _process_data(self, message_data):
msg_id = message_data.pop('_msg_id', None)
waiter = self._call_waiters.get(msg_id)
if not waiter:
LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
', message : %(data)s'), {'msg_id': msg_id,
'data': message_data})
LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
else:
waiter.put(message_data)
def add_call_waiter(self, waiter, msg_id):
self._num_call_waiters += 1
if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
LOG.warn(_('Number of call waiters is greater than warning '
'threshhold: %d. There could be a MulticallProxyWaiter '
'leak.') % self._num_call_waiters_wrn_threshhold)
self._num_call_waiters_wrn_threshhold *= 2
self._call_waiters[msg_id] = waiter
def del_call_waite |
moocowmoo/pycoin | tests/tools_test.py | Python | mit | 4,283 | 0.004203 | #!/usr/bin/env python
import unittest
from pycoin.serialize import h2b
from pycoin.intbytes import int_to_bytes, bytes_from_ints
from pycoin.tx.script.tools import bin_script, compile, disassemble, int_to_script_bytes, int_from_script_bytes
from pycoin.tx.script.opcodes import OPCODE_LIST
from pycoin.tx.script.vm import eval_script
class ToolsTest(unittest.TestCase):
def test_bin_script(self):
def test_bytes | (as_bytes):
script = bin_script([as_bytes])
stack = []
eval_script(script, None, | lock_time=0, stack=stack, disallow_long_scripts=False)
assert len(stack) == 1
assert stack[0] == as_bytes
def test_val(n):
as_bytes = int_to_bytes(n)
test_bytes(as_bytes)
for i in range(100):
test_val(100)
for i in range(0xfff0, 0x10004):
test_val(i)
for i in range(0xfffff0, 0x1000005):
test_val(i)
for l in (1, 2, 3, 254, 255, 256, 257, 258, 0xfff9, 0xfffe, 0xffff, 0x10000, 0x10001, 0x10005):
for v in (1, 2, 3, 4, 15, 16, 17, 18):
b = bytes_from_ints([v] * l)
test_bytes(b)
b = bytes_from_ints([30] * (0x1000000+1))
for l in (0x1000000-1, 0x1000000, 0x1000000+1):
test_bytes(b[:l])
def test_compile_decompile(self):
def check(s):
b1 = compile(s)
s1 = disassemble(b1)
b2 = compile(s1)
self.assertEqual(s, s1)
self.assertEqual(b1, b2)
def build_hex(size, a, b):
"build some random-looking hex"
return "[%s]" % "".join("%02x" % (((i+a)*b) & 0xff) for i in range(size))
scripts = []
check("[ff]")
check("[ff03]")
check("[ff030102]")
check("[55aabbccddeeff112131]")
long_hex_260 = build_hex(260, 13, 93)
long_hex_270 = build_hex(270, 11, 47)
check("%s %s" % (long_hex_260, long_hex_270))
for opcode, code in OPCODE_LIST:
if opcode.startswith("OP_PUSHDATA"):
# these disassemble differently
continue
check(opcode)
def test_tx_7e0114e93f903892b4dff5526a8cab674b2825fd715c4a95f852a1aed634a0f6(self):
# this tx is from testnet. We add an extra "OP_0" to the end
# we need to check that the script is being disassembled correctly
script = h2b("0047304402201f994ca49451bc764fd090f31adb2fa4381b91f967dc05a6f538d4d1baaa83cd022"
"06ef3ad06de7890bc4130b4f57401412ca94897ea19b646f794a4472375351c1f0147304402201f"
"994ca49451bc764fd090f31adb2fa4381b91f967dc05a6f538d4d1baaa83cd02204655e9eccac41"
"2407dfc3e5753a0f2ac605e41c7eb91630dc67137f2d8081c3a014d0b0152410479be667ef9dcbb"
"ac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e110"
"8a8fd17b448a68554199c47d08ffb10d4b84104c6047f9441ed7d6d3045406e95c07cd85c778e4b"
"8cef3ca7abac09b95c709ee51ae168fea63dc339a3c58419466ceaeef7f632653266d0e1236431a"
"950cfe52a4104f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f938"
"8f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e6724104e493dbf1c10d8"
"0f3581e4904930b1404cc6c13900ee0758474fa94abe8c4cd1351ed993ea0d455b75642e2098ea5"
"1448d967ae33bfbdfe40cfe97bdc4773992254ae00")
d1 = disassemble(script).split()
self.assertEqual(len(d1), 5)
self.assertEqual(d1[-1], "OP_0")
def test_int_to_from_script_bytes(self):
for i in range(-127, 127):
self.assertEqual(int_from_script_bytes(int_to_script_bytes(i)), i)
for i in range(-1024, 1024, 16):
self.assertEqual(int_from_script_bytes(int_to_script_bytes(i)), i)
for i in range(-1024*1024, 1024*1024, 10000):
self.assertEqual(int_from_script_bytes(int_to_script_bytes(i)), i)
self.assertEqual(int_to_script_bytes(1), b"\1")
self.assertEqual(int_to_script_bytes(127), b"\x7f")
self.assertEqual(int_to_script_bytes(128), b"\x80\x00")
if __name__ == "__main__":
unittest.main()
|
Prasad9/incubator-mxnet | python/mxnet/ndarray/ndarray.py | Python | apache-2.0 | 98,461 | 0.001219 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-lines, protected-access
# pylint: disable=import-error, no-name-in-module, undefined-variable
"""NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
import ctypes
import warnings
import operator
from functools import reduce # pylint: disable=redefined-builtin
import numpy as np
from ..base import _LIB, numeric_types, integer_types
from ..base import c_array, mx_real_t
from ..base import mx_uint, NDArrayHandle, check_call
from ..base import ctypes2buffer
from ..context import Context
from . import _internal
from . import op
from ._internal import NDArrayBase
__all__ = ["NDArray", "concatenate", "_DTYPE_NP_TO_MX", "_DTYPE_MX_TO_NP", "_GRAD_REQ_MAP",
"ones", "add", "arange", "divide", "equal", "full", "greater", "greater_equal",
"imdecode", "lesser", "lesser_equal", "maximum", "minimum", "moveaxis", "modulo",
"multiply", "not_equal", "onehot_encode", "power", "subtract", "true_divide",
"waitall", "_new_empty_handle"]
_STORAGE_TYPE_UNDEFINED = -1
_STORAGE_TYPE_DEFAULT = 0
_STORAGE_TYPE_ROW_SPARSE = 1
_STORAGE_TYPE_CSR = 2
# pylint: disable= no-member
_DTYPE_NP_TO_MX = {
None: -1,
np.float32: 0,
np.float64: 1,
np.float16: 2,
np.uint8: 3,
np.int32: 4,
np.int8: 5,
np.int64: 6,
}
_DTYPE_MX_TO_NP = {
-1: None,
0: np.float32,
1: np.float64,
2: np.float16,
3: np.uint8,
4: np.int32,
5: np.int8,
6: np.int64,
}
_STORAGE_TYPE_STR_TO_ID = {
'undefined': _STORAGE_TYPE_UNDEFINED,
'default': _STORAGE_TYPE_DEFAULT,
'row_sparse': _STORAGE_TYPE_ROW_SPARSE,
'csr': _STORAGE_TYPE_CSR,
}
_STORAGE_TYPE_ID_TO_STR = {
_STORAGE_TYPE_UNDEFINED: 'undefined',
_STORAGE_TYPE_DEFAULT: 'default',
_STORAGE_TYPE_ROW_SPARSE: 'row_sparse',
_STORAGE_TYPE_CSR: 'csr',
}
_GRAD_REQ_MAP = {
'null': 0,
'write': | 1,
'add': 3
}
# pylint: enable= no-member
def _new_empty_handle():
"""Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A | new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
return hdl
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateEx(
c_array(mx_uint, shape),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
ctypes.byref(hdl)))
return hdl
def waitall():
"""Wait for all async operations to finish in MXNet.
This function is used for benchmarking only.
"""
check_call(_LIB.MXNDArrayWaitAll())
def _storage_type(handle):
storage_type = ctypes.c_int(0)
check_call(_LIB.MXNDArrayGetStorageType(handle, ctypes.byref(storage_type)))
return storage_type.value
class NDArray(NDArrayBase):
"""An array object representing a multidimensional, homogeneous array of
fixed-size items.
"""
__slots__ = []
# make numpy functions return NDArray instead of numpy object array
__array_priority__ = 1000.0
# pylint: disable= no-member, undefined-variable
def __repr__(self):
"""Returns a string representation of the array."""
shape_info = 'x'.join(['%d' % x for x in self.shape])
return '\n%s\n<%s %s @%s>' % (str(self.asnumpy()),
self.__class__.__name__,
shape_info, self.context)
def __reduce__(self):
return NDArray, (None,), self.__getstate__()
def __add__(self, other):
"""x.__add__(y) <=> x+y <=> mx.nd.add(x, y) """
return add(self, other)
def __iadd__(self, other):
"""x.__iadd__(y) <=> x+=y """
if not self.writable:
raise ValueError('trying to add to a readonly NDArray')
if isinstance(other, NDArray):
return op.broadcast_add(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._plus_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
"""x.__sub__(y) <=> x-y <=> mx.nd.subtract(x, y) """
return subtract(self, other)
def __isub__(self, other):
"""x.__isub__(y) <=> x-=y """
if not self.writable:
raise ValueError('trying to subtract from a readonly NDArray')
if isinstance(other, NDArray):
return op.broadcast_sub(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._minus_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rsub__(self, other):
"""x.__rsub__(y) <=> y-x <=> mx.nd.subtract(y, x) """
return subtract(other, self)
def __mul__(self, other):
"""x.__mul__(y) <=> x*y <=> mx.nd.multiply(x, y) """
return multiply(self, other)
def __neg__(self):
"""x.__neg__(y) <=> -x """
return _internal._mul_scalar(self, -1.0)
def __imul__(self, other):
"""x.__imul__(y) <=> x*=y """
if not self.writable:
raise ValueError('trying to multiply to a readonly NDArray')
if isinstance(other, NDArray):
return op.broadcast_mul(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._mul_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
"""x.__div__(y) <=> x/y <=> mx.nd.divide(x, y) """
return divide(self, other)
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y/x <=> mx.nd.divide(y, x) """
return divide(other, self)
def __idiv__(self, other):
"""x.__rdiv__(y) <=> x/=y """
if not self.writable:
raise ValueError('trying to divide from a readonly NDArray')
if isinstance(other, NDArray):
return op.broadcast_div(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._div_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __truediv__(self, other):
return divide(self, other)
def __rtruediv__(self, other):
return divide(other, self)
def __itruediv__(self, other):
return self.__idiv__(other)
def __mod__(self, other):
"""x.__mod__(y) <=> x%y <=> mx.nd.modulo(x, y) """
return modulo(self, other)
def __rmod__(self, other):
"""x.__rmod__(y) <=> y%x <=> mx.nd.modulo( |
jirivrany/riskflow123d-prep | app/helpers/solver_utils.py | Python | gpl-2.0 | 2,691 | 0.002601 | '''
Created on 20.12.2012
@author: Jiri Vrany
'''
import os
import shutil
import glob
PROBLEMS = {
'basic': 'basicProblem',
'monte': 'MonteCarlo',
'sens': 'Sensitivity'
}
def __normalize_result_stora_poro(value):
'''
this function should not be called from outside
the new value for storativity or porosity has to be in <0.00001, 0.99999>
'''
if value > 1:
return 0.99999
else:
return value
def round_porosity(value):
'''
takes a text value, convert it to float,
round it to positive zero and return str again
'''
try:
value = float(value)
except ValueError:
value = 0.00001
value = round_to_positive_zero(value)
value = __normalize_result_stora_poro(value)
return str(value)
def round_storativity(value):
'''
takes a text value, convert it to float, round it and return
str again
'''
try:
value = float(value)
except ValueError:
value = 0.0
value = __normalize_result_stora_poro(value)
return str(value)
def round_to_positive_zero(value):
'''
takes a value convert it to 0.00001 if it's zero
'''
if value < 0.00001:
return 0.00001
else:
return value
def create_task_identifier(problem_type, work_dir):
'''
Save problem identifier file into given work_dir
'''
try:
task_file = open(work_dir + '/problem.type', 'w')
print >> task_file, PROBLEMS[problem_type]
task_file.close()
except IOError:
print 'oh crap'
def copy_master_files(flow_ini, output_dir, separator):
'''
create a copy of original basic problem
@param output_dir - where to copy
@param flow_ini - a FlowIni object
'''
if not os.path.exists(output_dir):
os.mkdir(output_dir)
original_dir = flow_ini.dir_name
shutil.copy2(flow_ini.file_name, output_dir)
copy_boudnary_cond_files(original_dir, output_dir)
for master_file_name in flow_ini.dict_files.values():
src = original_dir + separator + master_file_name
try:
shutil.copy2(src, output_dir)
except IOError:
pass
def copy_boudnary_cond_files(original_dir, output_dir):
'''
Fix for Flow1.6.6 boundary conditions in time change
''' |
files = glob.iglob(os.path.join(original_dir, "*.bct_*"))
for fil | e_name in files:
if os.path.isfile(file_name):
shutil.copy2(file_name, output_dir)
if __name__ == '__main__':
print round_porosity('0')
print round_porosity('0.0001')
print round_porosity('0.00001')
print round_porosity('')
|
TaliesinSkye/evennia | src/utils/test_utils.py | Python | bsd-3-clause | 952 | 0.008403 | """
Test runner for Evennia test suite. Run with "game/manage.py test".
"""
from django.conf import settings
from django.test.simple import DjangoTestSuiteRunner
class EvenniaTestSuiteRunner(DjangoTestSuiteRunner):
"""
This test runner only runs tests on the apps specified i | n src/ and game/ to
avoid running the large number of tests defined by Django
"""
def build_suite(self, test_labels, extra_tests=None, **kwargs):
"""
Build a test suite for Evennia. test_labels is a list of apps to test.
If not given, a subset of settings.INSTALLED_APPS will be used.
"""
if not test_labels:
test_labels = [applabel.rsplit('.', 1)[1] for applabel | in settings.INSTALLED_APPS
if (applabel.startswith('src.') or applabel.startswith('game.'))]
return super(EvenniaTestSuiteRunner, self).build_suite(test_labels, extra_tests=extra_tests, **kwargs)
|
arnehilmann/netkraken | src/unittest/python/netkraken_tests.py | Python | apache-2.0 | 1,468 | 0.004768 | from __future__ import print_function
from datetime import datetime
import unittest
from mock import patch
import netkraken
from testhelper import myAssertDictEqual
class NetKrakenTests(unittest.TestCase):
def test_get_timestamp(self):
self.assertEquals(("minute", "2042-12-12T12:12"),
netkraken.get_timestamp("2042-12-12T12:12"))
self.assertEquals(("minute", "2042-12-12T12:12"),
netkraken.get_timestamp("///foo///2042-12-12T12:12"))
self.assertRaises(Exception, netkraken.get_timestamp, "no-valid-date")
def test_get_higher_timestamp(self):
self.assertEquals(("hour", "2042-12-12T12"),
netkraken.get_higher_time | stamp("///foo///2042-12-12T12:12"))
self.assertEquals(("day", "2042-12-12"),
| netkraken.get_higher_timestamp("///foo///2042-12-12T12"))
@patch("netkraken.get_current_datetime")
def test_get_current_timestrings(self, now_mock):
now_mock.return_value = datetime(2042, 12, 12, 12, 12)
# self.assertDictEqual({'day': '2042-12-12', 'hour': '2042-12-12T12', 'minute': '2042-12-12T12:12'},
# netkraken.get_current_timestrings())
myAssertDictEqual({'day': '2042-12-12', 'hour': '2042-12-12T12', 'minute': '2042-12-12T12:12'},
netkraken.get_current_timestrings())
if __name__ == "__main__":
unittest.main()
|
remysaissy/paris-immo-finder | app/scrapers/base_scraper.py | Python | gpl-3.0 | 6,157 | 0.003086 | import logging
import sys
import time
import traceback
from weakref import WeakValueDictionary
import bs4
from selenium import webdriver
from app.services import slack
from app.utils import db
class BaseScraper(object):
""" Abstract class for implementing a datasource. """
_instances = WeakValueDictionary()
def __init__(self):
self._instances[id(self)] = self
self.logger = logging.getLogger()
self.__browser = webdriver.PhantomJS(service_args=['--load-images=no', '--disk-cache=true'])
# self.__browser = webdriver.Chrome()
# self.__browser.set_window_size(1280, 1024)
# self.__browser.implicitly_wait(10)
# self.__browser.set_page_load_timeout(60)
def __del__(self):
self.__browser.quit()
# region scraping methods
def _get_search_url(self):
"""
The search url of the datasource
:return string
"""
NotImplementedError("Class {} doesn't implement aMethod()".format(self.__class__.__name__))
def _get_offers(self, root):
"""
Builds a list of offers
:return list(Offer)
"""
NotImplementedError("Class {} doesn't implement aMethod()".format(self.__class__.__name__))
def __get_offers(self, root):
"""
Builds a list of offers
:return list(BaseOffer)
"""
offers = []
r_offers = self._get_offers(root)
for r_offer in r_offers:
o = self._get_offer_object(r_offer)
| if o is None:
continue
if self._is_valid_offer(o, r_offer):
payload = self._prepare_offer_filling(o, r_offer)
o.fill_object(self, r_offer, payload)
self._clean_offer_filling(o, r_offer, payload)
offers.append(o)
else:
self.logger.warning("Inval | id offer detected. Skipping...")
return offers
def _get_offer_object(self, r_offer):
"""
Returns a valid offer object for the offer to parse.
:return A BaseOffer object subclass instance
"""
NotImplementedError("Class {} doesn't implement aMethod()".format(self.__class__.__name__))
def _is_valid_offer(self, offer, r_offer):
"""
Let the datasource object checks if the offer to be parsed is a valid one.
If the validity check allows to prefill some fields, the offer model object can be used.
:return True if the offer is valid, False otherwise
"""
NotImplementedError("Class {} doesn't implement aMethod()".format(self.__class__.__name__))
def _prepare_offer_filling(self, offer, r_offer):
"""
Let the datasource object preloads required state to fill the offer object.
The offer object can already be filled with some properties to avoid duplicate lookups in r_offers.
:return a payload of any data useful for the offer filling
"""
NotImplementedError("Class {} doesn't implement aMethod()".format(self.__class__.__name__))
def _clean_offer_filling(self, offer, r_offer, payload):
"""
Let the datasource object clean up its state.
At the time of calling, the offer object must not be modified.
"""
NotImplementedError("Class {} doesn't implement aMethod()".format(self.__class__.__name__))
def _has_next_page(self, root):
"""
Check if there is a next page and returns it parameters.
:returns has_next_page(bool),url(string),params(dict)
"""
NotImplementedError("Class {} doesn't implement aMethod()".format(self.__class__.__name__))
def _load_web_page(self, url):
"""
Retrieves results and returns a ready to use return object
:return BeautifulSoup instance.
"""
self.__browser.get(url) # This does not throw an exception if it got a 404
html = self.__browser.page_source
self.logger.info("GET request: {}".format(url))
result = None
try:
result = bs4.BeautifulSoup(html, 'html5lib')
except Exception as e:
self.logger.error("Failed to load webpage {}: {}".format(url, str(e)))
finally:
return result
def _next_page(self):
""" Retrieve the next page of results. This method must yield each page.
:return list[Offer]: A list of Offer objects.
"""
has_next = True
url = self._get_search_url()
while has_next:
root = self._load_web_page(url)
if root is not None:
has_next, url = self._has_next_page(root)
yield self.__get_offers(root)
@classmethod
def get_or_none(cls, obj, key):
val = obj.find(key)
if val is not None:
val = val.text
return val
# endregion
# region datasource identification and scrape methods
def get_datasource_name(self):
""" Returns the datasource's name. """
return self.__class__.__name__
def scrape(self):
""" Runs the datasource. """
self.logger.info("{}: Retrieving offers from {}...".format(time.ctime(), self.get_datasource_name()))
try:
total_count = 0
for offers in self._next_page():
for o in offers:
if db.Offer.is_new_offer(o, self.get_datasource_name()):
db.Offer.persist_offer(o, self.get_datasource_name())
# if db.Offer.is_new_offer(o, self.get_datasource_name()) and filter.Filter.apply(o) is False:
# db.Offer.persist_offer(o, self.get_datasource_name())
slack.Slack.post_message(o)
total_count += 1
except Exception as exc:
self.logger.error("Error with the scraping:", sys.exc_info()[0])
traceback.print_exc()
else:
self.logger.info("{}: Got {} results from {}".format(time.ctime(), total_count, self.get_datasource_name()))
# endregion
# region field extraction methods
# endregion
|
arunhotra/tensorflow | tensorflow/python/ops/gradients_test.py | Python | apache-2.0 | 12,673 | 0.011521 | """Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
# pylint: disable=unused-import
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import state_grad
# pylint: enable=unused-import
from tensorflow.python.ops.constant_op import constant
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients._GatherInputs(to_ops, reached_ops)
between_ops.sort(lambda x, y: y._id - x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.pack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.pack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat(0, [t4, t3])
t6 = constant([2.0])
t7 = array_ops.concat(0, [t5, t6])
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = array_ops.concat(0, [t3, t3, t3])
t5 = constant([1.0])
t6 = array_ops.concat(0, [t4, t5])
t7 = array_ops.concat(0, [t6, t3])
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(0, 2, wx)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:0", gw.device)
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:1", gw1.device)
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertEquals(None, gw2.device)
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default() as g:
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all([x for x in grads]))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all([x for x in grads]))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=
gradients.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree( | self):
with self.test_session():
x = constant( | 1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
de |
jnmclarty/rrsm | rrsm/__init__.py | Python | mit | 50 | 0.02 | from r | rsm.core import StateMachine, StateT | ypeError |
kmike/port-for | src/port_for/__init__.py | Python | mit | 548 | 0 | # -*- coding: utf-8 -*-
__version__ = "0.6.2"
from ._ranges import UNASSIGNED_RANGES
from .api import (
available_good_ports,
available_ports,
is_available,
good_port_ranges,
port_is_used,
select_random,
get_port,
| )
from .store import PortStore
from .exceptions import PortForException
__all__ = (
"UNASSIGNED_RANGES",
"available_good_p | orts",
"available_ports",
"is_available",
"good_port_ranges",
"port_is_used",
"select_random",
"get_port",
"PortStore",
"PortForException",
)
|
jmbeuken/abinit | scripts/post_processing/ElectronPhononCoupling/ElectronPhononCoupling/interface/__init__.py | Python | gpl-3.0 | 23 | 0 | from | .comput | e import *
|
tedlaz/pyted | pykoin17/numtable.py | Python | gpl-3.0 | 4,379 | 0.000235 | # -*- coding: utf-8 -*-
import decimal
def isNum(value): # Einai to value arithmos, i den einai ?
"""
use: Returns False if value is not a number , True otherwise
input parameters :
1.value : the value to check against.
output: True or False
"""
try:
float(value)
except ValueError:
return False
else:
return True
def dec(poso=0, decimals=2):
""" use : Given a number, it returns a decimal with a specific number
of decimals
input Parameters:
1.poso : The number for conversion in any format (e.g. string or
int ..)
2.decimals : The number of decimals (default 2)
output: A decimal number
"""
PLACES = decimal.Decimal(10) ** (-1 * decimals)
if isNum(poso):
tmp = decimal.Decimal(str(poso))
else:
tmp = decimal.Decimal('0')
return tmp.quantize(PLACES)
def distribute(val, distArray, decimals=2):
"""
input parameters:
val : Decimal value for distribution
distArray : Distribution Array
decimals : Number of decimal digits
"""
tmpArr = []
val = dec(val, decimals)
try:
tar = dec(sum(distArray), decimals)
except:
return tmpArr
for el in distArray:
tmpArr.append(dec(val * dec(el, decimals) / tar, decimals))
nval = sum(tmpArr)
dif = val - nval # Get the possible differen | ce to fix round problem
if dif == 0:
pass
else:
# Max value Element gets the difference
tmpArr[tmpArr.index(max(tmpArr))] += dif
return tmpArr
def dist_d(val, dist_dic, decimals=2):
dist = {}
sorted_keys = sorted(dist_dic)
tmpdist_list = []
# Create a list with distribution values
for el in sorted_keys:
tmpdist_list.appe | nd(dist_dic[el])
if sum(tmpdist_list) != 1000:
print("Distribution total is not 1000")
dist_list = distribute(val, tmpdist_list, decimals)
for i, el in enumerate(sorted_keys):
dist[el] = dist_list[i]
return dist
class ntable():
def __init__(self):
self.title = u'Κοινόχρηστα'
self.subtitle = u'3ο Τετράμηνο 2015'
self.column_headers = {1: u'Θέρμανση',
2: u'Ασανσέρ',
3: u'Καθαριότητα',
4: u'Αποχέτευση'}
self.row_headers = {1: u'Φροντιστήριο',
2: u'Νικολόπουλος-Μάρδα',
3: u'Καλυβιώτης',
4: u'Νεοπούλου',
5: u'Αχλάδη',
6: u'Λάζαρος'}
self.xiliosta = {1: {2: 204, 3: 159, 4: 243, 5: 120, 6: 274},
2: {2: 139, 3: 108, 4: 249, 5: 122, 6: 382},
3: {2: 204, 3: 159, 4: 243, 5: 120, 6: 274},
4: {1: 270, 2: 150, 3: 115, 4: 178, 5: 87, 6: 200}
}
self.posa = {1: 800, 2: 100}
self.distribution = {}
def get_dist(self, y, x):
if y in self.distribution.keys():
ydist = self.distribution[y]
if x in ydist.keys():
return ydist[x]
return dec(0)
def row_list(self):
return sorted(self.row_headers.keys())
def row_titles(self):
lst = []
for key in self.row_list():
lst.append(self.row_headers[key])
return lst
def column_list(self):
return sorted(self.column_headers.keys())
def col_titles(self):
lst = []
for key in self.column_list():
lst.append(self.column_headers[key])
return lst
def distribute(self):
self.distribution = {}
for key in self.posa.keys():
self.distribution[key] = dist_d(self.posa[key], self.xiliosta[key])
def print_dist(self):
self.distribute()
print(', '.join(self.column_headers.values()))
for col in self.column_list():
pass
if __name__ == '__main__':
a = ntable()
a.print_dist()
print(a.get_dist(10, 23))
|
zwChan/VATEC | ~/eb-virt/Lib/site-packages/werkzeug/useragents.py | Python | apache-2.0 | 5,418 | 0 | # -*- coding: utf-8 -*-
"""
werkzeug.useragents
~~~~~~~~~~~~~~~~~~~
This module provides a helper to inspect user agent strings. This module
is far from complete but should work for most of the currently available
browsers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
class UserAgentParser(object):
"""A simple user agent parser. Used by the `UserAgent`."""
platforms = (
('cros', 'chromeos'),
('iphone|ios', 'iphone'),
('ipad', 'ipad'),
(r'darwin|mac|os\s*x', 'macos'),
('win', 'windows'),
(r'android', 'android'),
(r'x11|lin(\b|ux)?', 'linux'),
('(sun|i86)os', 'solaris'),
(r'nintendo\s+wii', 'wii'),
('irix', 'irix'),
('hp-?ux', 'hpux'),
('aix', 'aix'),
('sco|unix_sv', 'sco'),
('bsd', 'bsd'),
('amiga', 'amiga'),
('blackberry|playbook', 'blackberry'),
('symbian', 'symbian')
)
browsers = (
('googlebot', 'google'),
('msnbot', 'msn'),
('yahoo', 'yahoo'),
('ask jeeves', 'ask'),
(r'aol|america\s+online\s+browser', 'aol'),
('opera', 'opera'),
('chrome', 'chrome'),
('firefox|firebird|phoenix|iceweasel', 'firefox'),
('galeon', 'galeon'),
('safari|version', 'safari'),
('webkit', 'webkit'),
('camino', 'camino'),
('konqueror', 'konqueror'),
('k-meleon', 'kmeleon'),
('netscape', 'netscape'),
(r'msie|microsoft\s+internet\s+explorer|trident/.+? rv:', 'msie'),
('lynx', 'lynx'),
('links', 'links'),
('seamonkey|mozilla', 'seamonkey')
)
_browser_version_re = r'(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?(?i)'
_language_re = re.compile(
r'(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|'
r'(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)'
)
def __init__(self):
self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
self.browsers = [(b, re.compile(self._browser_version_re % a))
for a, b in self.browsers]
def __call__(self, user_agent):
for platform, regex in | self.platforms:
match = regex.search(user_agent)
if match is not None:
break
else:
platform = None
for browser, regex in self.browsers:
match = regex.search(user_agent)
if match is not None:
version = match.group(1)
| break
else:
browser = version = None
match = self._language_re.search(user_agent)
if match is not None:
language = match.group(1) or match.group(2)
else:
language = None
return platform, browser, version, language
class UserAgent(object):
"""Represents a user agent. Pass it a WSGI environment or a user agent
string and you can inspect some of the details from the user agent
string via the attributes. The following attributes exist:
.. attribute:: string
the raw user agent string
.. attribute:: platform
the browser platform. The following platforms are currently
recognized:
- `aix`
- `amiga`
- `android`
- `bsd`
- `chromeos`
- `hpux`
- `iphone`
- `ipad`
- `irix`
- `linux`
- `macos`
- `sco`
- `solaris`
- `wii`
- `windows`
.. attribute:: browser
the name of the browser. The following browsers are currently
recognized:
- `aol` *
- `ask` *
- `camino`
- `chrome`
- `firefox`
- `galeon`
- `google` *
- `kmeleon`
- `konqueror`
- `links`
- `lynx`
- `msie`
- `msn`
- `netscape`
- `opera`
- `safari`
- `seamonkey`
- `webkit`
- `yahoo` *
(Browsers maked with a star (``*``) are crawlers.)
.. attribute:: version
the version of the browser
.. attribute:: language
the language of the browser
"""
_parser = UserAgentParser()
def __init__(self, environ_or_string):
if isinstance(environ_or_string, dict):
environ_or_string = environ_or_string.get('HTTP_USER_AGENT', '')
self.string = environ_or_string
self.platform, self.browser, self.version, self.language = \
self._parser(environ_or_string)
def to_header(self):
return self.string
def __str__(self):
return self.string
def __nonzero__(self):
return bool(self.browser)
__bool__ = __nonzero__
def __repr__(self):
return '<%s %r/%s>' % (
self.__class__.__name__,
self.browser,
self.version
)
# conceptionally this belongs in this module but because we want to lazily
# load the user agent module (which happens in wrappers.py) we have to import
# it afterwards. The class itself has the module set to this module so
# pickle, inspect and similar modules treat the object as if it was really
# implemented here.
from werkzeug.wrappers import UserAgentMixin # noqa
|
google/jax | tests/lax_autodiff_test.py | Python | apache-2.0 | 51,968 | 0.003964 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from functools import partial
import itertools
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
from jax import dtypes
from jax import lax
from jax._src import test_util as jtu
from jax.test_util import check_grads
from jax._src.util import prod
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
compatible_shapes = [[(3,)],
[(), (3, 4), (3, 1), (1, 4)],
[(2, 3, 4), (2, 1, 4)]]
GradTestSpec = collections.namedtuple(
"GradTestSpec",
["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
return GradTestSpec(
op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)
float_dtypes = jtu.dtypes.all_floating
inexact_dtypes = jtu.dtypes.all_inexact
grad_float_dtypes = jtu.dtypes.floating
grad_complex_dtypes = jtu.dtypes.complex
grad_inexact_dtypes = jtu.dtypes.inexact
LAX_GRAD_OPS = [
grad_test_spec(lax.neg, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.floor, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, low=0.1, high=0.4),
dtypes=grad_float_dtypes),
grad_test_spec(lax.ceil, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, low=0.1, high=0.4),
dtypes=grad_float_dtypes),
grad_test_spec(lax.round, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, low=0.1, high=0.4),
dtypes=grad_float_dtypes),
grad_test_spec(lax.exp, nargs=1, order=2, rng_factory=jtu.rand_small,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.expm1, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.log, nargs=1, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.log1p, nargs=1, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.sinh, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes + [np.complex64], tol=1e-5),
grad_test_spec(lax.cosh, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes, tol=1e-5),
grad_test_spec(lax.tanh, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes, tol=1e-5),
grad_test_spec(lax.sin, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes, tol={np.float32: 5e-1}),
grad_test_spec(lax.cos, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.tan, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, low=-1.3, high=1.3),
dtypes=grad_inexact_dtypes, tol=1e-3),
grad_test_spec(lax.asin, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, low=-1.3, high=1.3),
dtypes=grad_float_dtypes, tol=1e-3),
grad_test_spec(lax.acos, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, low=-1.3, high=1.3),
dtypes=grad_float_dtypes, tol=2e-2),
# TODO(proteneer): atan2 input is already a representation of a
# complex number. Need to think harder about what this even means
# if each input itself is a complex number.
grad_test_spec(lax.atan2, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.erf, nargs=1, order=2, rng_factory=jtu.rand_small,
dtypes=grad_float_dtypes),
grad_test_spec(lax.erfc, nargs=1, order=2, rng_factory=jtu.rand_small,
dtypes=grad_float_dtypes),
grad_test_spec(lax.erf_inv, nargs=1, order=2, rng_factory=jtu.rand_small,
dtypes=grad_float_dtypes),
# grad_test_spec(lax.lgamma, nargs=1, order=2, rng_factory=jtu.rand_small,
# dtypes=grad_float_dtypes), # TODO(mattjj): enable
grad_test_spec(lax.bessel_i0e, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.bessel_i1e, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.real, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_complex_dtypes),
grad_test_spec(lax.imag, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_complex_dtypes),
grad_test_spec(lax.complex, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.conj, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.abs, nargs=1, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.pow, nargs=2, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_inexact_dtypes, tol={np.float32: 3e-1}),
grad_test_spec(lax.sqrt, nargs=1, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_float_dtypes),
grad_test_spec(lax.sqrt, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_complex_dtypes),
grad_test_spec(lax.rsqrt, nargs=1, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_float_dtypes),
grad_test_spec(lax.rsqrt, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_complex_dtypes),
grad_test_spec(lax.cbrt, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes, tol={np.float64: 3e-5}),
grad_test_spec(lax.add, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.sub, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.mul, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.div, nargs=2, order=1, rng_factory=jtu.rand_not_small,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.max, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.min, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
# TODO(mattjj): make some-equal checks more rob | ust, enable second-order
# grad_test_spec(lax.max, nargs=2, order=1, rng_factory=jtu.rand_some_equal,
# dtypes=grad_float_dtypes, name="MaxSomeEqual"),
# grad_test_spec(lax.min, nargs=2, order=1, rng_factory=jtu.rand_some_equal,
# dtypes=grad_float_dtypes, name="MinSomeEqual"),
]
GradSpecialValuesTestSpec = collections.namedtuple(
"GradSpecialValuesTestSpec", ["op", "values", "tol"])
def grad_special_values_test_spec(op, values, tol=None):
return Gra | dSpecialValuesTestSpec(op, values, tol)
LAX_GRAD_SPECIAL_VALUE_TESTS = [
grad_special_values_test_spec(
lax.sinh, [0.],
tol={np.float32: 1e-2} if jtu.device_under_test() == "tpu" else None),
grad_special_values_test_spec(
lax.cosh, [0.],
tol={np.float32: 1e-2} if jtu.device_under_test() == "tpu" els |
nylas/sync-engine | inbox/scheduling/queue.py | Python | agpl-3.0 | 6,779 | 0 | """This module contains code for managing Redis-backed account sync allocation.
Structurally, that works like this:
Redis list Redis hash
(acct ids to be (acct ids -> proc ids)
synced) +---------+---------+
+------------+ +----+----+----+ | 33 | 42 |
| mysql db | -------- -> | 44 | 37 | 22 | ----> +---------+---------+
+------------+ | +----+----+----+ | | hostA:3 | hostB:7 |
| | +---------+---------+
QueuePopulator SyncService
The QueuePopulator is responsible for pulling all syncable account ids from the
core mailsync MySQL database. It populates a Redis queue with any account ids
not currently being synced. Individual sync processes pull account ids off of
this queue, and claim ownership by updating a Redis hash that maps account
ids to process identifiers. We use a bit of Redis Lua scripting to ensure that
this happens atomically.
"""
import gevent
import itertools
from inbox.config import config
from inbox.ignition import engine_ma | nager
from inbox.models | .session import session_scope_by_shard_id
from inbox.models import Account
from inbox.util.concurrency import retry_with_logging
from inbox.util.stats import statsd_client
from nylas.logging import get_logger
from redis import StrictRedis
log = get_logger()
SOCKET_CONNECT_TIMEOUT = 5
SOCKET_TIMEOUT = 5
class QueueClient(object):
"""Interface to a Redis queue/hashmap combo for managing account sync
allocation.
"""
# Lua scripts for atomic assignment and conflict-free unassignment.
ASSIGN = '''
local k = redis.call('RPOP', KEYS[1])
if k then
local s = redis.call('HSETNX', KEYS[2], k, ARGV[1])
if s then
return k
end
end'''
UNASSIGN = '''
if redis.call('HGET', KEYS[1], KEYS[2]) == ARGV[1] then
return redis.call('HDEL', KEYS[1], KEYS[2])
else
return 0
end
'''
def __init__(self, zone):
self.zone = zone
redis_host = config['ACCOUNT_QUEUE_REDIS_HOSTNAME']
redis_db = config['ACCOUNT_QUEUE_REDIS_DB']
self.redis = StrictRedis(host=redis_host, db=redis_db,
socket_connect_timeout=SOCKET_CONNECT_TIMEOUT,
socket_timeout=SOCKET_TIMEOUT)
def all(self):
"""
Returns all keys being tracked (either pending in the queue, or
already assigned).
"""
p = self.redis.pipeline(transaction=True)
p.hgetall(self._hash)
p.lrange(self._queue, 0, -1)
unassigned, assigned = p.execute()
return {int(k) for k in itertools.chain(unassigned, assigned)}
def assigned(self):
"""
Returns a dictionary of all currently assigned key/value pairs (keys
are coerced to integers).
"""
return {int(k): v for k, v in self.redis.hgetall(self._hash).items()}
def enqueue(self, key):
"""
Adds a new key onto the pending queue.
"""
self.redis.lpush(self._queue, key)
def claim_next(self, value):
"""
Pulls the next key off of the pending queue (if any exists), and sets
it to `value` in the hash. Returns None if the queue is empty or if the
key is already present in the hash; otherwise returns the key.
"""
s = self.redis.register_script(self.ASSIGN)
return s(keys=[self._queue, self._hash], args=[value])
def unassign(self, key, value):
"""
Removes `key` from the hash, if and only if it is present and set to
`value` (to prevent removing a key actually assigned to someone else).
"""
s = self.redis.register_script(self.UNASSIGN)
return s(keys=[self._hash, key], args=[value])
def qsize(self):
"""
Returns current length of the queue.
"""
return self.redis.llen(self._queue)
@property
def _queue(self):
return 'unassigned_{}'.format(self.zone)
@property
def _hash(self):
return 'assigned_{}'.format(self.zone)
class QueuePopulator(object):
"""
Polls the database for account ids to sync and queues them. Run one of
these per zone.
"""
def __init__(self, zone, poll_interval=1):
self.zone = zone
self.poll_interval = poll_interval
self.queue_client = QueueClient(zone)
self.shards = []
for database in config['DATABASE_HOSTS']:
if database.get('ZONE') == self.zone:
shard_ids = [shard['ID'] for shard in database['SHARDS']]
self.shards.extend(shard_id for shard_id in shard_ids
if shard_id in engine_manager.engines)
def run(self):
log.info('Queueing accounts', zone=self.zone, shards=self.shards)
while True:
retry_with_logging(self._run_impl)
def _run_impl(self):
self.enqueue_new_accounts()
self.unassign_disabled_accounts()
statsd_client.gauge('syncqueue.queue.{}.length'.format(self.zone),
self.queue_client.qsize())
statsd_client.incr('syncqueue.service.{}.heartbeat'.
format(self.zone))
gevent.sleep(self.poll_interval)
def enqueue_new_accounts(self):
"""
Finds any account ids that should sync, but are not currently being
tracked by the QueueClient. Enqueue them. (Note: it's okay to enqueue
the same id twice. QueueClient.claim_next will identify and discard
duplicates.)
"""
new_accounts = self.runnable_accounts() - self.queue_client.all()
for account_id in new_accounts:
log.info('Enqueuing new account', account_id=account_id)
self.queue_client.enqueue(account_id)
def unassign_disabled_accounts(self):
runnable_accounts = self.runnable_accounts()
disabled_accounts = {
k: v for k, v in self.queue_client.assigned().items()
if k not in runnable_accounts
}
for account_id, sync_host in disabled_accounts.items():
log.info('Removing disabled account', account_id=account_id)
self.queue_client.unassign(account_id, sync_host)
def runnable_accounts(self):
accounts = set()
for key in self.shards:
with session_scope_by_shard_id(key) as db_session:
accounts.update(
id_ for id_, in db_session.query(Account.id).filter(
Account.sync_should_run))
return accounts
|
Zsailer/latticeproteins | latticeproteins/sequences.py | Python | gpl-3.0 | 4,953 | 0.002827 | #!/usr/bin/python
# Begin sequences.py
#---------------------------------------------------------------------------
"""
Originally written by Jesse Bloom, 2004.
Updated by Zach Sailer, 2017."""
#---------------------------------------------------------------------------
import random, shelve, os
#---------------------------------------------------------------------------
class SequenceError(Exception):
"""Error with a lattice protein sequence."""
pass
#---------------------------------------------------------------------------
# codes for all residues
_residues = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P',
'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
assert len(_residues) == 20
def hamming_distance(seq1, seq2):
"""Returns the Hamming distance between two sequences."""
if len(seq1) != len(seq2):
raise SequenceError("Sequences differ in length.")
d = 0
for i in range(len(seq1)):
if seq1[i] != seq2[i]:
d += 1
return d
def find_differences(s1, s2):
"""Return the index of differences between two sequences."""
indices = list()
for i in range(len(s1)):
if s1[i] != s2[i]:
indices.append(i)
return indices
def random_sequence(length):
"""Returns a random sequence of the specified length."""
if not (isinstance(length, int) and length > 0):
raise SequenceError("Invalid sequence length of %r." % length)
s = [random.choice(_residues) for i in range(length)]
return s
def mutate_sequence(seq, mutrate):
"""Mutates a protein sequence.
Parameters
----------
seq :
is a protein sequence, specified as either a string or a list.
mutrate :
Mutates each residue in 'seq' to some different residue with
probability 'mutrate'. So 'mutrate' is the per residue
mutation rate.
Returns
-------
newseq :
the new sequence as a list."""
| mutated = False
for ires in range(len(seq)):
if random.random() < mutrate:
if not mutated:
mutated = True
newseq = list(seq)
newres = random.choice(_residues)
while newres == seq[ires]:
newres = random.choice(_residues)
newseq[ires] = newres
if mutated:
return newseq
else | :
return seq
def n_mutants(seq, nmutations, nsequences):
"""Returns sequences with a specified number of mutations.
Parameters
----------
seq :
is a string or list specifying the protein we wish to mutate.
nmutations :
is the number of mutations each mutant of 'seq' should
have. It must be <= 'len(seq)' and > 0.
nsequences :
is the number of mutant sequences to make. It can be
'ALL', in which case we make all possible mutants with 'nmutations',
or it can be some positive integer in which case we make this
many randomly chosen mutants with 'nmutations' mutations.
'ALL' is only a valid option only when 'nmutations' is 1 or 2.
Return
------
seqlist : list
List of mutant sequences n mutations away.
"""
if not (0 < nmutations <= len(seq)):
raise SequenceError("Invalid 'nmutations' of %r." % nmutations)
seqlist = []
if nsequences == 'ALL':
if nmutations == 1:
for ires in range(len(seq)):
for mutres in _residues:
if mutres != seq[ires]:
newseq = list(seq)
newseq[ires] = mutres
seqlist.append(newseq)
elif nmutations == 2:
for ires in range(len(seq)):
for imutres in _residues:
if imutres != seq[ires]:
for jres in range(ires + 1, len(seq)):
for jmutres in _residues:
if jmutres != seq[jres]:
newseq = list(seq)
newseq[ires] = imutres
newseq[jres] = jmutres
seqlist.append(newseq)
else:
raise SequenceError("'nsequences' cannot be 'ALL' when 'nmutations' is %r." % nmutations)
elif isinstance(nsequences, int) and nsequences > 0:
for imutant in range(nsequences):
newseq = list(seq)
for imut in range(nmutations):
ires = random.choice(range(len(seq)))
while newseq[ires] != seq[ires]:
ires = random.choice(range(len(seq)))
mutres = random.choice(_residues)
while mutres == seq[ires]:
mutres = random.choice(_residues)
newseq[ires] = mutres
seqlist.append(newseq)
else:
raise SequenceError("Invalid 'nsequences' of %r." % nsequences)
return seqlist
|
tysonholub/twilio-python | tests/integration/taskrouter/v1/workspace/worker/test_reservation.py | Python | mit | 9,315 | 0.003972 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ReservationTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.reservations.list()
self.holodeck.assert_has_request(Request(
'get',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Workers/WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Reservations',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations?PageSize=50&Page=0",
"key": "reservations",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations?PageSize=50&Page=0"
},
"reservations": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2014-05-14T10:50:02Z",
"date_updated": "2014-05-15T16:03:42Z",
"links": | {
"task": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaa | aaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"worker": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
},
"reservation_status": "accepted",
"sid": "WRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"task_sid": "WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations/WRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"worker_name": "Doug",
"worker_sid": "WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
]
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.reservations.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations?PageSize=50&Page=0",
"key": "reservations",
"next_page_url": null,
"page": 0,
"page_size": 50,
"previous_page_url": null,
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations?PageSize=50&Page=0"
},
"reservations": []
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.reservations.list()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.reservations(sid="WRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Workers/WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Reservations/WRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2014-05-14T10:50:02Z",
"date_updated": "2014-05-15T16:03:42Z",
"links": {
"task": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"worker": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
},
"reservation_status": "accepted",
"sid": "WRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"task_sid": "WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Reservations/WRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"worker_name": "Doug",
"worker_sid": "WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.reservations(sid="WRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.reservations(sid="WRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Workers/WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Reservations/WRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2014-05-14T10:50:02Z",
"date_updated": "2014-05-15T16:03:42Z",
"links": {
"task": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Tasks/WTaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"worker": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaa |
by46/geek | basic/yield_t.py | Python | mit | 205 | 0.009756 | class Demo(object):
@property
def names(self):
for i in range(10): |
yield i
if __name__ == '__main__':
demo = Demo()
for name | in demo.names:
print(name) |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-512-transformer/transformer/layers/modalities_test.py | Python | apache-2.0 | 1,941 | 0.002061 | """Tests for Modalities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from layers import common_hparams
from layers import modalities
class ModalityTest(tf.test.TestCase):
def testSymbolModalityInputs(self):
batch_size = 10
length = 5
vocab_size = 5000
hidden_size = 9
model_hparams = common_hparams.basic_params1()
model_hparams.hidden_size = hidden_size
model_hparams.mode = tf.estimator.ModeKeys.TRAIN
x = -1 + np.random.random_integers(
vocab_size, size=(batch_size, length, 1, 1))
m = modalities.SymbolModality(model_hparams, vocab_size)
output = m.bottom(tf.convert_to_tensor(x))
init = self.evaluate(tf.global_variables_initializer())
res = self.evaluate(output)
tf.logging.info(init)
self.assertEqual(res.shape, (batch_size, length, 1, hidden_size))
def testSymbolModalityTargets(self):
batch_size = 10
length = 6
height = 7
hidden_size = 9
vocab_size = 11
model_hparams = common_hparams.basic_params1()
model_hparams.hidden_size = hidden_size
model_hparams.mode = tf.estimator.ModeKeys.TRAIN
body_output = -1 + np.random.random_integers(
100, size=(batch_size, length, height, hidden_size))
targets = -1 + np.random.random_integers(
vocab_size, size=(batch_size, length, height, 1))
m = modalities.SymbolModality(model_hparams, vocab_size)
logits = m.top(tf.to_float(body_output), targets)
train_loss_num, train_loss_den = m.loss(logits, targets)
train_loss = train_loss_num / tf.maximum(1.0, train_loss_den)
self.evaluate(tf.global_variables_initializer())
res1, res2 = self.evaluate((logits, train_loss))
self.assertEqual(res1.shape, (batch_size, length, | height, 1, vocab_size))
self.assertEqual(res2.shape, ())
if __name__ == "__main__":
tf. | test.main()
|
lizaifang/yubico-yubiserve | yubiserve.py | Python | gpl-3.0 | 18,768 | 0.029039 | #!/usr/bin/env python
import re, os, time, socket, sys, signal
import urlparse, SocketServer, urllib, BaseHTTPServer
from Crypto.Cipher import AES
from OpenSSL import SSL
import hmac, hashlib
from threading import Thread
isThereDatabaseSupport = False
try:
import MySQLdb
isThereDatabaseSupport = True
except ImportError:
pass
try:
import sqlite3
isThereDatabaseSupport = True
except ImportError:
pass
try:
import sqlite
isThereDatab | aseSupport = True
except ImportErro | r:
pass
if isThereDatabaseSupport == False:
print "Cannot continue without any database support.\nPlease read README.\n\n"
sys.exit(1)
def parseConfigFile(): # Originally I wrote this function to parse PHP configuration files!
config = open(os.path.dirname(os.path.realpath(__file__)) + '/yubiserve.cfg', 'r').read().splitlines()
keys = {}
for line in config:
match = re.search('(.*?)=(.*);', line)
try: # Check if it's a string or a number
if ((match.group(2).strip()[0] != '"') and (match.group(2).strip()[0] != '\'')):
keys[match.group(1).strip()] = int(match.group(2).strip())
else:
keys[match.group(1).strip()] = match.group(2).strip('"\' ')
except:
pass
return keys
config = parseConfigFile()
class OATHValidation():
def __init__(self, connection):
self.status = {'OK': 1, 'BAD_OTP': 2, 'NO_AUTH': 3, 'NO_CLIENT': 5}
self.validationResult = 0
self.con = connection
def testHOTP(self, K, C, digits=6):
counter = ("%x"%C).rjust(16,'0').decode('hex') # Convert it into 8 bytes hex
HS = hmac.new(K, counter, hashlib.sha1).digest()
offset = ord(HS[19]) & 0xF
# It doesn't look pretty, but it is optimized! :D
bin_code = int((chr(ord(HS[offset]) & 0x7F) + HS[offset+1:offset+4]).encode('hex'),16)
return str(bin_code)[-digits:]
def validateOATH(self, OATH, publicID):
cur = self.con.cursor()
cur.execute("SELECT counter, secret FROM oathtokens WHERE publicname = '" + publicID + "' AND active = '1'")
res = cur.fetchone()
if not res:
validationResult = self.status['BAD_OTP']
return validationResult
(actualcounter, key) = res
if len(OATH) % 2 != 0:
self.validationResult = self.status['BAD_OTP']
return self.validationResult
K = key.decode('hex') # key
for C in range(actualcounter+1, actualcounter+256):
if OATH == self.testHOTP(K, C, len(OATH)):
cur.execute("UPDATE oathtokens SET counter = " + str(C) + " WHERE publicname = '" + publicID + "' AND active = '1'")
self.con.commit()
return self.status['OK']
return self.status['NO_AUTH']
class OTPValidation():
def __init__(self, connection):
self.status = {'OK': 1, 'BAD_OTP': 2, 'REPLAYED_OTP': 3, 'DELAYED_OTP': 4, 'NO_CLIENT': 5}
self.validationResult = 0
self.con = connection
def hexdec(self, hex):
return int(hex, 16)
def modhex2hex(self, string):
hex = "0123456789abcdef"
modhex = "cbdefghijklnrtuv"
retVal = ''
for i in range (0, len(string)):
pos = modhex.find(string[i])
if pos > -1:
retVal += hex[pos]
else:
raise Exception, '"' + string[i] + '": Character is not a valid hex string'
return retVal
def CRC(self):
crc = 0xffff;
for i in range(0, 16):
b = self.hexdec(self.plaintext[i*2] + self.plaintext[(i*2)+1])
crc = crc ^ (b & 0xff)
for j in range(0, 8):
n = crc & 1
crc = crc >> 1
if n != 0:
crc = crc ^ 0x8408
self.OTPcrc = crc
return [crc]
def isCRCValid(self):
return (self.OTPcrc == 0xf0b8)
def aes128ecb_decrypt(self, aeskey, aesdata):
return AES.new(aeskey.decode('hex'), AES.MODE_ECB).decrypt(aesdata.decode('hex')).encode('hex')
def getResult(self):
return self.validationResult
def getResponse(self):
return self.validationResponse
def validateOTP(self, OTP):
global config
self.OTP = re.escape(OTP)
self.validationResult = 0
if (len(OTP) <= 32) or (len(OTP) > 48):
self.validationResult = self.status['BAD_OTP']
return self.validationResult
match = re.search('([cbdefghijklnrtuv]{0,16})([cbdefghijklnrtuv]{32})', re.escape(OTP))
if match == None:
print "OTP does not match expected syntax.\n"
sys.stdout.flush()
self.validationResult = self.status['BAD_OTP']
return self.validationResult
try:
if match.group(1) and match.group(2):
self.userid = match.group(1)
self.token = self.modhex2hex(match.group(2))
# pdb.set_trace()
cur = self.con.cursor()
cur.execute('SELECT aeskey, internalname FROM yubikeys WHERE publicname = "' + self.userid + '" AND active = "1"')
res = cur.fetchone()
if not res:
if config['yubiserveDebugLevel'] > 0:
print "Yubikey rejected because it is not found in the database, using the query: 'SELECT aeskey, internalname FROM yubikeys WHERE publicname = \"%s\" AND active = \"1\"'" % (self.userid)
self.validationResult = self.status['BAD_OTP']
return self.validationResult
(self.aeskey, self.internalname) = res
self.plaintext = self.aes128ecb_decrypt(self.aeskey, self.token)
uid = self.plaintext[:12]
if (self.internalname != uid):
if config['yubiserveDebugLevel'] > 0:
print "Yubikey rejected because the uid (6 byte secret) in the decrypted AES key (set with with ykpersonalise -ouid) does not match the secret key (internalname) in the database"
print "Decrypted AES: %s\n Username from yubikey: %s should equal the database username: %s" % (self.plaintext, uid, self.internalname)
self.validationResult = self.status['BAD_OTP']
return self.validationResult
if not self.CRC() or not self.isCRCValid():
self.validationResult = self.status['BAD_OTP']
return self.validationResult
self.internalcounter = self.hexdec(self.plaintext[14:16] + self.plaintext[12:14] + self.plaintext[22:24])
self.timestamp = self.hexdec(self.plaintext[20:22] + self.plaintext[18:20] + self.plaintext[16:18])
cur.execute('SELECT counter, time FROM yubikeys WHERE publicname = "' + self.userid + '" AND active = "1"')
res = cur.fetchone()
if not res:
self.validationResult = self.status['BAD_OTP']
return self.validationResult
(self.counter, self.time) = res
if (self.counter) >= (self.internalcounter):
self.validationResult = self.status['REPLAYED_OTP']
return self.validationResult
if (self.time >= self.timestamp) and ((self.counter >> 8) == (self.internalcounter >> 8)):
self.validationResult = self.status['DELAYED_OTP']
return self.validationResult
except IndexError:
self.validationResult = self.status['BAD_OTP']
return self.validationResult
self.validationResult = self.status['OK']
cur.execute('UPDATE yubikeys SET counter = ' + str(self.internalcounter) + ', time = ' + str(self.timestamp) + ' WHERE publicname = "' + self.userid + '"')
self.con.commit()
return self.validationResult
class DB():
conn = None
cur = None
def fetchone(self):
return (self.cur.fetchone())
def commit(self):
return self.conn.commit()
def __init__(self):
self.connect()
def connect(self):
self.conn = MySQLdb.connect(host=config['yubiMySQLHost'], user=config['yubiMySQLUser'], passwd=config['yubiMySQLPass'], db=config['yubiMySQLName'])
def cursor(self):
self.cur = self.conn.cursor()
return self
def execute(self, sql):
try:
self.cur.execute(sql)
except MySQLdb.Error, e:
if e[0] == 2006:
print e[1]
self.cur.close()
self.connect()
self.cursor()
self.cur.execute(sql)
else:
print "unhandled MySQL exception"
print e
sys.exit(1)
except Exception, e:
print "unhandled exception"
print repr(e)
return self
class YubiServeHandler (BaseHTTPServer.BaseHTTPRequestHandler):
__base = BaseHTTPServer.BaseHTTPRequestHandler
__base_handle = __base.handle
server_version = 'Yubiserve/3.1'
global config
#try:
if config['yubiDB'] == 'sqlite3':
con = sqlite3.connect(os.path.dirname(os.path.realpath(__file__)) + '/yubikeys.sqlite3', check_same_thread = False)
elif config['yubiDB'] == 'sqlite':
con = sqlite.connect(os.path.dirname(os.path.realpath(__file__)) + '/yubikey |
corywalker/selenium-crawler | seleniumcrawler/filesystem.py | Python | mit | 846 | 0.001182 | # Global modules
import os
# Local modules
from seleniumcrawler.config import sites_dict
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
SITES_DIR = os.path.join(THIS_DIR, 'sites')
def locate_sites():
location_list = []
for site, re | gex in sites_dict.items():
this_site_dir = os.path.join(SITES_DIR, site)
# This is only the EXPECTED script name.
# All scripts should follow this convention.
script_name = site + '_raw.py'
script_path = os.path.join(this_site_dir, script_name)
config_path = os.path.join(this_site_dir, 'config.py')
location_dict = {
'name': site,
'script_path': script_path,
'config_path': config_path,
| 'site_dir': this_site_dir
}
location_list.append(location_dict)
return location_list
|
suttond/MODOI | ase/ga/population.py | Python | lgpl-3.0 | 15,749 | 0.001016 | """ Implementaiton of a population for maintaining a GA population and
proposing structures to pair. """
from random import randrange, random
from math import tanh, sqrt, exp
from operator import itemgetter
import numpy as np
from ase.db.core import now
def count_looks_like(a, all_cand, comp):
"""Utility method for counting occurences."""
n = 0
for b in all_cand:
if a.info['confid'] == b.info['confid']:
continue
if comp.looks_like(a, b):
n += 1
return n
class Population(object):
"""Population class which maintains the current population
and proposes which candidates to pair together.
Parameters:
data_connection: DataConnection object
Bla bla bla.
population_size: int
The number of candidates in the population.
comparator: Comparator object
this will tell if two configurations are equal.
Default compare atoms objects directly.
logfile: str
Text file that contains information about the population
The format is::
timestamp: generation(if available): id1,id2,id3...
Using this file greatly speeds up convergence checks.
Default None meaning that no file is written.
use_extinct: boolean
Set this to True if mass extinction and the extinct key
are going to be used. Default is False.
"""
def __init__(self, data_connection, population_size,
comparator=None, logfile=None, use_extinct=False):
self.dc = data_connection
self.pop_size = population_size
if comparator is None:
from ase.ga.standard_comparators import AtomsComparator
comparator = AtomsComparator()
self.comparator = comparator
self.logfile = logfile
self.use_extinct = use_extinct
self.pop = []
self.pairs = None
self.all_cand = None
self.__initialize_pop__()
def __initialize_pop__(self):
""" Private method that initalizes the population when
the population is created. """
# Get all relaxed candidates from the database
ue = self.use_extinct
all_cand = self.dc.get_all_relaxed_candidates(use_extinct=ue)
all_cand.sort(key=lambda x: x.get_raw_score(), reverse=True)
# all_cand.sort(key=lambda x: x.get_potential_energy())
# Fill up the population with the self.pop_size most stable
# unique candidates.
i = 0
while i < len(all_cand) and len(self.pop) < self.pop_size:
c = all_cand[i]
i += 1
eq = False
for a in self.pop:
if self.comparator.looks_like(a, c):
eq = True
break
if not eq:
self.pop.append(c)
for a in self.pop:
a.info['looks_like'] = count_looks_like(a, all_cand,
self.comparator)
self.all_cand = all_cand
self.__calc_participation__()
def __calc_participation__(self):
""" Determines, from the database, how many times each
candidate has been used to generate new candidates. """
(participation, pairs) = self.dc.get_participation_in_pairing()
for a in self.pop:
if a.info['confid'] in participation.keys():
a.info['n_paired'] = participation[a.info['confid']]
else:
a.info['n_paired'] = 0
self.pairs = pairs
def update(self, new_cand=None):
""" New candidates can be added to the database
after the population object has been created.
This method extracts these new candidates from the
database and includes them in the population. """
if len(self.pop) == 0:
self.__initialize_pop__()
if new_cand is None:
ue = self.use_extinct
new_cand = self.dc.get_all_relaxed_candidates(only_new=True,
use_extinct=ue)
for a in new_cand:
self.__add_candidate__(a)
self.all_cand.append(a)
self.__calc_participation__()
self._write_log()
def get_current_population(self):
""" Returns a copy of the current population. """
self.update()
return [a.copy() for a in self.pop]
def get_population_after_generation(self, gen):
""" Returns a copy of the population as it where
after generation gen"""
if self.logfile is not None:
f = open(self.logfile, 'r')
gens = {}
for l in f:
_, no, popul = l.split(':')
gens[int(no)] = [int(i) for i in popul.split(',')]
f.close()
return [c.copy() for c in self.all_cand[::-1]
if c.info['relax_id'] in gens[gen]]
all_candidates = [c for c in self.all_cand
if c.info['key_value_pairs']['generation'] <= gen]
cands = [all_candidates[0]]
for b in all_candidates:
if b not in cands:
for a in cands:
if self.comparator.looks_like(a, b):
break
else:
cands.append(b)
pop = cands[:self.pop_size]
return [a.copy() for a in pop]
def __add_candidate__(self, a):
""" Adds a single candidate to the population. """
# check if the structure is too low in raw score
if a.get_raw_score() < self.pop[-1].get_raw_score() \
and len(self.pop) == self.pop_size:
return
# check if the new candidate should
# replace a similar structure in the population
for (i, b) in enumerate(self.pop):
if self.comparator.looks_like(a, b):
if b.get_raw_score() < a.get_raw_score():
del self.pop[i]
a.info['looks_like'] = count_looks_like(a,
self.all_cand,
self.comparator)
self.pop.append(a)
self.pop.sort(key=lambda x: x.get_raw_score(),
reverse=True)
return
# the new candidate needs to be added, so remove the highest
# energy one
if len(self.pop) == self.pop_size:
del self.pop[-1]
# add the new candidate
a.info['looks_like'] = count_looks_like(a,
self.all_cand,
self.comparator)
self.pop.append(a)
self.pop.sort(key=lambda x: x.get_raw_score(), reverse=True)
def __get_fitness__(self, indecies, with_history=True):
"""Calculates the fitness using the formula from
L.B. Vilhelmsen et al., JACS, 2012, 134 (30), pp 12807-12816
Sign change on the fitness compared to the formulation in th | e
abovementioned paper due to maximizing raw_score instead of
minimizing energy. (Set raw_score=-energy to optimize the energy)
"""
scores = [x.get_raw_score() for x in self.pop]
min_s = | min(scores)
max_s = max(scores)
T = min_s - max_s
if isinstance(indecies, int):
indecies = [indecies]
f = [0.5 * (1. - tanh(2. * (scores[i] - max_s) / T - 1.))
for i in indecies]
if with_history:
M = [float(self.pop[i].info['n_paired']) for i in indecies]
L = [float(self.pop[i].info['looks_like']) for i in indecies]
f = [f[i] * 1. / sqrt(1. + M[i]) * 1. / sqrt(1. + L[i])
for i in range(len(f))]
return f
def get_two_candidates(self, with_history=True):
""" Returns two candidates for pairing employing the
fitness criteria from
L.B. Vilhelmsen et al., JACS, 2012, 134 (30), pp 12807-12816
and the roulete wheel selection scheme described in
|
saleemjaveds/https-github.com-openstack-nova | nova/tests/api/openstack/compute/contrib/test_admin_actions.py | Python | apache-2.0 | 25,222 | 0.000079 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import common
from nova.api.openstack.compute.contrib import admin_actions
from nova.compute import vm_states
import nova.context
from nova import exception
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
class CommonMixin(object):
def setUp(self):
super(CommonMixin, self).setUp()
self.controller = admin_actions.AdminActionsController()
self.compute_api = self.controller.compute_api
self.context = nova.context.RequestContext('fake', 'fake')
def _fake_controller(*args, **kwargs):
return self.controller
self.stubs.Set(admin_actions, 'AdminActionsController',
_fake_controller)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Admin_actions'])
self.app = fakes.wsgi_app(init_only=('servers',),
fake_auth_context=self.context)
self.mox.StubOutWithMock(self.compute_api, 'get')
def _make_request(self, url, body):
req = webob.Request.blank('/v2/fake' + url)
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.content_type = 'application/json'
return req.get_response(self.app)
def _stub_instance_get(self, uuid=None):
if uuid is None:
uuid = uuidutils.generate_uuid()
instance = fake_instance.fake_db_instance(
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), instance)
self.compute_api.get(self.context, uuid, expected_attrs=None,
want_objects=True).AndReturn(instance)
return instance
def _stub_instance_get_failure(self, exc_info, uuid=None):
if uuid is None:
uuid = uuidutils.generate_uuid()
self.compute_api.get(self.context, uuid, expected_attrs=None,
want_objects=True).AndRaise(exc_info)
return uuid
def _test_non_existing_instance(self, action, body_map=None):
uuid = uuidutils.generate_uuid()
self._stub_instance_get_failure(
exception.InstanceNotFound(instance_id=uuid), uuid=uuid)
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % uuid,
{action: body_map.get(action)})
self.assertEqual(404, res.status_int)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
def _test_action(self, action, body=None, method=None):
if method is None:
method = action
instance = self._stub_instance_get()
getattr(self.compu | te_api, method)(self.context, instance)
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance['uuid'],
| {action: None})
self.assertEqual(202, res.status_int)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
def _test_invalid_state(self, action, method=None, body_map=None,
compute_api_args_map=None):
if method is None:
method = action
if body_map is None:
body_map = {}
if compute_api_args_map is None:
compute_api_args_map = {}
instance = self._stub_instance_get()
args, kwargs = compute_api_args_map.get(action, ((), {}))
getattr(self.compute_api, method)(self.context, instance,
*args, **kwargs).AndRaise(
exception.InstanceInvalidState(
attr='vm_state', instance_uuid=instance['uuid'],
state='foo', method=method))
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance['uuid'],
{action: body_map.get(action)})
self.assertEqual(409, res.status_int)
self.assertIn("Cannot \'%s\' while instance" % action, res.body)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
def _test_locked_instance(self, action, method=None, body_map=None,
compute_api_args_map=None):
if method is None:
method = action
instance = self._stub_instance_get()
args, kwargs = compute_api_args_map.get(action, ((), {}))
getattr(self.compute_api, method)(self.context, instance,
*args, **kwargs).AndRaise(
exception.InstanceIsLocked(instance_uuid=instance['uuid']))
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance['uuid'],
{action: body_map.get(action)})
self.assertEqual(409, res.status_int)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
class AdminActionsTest(CommonMixin, test.NoDBTestCase):
def test_actions(self):
actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
'resetNetwork', 'injectNetworkInfo', 'lock',
'unlock']
method_translations = {'migrate': 'resize',
'resetNetwork': 'reset_network',
'injectNetworkInfo': 'inject_network_info'}
for action in actions:
method = method_translations.get(action)
self.mox.StubOutWithMock(self.compute_api, method or action)
self._test_action(action, method=method)
# Re-mock this.
self.mox.StubOutWithMock(self.compute_api, 'get')
def test_actions_raise_conflict_on_invalid_state(self):
actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
'os-migrateLive']
method_translations = {'migrate': 'resize',
'os-migrateLive': 'live_migrate'}
body_map = {'os-migrateLive':
{'host': 'hostname',
'block_migration': False,
'disk_over_commit': False}}
args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
for action in actions:
method = method_translations.get(action)
self.mox.StubOutWithMock(self.compute_api, method or action)
self._test_invalid_state(action, method=method, body_map=body_map,
compute_api_args_map=args_map)
# Re-mock this.
self.mox.StubOutWithMock(self.compute_api, 'get')
def test_actions_with_non_existed_instance(self):
actions = ['pause', 'unpause', 'suspend', 'resume',
'resetNetwork', 'injectNetworkInfo', |
bigbangdev/cityhelpdeskdjango | cityhelpdesk/utility/serializers.py | Python | gpl-3.0 | 333 | 0 | # coding=utf-8
"""
Utility Serializers
"""
from rest_framework.serializer | s import HyperlinkedModelSerializer
class HybridModelSerializer(HyperlinkedModelSerializer):
"""
ModelSerializer which provides both a `url` and `id` field
"""
def get_pk_field(self, model_field) | :
return self.get_field(model_field)
|
daniel-noland/config | root/home/dnoland/.bin/gdb.py | Python | mit | 214 | 0 | #!/ | usr/bin/gdb --eval-command=python
# vim: set filetype=python:
import sys
import gdb
gdb.execute("file " + sys.argv[1])
t = gdb.Type(sys.argv[0])
print("sizeof " + str(sys.argv[0]) + " = " + str(t.sizeof()) | )
end
|
fbradyirl/home-assistant | tests/components/demo/test_calendar.py | Python | apache-2.0 | 49 | 0 | """The | tests for the demo | calendar component."""
|
absoludity/django-financial-transactions | financial_transactions/tests/test_models.py | Python | bsd-3-clause | 664 | 0 | import datetime
from decimal import Decimal
from django_factory import TestCase
from financial_transactions.models import (
Transaction,
)
class TransactionTestCase(TestCase):
def tes | t_unicode(self):
trans = self.factory.make_one(
Transaction, memo=u'S | ublime purchase',
date=datetime.date(2013, 2, 5), amount=Decimal('59.95'),
currency=u'EUR')
self.assertEqual(u'2013-02-05 59.95 EUR - Sublime purchase',
unicode(trans))
def test_factory_makes_category(self):
transaction = self.factory.make_one(Transaction)
self.assertIsNotNone(transaction.category)
|
bally12345/enigma2 | lib/python/Plugins/SystemPlugins/Videomode/VideoWizard.py | Python | gpl-2.0 | 5,933 | 0.034384 | from Screens.Wizard import WizardSummary
from Screens.WizardLanguage import WizardLanguage
from Screens.Rc import Rc
from VideoHardware import video_hw
from Components.Pixmap import Pixmap, MovingPixmap, MultiPixmap
from Components.config import config, ConfigBoolean, configfile
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from Tools.HardwareInfo import HardwareInfo
config.misc.showtestcard = ConfigBoolean(default = False)
class VideoWizardSummary(WizardSummary):
def __init__(self, session, parent):
WizardSummary.__init__(self, session, parent)
def setLCDPicCallback(self):
self.parent.setLCDTextCallback(self.setText)
def setLCDPic(self, file):
self["pic"].instance.setPixmapFromFile(file)
class VideoWizard(WizardLanguage, Rc):
skin = """
<screen position="fill" title="Welcome..." flags="wfNoBorder" >
<panel name="WizardMarginsTemplate"/>
<panel name="WizardPictureLangTemplate"/>
<panel name="RemoteControlTemplate"/>
<panel position="left" size="10,*" />
<panel position="right" size="10,*" />
<panel position="fill">
<widget name="text" position="top" size="*,270" font="Regular;23" valign="center" />
<panel position="fill">
<panel position="left" size="150,*">
<widget name="portpic" position="top" zPosition="10" size="150,150" transparent="1" alphatest="on"/>
</panel>
<panel position="fill" layout="stack">
<widget source="list" render="Listbox" position="fill" scrollbarMode="showOnDemand" >
<convert type="StringList" />
</widget>
<!--<widget name="config" position="fill" zPosition="1" scrollbarMode="showOnDemand" />-->
</panel>
</panel>
</panel>
</screen>"""
def __init__(self, session):
# FIXME anyone knows how to use relative paths from the plugin's directory?
self.xmlfile = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/Videomode/videowizard.xml")
self.hw = video_hw
WizardLanguage.__init__(self, session, showSteps = False, showStepSlider = False)
Rc.__init__(self)
self["wizard"] = Pixmap()
self["portpic"] = Pixmap()
self.port = None
self.mode = None
self.rate = None
def createSummary(self):
print "++++++++++++***++**** VideoWizard-createSummary"
from Screens.Wizard import WizardSummary
return VideoWizardSummary
def markDone(self):
config.misc.videowizardenabled.value = 0
config.misc.videowizardenabled.save()
configfile.save()
def listInputChannels(self):
hw_type = HardwareInfo().get_device_name()
has_hdmi = HardwareInfo().has_hdmi()
list = []
for port in self.hw.getPortList():
if self.hw.isPortUsed(port):
descr = port
if descr == 'DVI' and has_hdmi:
descr = 'HDMI'
if port != "DVI-PC":
list.append((descr,port))
list.sort(key = lambda x: x[0])
print "listInputChannels:", list
return list
def inputSelectionMade(self, index):
print "inputSelectionMade:", index
self.port = index
self.inputSelect(index)
def inputSelectionMoved(self):
hw_type = HardwareInfo().get_device_name()
has_hdmi = HardwareInfo().has_hdmi()
print "input selection moved:", self.selection
self.inputSelect(self.selection)
if self["portpic"].instance is not None:
picname = self.selection
if picname == 'DVI' and has_hdmi:
picname = "HDMI"
self["portpic"].instance.setPixmapFromFile(resolveFilename(SCOPE_PLUGINS, "SystemPlugins/Videomode/" + picname + ".png"))
def inputSelect(self, port):
print "inputSelect:", port
modeList = self.hw.getModeList(self.selection)
print "modeList:", modeList
self.port = port
if (len(modeList) > 0):
ratesList = self.listRates(modeList[0][0])
self.hw.setMode(port = port, mode = modeList[0][0], rate = ratesList[0][0])
def listModes(self):
list = []
print "modes for port", self.port
for mode in self.hw.getModeList(self.port):
#if mode[0] != "PC":
list.append((mode[0], mode[0]))
print "modeslist:", list
return list
def modeSelectionMade(self, index):
print "modeSelectionMade:", index
self.mode = index
self.modeSelect(index)
def modeSelectionMoved(self):
print "mode selection moved:", self.selection
self.modeSelect(self.selection)
def modeSelect(self, mode):
ratesList = self.listRates(mode)
print "ratesList:", ratesList
if self.port == "DVI" and mode in ("720p", "1080i"):
self.rate = "multi"
self.hw.setMode(port = self.port, mode = mode, rate = "multi")
else:
self.hw.setMode(port | = self.port, mode = mode, rate = ratesList[0][0])
def listRates(self, querymode = None):
if querymode is None:
querymode = self.mode
list = []
print "modes for port", self.port, "and mode", querymode
for mode in self.hw.getModeList(self.port):
print mode
if mode[0] == querymode:
for rate in mode[1]:
if self.port == "DVI-PC":
print "rate:", rate
if rate == "640x480":
list.insert(0, (ra | te, rate))
continue
list.append((rate, rate))
return list
def rateSelectionMade(self, index):
print "rateSelectionMade:", index
self.rate = index
self.rateSelect(index)
def rateSelectionMoved(self):
print "rate selection moved:", self.selection
self.rateSelect(self.selection)
def rateSelect(self, rate):
self.hw.setMode(port = self.port, mode = self.mode, rate = rate)
def showTestCard(self, selection = None):
if selection is None:
selection = self.selection
print "set config.misc.showtestcard to", {'yes': True, 'no': False}[selection]
if selection == "yes":
config.misc.showtestcard.value = True
else:
config.misc.showtestcard.value = False
def keyNumberGlobal(self, number):
if number in (1,2,3):
if number == 1:
self.hw.saveMode("DVI", "720p", "multi")
elif number == 2:
self.hw.saveMode("DVI", "1080i", "multi")
elif number == 3:
self.hw.saveMode("Scart", "Multi", "multi")
self.hw.setConfiguredMode()
self.close()
WizardLanguage.keyNumberGlobal(self, number)
|
linostar/timeline-clone | test/specs/plugin/PluginFactory.py | Python | gpl-3.0 | 1,487 | 0.002017 | # Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import unittest
from timelinelib.plugin import factory
from timelinelib.plugin.factory import EVENTBOX_DRAWER
class describe_plugin_factory(unittest.TestCase):
def test_can_return_a_named_plugin(self):
plugin = factory.get_plugin(EVENTBOX_DRAWER, "Default Event box drawer")
self.assertEquals(_("Default Event box drawer"), plugin.display_name())
def test_can_return_another_named_plugin(self):
plugin = factory.get_plugin(EVENTBOX_DRAW | ER, "Gradient Event box drawer")
self.assertEquals(_("Gradient Event box drawer"), plugin.display_name())
def test_returns_none_when_named_plugin_cant_be_foun | d(self):
plugin = factory.get_plugin(EVENTBOX_DRAWER, "xyz drawer")
self.assertEquals(None, plugin)
|
Samuel-Phillips/pv-viability-map | import_tool.py | Python | gpl-3.0 | 4,857 | 0.001235 | import tempfile
import traceback
import shutil
import os.path
import os
from zipfile import ZipFile
from contextlib import contextmanager
import shapefile
import interface
import pyproj
from osgeo import osr
import log
## Web Mercator (Not used for interaction)
#leaflet_proj = pyproj.Proj(
# '+proj=merc +lon_0=0 +k=1 +x_0=0 +y_0=0 +a=6378137 '
# '+b=6378137 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs')
# EPSG 4326 (Not the actual CRS, but is used for interaction
leaflet_proj = pyproj.Proj('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
@contextmanager
def tempdir():
    """Context manager for a temporary directory.

    Yields the path of a freshly created temporary directory; the directory
    and all of its contents are deleted when the context exits, even if the
    body raises (cleanup runs in a ``finally`` clause).
    """
    the_dir = tempfile.mkdtemp()
    try:
        yield the_dir
    finally:
        shutil.rmtree(the_dir)
def import_shape_file(saveable, db):
    """Imports a zipped shapefile (from the saveable parameter, which must
    have a .save method) into the interface.Rooftops object db. Raises
    import_tool.error with messages relating to the error encountered."""
    log.note("Importing a shapefile")
    with tempdir() as root:
        zip_name = os.path.join(root, "map.zip")
        sf_dir = os.path.join(root, "shapes")
        saveable.save(zip_name)
        try:
            ZipFile(zip_name, mode='r').extractall(path=sf_dir)
        except Exception:
            # Anything raised here means the upload was not a readable zip.
            log.err("Invalid zip uploaded!")
            raise error("Error while opening the uploaded file. Make sure "
                        "it is in zip format.")
        # Candidate shapefile basenames, derived from the component files.
        sf_names = set(
            name[:-4] for name in os.listdir(sf_dir) if name.endswith(
                '.shp') or name.endswith('.shx') or name.endswith('.dbf'))
        if not sf_names:
            log.err("Zip with no shapefile uploaded")
            raise error("No shapefile found in zip. The zip must contain "
                        "exactly one shapefile, and it must not be in a "
                        "subdirectory.")
        elif len(sf_names) == 1:
            name = sf_names.pop()
            joined = os.path.join(sf_dir, name)
            for ext in 'shp dbf prj'.split():
                if not os.path.isfile(joined + '.' + ext):
                    log.err("Incomplete shapefile uploaded")
                    # BUG FIX: this used to *return* the exception instead of
                    # raising it, so incomplete shapefiles slipped through to
                    # the reader below.
                    raise error('.' + ext + ' file missing from zip! Please '
                                'include the entire shapefile.')
            # Read the projection (.prj) and convert it to a proj4 string.
            srs = osr.SpatialReference()
            with open(joined + '.prj', mode='r', encoding='ascii') as f:
                txf = f.read()
                log.note(txf)
            srs.ImportFromWkt(txf)
            p4str = srs.ExportToProj4()
            sf_projection = pyproj.Proj(p4str)
            try:
                sf = shapefile.Reader(joined)
                perform_import(sf, sf_projection, db)
            except shapefile.ShapefileException:
                log.err("Invalid shapefile")
                raise error("Invalid shapefile")
        else:
            log.warn("Zip uploaded with multiple shapefiles")
            raise error("Found multiple shapefiles with names {}. Only one "
                        "shapefile may be present in the zip.".format(
                            ', '.join(sf_names)))
def perform_import(sf, proj, db):
    """Takes a pyshp instance and imports its points to the database.

    Column positions are located by field name; proj is the source
    projection passed through to points2wkt().
    """
    # Map the expected attribute columns to their index in each record.
    cols = {n: None for n in
            'kwhs BuidArea Perc System Savings UseRoof Zone'.split()}
    for i, f in enumerate(sf.fields):
        if f[0] in cols:
            # Field 0 is pyshp's DeletionFlag, hence the -1 offset.
            cols[f[0]] = i - 1
    try:
        db.add_rects(
            interface.Rect(
                wktshape=points2wkt(row.shape.points, proj),
                building_area=row.record[cols['BuidArea']],
                useable_build_area=row.record[cols['UseRoof']],
                percent_usable=row.record[cols['Perc']],
                kwhs=row.record[cols['kwhs']],
                system_size_kw=row.record[cols['System']],
                savings=int(100 * float(row.record[cols['Savings']]))
            ) for row in sf.shapeRecords() if is_useful(row)
        )
    except Exception:
        # BUG FIX: was a bare ``except:`` which also trapped SystemExit and
        # KeyboardInterrupt. Log the traceback, then surface a user-facing
        # error.
        traceback.print_exc()
        log.err("Aw, shit.")
        raise error("Database error, see log")
def points2wkt(points, inproj):
    """Converts a list of points into a WKT polygon.

    Points are reprojected from *inproj* to the interaction CRS and run
    through xfrm() before serialization.  NOTE(review): the coordinate order
    is inferred from the ``reversed(point)`` call -- confirm against the
    shapefile source.
    """
    # Close the ring by repeating the first point WITHOUT mutating the
    # caller's list (the previous in-place append leaked an extra point back
    # to the caller on every invocation).
    ring = list(points)
    ring.append(ring[0])
    return "POLYGON(({}))".format(
        ','.join(
            ' '.join(str(dim) for dim in xfrm(pyproj.transform(
                inproj, leaflet_proj, *reversed(point))[:2]
            )) for point in ring
        ))
def xfrm(point):
    """Shift an (x, y) pair by a fixed constant offset.

    NOTE(review): the magic constants look like a site-specific calibration
    offset -- their origin is not documented in this file.
    """
    return point[0] + 81.48064307833366, point[1] - 79.60004366038598
def is_useful(row):
    # Placeholder filter used by perform_import(): currently every shapefile
    # record is accepted. Kept as a hook for future per-record filtering.
    return True
class error(Exception):
    """Generic error from the import process that contains a human readable
    error string.

    Raised by import_shape_file() and perform_import(); the message is
    intended to be shown directly to the uploading user.
    """
    pass
|
glassalle/Rad4Snps | RAD4SNPs_Main.py | Python | gpl-3.0 | 12,997 | 0.015465 | #!/usr/bin/python
########################################################################## RAD4SNPs:##############################################################################
# A set of Python scripts to select and validate independent SNPs markers from a list of read files #
##################################################################################################################################################################
# MAIN PROGRAM
# Authors: G.LASSALLE (gilles.lassalle@inra.fr) & C.DELORD (chrystelle.delord@inra.fr)
# Last update: AUGUST 2017
#################### PRE-CONDITIONS
#- [-i] Working directory where to store results of the pipeline for the focal species X
#- [-d] exact name of MySQL database where denovo_map.pl Stacks data are available for the focal species X
#- [-i1] single-end reads (reads 1) for focal species X duplicate 1
#- [-i2] single-end reads (reads 1) for focal species X duplicate 2
#- [-i3] paired-end reads (reads 2) for focal species X duplicate 1
#- [-i4] paired-end reads (reads 2) for focal species X duplicate 2
#- BWA and SAMtools available
#- Connexion to the Stacks MySQL database available: databases of Stacks 'denovo_map output' for each species.
###############################################################################
import argparse
import os
import sys
import MySQLdb
###############################################################################
parser = argparse.ArgumentParser()
parser.add_argument('-i', action='store', dest='InputDir', help='Working Directory')
parser.add_argument('-d', action='store', dest='database', help='Stacks database')
parser.add_argument('-c', action='store', dest='CodeSp', help='ID of the species')
parser.add_argument('-i1', action='store', dest='R11', help='First R1 file')
parser.add_argument('-i2', action='store', dest='R12', help='Second R1 file')
parser.add_argument('-i3', action='store', dest='R21', help='First R2 file')
parser.add_argument('-i4', action='store', dest='R22', help='Second R2 file')
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
results = parser.parse_args()
print 'input directory =', results.InputDir
##############################################################################
# Arguments testing
##############################################################################
if results.InputDir:
if os.path.isdir(results.InputDir):
print "Working directory is valid."
else :
print "Caution: working directory is invalid, please ckeck [-i]."
sys.exit()
|
else :
print "Please insert path for working directory [-i]. End of program."
sys.exit()
############################## | ################################################
if results.database:
db = MySQLdb.connect(host="", # your host, usually localhost
user="", # your username
passwd="", # your password
db=results.database) # name of the database
cur1= db.cursor() # connexion
print "Currently working on MySQL database: "+str(results.database)
else:
print "Incorrect ID for database: database not found, please check [-d]"
sys.exit()
###############################################################################
#
if results.R11:
if os.path.isfile(results.R11):
print "First file of single-end reads: found."
else :
print "Path to single-end reads data is not a file: please check out [-i1]."
sys.exit()
else :
print "Please insert path to single-end read files [-i1]. End of program."
sys.exit()
#
if results.R12:
if os.path.isfile(results.R12):
print "Second file of single-end reads: found."
else :
print "Path to single-end reads data is not a file: please check out [-i2]."
sys.exit()
else :
print "Please insert path to single-end read files [-2]. End of program."
sys.exit()
#
if results.R21:
if os.path.isfile(results.R21):
print "First file of paired-end reads: found."
else :
print "Path to paired-end reads data is not a file: please check out [-i3]."
sys.exit()
else :
print "Please insert path to paired-end read files [-i3]. End of program."
sys.exit()
#
if results.R22:
if os.path.isfile(results.R22):
print "Second file of paired-end reads: found."
else :
print "Path to paired-end reads data is not a file: please check out [-i4]."
sys.exit()
else :
print "Please insert path to paired-end read files [-i4]. End of program."
sys.exit()
###############################################################################
if results.CodeSp:
CodeEspece=str(results.CodeSp)
if CodeEspece[:1]!="_":
CodeEspece=str(results.CodeSp)+str("_")
else:
CodeEspece="std_"
###############################################################################
WorkDir=os.path.abspath(results.InputDir) # Current working directory
FastaCatalog=str(WorkDir)+"/"+str(results.CodeSp)+"Catalog.fasta" # Formatting name of candidates fasta file -output of MySQL filtering
###############################################################################
# Main program
###############################################################################
if os.path.isfile("/usr/bin/bwa"):
print "BWA program is found."
else :
print "Cannot find BWA: please check out pipeline requirements."
sys.exit()
###samtools
if os.path.isfile("/usr/bin/samtools"):
print "SAMtools program is found."
else :
print "Cannot find SAMtools: please check out pipeline requirements."
sys.exit()
#####################################################
# Working directory writable
filepath = results.InputDir+'/file.txt'
try:
filehandle = open( filepath, 'w' )
except IOError:
sys.exit( 'Working directory is not accessible' + filepath )
###############################################################################
# Pipeline commands:
###############################################################################
#################################### FIRST FILTERING ##########################
print os.getcwd()
commandeExtractFasta="./RAD4SNPs_SQL2Fasta.py -o "+str(FastaCatalog)+" -d "+str(results.database)+" -c "+str(CodeEspece)
print "Extraction du fichier fasta"
print commandeExtractFasta
os.system(commandeExtractFasta)
############################## Fusion of single-end reads #####################
if results.R11:
if results.R12:
commandFusionR1="cat "+str(results.R11)+" "+str(results.R12)+" > "+str(WorkDir)+"/allR1.fq.gz"
else :
commandFusionR1="cp "+str(results.R11)+" "+str(WorkDir)+"/allR1.fq.gz"
#############################fin de fusion
############################## Fusion of paired-end reads #####################
if results.R21:
if results.R22:
commandFusionR2="cat "+str(results.R21)+" "+str(results.R22)+" > "+str(WorkDir)+"/allR2.fq.gz"
else :
commandFusionR2="cp "+str(results.R21)+" "+str(WorkDir)+"/allR2.fq.gz"
#################################### SECOND FILTERING (1) #####################
command1="bwa index "+str(FastaCatalog) # Indexing
command2="bwa mem -a -M "+str(FastaCatalog)+" "+str(WorkDir)+"/allR1.fq.gz > "+str(WorkDir)+"/PremierAlign.sam" # SE reads alignment
command3="samtools view -Sb "+str(WorkDir)+"/PremierAlign.sam | samtools sort - "+str(WorkDir)+"/PremierAlign1Sorted" # Conversion to bam file
command4="samtools view -F4 "+str(WorkDir)+"/PremierAlign1Sorted.bam > "+str(WorkDir)+"/PremierAlign1Sorted-F4.sam" # Elimination of unmapped SE reads
print "SE reads merging: "+str(commandFusionR1)
os.system(commandFusionR1)
print "PE reads merging: "+str(commandFusionR2)
os.system(commandFusionR2)
print "BWA indexing: "+str(command1)
os.system(command1)
print |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sympy/core/assumptions.py | Python | agpl-3.0 | 12,183 | 0.003119 | from facts import FactRules
from sympy.core.compatibility import cmp
class CycleDetected(Exception):
    """(internal) used to detect cycles when evaluating assumptions
    through prerequisites
    """
    # Control-flow exception for the assumption machinery; the raise/catch
    # sites are elsewhere in this module.
    pass
class AssumeMeths(object):
""" Define default assumption methods.
AssumeMeths should be used to derive Basic class only.
All symbolic objects have assumption attributes that can be accessed via
.is_<assumption name> attribute.
Assumptions determine certain properties of symbolic objects. Assumptions
can have 3 possible values: True, False, None. None is returned when it is
impossible to say something about the property. For example, a Symbol is
not know beforehand to be positive.
By default, all symbolic values are in the largest set in the given context
without specifying the property. For example, a symbol that has a property
being integer, is also real, complex, etc.
Here follows a list of possible assumption names:
- commutative - object commutes with any other object with
respect to multiplication operation.
- real - object can have only values from the set
of real numbers
- integer - object can have only values | from the set
of integers
- bounded - object absolute value is bounded
- positive - object can have only positive values
- negative - object can have only negative values
- nonpositive - object can have only nonpositive values
- nonnegative - object can have only nonnegative values
- comparable - object.evalf() returns Number obj | ect.
- irrational - object value cannot be represented exactly by Rational
- unbounded - object value is arbitrarily large
- infinitesimal - object value is infinitesimal
Example rules:
positive=T -> nonpositive=F, real=T
real=T & positive=F -> nonpositive=T
unbounded=F|T -> bounded=not unbounded XXX ok?
irrational=T -> real=T
Implementation note: assumption values are stored in
._assumption dictionary or are returned by getter methods (with
property decorators) or are attributes of objects/classes.
Examples:
- True, when we are sure about a property. For example, when we are
working only with real numbers:
>>> from sympy import Symbol
>>> Symbol('x', real = True)
x
- False
- None (if you don't know if the property is True or false)
"""
__slots__ = ['_assumptions', # assumptions
'_a_inprogress', # already-seen requests (when deducing
# through prerequisites -- see CycleDetected)
]
# This are the rules under which our assumptions function
#
# References
# ----------
#
# negative, -- http://en.wikipedia.org/wiki/Negative_number
# nonnegative
#
# even, odd -- http://en.wikipedia.org/wiki/Parity_(mathematics)
# imaginary -- http://en.wikipedia.org/wiki/Imaginary_number
# composite -- http://en.wikipedia.org/wiki/Composite_number
# finite -- http://en.wikipedia.org/wiki/Finite
# infinitesimal -- http://en.wikipedia.org/wiki/Infinitesimal
# irrational -- http://en.wikipedia.org/wiki/Irrational_number
# ...
_assume_rules = FactRules([
'integer -> rational',
'rational -> real',
'real -> complex',
'imaginary -> complex',
'complex -> commutative',
'odd == integer & !even',
'even == integer & !odd',
'real == negative | zero | positive',
'positive -> real & !negative & !zero',
'negative -> real & !positive & !zero',
'nonpositive == real & !positive',
'nonnegative == real & !negative',
'zero -> infinitesimal & even',
'prime -> integer & positive',
'composite == integer & positive & !prime',
'irrational == real & !rational',
'imaginary -> !real',
'!bounded == unbounded',
'noninteger == real & !integer',
'!zero == nonzero',
# XXX do we need this ?
'finite -> bounded', # XXX do we need this?
'finite -> !zero', # XXX wrong?
'infinitesimal -> !finite', # XXX is this ok?
])
_assume_defined = _assume_rules.defined_facts.copy()
_assume_defined.add('comparable')
_assume_defined = frozenset(_assume_defined)
###################################
# positive/negative from .evalf() #
###################################
# properties that indicate ordering on real axis
_real_ordering = set(['negative', 'nonnegative', 'positive', 'nonpositive'])
# what can be said from cmp(x.evalf(),0)
# NOTE: if x.evalf() is zero we can say nothing
_real_cmp0_table= {
'positive': {1: True, -1: False, 0: None},
'negative': {1: False, -1: True, 0: None},
}
# because we can say nothing if x.evalf() is zero, nonpositive is the same
# as negative
_real_cmp0_table['nonpositive'] = _real_cmp0_table['negative']
_real_cmp0_table['nonnegative'] = _real_cmp0_table['positive']
    def __getstate__(self, cls=None):
        # Pickle support for __slots__-based classes (no instance __dict__):
        # recursively collect slot values from this class and every base
        # class that also defines __getstate__.
        if cls is None:
            # This is the case for the instance that gets pickled
            cls = self.__class__
        d = {}
        # Get all data that should be stored from super classes
        for c in cls.__bases__:
            if hasattr(c, "__getstate__"):
                # Pass the class explicitly so bases inspect their own slots.
                d.update(c.__getstate__(self, c))
        # Get all information that should be stored from cls and return the dic
        for name in cls.__slots__:
            if hasattr(self, name):
                d[name] = getattr(self, name)
        return d
def __setstate__(self, d):
# All values that were pickled are now assigned to a fresh instance
for name, value in d.iteritems():
try:
setattr(self, name, value)
except:
pass
def _what_known_about(self, k):
"""tries hard to give an answer to: what is known about fact `k`
NOTE: You should not use this directly -- see make__get_assumption
instead
This function is called when a request is made to see what a fact
value is.
If we are here, it means that the asked-for fact is not known, and
we should try to find a way to find its value.
For this we use several techniques:
1. _eval_is_<fact>
------------------
first fact-evaluation function is tried, for example
_eval_is_integer
2. relations
------------
if the first step did not succeeded (no such function, or its return
is None) then we try related facts. For example
means
rational --> integer
another example is joined rule:
integer & !odd --> even
so in the latter case if we are looking at what 'even' value is,
'integer' and 'odd' facts will be asked.
3. evalf() for comparable
-------------------------
as a last resort for comparable objects we get their numerical value
-- this helps to determine facts like 'positive' and 'negative'
In all cases when we settle on some fact value, it is given to
_learn_new_facts to deduce all its implications, and also the result
is cached in ._assumptions for later quick access.
"""
# 'defined' assumption
if k not in self._assume_defined:
raise AttributeError('undefined assumption %r' % (k))
assumptions = self._assumptions
seen = self._a_inpr |
sshuttle/sshuttle | sshuttle/ssyslog.py | Python | lgpl-2.1 | 588 | 0 | import sys
im | port os
import subprocess as ssubprocess
_p = None
def start_syslog():
    """Spawn a ``logger`` child process that forwards its stdin to syslog.

    The process handle is kept in the module-global _p so the *_to_syslog()
    helpers can dup2() onto its stdin. The child's own stdout/stderr are
    discarded.
    """
    global _p
    with open(os.devnull, 'w') as devnull:
        _p = ssubprocess.Popen(
            ['logger', '-p', 'daemon.notice', '-t', 'sshuttle'],
            stdin=ssubprocess.PIPE,
            stdout=devnull,
            stderr=devnull
        )
def close_stdin():
    # Close this process's stdin; callers must not read input afterwards.
    sys.stdin.close()
def stdout_to_syslog():
    # Redirect stdout into the logger subprocess started by start_syslog().
    # Flush first so buffered output isn't lost across the dup2().
    sys.stdout.flush()
    os.dup2(_p.stdin.fileno(), sys.stdout.fileno())
def stderr_to_syslog():
    # Redirect stderr into the logger subprocess started by start_syslog().
    sys.stderr.flush()
    os.dup2(_p.stdin.fileno(), sys.stderr.fileno())
|
ratilicus/sdtd | dev/sdtd-eac.py | Python | apache-2.0 | 2,696 | 0.002967 | #!/usr/bin/python
import time
import telnetlib
import re
line_pat = re.compile(r'\d+\. (?P<username>[^,]*), id=(?P<id>\d+), steamid=(?P<steamid>\d+), online=(?P<online>True|False), ip=[\d.]*, playtime=(?P<playtime>\d+).*')
slot_pat = re.compile(r'Slot \d+: (\d{3}) * (.*)')
ban_pat = re.compile(r'^ \d{2}/\d{2}/\d{4} \d{2}:\d{2}:\d{2} - (\d+) -.*$')
if __name__ == "__main__":
telnet = telnetlib.Telnet('localhost', 25025)
telnet.write('loglevel all off\n')
time.sleep(2)
telnet.write('ban list\n')
time.sleep(2)
line = telnet.read_until('Ban list entries:')
line = telnet.read_until('\n')
ban_list = []
cont = True
while cont:
line = telnet.read_until('\n', 1)
print 'line >%s<' % line
if not line.strip():
cont = False
else:
steamid = ban_pat.findall(line)
if steamid:
ban_list.extend(steamid)
print 'banned>', ban_list
telnet.write('lkp\n')
cont = True
players = []
print 'getting player list'
while cont:
line = telnet.read_until('\n')
print 'line>', line
if line.startswith('Total'):
cont = False
else:
result = line_pat.match(line)
if result:
#print 'user>', result.groupdict()
data = result.groupdict()
if int(data['playtime']) < 300:
players.append(data)
print 'scanning players'
for p in players:
if p['steamid'] in ban_list:
print 'player %s is already banned.. | . skipping' % p['steamid']
continue
print | 'scanning', p
suspect = False
items = []
telnet.write('si %s\n' % p['steamid'])
ct = 0
while ct < 3:
line = telnet.read_until('\n', 0.25)
if not line:
time.sleep(0.1)
ct+=1
else:
result = slot_pat.findall(line)
if result:
count, item = result[0]
# print item, count
if (
'sniperRifle' in item
or 'mp5' in item
or 'tnt' in item
or 'reinforcedConcrete' in item
):
suspect = True
items.append((item, count, True))
else:
items.append((item, count, False))
if suspect:
for item, count, suspect_item in items:
print 'SUSPECT %s x%s %s' % (item, count, '<- SUSPECT' if suspect_item else '')
|
vertigo235/Sick-Beard-XEM | sickbeard/providers/generic.py | Python | gpl-3.0 | 15,171 | 0.004482 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import os
import sys
import re
import urllib2
import copy
import itertools
import operator
import collections
import sickbeard
from sickbeard import helpers, classes, logger, db
from sickbeard.common import Quality, MULTI_EP_RESULT, SEASON_RESULT#, SEED_POLICY_TIME, SEED_POLICY_RATIO
from sickbeard import tvcache
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from lib.hachoir_parser import createParser
from sickbeard.name_parser.parser import NameParser, InvalidNameException
from sickbeard import scene_numbering
from sickbeard.common import Quality, Overview
class GenericProvider:
NZB = "nzb"
TORRENT = "torrent"
    def __init__(self, name):
        """Base provider state; subclasses fill in providerType and url."""
        # these need to be set in the subclass
        self.providerType = None
        self.name = name
        self.url = ''
        self.supportsBacklog = False
        # Per-provider RSS cache consulted by searchRSS()/findEpisode().
        self.cache = tvcache.TVCache(self)
    def getID(self):
        """Return the normalized identifier for this provider."""
        return GenericProvider.makeID(self.name)
    @staticmethod
    def makeID(name):
        """Lowercase *name* and replace every non-word character with '_'."""
        return re.sub("[^\w\d_]", "_", name.strip().lower())
    def imageName(self):
        """Filename of the provider's icon, derived from its ID."""
        return self.getID() + '.png'
    def _checkAuth(self):
        # No-op by default; providers that require credentials presumably
        # override this -- overriding sites are outside this chunk.
        return
    def isActive(self):
        """True when this provider's type (NZB/torrent) is globally enabled
        AND the provider itself is enabled in config."""
        if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
            return self.isEnabled()
        elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS:
            return self.isEnabled()
        else:
            return False
    def isEnabled(self):
        """
        This should be overridden and should return the config setting eg. sickbeard.MYPROVIDER
        """
        # Base implementation: disabled unless a subclass says otherwise.
        return False
    def getResult(self, episodes):
        """
        Returns a result of the correct type for this provider
        """
        # Pick the SearchResult subclass matching the provider type; fall
        # back to the generic SearchResult for unknown types.
        if self.providerType == GenericProvider.NZB:
            result = classes.NZBSearchResult(episodes)
        elif self.providerType == GenericProvider.TORRENT:
            result = classes.TorrentSearchResult(episodes)
        else:
            result = classes.SearchResult(episodes)
        # Tag the result with its originating provider for later snatching.
        result.provider = self
        return result
    def getURL(self, url, post_data=None, headers=None):
        """
        By default this is just a simple urlopen call but this method should be overridden
        for providers with special URL requirements (like cookies)

        Returns the response body, or None (after logging) on failure.
        """
        if not headers:
            headers = []
        data = helpers.getURL(url, post_data, headers)
        if not data:
            logger.log(u"Error loading " + self.name + " URL: " + url, logger.ERROR)
            return None
        return data
    def downloadResult(self, result):
        """
        Save the result to disk.

        Fetches result.url and writes it into the NZB or torrent watch
        folder; returns False on any failure, otherwise the outcome of
        _verify_download().
        """
        logger.log(u"Downloading a result from " + self.name+" at " + result.url)
        data = self.getURL(result.url)
        if data == None:
            return False
        # use the appropriate watch folder
        if self.providerType == GenericProvider.NZB:
            saveDir = sickbeard.NZB_DIR
            writeMode = 'w'
        elif self.providerType == GenericProvider.TORRENT:
            # Torrents are binary; NZBs are XML text, hence the mode split.
            saveDir = sickbeard.TORRENT_DIR
            writeMode = 'wb'
        else:
            return False
        # use the result name as the filename
        file_name = ek.ek(os.path.join, saveDir, helpers.sanitizeFileName(result.name) + '.' + self.providerType)
        logger.log(u"Saving to " + file_name, logger.DEBUG)
        try:
            with open(file_name, writeMode) as fileOut:
                fileOut.write(data)
            helpers.chmodAsParent(file_name)
        except EnvironmentError, e:
            logger.log("Unable to save the file: " + ex(e), logger.ERROR)
            return False
        # as long as it's a valid download then consider it a successful snatch
        return self._verify_download(file_name)
def _verify_download(self, file_name=None):
"""
Checks th | e saved file to see if it was actually valid, if not then consider the download a failure.
"""
# primitive verification of torrents, just make sure we didn't get a text file or something
if self.providerType == GenericProvider.TORR | ENT:
parser = createParser(file_name)
if parser:
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except:
pass
if mime_type != 'application/x-bittorrent':
logger.log(u"Result is not a valid torrent file", logger.WARNING)
return False
return True
    def searchRSS(self):
        """Refresh this provider's RSS cache and return the episodes it can
        satisfy (as reported by the cache)."""
        self._checkAuth()
        self.cache.updateCache()
        return self.cache.findNeededEpisodes()
    def getQuality(self, item):
        """
        Figures out the quality of the given RSS item node

        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed

        Returns a Quality value obtained from the node's data
        """
        # Quality is inferred purely from the release title string.
        (title, url) = self._get_title_and_url(item) # @UnusedVariable
        quality = Quality.sceneQuality(title)
        return quality
    def _doSearch(self):
        # Stub: concrete providers override this to query their search API.
        return []
    def _get_season_search_strings(self, show, season, wantedEp, searchSeason=False):
        # Stub: concrete providers override this to build season queries.
        return []
    def _get_episode_search_strings(self, ep_obj):
        # Stub: concrete providers override this to build per-episode queries.
        return []
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node

        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed

        Returns: A tuple containing two strings representing title and URL respectively
        """
        title = helpers.get_xml_text(item.find('title'))
        if title:
            # Scene-style names use dots instead of spaces.
            title = title.replace(' ', '.')
        url = helpers.get_xml_text(item.find('link'))
        if url:
            # Undo XML entity escaping in query strings.
            url = url.replace('&amp;', '&')
        return (title, url)
def findEpisode(self, episode, manualSearch=False):
self._checkAuth()
# XEM episode scene numbering
sceneEpisode = copy.copy(episode)
sceneEpisode.convertToSceneNumbering()
logger.log(u'Searching "%s" for "%s" as "%s"'
% (self.name, episode.prettyName() , sceneEpisode.prettyName()))
self.cache.updateCache()
results = self.cache.searchCache(episode, manualSearch)
logger.log(u"Cache results: " + str(results), logger.DEBUG)
logger.log(u"manualSearch: " + str(manualSearch), logger.DEBUG)
# if we got some results then use them no matter what.
# OR
# return anyway unless we're doing a manual search
if results or not manualSearch:
return results
itemList = []
for cur_search_string in self._get_episode_search_strings(sceneEpisode):
itemList += self._doSearch(cur_search_string, show=episode.show)
for item in itemList:
(title, url) = self._get_title_and_url(item)
# pa |
flexo/evolutron | evolutron/food.py | Python | mit | 1,376 | 0.00218 |
import pygame
from pygame.locals import *
class Food(pygame.sprite.Sprite):
    """A stationary food sprite that creatures can eat for energy."""

    def __init__(self, tile, x, y):
        super(Food, self).__init__()
        self.tile = tile
        self.energy = 1000
        w = self.w = 9
        h = self.h = 5
        self.image = pygame.Surface((w, h), SRCALPHA).convert_alpha()
        self.rect = self.image.get_rect()
        # (x, y) are offsets relative to the owning tile.
        self.rect.x = tile.rect.x + x
        self.rect.y = tile.rect.y + y
        self.intersect_lines = [
            ((self.rect.x, self.rect.y), (self.rect.x + w, self.rect.y + h)),
        ]
        self.redraw = True
        self.height = 0.1  # used for vision

    def eaten(self):
        # Remove this food from both the tile-local and world-wide lists.
        self.tile.allfood.remove(self)
        self.tile.world.allfood.remove(self)

    def draw(self):
        if self.redraw:
            # FIX: reconstructed garbled Rect(...) call and lines() point
            # list from the corrupted source.
            pygame.draw.ellipse(self.image, (96, 96, 0), Rect(3, 2, 3, 3), 0)
            pygame.draw.ellipse(self.image, (128, 128, 0), Rect(0, 0, 9, 5), 0)
            if self.tile.world.active_item is self:
                # Blue outline marks the currently selected item.
                pygame.draw.lines(self.image, (0, 0, 255), 1, [
                    (0, 0), (self.w - 1, 0), (self.w - 1, self.h - 1), (0, self.h - 1)
                ], 3)
            self.redraw = False

    def __str__(self):
        return "Food at {},{}".format(self.rect.x, self.rect.y)

    def __repr__(self):
        return "<Food at {},{}>".format(self.rect.x, self.rect.y)
|
richm/designate | designate/api/v2/__init__.py | Python | apache-2.0 | 1,327 | 0 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.api.v2 import patches # flake8: noqa
import pecan.deploy
from oslo.config import cfg
from designate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def factory(global_config, **local_conf):
    """Paste app factory for the Designate v2 API.

    Returns a minimal 404 WSGI app when the v2 API is disabled in the
    configuration; otherwise builds and returns the Pecan application.
    """
    if not cfg.CONF['service:api'].enable_api_v2:
        def disabled_app(environ, start_response):
            status = '404 Not Found'
            start_response(status, [])
            return []

        return disabled_app

    conf = {
        'app': {
            'root': 'designate.api.v2.controllers.root.RootController',
            'modules': ['designate.api.v2']
        }
    }

    app = pecan.deploy.deploy(conf)

    return app
|
sidharta/hansel-app | main/auth/google.py | Python | mit | 1,359 | 0 | # coding: utf-8
# pylint: disable=missing-docstring, invalid-name
from __future__ import absolute_import
from google.appengine.api import users
import flask
import auth
import model
import util
from main import app
@app.route('/signin/google/')
def signin_google():
    """Begin Google sign-in: stash the request params, then redirect the
    user to the App Engine Google login URL."""
    auth.save_request_params()
    google_url = users.create_login_url(flask.url_for('google_authorized'))
    return flask.redirect(google_url)
@app.route('/_s/callback/google/authorized/')
def google_authorized():
    """Google sign-in callback: sign the user in, or flash a denial message
    and return to the index page."""
    google_user = users.get_current_user()
    if google_user is None:
        flask.flash('You denied the request to sign in.')
        return flask.redirect(flask.url_for('index'))
    user_db = retrieve_user_from_google(google_user)
    return auth.signin_via_social(user_db)
def retrieve_user_from_google(google_user):
    """Return the local user record for a Google account, creating one if
    none exists.

    An existing user is promoted to admin when the current Google user is an
    App Engine administrator.
    """
    auth_id = 'federated_%s' % google_user.user_id()
    user_db = model.User.get_by('auth_ids', auth_id)
    if user_db:
        if not user_db.admin and users.is_current_user_admin():
            user_db.admin = True
            user_db.put()
        return user_db

    return auth.create_or_get_user_db(
        auth_id=auth_id,
        name=util.create_name_from_email(google_user.email()),
        username=google_user.email(),
        email=google_user.email(),
        verified=True,
        admin=users.is_current_user_admin(),
    )
|
JioCloud/oslo-incubator | openstack/common/lockutils.py | Python | apache-2.0 | 9,378 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import threading
import time
import weakref
from oslo.config import cfg
from openstack.common import fileutils
from openstack.common.gettextutils import _ # noqa
from openstack.common import local
from openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
    # Let consuming projects override the default for the lock_path option.
    cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # The file object is created lazily in __enter__, so constructing
        # a lock is cheap and performs no I/O.
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            # Best-effort release: log the failure but never raise out of
            # __exit__, which would mask the original exception (if any).
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        # Platform-specific subclasses implement the actual non-blocking
        # acquisition attempt; expected to raise IOError when already held.
        raise NotImplementedError()

    def unlock(self):
        # Platform-specific subclasses implement the release.
        raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
    """File-lock backend for Windows, based on ``msvcrt.locking``."""

    def trylock(self):
        # LK_NBLCK: non-blocking lock of 1 byte; raises IOError when the
        # region is already locked by another process.
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
    """File-lock backend for POSIX platforms, based on ``fcntl.lockf``."""

    def trylock(self):
        # Exclusive + non-blocking; raises IOError (EACCES/EAGAIN) when
        # the file is already locked by another process.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
# Select the platform-appropriate lock backend at import time; the
# platform-specific module (msvcrt/fcntl) is only imported where it exists.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

# Per-name semaphores for intra-process synchronisation.  Weak values let
# a semaphore be garbage-collected once no lock() call is using it.
_semaphores = weakref.WeakValueDictionary()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    # NOTE(soren): If we ever go natively threaded, this will be racy.
    #              See http://stackoverflow.com/questions/5390569/dyn
    #              amically-allocating-and-destroying-mutexes
    sem = _semaphores.get(name, threading.Semaphore())
    if name not in _semaphores:
        # this check is not racy - we're already holding ref locally
        # so GC won't remove the item and there was no IO switch
        # (only valid in greenthreads)
        _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd — locks_held is a per-thread
        # record of which named locks are currently owned, used for
        # diagnostics; it is popped again in the outer finally below.
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                # Build "<prefix>-<name>" (no double dash if the prefix
                # already ends with one).
                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        # Control returns here after the caller's with-block
                        # finishes; the file lock is then released by the
                        # enclosing "with lock" exit.
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                # Internal-only locking: the semaphore acquired above is
                # sufficient, so hand it to the caller directly.
                yield sem
        finally:
            local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.