| repo_name (string, len 5–100) | path (string, len 4–231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, len 0–8.16k) | middle (string, len 3–512) | suffix (string, len 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
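Each row below is one fill-in-the-middle (FIM) sample: a source file split into `prefix`, `middle`, and `suffix` strings, plus repository metadata. Concatenating the three string columns recovers the original file. A minimal sketch, assuming the rows are served through the Hugging Face `datasets` library; the dataset name is a placeholder, not given by this dump:

```python
# Reassemble one source file from a FIM row.
from datasets import load_dataset

ds = load_dataset("org/fim-python-code", split="train")  # hypothetical name
row = ds[0]
original_text = row["prefix"] + row["middle"] + row["suffix"]
print(row["repo_name"], row["path"], len(original_text))
```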
| skosukhin/spack | lib/spack/spack/cmd/clone.py | Python | lgpl-2.1 | 3,972 | 0 |
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp, working_dir
import spack
from spack.util.executable import ProcessError, which
_SPACK_UPSTREAM = 'https://github.com/spack/spack'
description = "create a new installation of spack in another prefix"
section = "admin"
level = "long"
def setup_parser(subparser):
subparser.add_argument(
'-r', '--remote', action='store', dest='remote',
help="name of the remote to clone from", default='origin')
subparser.add_argument(
'prefix',
help="names of prefix where we should install spack")
def get_origin_info(remote):
git_dir = os.path.join(spack.prefix, '.git')
git = which('git', required=True)
try:
branch = git('symbolic-ref', '--short', 'HEAD', output=str)
except ProcessError:
branch = 'develop'
tty.warn('No branch found; using default branch: %s' % branch)
if remote == 'origin' and \
branch not in ('master', 'develop'):
branch = 'develop'
tty.warn('Unknown branch found; using default branch: %s' % branch)
try:
origin_url = git(
'--git-dir=%s' % git_dir,
'config', '--get', 'remote.%s.url' % remote,
output=str)
except ProcessError:
origin_url = _SPACK_UPSTREAM
tty.warn('No git repository found; '
'using default upstream URL: %s' % origin_url)
return (origin_url.strip(), branch.strip())
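# Illustrative return value (not part of the original module):
#   ('https://github.com/spack/spack', 'develop')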
def clone(parser, args):
origin_url, branch = get_origin_info(args.remote)
prefix = args.prefix
tty.msg("Fetching spack from '%s': %s" % (args.remote, origin_url))
if os.path.isfile(prefix):
tty.die("There is already a file at %s" % prefix)
mkdirp(prefix)
if os.path.exists(os.path.join(prefix, '.git')):
tty.die("There already seems to be a git repository in %s" % prefix)
files_in_the_way = os.listdir(prefix)
if files_in_the_way:
tty.die("There are already files there! "
"Delete these files before boostrapping spack.",
*files_in_the_way)
tty.msg("Installing:",
"%s/bin/spack" % prefix,
"%s/lib/spack/..." % prefix)
with working_dir(prefix):
git = which('git', required=True)
git('init', '--shared', '-q')
git('remote', 'add', 'origin', origin_url)
git('fetch', 'origin', '%s:refs/remotes/origin/%s' % (branch, branch),
'-n', '-q')
git('reset', '--hard', 'origin/%s' % branch, '-q')
git('checkout', '-B', branch, 'origin/%s' % branch, '-q')
tty.msg("Successfully created a new spack in %s" % prefix,
"Run %s/bin/spack to use this installation." % prefix)
|
| JoakimLindbom/agocontrol | devices/squeezeboxserver/slimtest.py | Python | gpl-3.0 | 443 | 0.002257 |
import squeezeboxserver
import time
squeezebox = squeezeboxserver.SqueezeboxServer("192.168.1.65:9000")
players = squeezebox.players()
for p in players:
print ("MAC:
|
%s" % p['playerid'])
time.sleep(10)
squeezebox.power("00:04:20:06:8c:55", "on")
squeezebox.playlist("00:04:20:06:8c:55", "play")
time.sleep(10)
squeezebox.playlist("00:04:20:06:8c:55", "stop")
time.sleep(3)
squeezebox.power("00:04:20:06:8c:55", "off")
|
| houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib64/scons-2.1.0/SCons/Tool/MSCommon/__init__.py | Python | gpl-2.0 | 2,107 | 0.000949 |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/MSCommon/__init__.py 5357 2
|
011/09/09 21:31:03 bdeegan"
__doc__ = """
Common functions for Microsoft Visual Studio and Visual C/C++.
"""
import copy
import os
import re
import subprocess
import SCons.Errors
import SCons.Platform.win32
import SCons.Util
from SCons.Tool.MSCommon.sdk import mssdk_exists, \
mssdk_setup_env
from SCons.Tool.MSCommon.vc import msvc_exists, \
msvc_setup_env, \
msvc_setup_env_once
from SCons.Tool.MSCommon.vs import get_default_version, \
get_vs_by_version, \
merge_default_version, \
msvs_exists, \
query_versions
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
| kakaba2009/MachineLearning | python/src/algorithm/coding/regex/email.py | Python | apache-2.0 | 614 | 0.004886 |
import re
def fun(s):
# return True if s is a valid email, else return False
f = "^[a-zA-Z][\w-]*@[a-zA-Z0-9]+\.[a-
|
zA-Z]{1,3}$"
if not re.match(f, s):
return False
username, after = re.split(r'[@]', s)
websitename, extension = re.split(r'[.]', after)
if(len(extension) > 3):
return False
return True
def filter_mail(emails):
return list(filter(fun, emails))
if __name__ == '__main__':
n = int(input())
emails = []
for _ in range(n):
emails.append(input())
filtered_emails = filter_mail(emails)
filtered_emails.sort()
print(filtered_emails)
|
| feinheit/feincms-elephantagenda | elephantagenda/views.py | Python | mit | 403 | 0 |
from models import Event
from django.views.generic import DetailView, ListView
class EventListView(ListView):
template_name = 'agenda/event_list.html'
queryset = Event.objects.upcoming()
paginate_by = 20
class EventArchiveview(EventListView):
queryset = Event.objects.past()
class EventDetailView(DetailView):
model = Event
template_name = 'agenda/event_detail.html'
|
| keras-team/keras | keras/layers/merging/maximum.py | Python | apache-2.0 | 2,842 | 0.001759 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer that computes the maximum (element-wise) of several inputs."""
# pylint: disable=g-direct-tensorflow-import
from keras.layers.merging.base_merge import _Merge
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.Maximum')
class Maximum(_Merge):
"""Layer that computes the maximum (element-wise) a list of inputs.
It takes as input a list of tensors, all of the same shape, and returns
a single tensor (also of the same shape).
>>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1),
... np.arange(5, 10).reshape(5, 1)])
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[5],
[6],
[7],
[8],
[9]])>
>>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> maxed = tf.keras.layers.Maximum()([x1, x2])
>>> maxed.shape
TensorShape([5, 8])
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = tf.maximum(output, inputs[i])
return output
@keras_export('keras.layers.maximum')
def maximum(inputs, **kwargs):
"""Functional interface to compute maximum (element-wise) list of `inputs`.
This is equivalent to the `tf.keras.layers.Maximum` layer.
For example:
```python
input1 = tf.keras.layers.Input(shape=(16,))
x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)
input2 = tf.keras.layers.Input(shape=(32,))
x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)
max_inp=tf.keras.layers.maximum([x1,x2]) #shape=(None, 8)
out = tf.keras.layers.Dense(4)(max_inp)
model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
```
Args:
inputs: A list of input tensors (at least 2) of same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor (of same shape as input tensor) with the element-wise
maximum of the inputs.
Raises:
ValueError: If input tensors are of different shape.
"""
return Maximum(**kwargs)(inputs)
|
| lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/user_interfaces/embedding_in_gtk3.py | Python | mit | 834 | 0.014388 |
#!/usr/bin/env python
"""
demonstrate adding a FigureCanvasGTK3Agg widget to a Gtk.ScrolledWindow
using GTK3 accessed via pygobject
"""
from gi.repository import Gtk
from matplotlib.figure import Figure
from numpy import arange, sin, pi
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
win = Gtk.Window()
win.connect("delete-event", Gtk.main_quit )
win.set_default_size(400,300)
win.set_title("Embedding in GTK")
f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
a.plot(t,s)
sw = Gtk.ScrolledWindow()
win.add (sw)
# A scrolled window border goes outside the scrollbars and viewport
sw.set_border_width (10)
canvas = FigureCanvas(f) # a Gtk.DrawingArea
canvas.set_size_request(800, 600)
sw.add_with_viewport (canvas)
win.show_all()
Gtk.main()
|
| fxb22/BioGUI | plugins/Tools/ETOOLSPlugins/ESummary.py | Python | gpl-2.0 | 3,669 | 0.016898 |
import os
import sys
from Bio import Entrez
import wx
from xml.dom import minidom
import re
class etPlugin():
def GetName(self):
'''
Method to return name of tool
'''
return "ESummary"
def GetBMP(self, dirH):
'''
Method to return identifying image
'''
return dirH + r"\Utils\Icons\ncbi_logoESum.bmp"
def GetOutFile(self):
self.outfile=dirH + r"\plugins\clustal.aln"
return self.outfile
def GetExec(self,parent,dbName,query):
self.parent = parent
erl = GetExec(dbName,query)
for line in erl.split('\n'):
if not re.search('.*<Item Name="?',line) == None:
if not re.search('.*<.*>.*<.*>',line) == None:
e = re.sub('.*<Item Name="?','',line)
alpha = re.sub('" Type=".*">?','\n',e)
beta = re.sub('<.*','\n',alpha)
parent.text2.write(str(beta))
parent.text2.write('\n')
def helpEXE(self,parent):
parent.l1.Show(True)
parent.text1.Show(True)
parent.l2.Show(True)
parent.l3.Show(True)
parent.text2 = wx.TextCtrl(parent.panelRSLT, -1, "", size=(892, 370),
style=wx.TE_MULTILINE|wx.TE_PROCESS_ENTER, pos=(75,10))
wx.CallAfter(parent.text2.SetInsertionPoint, 0)
tempVal = parent.dbCB.GetValue()
parent.dbList =['pubmed', 'protein', 'nuccore', 'nucleotide', 'nucgss', 'nucest',
'structure', 'genome', 'genomeprj', 'bioproject', 'biosample',
'biosystems', 'blastdbinfo', 'books', 'cancerchromosomes', 'cdd',
'gap', 'dbvar', 'epigenomics', 'gene', 'gensat', 'gds', 'geo',
'geoprofiles', 'homologene', 'journals', 'mesh', 'ncbisearch',
'nlmcatalog', 'omia', 'omim', 'pmc', 'popset', 'probe',
'proteinclusters', 'pcassay', 'pccompound', 'pcsubstance',
'seqannot', 'snp', 'sra', 'taxonomy', 'toolkit', 'toolkitall',
'unigene', 'unists', 'gencoll', 'gcassembly']
parent.dbGoMenu=[] # Database menu options.
for dummy in parent.dbList: # Iterate through all available databases
parent.dbGoMenu.append(wx.NewId())
tempIdNum = len(parent.menuHandlers)
for itnum,tool in enumerate(parent.dbList):
parent.menuHandlers.append((parent.dbGoMenu[itnum], parent.helpDB))
parent.lowDB,dummy = parent.menuHandlers[tempIdNum]
#Setup the database menu options
parent.dbaseMenu = wx.Menu()
for itnum,tool in enumerate(parent.dbList):
parent.dbaseMenu.Append(parent.dbGoMenu[itnum], tool, kind=wx.ITEM_RADIO)
#Update the menu bar
parent.menuBar.Replace(2,parent.dbaseMenu, "Database")
parent.menuBar.UpdateMenus()
parent.dbCB = wx.ComboBox(parent=parent.panelSQ, id=-1, pos=(256,6),
choices=parent.dbList, style=wx.CB_READONLY)
tempChk = 0
while tempChk < len(parent.dbList):
if tempVal == parent.dbList[tempChk]:
parent.dbCB.SetSelection(tempChk)
tempChk = len(parent.dbList)
tempChk += 1
if tempChk == len(parent.dbList):
parent.dbCB.SetSelection(0)
parent.Bind(wx.EVT_COMBOBOX, parent.helpDB, parent.dbCB)
def GetExec(dbName,idName):
handle = Entrez.esummary(db=dbName,id=idName,rettype='xml')
erl = handle.read()
return erl
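# Illustrative call (not part of the original plugin). NCBI asks that
# Entrez.email be set before querying:
#   Entrez.email = 'you@example.org'
#   print(GetExec('pubmed', '19304878'))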
|
| giannotr/misc-python | twelvetone.py | Python | gpl-2.0 | 1,207 | 0.014085 |
#!/usr/bin/env python
import subprocess, os, random, copy
output_filename = "twelvetone_ex.ly"
output_file = open(output_filename, "w")
notes = ['c','cis','d','dis','e','f','fis','g','gis','a','ais','b']
temp_tt_array = []
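# Builds one random twelve-tone row: notes are drawn at random without
# replacement until all twelve pitch classes have been used once.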
def twelvetone_gen():
notes_svd = copy.copy(notes)
a = 11
while len(temp_tt_array) < 12:
r = random.randint(0,a)
temp_tt_array.append(notes_svd[r])
notes_svd.remove(notes_svd[r])
a = a-1
return temp_tt_array
output_file.write(r'''\version "2.16.0"\header{tagline=""}\paper{indent=0 line-width=130 top-margin=13}\layout{\context{\Staff \remove "Stem_engraver" \remove "Time_signature_engraver" \override Stem #'transparent = ##t}}\score{\transpose c c' << \new Staff''')
temp_tt_string = ''
for x in range(0, 16):
output_file.write('\n' + r'{ \time 12/4 ')
twelvetone_gen()
for element in temp_tt_array:
temp_tt_string+=element + ' '
output_file.write(temp_tt_string)
temp_tt_string = ''
temp_tt_array = []
output_file.write(r'\bar "||" }')
output_file.write('\n>>}')
output_file.close()
try:
os.startfile(output_filename)
except AttributeError:
subprocess.call(['open', output_filename])
|
| etingof/pyasn1-modules | tests/test_rfc4043.py | Python | bsd-2-clause | 4,870 | 0.000411 |
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc4043
class PermIdCertTestCase(unittest.TestCase):
cert_pem_text = """\
MIIDDTCCApOgAwIBAgIJAKWzVCgbsG5HMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkxMTEwMDA0MDIyWhcNMjAxMTA5MDA0MDIyWjBNMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
YW1wbGUxDTALBgNVBAMTBEdhaWwwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQBoktg
/68xL+uEQaWBoHyOjw8EMLeMEng3R2H7yiEzTGoaMJgPOKvSfzB2P0paHYPL+B5y
Gc0CK5EHRujMl9ljH+Wydpk57rKBLo1ZzpWUS6anLGIkWs1sOakcgGGr7hGjggFL
MIIBRzAdBgNVHQ4EFgQU1pCNZuMzfEaJ9GGhH7RKy6Mvz+cwbwYDVR0jBGgwZoAU
8jXbNATapVXyvWkDmbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQI
DAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQDokdYG
kU/O8jAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0E
NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
cnBvc2UuMFMGA1UdEQRMMEqgNgYIKwYBBQUHCAOgKjAoDBs4MjYyMDgtNDE3MDI4
LTU0ODE5NS0yMTUyMzMGCSsGAQQBgaxgMIEQZ2FpbEBleGFtcGxlLmNvbTAKBggq
hkjOPQQDAwNoADBlAjBT+36Y/LPaGSu+61P7kR97M8jAjtH5DtUwrWR02ChshvYJ
x0bpZq3PJaO0WlBgFicCMQCf+67wSvjxxtjI/OAg4t8NQIJW1LcehSXizlPDc772
/FC5OiUAxO+iFaSVMeDFsCo=
"""
def setUp(self):
self.asn1Spec = rfc5280.Certificate()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.cert_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
perm_id_oid = rfc4043.id_on_permanentIdentifier
assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
permanent_identifier_found = False
for extn in asn1Object['tbsCertificate']['extensions']:
if extn['extnID'] == rfc5280.id_ce_subjectAltName:
extnValue, rest = der_decoder(
extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
self.assertFalse(rest)
self.assertTrue(extnValue.prettyPrint())
self.assertEqual(extn['extnValue'], der_encoder(extnValue))
for gn in extnValue:
if gn['otherName'].hasValue():
self.assertEqual(perm_id_oid, gn['otherName']['type-id'])
onValue, rest = der_decoder(
gn['otherName']['value'],
asn1Spec=rfc4043.PermanentIdentifier())
self.assertFalse(rest)
self.assertTrue(onValue.prettyPrint())
self.assertEqual(gn['otherName']['value'], der_encoder(onValue))
self.assertEqual(assigner_oid, onValue['assigner'])
permanent_identifier_found = True
self.assertTrue(permanent_identifier_found)
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.cert_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
perm_id_oid = rfc4043.id_on_permanentIdentifier
assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
permanent_identifier_found = False
for extn in asn1Object['tbsCertificate']['extensions']:
if extn['extnID'] == rfc5280.id_ce_subjectAltName:
extnValue, rest = der_decoder(
extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(extnValue.prettyPrint())
self.assertEqual(extn['extnValue'], der_encoder(extnValue))
for gn in extnValue:
if gn['otherName'].hasValue():
on = gn['otherName']
self.assertEqual(perm_id_oid, on['type-id'])
self.assertEqual(assigner_oid, on['value']['assigner'])
permanent_identifier_found = True
self.assertTrue(permanent_identifier_found)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
| jmbeuken/abinit | scripts/post_processing/ElectronPhononCoupling/ElectronPhononCoupling/data/LiF_g2_2/__init__.py | Python | gpl-3.0 | 668 | 0 |
"""
Filenames of the tests
"""
import os
from os.path import join as pjoin
from .. import LiF_g4
# This is a 2x2x2 q-point grid. The weights can be obtained from abinit.
nqpt = 3
wtq = [0.125, 0.5, 0.375]
# Indices of the q-points in the 4x4x4 grid.
iqpt_subset = [0, 2, 6]
dirname = os.path.dirname(__file__)
fnames = dict(
eigk_fname=LiF_g4.fnames['eigk_fname'],
eigq_fnames=list(),
ddb_fnames=list(),
eigr2d_fnames=list(),
gkk_fnames=list(),
)
for key in ('eigq_fnames', 'ddb_fnames', 'eigr2d_fnames', 'gkk_fnames'):
for i in iqpt_subset:
fnames[key].append(LiF_g4.fnames[key][i])
refdir = pjoin(dirname, 'epc_outputs')
|
| kk9599/vy | vyapp/notevi.py | Python | mit | 2,502 | 0.003597 |
"""
"""
from Tkinter import *
from areavi import AreaVi
from ttk import Notebook
class PanedHorizontalWindow(PanedWindow):
"""
"""
def __init__(self, *args, **kwargs):
"""
"""
PanedWindow.__init__(self, orient=HORIZONTAL, *args, **kwargs)
def create_area(self):
"""
"""
frame = Frame(master=self)
scrollbar = Scrollbar(master=frame)
area = AreaVi('none', frame, border=3, relief=RAISED,
              yscrollcommand=scrollbar.set)
scrollbar.config(command=area.yview)
scrollbar.pack(side='right', fill=Y)
from vyapp.plugins import INSTALL, HANDLE
for plugin, args, kwargs in INSTALL:
plugin.install(area, *args, **kwargs)
for handle, args, kwargs in HANDLE:
handle(area, *args, **kwargs)
area.pack(expand=True, side='left', fill=BOTH)
area.focus_set()
self.add(frame)
return area
def create(self):
"""
"""
area = self.create_area()
self.add(area.master)
return area
def load(self, filename):
"""
"""
area = self.create_area()
self.add(area.master)
area.load_data(filename)
return area
class PanedVerticalWindow(PanedWindow):
"""
"""
def __init__(self, *args, **kwargs):
"""
"""
PanedWindow.__init__(self, orient=VERTICAL, *args, **kwargs)
def create(self):
"""
"""
base = PanedHorizontalWindow(master=self)
self.add(base)
base.create()
return base
def load(self, *args):
"""
"""
base = PanedHorizontalWindow(master=self)
self.add(base)
for ind in args:
base.load(ind)
return base
class NoteVi(Notebook):
"""
"""
def __init__(self, *args, **kwargs):
"""
"""
Notebook.__init__(self, *args, **kwargs)
def create(self, filename):
"""
"""
base = PanedVerticalWindow(master=self)
base.create()
self.add(base, text=filename)
return base
def load(self, *args):
"""
"""
for indi in args:
base = PanedVerticalWindow(master=self)
base.pack(side='left', expand=True, fill=BOTH)
self.add(base)
for indj in indi:
base.load(*indj)
|
| plantigrade/geni-tools | src/gcf/omnilib/frameworks/framework_base.py | Python | mit | 14,946 | 0.004416 |
#----------------------------------------------------------------------
# Copyright (c) 2011-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from __future__ import absolute_import
import json
import logging
import os
import sys
import M2Crypto.SSL
from ..util.paths import getAbsPath
from ..util import OmniError
from ..util import credparsing as credutils
from ..util import json_encoding
from ..xmlrpc import client as xmlrpcclient
from ...sfa.trust.credential import Credential
class Framework_Base():
"""
Framework_Base is an abstract class that identifies the minimal set of functions
that must be implemented in order to add a control framework to omni.
Instructions for adding a new framework:
Create "framework_X" in the frameworks directory, where X is your control framework.
Create a Framework class in the file that inherits "Framework_Base" and fill out each of the functions.
Edit the sample "omni_config" file and add a section for your framework, giving the section
the same name as X used in framework_X. For instance, 'sfa' or 'gcf'. Your framework's section
of the omni config *MUST* have a cert and key entry, which omni will use when talking to
the GENI Aggregate managers.
"""
def __init__(self, config):
self.cert = getAbsPath(config['cert'])
if not os.path.exists(self.cert):
sys.exit("Frameworks certfile %s doesn't exist" % self.cert)
if not os.path.getsize(self.cert) > 0:
sys.exit("Frameworks certfile %s is empty" % self.cert)
self.key = getAbsPath(config['key'])
if not os.path.exists(self.key):
sys.exit("Frameworks keyfile %s doesn't exist" % self.key)
if not os.path.getsize(self.key) > 0:
sys.exit("Frameworks keyfile %s is empty" % self.key)
self.sslctx = None
def init_user_cred( self, opts ):
"""Initialize user credential either from file (if
--usercredfile) or else to None.
Must call this method in framework's __init__ in order for
--usercredfile to be handled properly.
Returns the usercred - in XML string format.
"""
try:
if self.user_cred_struct is not None:
pass
except:
self.user_cred_struct = None
# read the usercred from supplied file
cred = None
if opts.usercredfile and os.path.exists(opts.usercredfile) and os.path.isfile(opts.usercredfile) and os.path.getsize(opts.usercredfile) > 0:
# read the user cred from the given file
if hasattr(self, 'logger'):
logger = self.logger
else:
logger = logging.getLogger("omni.framework")
logger.info("Getting user credential from file %s", opts.usercredfile)
# cred = _load_cred(logger, opts.usercredfile)
with open(opts.usercredfile, 'r') as f:
cred = f.read()
try:
cred = json.loads(cred, encoding='ascii', cls=json_encoding.DateTimeAwareJSONDecoder)
if cred and isinstance(cred, dict) and \
cred.has_key('geni_type') and \
cred.has_key('geni_value') and \
cred['geni_type'] == Credential.SFA_CREDENTIAL_TYPE and \
cred['geni_value'] is not None:
self.user_cred_struct = cred
except Exception, e:
logger.debug("Failed to get a JSON struct from cred in file %s. Treat as a string: %s", opts.usercredfile, e)
cred2 = credutils.get_cred_xml(cred)
if cred2 is None or cred2 == "":
logger.info("Did NOT get valid user cred from %s", opts.usercredfile)
if opts.devmode:
logger.info(" ... but using it anyhow")
else:
cred = None
else:
# This would force a saved user cred in struct to be XML. Is that correct?
#cred = cred2
target = ""
try:
target = credutils.get_cred_target_urn(logger, cred)
if "+authority+sa" in target:
self.logger.debug("Got target %s - PG user creds list the user as the owner only", target)
target = credutils.get_cred_owner_urn(logger, cred)
except:
if not opts.devmode:
logger.warn("Failed to parse target URN from user cred?")
logger.info("Read user %s credential from file %s", target, opts.usercredfile)
elif opts.usercredfile:
if hasattr(self, 'logger'):
logger = self.logger
else:
logger = logging.getLogger("omni.framework")
logger.info("NOT getting user credential from file %s - file doesn't exist or is empty", opts.usercredfile)
return cred
def get_version(self):
"""
Returns a dict of the GetVersion return from the control framework. And an error message if any.
"""
raise NotImplementedError('get_version')
def get_user_cred(self):
"""
Returns a user credential from the control framework as a string. And an error message if any.
"""
raise NotImplementedError('get_user_cred')
def get_slice_cred(self, urn):
"""
Retrieve a slice with the given urn and returns the signed credential as a string.
"""
raise NotImplementedError('get_slice_cred')
def create_slice(self, urn):
"""
If the slice already exists in the framework, it returns that. Otherwise it creates the slice
and returns the new slice as a string.
"""
raise NotImplementedError('create_slice')
def delete_slice(self, urn):
"""
Removes the slice from the control framework.
"""
raise NotImplementedError('delete_slice')
def list_aggregates(self):
"""
Get a list of available GENI Aggregates from the control framework.
Returns: a dictionary where keys are urns and values are aggregate urls
"""
raise NotImplementedError('list_aggregates')
def list_my_slices(self, username):
"""
Get a list of slices for this user.
Returns: a list of slice URNs
"""
raise NotImplementedError('list_my_slices')
def list_my_projects(self, username):
"""
'''List projects owned by the user (name or URN) provided, returning a list of structs, containing
PROJECT_URN, PROJECT_UID, EXPIRED, and PROJECT_ROLE. EXPIRED is a boolean.'''
"""
raise NotImplementedError('list_my_projects')
def list_ssh_keys(self, username=None):
"""
Get a list of SSH key pairs for the given user or the configured current user if not specified.
Private key will be omitted if not known or found.
|
| bio-tools/biotoolsregistry | backend/elixirapp/wsgi.py | Python | gpl-3.0 | 487 | 0.004107 |
"""
WSGI config for elixirapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os, sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "elixirapp.settings")
os.environ["CELERY_LOADER"] = "django"
sys.path.insert(0,'/elixir/application/backend')
application = get_wsgi_application()
|
| fengsp/flask-snippets | database/use_tornado_database.py | Python | bsd-3-clause | 1,247 | 0.004812 |
# -*- coding: utf-8 -*-
"""
database.use_tornado_database
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using tornado.database with MySQL
http://flask.pocoo.org/snippets/11/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from tornado.database import Connection
from flask import g, render_template
from app import app
import config
@app.before_request
def connect_db():
g.db = Connection(config.DB_HOST,
config.DB_NAME,
config.DB_USER,
config.DB_PASSWD)
@app.after_request
def close_connection(response):
g.db.close()
return response
@app.route("/")
def index():
newsitems = g.db.iter("select * from newsitems")
return render_template("index.html", newsitems=newsitems)
"""
{% for item in newsitems %}
<h3>{{ item.title }}</h3>
{% endfor %}
You can get much of the same functionality in SQLAlchemy 0.6 using NamedTuples, without using the ORM:
from sqlalchemy import create_engine
@app.before_request
def connect_db():
g.db = create_engine(config.DB_URI)
@app.route("/")
def index():
newsitems = g.db.execute("select * from newsitems")
# now you can do newsitem.title...
"""
|
| pranjan77/narrative | src/biokbase/narrative/clients.py | Python | mit | 1,813 | 0.002758 |
from biokbase.workspace.client import Workspace
from biokbase.narrative_method_store.client import NarrativeMethodStore
from biokbase.userandjobstate.client import UserAndJobState
from biokbase.catalog.Client import Catalog
from biokbase.service.Client import Client as ServiceClient
from biokbase.execution_engine2.execution_engine2Client import execution_engine2
from biokbase.narrative.common.url_config import URLS
def get(client_name, token=None):
return __init_client(client_name, token=token)
def reset():
__clients = dict()
def __init_client(client_name, token=None):
if client_name == 'workspace':
c = Workspace(URLS.workspace, token=token)
elif client_name == 'narrative_method_store':
c = NarrativeMethodStore(URLS.narrative_method_store, token=token)
elif client_name == 'user_and_job_state':
c = UserAndJobState(URLS.user_and_job_state, token=token)
elif client_name == 'catalog':
c = Catalog(URLS.catalog, token=token)
elif client_name == 'service' or client_name == 'service_wizard':
c = ServiceClient(URLS.service_wizard, use_url_lookup=True, token=token)
elif client_name == 'execution_engine2' or client_name == 'execution_engine' or client_name == 'job_service':
c = execution_engine2(URLS.execution_engine2, token=token)
elif client_name == 'job_service_mock':
c = JobServiceMock()
else:
raise ValueError('Unknown client name "%s"' % client_name)
return c
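# Illustrative usage (not part of the original module); the token value is
# a placeholder:
#   ws = get('workspace', token='MY_KBASE_TOKEN')
#   ee2 = get('execution_engine2')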
class JobServiceMock():
def __init__(self):
self.client = get('service')
def check_job(self, job_id):
return self.client.sync_call('narrative_job_mock.check_job', [job_id])[0]
def check_jobs(self, params):
return self.client.sync_call('narrative_job_mock.check_jobs', [params])[0]
|
| glibin/tortik | tortik_tests/util_test.py | Python | mit | 7,001 | 0.00432 |
# _*_ coding: utf-8 _*_
import os
try:
from cStringIO import StringIO # python 2
except ImportError:
from io import StringIO # python 3
from collections import OrderedDict
import unittest
from tornado.escape import to_unicode
from tortik.util import make_qs, update_url, real_ip
from tortik.util.xml_etree import parse, tostring
class Request(object):
headers = {}
remote_ip = None
class BaseTest(unittest.TestCase):
def assertQueriesEqual(self, qs1, qs2):
qs1_list = sorted(qs1.split('&'))
qs2_list = sorted(qs2.split('&'))
self.assertEqual(qs1_list, qs2_list)
def assertUrlsEqual(self, url1, url2):
u1 = url1.split('?')
u2 = url2.split('?')
self.assertEqual(len(u1), len(u2))
self.assertEqual(u1[0], u2[0])
if len(u1) > 1:
self.assertQueriesEqual(u1[1], u2[1])
class TestMakeQs(BaseTest):
"""This is copy of Frontik's make_qs test: https://github.com/hhru/frontik/blob/master/tests/test_util.py
"""
def test_make_qs_simple(self):
query_args = {'a': '1', 'b': '2'}
self.assertQueriesEqual(make_qs(query_args), 'a=1&b=2')
def test_make_qs_not_str(self):
query_args = {'a': 1, 'b': 2.0, 'c': True}
self.assertQueriesEqual(make_qs(query_args), 'a=1&b=2.0&c=True')
def test_make_qs_iterables(self):
query_args = {'a': [1, 2], 'b': {1, 2}, 'c': (1, 2), 'd': frozenset((1, 2))}
self.assertQueriesEqual(make_qs(query_args), 'a=1&a=2&b=1&b=2&c=1&c=2&d=1&d=2')
def test_make_qs_none(self):
query_args = {'a': None, 'b': None}
self.assertQueriesEqual(make_qs(query_args), '')
def test_make_qs_encode(self):
query_args = {'a': u'тест', 'b': 'тест'}
qs = make_qs(query_args)
self.assertIsInstance(qs, str)
self.assertQueriesEqual(qs, 'a=%D1%82%D0%B5%D1%81%D1%82&b=%D1%82%D0%B5%D1%81%D1%82')
def test_from_ordered_dict(self):
qs = make_qs(OrderedDict([('z', 'я'), ('г', 'd'), ('b', ['2', '1'])]))
self.assertIsInstance(qs, str)
self.assertEqual(qs, 'z=%D1%8F&%D0%B3=d&b=2&b=1')
def test_unicode_params(self):
self.assertQueriesEqual(
make_qs({'при': 'вет', u'по': u'ка'}),
'%D0%BF%D1%80%D0%B8=%D0%B2%D0%B5%D1%82&%D0%BF%D0%BE=%D0%BA%D0%B0'
)
def test_make_qs_comma(self):
query_args = {'a': '1,2,3', 'b': 'asd'}
self.assertQueriesEqual(make_qs(query_args, '/,'), 'a=1,2,3&b=asd')
def test_make_qs_comma_quoted(self):
# default value for `safe` parameter of make_qs is '/' so commas
# should be encoded
query_args = {'a': '1,2,3', 'b': 'asd'}
self.assertQueriesEqual(make_qs(query_args), 'a=1%2C2%2C3&b=asd')
class TestUpdateUrl(BaseTest):
def test_simple(self):
self.assertUrlsEqual(update_url('http://google.com'), 'http://google.com')
self.assertUrlsEqual(update_url('https://google.com'), 'https://google.com')
self.assertUrlsEqual(update_url('google.com'), 'google.com')
self.assertUrlsEqual(update_url('//google.com'), '//google.com')
self.assertUrlsEqual(update_url('http://google.com?a=1'), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com?a=1&b=2'), 'http://google.com?a=1&b=2')
self.assertUrlsEqual(update_url('http://google.com?привет=1'),
'http://google.com?%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82=1')
self.assertUrlsEqual(update_url(u'http://google.com?привет=1'),
'http://google.com?%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82=1')
def test_update_args(self):
self.assertUrlsEqual(update_url('http://google.com', update_args={'a': 1}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com', update_args={'a': '1'}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com', update_args={'a': u'1'}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com', update_args={u'a': u'1'}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com?a=2', update_args={'a': 1}), 'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com?a=2&b=1', update_args={'a': 1}), 'http://google.com?a=1&b=1')
def test_remove_args(self):
self.assertUrlsEqual(update_url('http://google.com?a=2', remove_args=['a']), 'http://google.com')
self.assertUrlsEqual(update_url('http://google.com?a=2', remove_args=[u'a']), 'http://google.com')
self.assertUrlsEqual(update_url('http://google.com?привет=2', remove_args=['привет']), 'http://google.com')
self.assertUrlsEqual(update_url(u'http://google.com?привет=2', remove_args=[u'привет']), 'http://google.com')
self.assertUrlsEqual(update_url('http://google.com?a=2&a=1', remove_args=['a']), 'http://google.com')
self.assertUrlsEqual(update_url('http://google.com?a=2&a=1&b=3', remove_args=['a']), 'http://google.com?b=3')
self.assertUrlsEqual(update_url('http://google.com?a=2&a=1&b=3', remove_args=['b']),
'http://google.com?a=2&a=1')
def test_both(self):
self.assertUrlsEqual(update_url('http://google.com?b=3', update_args={'a': 1}, remove_args=['b']),
'http://google.com?a=1')
self.assertUrlsEqual(update_url('http://google.com?a=2&b=3&c=4', update_args={'a': 1}, remove_args=['b']),
'http://google.com?a=1&c=4')
class TestParse(BaseTest):
def test_parse_xml(self):
fd = open(os.path.join(os.path.dirname(__file__), 'data', 'simple.xml'), 'r')
tree = parse(fd)
self.assertEqual(tree.getroot().tag, 'data')
convert = tostring(tree.getroot(), pretty_print=True, xml_declaration=True, encoding='UTF-8')
# replace any possible conversion differences that are ok
# Python 3+ native etree does not include xml declaration so we should remove it everywhere
converted = to_unicode(convert).replace('\n', '').replace(' ', '').replace('\'', '"').\
replace('<?xmlversion="1.0"encoding="UTF-8"?>', '').strip()
fd.seek(0)
base = to_unicode(fd.read()).replace('\n', '').replace(' ', '').\
replace('<?xmlversion="1.0"encoding="UTF-8"?>', '').strip()
self.assertEqual(converted, base)
fd.close()
class TestRealIp(BaseTest):
def test_real_ip(self):
# default
request = Request()
self.assertEqual('127.0.0.1', real_ip(request))
request = Request()
request.headers = {'X-Real-Ip': '8.8.8.8', 'X-Forwarded-For': '10.0.0.1'}
self.assertEqual('8.8.8.8', real_ip(request))
request = Request()
request.headers = {'X-Forwarded-For': '10.0.0.1, 127.0.0.1'}
self.assertEqual('10.0.0.1', real_ip(request))
|
| vlegoff/tsunami | src/primaires/pnj/commandes/chemin/voir.py | Python | bsd-3-clause | 3,490 | 0.000287 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'voir' de la commande 'chemin'."""
from primaires.format.fonctions import oui_ou_non
from primaires.interpreteur.masque.parametre import Parametre
from primaires.pnj.chemin import FLAGS
class PrmVoir(Parametre):
"""Commande 'chemin voir'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "voir", "view")
self.schema = "<cle>"
self.aide_courte = "affiche le détail d'un chemin"
self.aide_longue = \
"Cette commande permet d'obtenir plus d'informations sur " \
"un chemin (ses flags actifs, ses salles et sorties...)."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
cle = self.noeud.get_masque("cle")
cle.proprietes["regex"] = r"'[a-z0-9_:]{3,}'"
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
cle = dic_masques["cle"].cle
if cle not in importeur.pnj.chemins:
personnage << "|err|Ce chemin n'existe pas.|ff|"
return
chemin = importeur.pnj.chemins[cle]
msg = "Détail sur le chemin {} :".format(chemin.cle)
msg += "\n Flags :"
for nom_flag in FLAGS.keys():
msg += "\n {}".format(nom_flag.capitalize())
msg += " : " + oui_ou_non(chemin.a_flag(nom_flag))
msg += "\n Salles du chemin :"
if len(chemin.salles) == 0:
msg += "\n Aucune"
else:
for salle, direction in chemin.salles.items():
msg += "\n " + salle.ident.ljust(20
|
) + " "
msg += direction.ljust(10)
if salle in chemin.salles_retour and \
chemin.salles_retour[salle]:
msg += " (retour " + chemin.salles_retour[salle] + ")"
personnage << msg
|
| synsun/robotframework | src/robot/utils/normalizing.py | Python | apache-2.0 | 3,790 | 0.000264 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from collections import MutableMapping
from .robottypes import is_dict_like
_WHITESPACE_REGEXP = re.compile(r'\s+')
def normalize(string, ignore=(), caseless=True, spaceless=True):
"""Normalizes given string according to given spec.
By default string is turned to lower case and all whitespace is removed.
Additional characters can be removed by giving them in `ignore` list.
"""
if spaceless:
string = _WHITESPACE_REGEXP.sub('', string)
if caseless:
string = lower(string)
ignore = [lower(i) for i in ignore]
for ign in ignore:
if ign in string: # performance optimization
string = string.replace(ign, '')
return string
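# Example behaviour (illustrative, not from the original source):
#   normalize('Foo Bar') == 'foobar'
#   normalize('F-o-o', ignore=['-']) == 'foo'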
# http://ironpython.codeplex.com/workitem/33133
if sys.platform == 'cli' and sys.version_info < (2, 7, 5):
def lower(string):
return ('A' + string).lower()[1:]
else:
def lower(string):
return string.lower()
class NormalizedDict(MutableMapping):
"""Custom dictionary implementation automatically normalizing keys."""
def __init__(self, initial=None, ignore=(), caseless=True, spaceless=True):
"""Initializes with possible initial value and normalizing spec.
Initial values can be either a dictionary or an iterable of name/value
pairs. In the latter case items are added in the given order.
Normalizing spec has exact same semantics as with `normalize` method.
"""
self._data = {}
self._keys = {}
self._normalize = lambda s: normalize(s, ignore, caseless, spaceless)
if initial:
self._add_initial(initial)
def _add_initial(self, initial):
items = initial.items() if hasattr(initial, 'items') else initial
for key, value in items:
self[key] = value
def __getitem__(self, key):
return self._data[self._normalize(key)]
def __setitem__(self, key, value):
norm_key = self._normalize(key)
self._data[norm_key] = value
self._keys.setdefault(norm_key, key)
def __delitem__(self, key):
norm_key = self._normalize(key)
del self._data[norm_key]
del self._keys[norm_key]
def __iter__(self):
return (self._keys[norm_key] for norm_key in sorted(self._keys))
def __len__(self):
return len(self._data)
def __str__(self):
return '{%s}' % ', '.join('%r: %r' % (key, self[key]) for key in self)
def __eq__(self, other):
if not is_dict_like(other):
return False
if not isinstance(other, NormalizedDict):
other = NormalizedDict(other)
return self._data == other._data
def __ne__(self, other):
return not self == other
def copy(self):
copy = NormalizedDict()
copy._data = self._data.copy()
copy._keys = self._keys.copy()
copy._normalize = self._normalize
return copy
# Speed-ups. Following methods are faster than default implementations.
def __contains__(self, key):
return self._normalize(key) in self._data
def clear(self):
self._data.clear()
self._keys.clear()
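# Illustrative usage (not from the original source): keys that normalize to
# the same string share a single entry.
#   d = NormalizedDict({'Foo Bar': 1})
#   assert d['foobar'] == 1 and 'FOO BAR' in d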
|
| schettino72/serveronduty | websod/database.py | Python | mit | 1,071 | 0.002801 |
import os
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker
metadata = MetaData()
def get_sa_db_uri(driver='', username='', password='', host='', port='', database=''):
"""get SQLAlchemy DB URI: driver://username:password@host:port/database"""
assert driver
if driver == 'sqlite':
# get absolute file path
if not database.startswith('/'):
db_file = os.path.abspath(database)
else:
db_file = database
db_uri = '%s:///%s' % (driver, db_file)
else:
db_uri = ('%s://%s:%s@%s:%s/%s' %
(driver, username, password, host, port, database))
return db_uri
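# Illustrative outputs (not part of the original module):
#   get_sa_db_uri(driver='sqlite', database='sod.db')
#     -> 'sqlite:////absolute/path/to/sod.db'
#   get_sa_db_uri('mysql', 'bob', 'secret', 'localhost', '3306', 'sod')
#     -> 'mysql://bob:secret@localhost:3306/sod'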
class DB(object):
def __init__(self, db_uri):
self.engine = create_engine(db_uri, convert_unicode=True)
self.session = scoped_session(
sessionmaker(autocommit=False,
autoflush=False,
bind=self.engine))
def init_database(self):
metadata.create_all(bind=self.engine)
|
| hehongliang/tensorflow | tensorflow/python/training/optimizer_test.py | Python | apache-2.0 | 12,617 | 0.00959 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class OptimizerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testBasic(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
# Note that for eager execution, minimize expects a function instead of a
# Tensor.
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64), name='global_step_%d' % i)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd_op.minimize(loss, global_step, [var0, var1])
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
def testAggregationMethod(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost,
global_step, [var0, var1],
aggregation_method=gradients_impl.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
def testPrecomputedGradient(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
grad_loss = constant_op.constant([42, -42], dtype=dtype)
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost, global_step, [var0, var1], grad_loss=grad_loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
self.evaluate(var0))
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testNoVariables(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# pylint: disable=cell-var-from-loop
def loss():
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype, trainable=False, name='a')
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtype, trainable=False, name='b')
return 5 * var0 + var1
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError, 'No.*variables'):
sgd_op.minimize(loss)
@test_util.run_in_graph_and_eager_modes
def testNoGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b%d' % i)
# pylint: disable=cell-var-from-loop
def loss():
return 5 * var0
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError, 'No gradients'):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_Minimize(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
name='b_%d' % i)
def loss():
return constant_op.constant(5.0)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError,
                             'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_ApplyGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
# Note that we name the variables uniquely here since the variables don't
# seem to be getting deleted at the end of the loop.
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype,
name='a_%d' % i)
v
|
| shinpeimuraoka/ryu | ryu/lib/netdevice.py | Python | apache-2.0 | 4,034 | 0.002231 |
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Constants defined in netdevice(7)
# Interface flags
# from net/if.h
IFF_UP = 1 << 0 # Interface is running.
IFF_BROADCAST = 1 << 1 # Valid broadcast address set.
IFF_DEBUG = 1 << 2 # Internal debugging flag.
IFF_LOOPBACK = 1 << 3 # Interface is a loopback interface.
IFF_POINTOPOINT = 1 << 4 # Interface is a point-to-point link.
IFF_NOTRAILERS = 1 << 5 # Avoid use of trailers.
IFF_RUNNING = 1 << 6 # Resources allocated.
IFF_NOARP = 1 << 7 # No arp protocol, L2 destination address not set.
IFF_PROMISC = 1 << 8 # Interface is in promiscuous mode.
IFF_ALLMULTI = 1 << 9 # Receive all multicast packets.
IFF_MASTER = 1 << 10 # Master of a load balancing bundle.
IFF_SLAVE = 1 << 11 # Slave of a load balancing bundle.
IFF_MULTICAST = 1 << 12 # Supports multicast.
IFF_PORTSEL = 1 << 13 # Is able to select media type via ifmap.
IFF_AUTOMEDIA = 1 << 14 # Auto media selection active.
IFF_DYNAMIC = 1 << 15 # The addresses are lost when the interface goes down.
# from linux/if.h
IFF_LOWER_UP = 1 << 16 # Driver signals L1 up. (since Linux 2.6.17)
IFF_DORMANT = 1 << 17 # Driver signals dormant. (since Linux 2.6.17)
IFF_ECHO = 1 << 18 # Echo sent packets. (since Linux 2.6.25)
# Private interface flags
# from linux/netdevice.h
IFF_802_1Q_VLAN = 1 << 0 # 802.1Q VLAN device.
IFF_EBRIDGE = 1 << 1 # Ethernet bridging device.
IFF_BONDING = 1 << 2 # bonding master or slave.
IFF_ISATAP = 1 << 3 # ISATAP interface (RFC4214).
IFF_WAN_HDLC = 1 << 4 # WAN HDLC device.
IFF_XMIT_DST_RELEASE = 1 << 5 # dev_hard_start_xmit() is allowed to release skb->dst.
IFF_DONT_BRIDGE = 1 << 6 # disallow bridging this ether dev.
IFF_DISABLE_NETPOLL = 1 << 7 # disable netpoll at run-time.
IFF_MACVLAN_PORT = 1 << 8 # device used as macvlan port.
IFF_BRIDGE_PORT = 1 << 9 # device used as bridge port.
IFF_OVS_DATAPATH = 1 << 10 # device used as Open vSwitch datapath port.
IFF_TX_SKB_SHARING = 1 << 11 # The interface supports sharing skbs on transmit.
IFF_UNICAST_FLT = 1 << 12 # Supports unicast filtering.
IFF_TEAM_PORT = 1 << 13 # device used as team port.
IFF_SUPP_NOFCS = 1 << 14 # device supports sending custom FCS.
IFF_LIVE_ADDR_CHANGE = 1 << 15 # device supports hardware address change when it's running.
IFF_MACVLAN = 1 << 16 # Macvlan device.
IFF_XMIT_DST_RELEASE_PERM = 1 << 17 # IFF_XMIT_DST_RELEASE not taking into account underlying stacked devices.
IFF_IPVLAN_MASTER = 1 << 18 # IPvlan master device.
IFF_IPVLAN_SLAVE = 1 << 19 # IPvlan slave device.
IFF_L3MDEV_MASTER = 1 << 20 # device is an L3 master device.
IFF_NO_QUEUE = 1 << 21 # device can run without qdisc attached.
IFF_OPENVSWITCH = 1 << 22 # device is a Open vSwitch master.
IFF_L3MDEV_SLAVE = 1 << 23 # device is enslaved to an L3 master device.
IFF_TEAM = 1 << 24 # device is a team device.
IFF_RXFH_CONFIGURED = 1 << 25 # device has had Rx Flow indirection table configured.
IFF_PHONY_HEADROOM = 1 << 26 # the headroom value is controlled by an external entity. (i.e. the master device for bridged veth)
IFF_MACSEC = 1 << 27 # device is a MACsec device.
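# Illustrative sketch (an addition for clarity, not part of netdevice(7)):
# decode a flags word, e.g. as returned by the SIOCGIFFLAGS ioctl, into the
# names of the net/if.h flags above. The private flags are omitted because
# they reuse the same bit positions in a separate field.
_IFF_NAMES = [
    (IFF_UP, 'UP'), (IFF_BROADCAST, 'BROADCAST'), (IFF_LOOPBACK, 'LOOPBACK'),
    (IFF_POINTOPOINT, 'POINTOPOINT'), (IFF_RUNNING, 'RUNNING'),
    (IFF_NOARP, 'NOARP'), (IFF_PROMISC, 'PROMISC'),
    (IFF_MULTICAST, 'MULTICAST'),
]


def decode_ifflags(flags):
    """Return the names of the interface flags set in ``flags``."""
    return [name for bit, name in _IFF_NAMES if flags & bit]


# e.g. decode_ifflags(IFF_UP | IFF_RUNNING) == ['UP', 'RUNNING']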
|
robinson96/GRAPE
|
keyring/demo/keyring_demo.py
|
Python
|
bsd-3-clause
| 2,347
| 0.005113
|
"""
keyring_demo.py
This demo shows how to create a new keyring and enable it in keyring lib.
Created by Kang Zhang on 2009-07-12
"""
import os
KEYRINGRC = "keyringrc.cfg"
def load_keyring_by_config():
"""This function shows how to enable a keyring using config file
"""
# create the config file
config_file = open(KEYRINGRC,'w')
config_file.writelines(["[backend]\n",
# the path for the user created keyring
"k
|
eyring-path= %s\n" % str(os.path.abspath(__file__))[:-16],
# the name of the keyring class
"default-keyring=simplekeyring.SimpleKeyring\n" ])
config_file.close()
    # import the keyring lib, the lib will automatically load the
# config file and load the user defined module
import keyring
# invoke the keyring to store and fetch the password
try:
keyring.set_password("demo-service", "tarek", "passexample")
print "password stored sucessfully"
except keyring.backend.PasswordSetError:
print "failed to store password"
print "password", keyring.get_password("demo-service", "tarek")
os.remove(KEYRINGRC)
def set_keyring_in_runtime():
"""This function shows how to create a keyring manully and use it
in runtime
"""
# define a new keyring class which extends the KeyringBackend
import keyring.backend
class TestKeyring(keyring.backend.KeyringBackend):
"""A test keyring which always outputs same password
"""
def supported(self): return 0
def set_password(self, servicename, username, password): return 0
def get_password(self, servicename, username):
return "password from TestKeyring"
# set the keyring for keyring lib
import keyring
keyring.set_keyring(TestKeyring())
# invoke the keyring lib
try:
keyring.set_password("demo-service", "tarek", "passexample")
print "password stored sucessfully"
except keyring.backend.PasswordSetError:
print "failed to store password"
print "password", keyring.get_password("demo-service", "tarek")
def main():
"""This script shows how to enable the keyring using the config
file and in runtime.
"""
load_keyring_by_config()
set_keyring_in_runtime()
if __name__ == '__main__':
main()
|
pymber/algorithms
|
tests/run-test-sort-selection.py
|
Python
|
mit
| 154
| 0.006494
|
#!/usr/bin/env python
from algorithms.sorting.selection_sort import *
from __prototype__ import *
if __name__ == '__main__':
test_all(selection_sort)
|
chrox/RealTimeElectrophy
|
Experimenter/DataProcessing/Fitting/Fitters.py
|
Python
|
bsd-2-clause
| 7,874
| 0.04318
|
# Data fitting wrappers with optimized parameters.
#
# Copyright (C) 2010-2011 Huang Xin
#
# See LICENSE.TXT that came with this file.
from __future__ import division
import math
import numpy as np
import scipy.ndimage as nd
from sinusoidfitter import onedsinusoidfit,onedsinusoid
from gaussfitter import gaussfit,onedgaussfit,onedgaussian,onedloggaussfit,onedloggaussian
from gaborfitter import gaborfit,onedgaborfit,onedgabor
class SinusoidFit(object):
def sinusoid1d(self,xax,data,modelx=None,return_models=True,return_all=False,**kwargs):
"""
1d sinusoidal params: (height, amplitude, frequency, phase)
"""
frequency = 2*np.pi/(xax.max()-xax.min())
amplitude = (data.max()-data.min())/2
params=[(data.max()+data.min())/2,amplitude,frequency,0]
fixed=[False,False,True,False]
limitedmin=[True,True,True,True]
limitedmax=[True,True,True,True]
minpars=[data.min(),0.8*amplitude,0.8*frequency,-180]
maxpars=[data.max(),1.2*amplitude,1.2*frequency,540]
params,_model,errs,chi2 = onedsinusoidfit(xax,data,params=params,fixed=fixed,\
limitedmin=limitedmin,limitedmax=limitedmax,\
minpars=minpars,maxpars=maxpars,**kwargs)
        if modelx is None:
modelx = xax
model_xdata = onedsinusoid(xax,*params)
model_fitting = onedsinusoid(modelx,*params)
if return_all:
return params,model_xdata,model_fitting,errs,chi2
elif return_models:
return (model_xdata, model_fitting)
class GaussFit(object):
def gaussfit1d(self,xax,data,modelx=None,return_models=True,return_all=False,**kwargs):
"""
1d gaussian params: (height, amplitude, shift, width)
"""
width = xax.max()-xax.min()
lower_bound = np.sort(data)[:3].mean()
params=[0,(data.max()-data.min())*0.5,0,width*0.2]
fixed=[False,False,False,False]
limitedmin=[False,True,True,True]
limitedmax=[True,True,True,True]
minpars=[0,(data.max()-data.min())*0.5,xax.min()-width,width*0.05]
maxpars=[lower_bound*1.5,data.max()-data.min(),xax.max(),width*3.0]
params,_model,errs,chi2 = onedgaussfit(xax,data,params=params,fixed=fixed,\
limitedmin=limitedmin,limitedmax=limitedmax,\
minpars=minpars,maxpars=maxpars,**kwargs)
        if modelx is None:
modelx = xax
model_xdata = onedgaussian(xax,*params)
model_fitting = onedgaussian(modelx,*params)
if return_all:
return params,model_xdata,model_fitting,errs,chi2
elif return_models:
return (model_xdata, model_fitting)
def loggaussfit1d(self,xax,data,modelx=None,return_models=True,return_all=False,**kwargs):
"""
1d gaussian params: (height, amplitude, shift, width)
"""
width = xax.max()-xax.min()
        lower_bound = np.sort(data)[:3].mean()
params=[0,(data.max()-data.min())*0.5,0,width*0.2]
fixed=[False,False,False,False]
        limitedmin=[False,True,True,True]
limitedmax=[True,True,True,True]
minpars=[0,(data.max()-data.min())*0.5,xax.min()-width,width*0.05]
maxpars=[lower_bound*1.5,data.max()-data.min(),xax.max(),width*3.0]
params,_model,errs,chi2 = onedloggaussfit(xax,data,params=params,fixed=fixed,\
limitedmin=limitedmin,limitedmax=limitedmax,\
minpars=minpars,maxpars=maxpars,**kwargs)
        if modelx is None:
modelx = xax
model_xdata = onedloggaussian(xax,*params)
model_fitting = onedloggaussian(modelx,*params)
if return_all:
return params,model_xdata,model_fitting,errs,chi2
elif return_models:
return (model_xdata, model_fitting)
def gaussfit2d(self,img,returnfitimage=True,return_all=False):
"""
2d gaussian params: (height, amplitude, center_x, center_y, width_x, width_y, theta)
"""
x_dim,y_dim = img.shape
limitedmin = [False,False,True,True,True,True,True]
limitedmax = [False,False,True,True,True,True,True]
minpars = [0.0, 0.0, 0, 0, x_dim*0.1, y_dim*0.1, 0.0]
maxpars = [0.0, 0.0, x_dim, y_dim, x_dim*0.8, y_dim*0.8, 360.0]
usemoment= np.array([True,True,False,False,False,False,True],dtype='bool')
#usemoment=np.array([],dtype='bool')
params = [0.0, 0.0, x_dim/2, y_dim/2, x_dim/3, y_dim/3, 0.0]
img = nd.filters.gaussian_filter(img,0.2)
if returnfitimage:
params,img = gaussfit(img,params=params,returnfitimage=True,limitedmin=limitedmin,\
limitedmax=limitedmax,minpars=minpars,maxpars=maxpars,usemoment=usemoment)
return params,img
elif return_all:
params,errors = gaussfit(img,params=params,return_all=True,limitedmin=limitedmin,\
limitedmax=limitedmax,minpars=minpars,maxpars=maxpars,usemoment=usemoment)
return params,errors
class GaborFit(object):
def gaborfit1d(self,xax,data,modelx=None,return_models=True,return_all=False,**kwargs):
"""
1d gabor params: (height,amplitude,shift,width,wavelength,phase)
"""
wavelength = xax.max()-xax.min()
width = xax.max()-xax.min()
params=[(data.max()+data.min())/2,(data.max()-data.min())/2,width*0.5,width*0.2,wavelength,0]
fixed=[False,False,False,False,True,False]
limitedmin=[True,False,True,True,False,True]
limitedmax=[True,False,True,True,False,True]
minpars=[data.min(),0,xax.min()-3*width,width*0.05,0,0]
maxpars=[data.max(),0,xax.max()+3*width,width*3.00,0,360]
params,_model,errs,chi2 = onedgaborfit(xax,data,params=params,fixed=fixed,\
limitedmin=limitedmin,limitedmax=limitedmax,\
minpars=minpars,maxpars=maxpars,**kwargs)
        if modelx is None:
modelx = xax
model_xdata = onedgabor(xax,*params)
model_fitting = onedgabor(modelx,*params)
if return_all:
return params,model_xdata,model_fitting,errs,chi2
elif return_models:
return (model_xdata, model_fitting)
def gaborfit2d(self,img,returnfitimage=True,return_all=False):
"""
2d gabor params: (height,amplitude,center_x,center_y,width_x,width_y,theta,frequency,phase)
These parameters determine the properties of the spatial receptive field. see Dayan etc., 2002
"""
x_dim,y_dim = img.shape
diag = math.sqrt(x_dim**2+y_dim**2)
limitedmin=[False,False,True,True,True,True,True,True,True]
limitedmax=[False,False,True,True,True,True,True,True,True]
minpars=[0.0, 0.0, 0.0, 0.0, x_dim*0.2, y_dim*0.2, 0.0, diag, 0.0]
maxpars=[0.0, 0.0, x_dim, y_dim, x_dim*0.5, y_dim*0.5, 360.0, diag*2, 180.0]
params = [0.0, 0.0, x_dim/2, y_dim/2, x_dim/3, y_dim/3, 0.0, diag, 0.0]
img = nd.filters.gaussian_filter(img,0.2)
if returnfitimage:
params,img = gaborfit(img,params=params,returnfitimage=True,limitedmin=limitedmin,\
limitedmax=limitedmax,minpars=minpars,maxpars=maxpars)
return params,img
elif return_all:
params,errors = gaborfit(img,params=params,return_all=True,limitedmin=limitedmin,\
limitedmax=limitedmax,minpars=minpars,maxpars=maxpars)
return params,errors
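# Illustrative usage sketch (an addition for clarity; synthetic data, and it
# assumes the fitter modules imported above are importable). The fixed
# frequency in sinusoid1d spans one period over the x range, so a matching
# sinusoid is recovered almost exactly.
if __name__ == '__main__':
    xax = np.linspace(0., 360., 64)
    data = 2.0 + 0.5 * np.sin(2 * np.pi * xax / 360.)
    model_xdata, model_fitting = SinusoidFit().sinusoid1d(xax, data)
    print('sinusoid fit peak: %.3f' % model_fitting.max())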
|
frreiss/tensorflow-fred
|
tensorflow/python/distribute/collective_util.py
|
Python
|
apache-2.0
| 8,799
| 0.002842
|
# coding=utf-8
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for collectives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import enum
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# TODO(b/170340570): print deprecation warning for CollectiveCommunication.
@tf_export("distribute.experimental.CommunicationImplementation",
"distribute.experimental.CollectiveCommunication")
class CommunicationImplementation(enum.Enum):
"""Cross device communication implementation.
Warning: The alias `tf.distribute.experimental.CollectiveCommunication` is
deprecated and will be removed in a future version. Use
`tf.distribute.experimental.CommunicationImplementation` instead.
* `AUTO`: Automatically chosen by Tensorflow.
* `RING`: TensorFlow's ring algorithms for all-reduce and
all-gather.
* `NCCL`: NVIDIA®'s NCCL library. This is now only used for all-reduce on
    GPUs; all-reduce on CPU, all-gather and broadcast fall back to RING.
"""
AUTO = "AUTO"
RING = "RING"
NCCL = "NCCL"
# TODO(ayushd): add ncclAllGather implementation.
CollectiveCommunication = CommunicationImplementation
@tf_export("distribute.experimental.CommunicationOptions")
class _OptionsExported(object):
"""Options for cross device communications like All-reduce.
This can be passed to methods like
`tf.distribute.get_replica_context().all_reduce()` to optimize collective
operation performance. Note that these are only hints, which may or may not
  change the actual behavior. Some options only apply to certain strategies and
are ignored by others.
One common optimization is to break gradients all-reduce into multiple packs
  so that weight updates can overlap with gradient all-reduce.
Examples:
```python
options = tf.distribute.experimental.CommunicationOptions(
bytes_per_pack=50 * 1024 * 1024,
timeout_seconds=120,
implementation=tf.distribute.experimental.CommunicationImplementation.NCCL
)
grads = tf.distribute.get_replica_context().all_reduce(
'sum', grads, options=options)
optimizer.apply_gradients(zip(grads, vars),
experimental_aggregate_gradients=False)
```
"""
  def __new__(cls, *args, **kwargs):
# We expose a dummy class so that we can separate internal and public APIs.
# Note that __init__ won't be called on the returned object if it's a
# different class [1].
# [1] https://docs.python.org/3/reference/datamodel.html#object.__new__
return Options(*args, **kwargs)
def __init__(self,
bytes_per_pack=0,
timeout_seconds=None,
implementation=CommunicationImplementation.AUTO):
"""Creates a CollectiveHints.
Args:
bytes_per_pack: a non-negative integer. Breaks collective operations into
packs of certain size. If it's zero, the value is determined
automatically. This only applies to all-reduce with
`MultiWorkerMirroredStrategy` currently.
timeout_seconds: a float or None, timeout in seconds. If not None, the
collective raises `tf.errors.DeadlineExceededError` if it takes longer
than this timeout. Zero disables timeout. This can be useful when
debugging hanging issues. This should only be used for debugging since
it creates a new thread for each collective, i.e. an overhead of
`timeout_seconds * num_collectives_per_second` more threads. This only
works for `tf.distribute.experimental.MultiWorkerMirroredStrategy`.
implementation: a
`tf.distribute.experimental.CommunicationImplementation`. This is a hint
on the preferred communication implementation. Possible values include
`AUTO`, `RING`, and `NCCL`. NCCL is generally more performant for GPU,
but doesn't work for CPU. This only works for
`tf.distribute.experimental.MultiWorkerMirroredStrategy`.
Raises:
ValueError: When arguments have invalid value.
"""
pass
class Options(object):
"""Implementation of OptionsInterface."""
def __init__(self,
bytes_per_pack=0,
timeout_seconds=None,
implementation=CommunicationImplementation.AUTO):
if bytes_per_pack < 0:
raise ValueError(
f"Argument `bytes_per_pack` must be >=0, Received {bytes_per_pack}.")
if isinstance(implementation, str):
implementation = CommunicationImplementation(implementation.upper())
if not isinstance(implementation, CommunicationImplementation):
raise ValueError(
"Argument `implementation` must be instance of "
"`tf.distribute.experimental.CommunicationImplementation`.")
self.bytes_per_pack = bytes_per_pack
self.timeout_seconds = timeout_seconds
self.implementation = implementation
__init__.__doc__ = _OptionsExported.__init__.__doc__
def merge(self, options):
"""Merges with another options and returns a new one.
    Values specified in the `options` take precedence if they're not the
default.
Args:
options: a `tf.distribute.experimental.CollectiveCommunication`.
Returns:
A new `tf.distribute.experimental.CollectiveCommunication`.
"""
merged = copy.deepcopy(self)
if options is None:
return merged
if options.bytes_per_pack != 0:
merged.bytes_per_pack = options.bytes_per_pack
if options.timeout_seconds is not None:
merged.timeout_seconds = options.timeout_seconds
if options.implementation != CommunicationImplementation.AUTO:
merged.implementation = options.implementation
return merged
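# Illustrative example of Options.merge() (an addition for clarity):
#   a = Options(timeout_seconds=30)
#   b = Options(bytes_per_pack=1024)
#   merged = a.merge(b)
#   # merged.bytes_per_pack == 1024 and merged.timeout_seconds == 30:
#   # non-default values from the argument win; everything else is kept.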
@tf_export("distribute.experimental.CollectiveHints")
class Hints(object):
"""Hints for collective operations like AllReduce.
This can be passed to methods like
`tf.distribute.get_replica_context().all_reduce()` to optimize collective
operation performance. Note that these are only hints, which may or may not
change the actual behavior. Some options only apply to certain strategy and
are ignored by others.
One common optimization is to break gradients all-reduce into multiple packs
so that weight updates can overlap with gradient all-reduce.
Examples:
- bytes_per_pack
```python
hints = tf.distribute.experimental.CollectiveHints(
bytes_per_pack=50 * 1024 * 1024)
grads = tf.distribute.get_replica_context().all_reduce(
'sum', grads, experimental_hints=hints)
optimizer.apply_gradients(zip(grads, vars),
experimental_aggregate_gradients=False)
```
- timeout_seconds
```python
strategy = tf.distribute.MirroredStrategy()
hints = tf.distribute.experimental.CollectiveHints(
timeout_seconds=120)
try:
strategy.reduce("sum", v, axis=None, experimental_hints=hints)
except tf.errors.DeadlineExceededError:
do_something()
```
"""
@deprecation.deprecated(
None, "use distribute.experimental.CommunicationOptions instead")
def __new__(cls, bytes_per_pack=0, timeout_seconds=None):
return Options(
bytes_per_pack=bytes_per_pack, timeout_seconds=timeout_seconds)
def __init__(self, bytes_per_pack=0, timeout_seconds=None):
"""Creates a CollectiveHints.
Args:
bytes_per_pack: a non-negative integer. Breaks collective operations into
packs of certain size. If it's zero, the value is determined
automatically. This only applies to all-reduce with
        `MultiWorkerMirroredStrategy` currently.
      timeout_seconds: a float or None, timeout in seconds. If not None, the
        collective raises `tf.errors.DeadlineExceededError` if it takes longer
        than this timeout. Zero disables timeout. This can be useful when
        debugging hanging issues. This should only be used for debugging since
        it creates a new thread for each collective, i.e. an overhead of
        `timeout_seconds * num_collectives_per_second` more threads. This only
        works for `tf.distribute.experimental.MultiWorkerMirroredStrategy`.
    Raises:
      ValueError: When arguments have invalid value.
    """
    pass
|
rustychris/stompy
|
test/test_exact_delaunay2.py
|
Python
|
mit
| 2,185
| 0.040732
|
from __future__ import print_function
import numpy as np
from stompy.grid import exact_delaunay, unstructured_grid
Triangulation=exact_delaunay.Triangulation
from stompy.spatial import robust_predicates
def test_gen_intersected_elements():
dt = Triangulation()
pnts = [ [0,0],
[5,0],
[10,0],
[5,5] ]
nA=dt.add_node( x=pnts[0] ) # This tests insert into empty
dt.add_node( x=pnts[1] ) # adjacent_vertex
dt.add_node( x=pnts[2] ) # adjacent_vertex
dt.add_node( x=pnts[3] ) # adjacent_edge
    dt.add_node( x=[3,0] ) # collinear
dt.add_node( x=[6,2] ) # into cell interior
nB=dt.add_node( x=[12,4] ) # collinear cell interior
nodes=list(dt.valid_node_iter())
for iA,nA in enumerate(nodes):
for nB in nodes[iA+1:]:
print("test_gen_intersected_elements: %s to %s"%(dt.nodes['x'][nA],
dt.nodes['x'][nB]))
fwd=list(dt.gen_intersected_elements(nA=nA,nB=nB))
rev=list(dt.gen_intersected_elements(nA=nB,nB=nA))
assert len(fwd) == len(rev)
def test_gen_int_elts_dim1():
dt = Triangulation()
pnts = [ [0,0],
[5,0],
[10,0] ]
for pnt in pnts:
dt.add_node( x=pnt )
assert len(list(dt.gen_intersected_elements(0,1)))==3
assert len(list(dt.gen_intersected_elements(0,2)))==5
assert len(list(dt.gen_intersected_elements(1,2)))==3
# and with some points
assert len(list(dt.gen_intersected_elements(pA=[-1,-1],
pB=[-1,1])))==0
    elts=list(dt.gen_intersected_elements(pA=[0,-1],pB=[0,1]))
assert len(elts)==1
assert elts[0][0]=='node'
elts=list(dt.gen_intersected_elements(pA=[0,-1],pB=[1,1]))
assert len(elts)==1
assert elts[0][0]=='edge'
def test_gen_int_elts_dim0():
dt = Triangulation()
assert len(list(dt.gen_intersected_elements(pA=[-1,0],pB=[1,0])))==0
dt.add_node(x=[0,0])
assert len(list(dt.gen_intersected_elements(pA=[-1,0],pB=[1,0])))==1
assert len(list(dt.gen_intersected_elements(pA=[-1,0],pB=[1,1])))==0
|
shawnadelic/shuup
|
shuup/simple_supplier/admin_module/views.py
|
Python
|
agpl-3.0
| 6,626
| 0.002717
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.http import JsonResponse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from shuup.admin.utils.picotable import ChoicesFilter, Column, TextFilter
from shuup.admin.utils.views import PicotableListView
from shuup.core.models import Product, StockBehavior, Supplier
from shuup.simple_supplier.forms import AlertLimitForm, StockAdjustmentForm
from shuup.simple_supplier.models import StockCount
from shuup.simple_supplier.utils import (
get_stock_adjustment_div, get_stock_information_div_id,
get_stock_information_html
)
class StocksListView(PicotableListView):
template_name = "shuup/simple_supplier/admin/base_picotable.jinja"
model = Product
default_columns = [
Column(
"sku", _("SKU"), sort_field="product__sku", display="product__sku", linked=True,
filter_config=TextFilter(filter_field="product__sku", placeholder=_("Filter by SKU..."))
),
Column(
"name", _("Name"), sort_field="product__translations__name", display="product__name", linked=True,
filter_config=TextFilter(filter_field="product__translations__name", placeholder=_("Filter by name..."))
),
Column(
"supplier", _("Supplier"), display="supplier", linked=False,
filter_config=ChoicesFilter(Supplier.objects.filter(module_identifier="simple_supplier"))
),
Column(
"stock_information", _("Stock information"), display="get_stock_information",
linked=False, sortable=False, raw=True
),
Column(
"adjust_stock", _("Adjust stock"), display="get_stock_adjustment_form",
sortable=False, linked=False, raw=True
)
]
def __init__(self):
super(StocksListView, self).__init__()
self.columns = self.default_columns
def get_object_abstract(self, instance, item):
item.update({"_linked_in_mobile": False, "_url": self.get_object_url(instance.product)})
return [
{"text": item.get("name"), "class": "header"},
{"title": "", "text": item.get("sku")},
{"title": "", "text": " ", "raw": item.get("stock_information")},
{"title": "", "text": " ", "raw": item.get("adjust_stock")},
]
def get_queryset(self):
return StockCount.objects.filter(
supplier__module_identifier="simple_supplier",
product__stock_behavior=StockBehavior.STOCKED,
product__deleted=False
).order_by("product__id")
def get_context_data(self, **kwargs):
context = super(PicotableListView, self).get_context_data(**kwargs)
context["toolbar"] = None
context["title"] = _("Stock management")
return context
def get_stock_information(self, instance):
return get_stock_information_html(instance.supplier, instance.product)
def get_stock_adjustment_form(self, instance):
return get_stock_adjustment_div(self.request, instance.supplier, instance.product)
def get_adjustment_success_message(stock_adjustment):
arguments = {
"delta": stock_adjustment.d
|
elta,
"unit_short_name": stock_adjustment.product.sales_unit.short
|
_name,
"product_name": stock_adjustment.product.name,
"supplier_name": stock_adjustment.supplier.name
}
if stock_adjustment.delta > 0:
return _(
"Added %(delta)s %(unit_short_name)s for product %(product_name)s stock (%(supplier_name)s)"
) % arguments
else:
return _(
"Removed %(delta)s %(unit_short_name)s from product %(product_name)s stock (%(supplier_name)s)"
) % arguments
def _process_stock_adjustment(form, request, supplier_id, product_id):
data = form.cleaned_data
supplier = Supplier.objects.get(id=supplier_id)
stock_adjustment = supplier.module.adjust_stock(
product_id,
delta=data.get("delta"),
purchase_price=data.get("purchase_price"),
created_by=request.user
)
success_message = {
"stockInformationDiv": "#%s" % get_stock_information_div_id(
stock_adjustment.supplier, stock_adjustment.product),
"updatedStockInformation": get_stock_information_html(
stock_adjustment.supplier, stock_adjustment.product),
"message": get_adjustment_success_message(stock_adjustment)
}
return JsonResponse(success_message, status=200)
def process_stock_adjustment(request, supplier_id, product_id):
return _process_and_catch_errors(
_process_stock_adjustment, StockAdjustmentForm, request, supplier_id, product_id)
def _process_alert_limit(form, request, supplier_id, product_id):
supplier = Supplier.objects.get(id=supplier_id)
product = Product.objects.get(id=product_id)
sc = StockCount.objects.get(supplier=supplier, product=product)
data = form.cleaned_data
sc.alert_limit = data.get("alert_limit")
sc.save()
supplier = Supplier.objects.get(id=supplier_id)
success_message = {
"stockInformationDiv": "#%s" % get_stock_information_div_id(supplier, product),
"updatedStockInformation": get_stock_information_html(supplier, product),
"message": _("Alert limit for product %(product_name)s set to %(value)s.") % {
"product_name": product.name, "value": sc.alert_limit},
}
return JsonResponse(success_message, status=200)
def process_alert_limit(request, supplier_id, product_id):
return _process_and_catch_errors(
_process_alert_limit, AlertLimitForm, request, supplier_id, product_id)
def _process_and_catch_errors(process, form_class, request, supplier_id, product_id):
try:
if request.method != "POST":
raise Exception(_("Not allowed"))
form = form_class(request.POST)
if form.is_valid():
return process(form, request, supplier_id, product_id)
error_message = ugettext("Error, please check submitted values and try again.")
return JsonResponse({"message": error_message}, status=400)
except Exception as exc:
error_message = ugettext(
"Error, please check submitted values and try again (%(error)s).") % {"error": exc}
return JsonResponse({"message": error_message}, status=400)
|
fcvarela/beerprogress
|
beerprogress/__init__.py
|
Python
|
bsd-3-clause
| 3,534
| 0.001698
|
import os
import psutil
import sys
__all__ = [
'BeerProgress'
]
_default_display = {
'cpu': True,
'mem': True,
'progressbar': True,
'percent': True,
'tasks_ratio': True,
'skipped_tasks': True,
'fd_count': True,
'context_switches': True
}
class BeerProgress(object):
def __init__(self, indicator='#', total_tasks=0, display=_default_display, progress_character='\U0001F37A'):
self.indicator = indicator
self._total_tasks = total_tasks
self._completed_tasks = 0
self._skipped_tasks = 0
self.progress_character = progress_character
for s in display:
if s not in _default_display:
raise ValueError("Unsupported display item: %s", s)
        self.display = display
self.proc = psutil.Process(os.getpid())
self.metrics = {
'cpu': 0,
'mem': 0,
'percent': 0,
'fds': 0,
'ctxv': 0,
'ctxi': 0
}
@property
def completed_tasks(self):
return self._completed_tasks
@completed_tasks.setter
def completed_tasks(self, completed_tasks):
self._completed_tasks = completed_tasks
    @property
def total_tasks(self):
return self._total_tasks
@total_tasks.setter
def total_tasks(self, total_tasks):
self._total_tasks = total_tasks
@property
def skipped_tasks(self):
return self._skipped_tasks
@skipped_tasks.setter
def skipped_tasks(self, skipped_tasks):
self._skipped_tasks = skipped_tasks
def print_progress(self, same_line=True, stream=sys.stderr):
if same_line:
stream.write('\r' + self.progress_string())
else:
stream.write(self.progress_string()+'\n')
def tick(self):
if self._total_tasks == 0:
raise ValueError("Cannot tick without total tasks set")
self.metrics['percent'] = float((self.completed_tasks + self.skipped_tasks) * 100.0 / self.total_tasks)
self.metrics['cpu'] = float(self.proc.get_cpu_percent(interval=0))
self.metrics['mem'] = float(self.proc.get_memory_info()[0]/1024.0/1024.0)
self.metrics['fds'] = self.proc.get_num_fds()
self.metrics['ctxv'] = self.proc.get_num_ctx_switches()[0]
self.metrics['ctxi'] = self.proc.get_num_ctx_switches()[1]
def progress_string(self, length=20):
# in characters, not bytes
pb_symbol_length = int(self.metrics['percent'] * length / 100)
pb_spaces_length = length - pb_symbol_length
full_pb = self.progress_character * length
pb_string = full_pb[:pb_symbol_length*len(self.progress_character)] + pb_spaces_length * ' '
status = ""
if "cpu" in self.display:
status += "CPU %6.2f%% " % self.metrics['cpu']
if "mem" in self.display:
status += "Mem %6.2fMB " % self.metrics['mem']
if "progressbar" in self.display:
status += pb_string + " "
if "percent" in self.display:
status += "[%6.2f%%] " % self.metrics['percent']
if "tasks_ratio" in self.display:
status += "Complete: %d/%d " % (self.completed_tasks+self.skipped_tasks, self.total_tasks)
if "skipped_tasks" in self.display:
status += "Skipped: %d " % self.skipped_tasks
if "fd_count" in self.display:
status += "CTXvol: %d CTXinvol: %d" % (self.metrics['ctxv'], self.metrics['ctxi'])
return status
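# Illustrative usage sketch (an addition for clarity; a hypothetical task
# loop, and it assumes the legacy psutil get_* accessors used in tick()):
if __name__ == '__main__':
    import time
    bar = BeerProgress(total_tasks=10)
    for _ in range(10):
        time.sleep(0.1)  # stand-in for real work
        bar.completed_tasks += 1
        bar.tick()
        bar.print_progress()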
|
SulavKhadka/Sorting-Algorithms
|
sort_algos.py
|
Python
|
mit
| 4,236
| 0.004013
|
import random
import sys
sys.setrecursionlimit(7000)
def selection_sort(array, counter):
for i in range(0,len(array)):
min_val = array[i:len(array)+1][0]
        for j in array[i:len(array)+1]:
counter += 1
if j < min_val:
min_val = j
k = array[i:len(array)+1].index(min_val)+i
counter += 2
array[k], array[i] = array[i], array[k]
return [array, counter]
def insertion_sort(array, counter):
for i in range(1, len(array)):
k = i-1
        x = array[i]
counter += 1
while k >= 0 and x < array[k]:
counter += 1
array[k+1] = array[k]
k -= 1
array[k+1] = x
return [array, counter]
def quick_sort_random(array, counter):
n = len(array)
if n <= 50:
return insertion_sort(array, counter)
rand_num = random.randrange(len(array))
pivot = array[rand_num]
L_lst = []
R_lst = []
M_lst = []
for i in array:
counter += 1
if i < pivot:
L_lst.append(i)
elif i > pivot:
R_lst.append(i)
else:
M_lst.append(i)
return quick_sort_random(L_lst, counter) + M_lst + quick_sort_random(R_lst, counter)
def quick_sort_first(array, counter):
n = len(array)
if n <= 50:
return insertion_sort(array, counter)
pivot = array[0]
L_lst = []
R_lst = []
M_lst = []
for i in array:
counter += 1
if i < pivot:
L_lst.append(i)
elif i > pivot:
R_lst.append(i)
else:
M_lst.append(i)
return quick_sort_first(L_lst, counter) + M_lst + quick_sort_first(R_lst, counter)
def merge_sort(array, counter):
n = len(array)
if n > 1:
return merge(merge_sort(array[0:n/2], counter), merge_sort(array[n/2:n], counter), counter)
else:
return array
def merge(arr1, arr2, counter):
k = len(arr1)
l = len(arr2)
counter += 1
if k == 0: return arr2
if l == 0: return arr1
if arr1[0] <= arr2[0]:
return [arr1[0]] + merge(arr1[1:k], arr2[0:l], counter)
else:
return [arr2[0]] + merge(arr1[0:k], arr2[1:l], counter)
def print_menu():
print "\n>Press A to run Selection Sort"
print ">Press B to run Insertion Sort"
print ">Press C to run Quick Sort with random pivot selection"
print ">Press D to run Quick Sort with first pivot selection"
print ">Press E to run Merge Sort"
print ">Press F to change the initial list to sort"
print ">Press Q to Quit"
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
def print_list(array):
print "[",
for i in array[0]:
print i,
print "] --> comparisons: {}".format(array[1])
def user_input_list():
user_lst = []
try:
while True:
sort_input = int(raw_input("Please enter a number: "))
user_lst.append(sort_input)
except ValueError:
pass
return user_lst
if __name__ == "__main__":
print "~~~~ WELCOME TO THE SORTER ~~~~"
opt_input = ""
input_list = user_input_list()
og_list = list(input_list)
while opt_input.upper() != 'Q':
print_menu()
opt_input = raw_input("Enter an option from the menu above: ")
input_list = list(og_list)
counter = 0
if opt_input.upper() == 'A':
array = selection_sort(input_list, counter)
print_list(array)
elif opt_input.upper() == 'B':
array = insertion_sort(input_list, counter)
print_list(array)
elif opt_input.upper() == 'C':
array = quick_sort_random(input_list, counter)
print_list(array)
elif opt_input.upper() == 'D':
array = quick_sort_first(input_list, counter)
print_list(array)
elif opt_input.upper() == 'E':
array = merge_sort(input_list, counter)
print "[",
for i in array:
print i,
print "]"
elif opt_input.upper() == 'F':
og_list = user_input_list()
elif opt_input.upper() == 'Q':
exit()
else:
print "Your input is invalid. Try again."
|
awsdocs/aws-doc-sdk-examples
|
python/example_code/apigateway/aws_service/test/conftest.py
|
Python
|
apache-2.0
| 311
| 0.003215
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Contains common test fixtures used to run unit tests.
"""
import sys
# This is needed so Python can find test_tools on the path.
sys.path.append('../../..')
from test_tools.fixtures.common import *
|
chromium/chromium
|
ppapi/generators/idl_diff.py
|
Python
|
bsd-3-clause
| 9,131
| 0.02048
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import glob
import os
import subprocess
import sys
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
#
# IDLDiff
#
# IDLDiff is a tool for comparing sets of IDL generated header files
# with the standard checked in headers. It does this by capturing the
# output of the standard diff tool, parsing it into separate changes, then
# ignoring changes that are known to be safe, such as adding or removing
# blank lines, etc...
#
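# For example (an illustrative hunk, not taken from a real header), raw diff
# output such as:
#
#   3c3
#   < int a;
#   > int32_t a;
#
# is parsed by GetChanges() below into a single Change with was=['int a;']
# and now=['int32_t a;'], which ValidChange() keeps because it matches none
# of the known-safe patterns (copyright years, blank lines, spacing, ...).
#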
Option('gen', 'IDL generated files', default='hdir')
Option('src', 'Original ".h" files', default='../c')
Option('halt', 'Stop if a difference is found')
Option('diff', 'Directory holding acceptable diffs', default='diff')
Option('ok', 'Write out the diff file.')
# Change
#
# A Change object contains the previous lines, new lines and change type.
#
class Change(object):
def __init__(self, mode, was, now):
self.mode = mode
self.was = was
self.now = now
def Dump(self):
if not self.was:
print('Adding %s' % self.mode)
elif not self.now:
print('Missing %s' % self.mode)
else:
print('Modifying %s' % self.mode)
for line in self.was:
print('src: >>%s<<' % line)
for line in self.now:
print('gen: >>%s<<' % line)
    print()
#
# IsCopyright
#
# Return True if this change is only a one line change in the copyright notice
# such as non-matching years.
#
def IsCopyright(change):
if len(change.now) != 1 or len(change.was) != 1: return False
if 'Copyright (c)' not in change.now[0]: return False
if 'Copyright (c)' not in change.was[0]: return False
return True
#
# IsBlankComment
#
# Return True if this change only removes a blank line from a comment
#
def IsBlankComment(change):
if change.now: return False
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
return True
#
# IsBlank
#
# Return True if this change only adds or removes blank lines
#
def IsBlank(change):
for line in change.now:
if line: return False
for line in change.was:
if line: return False
return True
#
# IsToCppComment
#
# Return True if this change only converts comments from C++ style to C style
#
def IsToCppComment(change):
if not len(change.now) or len(change.now) != len(change.was):
return False
for index in range(len(change.now)):
was = change.was[index].strip()
if was[:2] != '//':
return False
was = was[2:].strip()
    now = change.now[index].strip()
if now[:2] != '/*':
return False
now = now[2:-2].strip()
if now != was:
return False
  return True
def IsMergeComment(change):
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
for line in change.now:
stripped = line.strip()
if stripped != '*' and stripped[:2] != '/*' and stripped[-2:] != '*/':
return False
return True
#
# IsSpacing
#
# Return True if this change is only different in the way 'words' are spaced
# such as in an enum:
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
# vs
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
#
def IsSpacing(change):
if len(change.now) != len(change.was): return False
for i in range(len(change.now)):
# Also ignore right side comments
line = change.was[i]
offs = line.find('//')
if offs == -1:
offs = line.find('/*')
if offs >-1:
line = line[:offs-1]
words1 = change.now[i].split()
words2 = line.split()
if words1 != words2: return False
return True
#
# IsInclude
#
# Return True if change has extra includes
#
def IsInclude(change):
for line in change.was:
if line.strip().find('struct'): return False
for line in change.now:
if line and '#include' not in line: return False
return True
#
# IsCppComment
#
# Return True if the change is only missing C++ comments
#
def IsCppComment(change):
if len(change.now): return False
for line in change.was:
line = line.strip()
if line[:2] != '//': return False
return True
#
# ValidChange
#
# Return True if the change matches none of the known-safe ("bogus") changes above.
#
def ValidChange(change):
if IsToCppComment(change): return False
if IsCopyright(change): return False
if IsBlankComment(change): return False
if IsMergeComment(change): return False
if IsBlank(change): return False
if IsSpacing(change): return False
if IsInclude(change): return False
if IsCppComment(change): return False
return True
#
# Swapped
#
# Check if the combination of last + next change signals they are both
# invalid such as swap of line around an invalid block.
#
def Swapped(last, next):
if not last.now and not next.was and len(last.was) == len(next.now):
cnt = len(last.was)
for i in range(cnt):
match = True
for j in range(cnt):
if last.was[j] != next.now[(i + j) % cnt]:
match = False
          break
if match: return True
if not last.was and not next.now and len(last.now) == len(next.was):
cnt = len(last.now)
for i in range(cnt):
match = True
for j in range(cnt):
if last.now[i] != next.was[(i + j) % cnt]:
match = False
          break
if match: return True
return False
def FilterLinesIn(output):
was = []
now = []
filter = []
for index in range(len(output)):
filter.append(False)
line = output[index]
if len(line) < 2: continue
if line[0] == '<':
if line[2:].strip() == '': continue
was.append((index, line[2:]))
elif line[0] == '>':
if line[2:].strip() == '': continue
now.append((index, line[2:]))
for windex, wline in was:
for nindex, nline in now:
if filter[nindex]: continue
if filter[windex]: continue
if wline == nline:
filter[nindex] = True
filter[windex] = True
if GetOption('verbose'):
print("Found %d, %d >>%s<<" % (windex + 1, nindex + 1, wline))
out = []
for index in range(len(output)):
if not filter[index]:
out.append(output[index])
return out
#
# GetChanges
#
# Parse the output into discrete change blocks.
#
def GetChanges(output):
  # Split on lines, adding an END marker to simplify the logic
lines = output.split('\n')
lines = FilterLinesIn(lines)
lines.append('END')
changes = []
was = []
now = []
mode = ''
last = None
for line in lines:
#print("LINE=%s" % line)
if not line: continue
elif line[0] == '<':
if line[2:].strip() == '': continue
# Ignore prototypes
if len(line) > 10:
words = line[2:].split()
if len(words) == 2 and words[1][-1] == ';':
if words[0] == 'struct' or words[0] == 'union':
continue
was.append(line[2:])
elif line[0] == '>':
if line[2:].strip() == '': continue
if line[2:10] == '#include': continue
now.append(line[2:])
elif line[0] == '-':
continue
else:
change = Change(line, was, now)
was = []
now = []
if ValidChange(change):
changes.append(change)
if line == 'END':
break
return FilterChanges(changes)
def FilterChanges(changes):
if len(changes) < 2: return changes
out = []
filter = [False for change in changes]
for cur in range(len(changes)):
for cmp in range(cur+1, len(changes)):
if filter[cmp]:
continue
if Swapped(changes[cur], changes[cmp]):
filter[cur] = True
filter[cmp] = True
for cur in range(len(changes)):
if filter[cur]: continue
out.append(changes[cur])
return out
def Main(args):
filenames = ParseOptions(args)
if not filenames:
gendir = os.path.join(GetOption('gen'), '*.h')
filenames = sorted(glob.glob(gendir))
srcdir = os.path.join(GetOption('src'), '*.h')
srcs = sorted(glob.glob(srcdir))
for name in srcs:
name = os.path.split(name)[1]
name = os.path.join(GetOption('gen'), name)
if name not in filenames:
print('Missing: %s' % name)
for filename in
|
subhrm/google-code-jam-solutions
|
solutions/2017/1B/B/B_small.py
|
Python
|
mit
| 1,604
| 0.01808
|
# Problem B
# Small dataset
# O = G = V = 0. (Each unicorn has only one hair color in its mane.)
def solve(N, R, Y, B):
res = ""
imp = "IMPOSSIBLE "
if sum((R,Y,B)) == 0:
return ""
if max(R,Y,B) > N//2 :
return imp
if (R==Y) and (Y==B):
return "RYB"*R
elif (R==Y) and (B == 0):
return "RY"*R
elif (R==B) and (Y == 0) :
return "RB"*R
elif (Y==B) and (R == 0) :
return "YB"*Y
arr = [["R",R],["Y",Y],["B",B]]
    arr.sort(key=lambda x:x[1], reverse=True)
sum_arr = lambda x : x[0][1] + x[1][1] + x[2][1]
while(sum_arr(arr) > 0 ):
if (arr[0][1] == arr[1][1]) and (arr[1][1] == arr[2][1]):
m = arr[0][1]
s = set(["B","R","Y"])
s.remove(res[-1])
first = min(s)
s.add(res[-1])
s.remove(first)
s.difference_update(set([res[0]]))
last = min(s)
s = set(["B",
|
"R","Y"])
s.remove(first)
s.remove(last)
mid = list(s)[0]
r0 = first+mid+last
r = r0*m
res += r
break
if arr[0][1] > 0:
res += arr[0][0]
arr[0][1] -= 1
if arr[1][1] > 0 :
res += arr[1][0]
arr[1][1] -= 1
arr.sort(key=lambda x:x[1], reverse=True)
return res
if __name__ == "__main__":
tc = int(input())
for ti in range(tc):
N, R, O, Y, G, B, V = map(int,input().strip().split())
r = solve(N, R, Y, B)
print("Case #{0}: {1}".format(ti + 1, r))
|
sociateru/django-iprestrict
|
iprestrict/migrations/__init__.py
|
Python
|
bsd-3-clause
| 117
| 0.008547
|
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
|
xstreck1/TREMPPI
|
python/tremppi/server_errors.py
|
Python
|
gpl-3.0
| 1,278
| 0.000782
|
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
class MethodNotAllowed(Exception):
status_code = 405
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
class Conflict(Exception):
status_code = 409
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
        if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
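# Illustrative usage sketch (an addition for clarity): these classes follow
# the common Flask error-handler pattern; 'app' and 'jsonify' are assumptions
# and are not provided by this module.
#
#   @app.errorhandler(InvalidUsage)
#   def handle_invalid_usage(error):
#       return jsonify(error.to_dict()), error.status_code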
|
CameronLonsdale/lantern
|
tests/analysis/test_frequency.py
|
Python
|
mit
| 2,466
| 0.001622
|
"""Tests for the frequency module in analysis"""
import pytest
from lantern.analysis import frequency
def test_frequency_analyze():
"""Testing frequency analyze works for ngram = 1"""
assert frequency.frequency_analyze("abb") == {'a': 1, 'b': 2}
def test_frequency_analyze_bigram():
"""Testing frequency analyze works for ngram = 2"""
assert frequency.frequency_analyze("abb", 2) == {'ab': 1, 'bb': 1}
def test_frequency_analyze_empty_string():
"""Testing empty string can be frequency analyzed"""
assert frequency.frequency_analyze("") == {}
def test_frequency_to_probability():
"""Testing frequency map is converted to probability distribution succesfully"""
frequency_map = {'a': 1, 'b': 2}
assert frequency.frequency_to_probability(frequency_map) == {'a': 1.0 / 3, 'b': 2.0 / 3}
def test_frequency_to_probability_empty():
"""Testing empty frequency_map is converted to empty probability distribution"""
assert frequency.frequency_to_probability({}) == {}
def test_index_of_coincidence():
"""Testing index of coincidence for a piece of text"""
assert frequency.index_of_coincidence("aabbc") == 0.2
def test_index_of_coincidence_multiple_texts():
"""Testing index of coincidence with multiple texts"""
assert frequency.index_of_coincidence("aabbc", "abbcc") == 0.2
def test_index_of_coincidence_none():
"""Testing index of coincidence raises value error on empty texts"""
with pytest.raises(ValueError):
frequency.index_of_coincidence()
def test_index_of_coincidence_empty():
"""Testing index of coincidence for empty string returns 0"""
assert frequency.index_of_coincidence("") == 0
def test_chi_squared():
"""Testing matching frequency distributions have chi squared of 0"""
assert frequency.chi_squared({'a': 2, 'b': 3}, {'a': 2, 'b': 3}) == 0
def test_chi_squared_similar():
"""Testing similar frequency distributions have chi squared of 0.1"""
    assert frequency.chi_squared({'a': 2, 'b': 3}, {'a': 1, 'b': 2}) == 0.1
def test_chi_squared_different_symbols():
"""Testing different symbols are handled appropriately"""
assert frequency.chi_squared({'a': 1, 'd': 3}, {'a': 1}) == 0
def test_languagefrequency_attribute_access():
"""Testing
|
correct attributes are found, incorrect attributes raise AttributeErrors"""
frequency.english.unigrams
with pytest.raises(AttributeError):
frequency.english.invalid
|
hypergravity/hrs
|
twodspec/normalization.py
|
Python
|
bsd-3-clause
| 8,493
| 0.000118
|
# -*- coding: utf-8 -*-
"""
Author
------
Bo Zhang
Email
-----
bozhang@nao.cas.cn
Created on
----------
- Sat Sep 03 12:00:00 2016
Modifications
-------------
- Sat Sep 03 12:00:00 2016
Aims
----
- normalization
Notes
-----
This is migrated from **SLAM** package
"""
from __future__ import division
import numpy as np
from joblib import Parallel, delayed
from .extern.interpolate import SmoothSpline
def normalize_spectrum(wave, flux, norm_range, dwave,
p=(1E-6, 1E-6), q=0.5, ivar=None, eps=1e-10,
rsv_frac=1.):
""" A double smooth normalization of a spectrum
Converted from Chao Liu's normSpectrum.m
Updated by Bo Zhang
Parameters
----------
wave: ndarray (n_pix, )
        wavelength array
flux: ndarray (n_pix, )
flux array
norm_range: tuple
a tuple consisting (wave_start, wave_stop)
dwave: float
binning width
p: tuple of 2 ps
smoothing parameter between 0 and 1:
0 -> LS-straight line
1 -> cubic spline interpolant
    q: float in range of (0, 1)
percentile, between 0 and 1
ivar: ndarray (n_pix, ) | None
ivar array, default is None
eps: float
the ivar threshold
rsv_frac: float
        the fraction of pixels reserved in terms of std. default is 1.
Returns
-------
flux_norm: ndarray
normalized flux
flux_cont: ndarray
continuum flux
Example
-------
>>> flux_norm, flux_cont = normalize_spectrum(
>>> wave, flux, (4000., 8000.), 100., p=(1E-8, 1E-7), q=0.5,
>>> rsv_frac=2.0)
"""
if ivar is not None:
# ivar is set
ivar = np.where(np.logical_or(wave < norm_range[0],
wave > norm_range[1]), 0, ivar)
ivar = np.where(ivar <= eps, eps, ivar)
# mask = ivar <= eps
var = 1. / ivar
else:
# default config is even weight
var = np.ones_like(flux)
# wave = wave[~mask]
# flux = flux[~mask]
# check q region
assert 0. < q < 1.
# n_iter = len(p)
n_bin = np.int(np.fix(np.diff(norm_range) / dwave) + 1)
wave1 = norm_range[0]
# SMOOTH 1
# print(wave.shape, flux.shape, var.shape)
if ivar is not None:
ind_good_init = 1. * (ivar > 0.) * (flux > 0.)
else:
ind_good_init = 1. * (flux > 0.)
ind_good_init = ind_good_init.astype(np.bool)
# print("@Cham: sum(ind_good_init)", np.sum(ind_good_init))
flux_smoothed1 = SmoothSpline(wave[ind_good_init], flux[ind_good_init],
p=p[0], var=var[ind_good_init])(wave)
dflux = flux - flux_smoothed1
# collecting continuum pixels --> ITERATION 1
ind_good = np.zeros(wave.shape, dtype=np.bool)
for i_bin in range(n_bin):
ind_bin = np.logical_and(wave > wave1 + (i_bin - 0.5) * dwave,
wave <= wave1 + (i_bin + 0.5) * dwave)
if np.sum(ind_bin > 0):
# median & sigma
bin_median = np.median(dflux[ind_bin])
bin_std = np.median(np.abs(dflux - bin_median))
# within 1 sigma with q-percentile
ind_good_ = ind_bin * (
np.abs(dflux - np.nanpercentile(dflux[ind_bin], q * 100.)) < (
rsv_frac * bin_std))
ind_good = np.logical_or(ind_good, ind_good_)
ind_good = np.logical_and(ind_good, ind_good_init)
# assert there is continuum pixels
try:
assert np.sum(ind_good) > 0
except AssertionError:
Warning("@Keenan.normalize_spectrum(): unable to find continuum! ")
ind_good = np.ones(wave.shape, dtype=np.bool)
# SMOOTH 2
# continuum flux
flux_smoothed2 = SmoothSpline(
wave[ind_good], flux[ind_good], p=p[1], var=var[ind_good])(wave)
# normalized flux
flux_norm = flux / flux_smoothed2
return flux_norm, flux_smoothed2
def normalize_spectra_block(wave, flux_block, norm_range, dwave,
p=(1E-6, 1E-6), q=0.5, ivar_block=None, eps=1e-10,
rsv_frac=3., n_jobs=1, verbose=10):
""" normalize multiple spectra using the same configuration
This is specially designed for TheKeenan
Parameters
----------
wave: ndarray (n_pix, )
        wavelength array
flux_block: ndarray (n_obs, n_pix)
flux array
norm_range: tuple
a tuple consisting (wave_start, wave_stop)
dwave: float
binning width
p: tuple of 2 ps
smoothing parameter between 0 and 1:
0 -> LS-straight line
1 -> cubic spline interpolant
    q: float in range of (0, 1)
percentile, between 0 and 1
ivar_block: ndarray (n_pix, ) | None
ivar array, default is None
eps: float
the ivar threshold
rsv_frac: float
the fraction of pixels reserved in terms of std. default is 3.
n_jobs: int
number of processes launched by joblib
verbose: int / bool
verbose level
Returns
-------
flux_norm: ndarray
normalized flux
"""
if ivar_block is None:
ivar_block = np.ones_like(flux_block)
if flux_block.ndim == 1:
        flux_block = flux_block.reshape(1, -1)
n_spec = flux_block.shape[0]
results = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(normalize_spectrum)(
wave, flux_block[i], norm_range, dwave, p=p, q=q,
ivar=ivar_block[i], eps=eps, rsv_frac=rsv_frac)
for i in range(n_spec))
# unpack results
flux_norm_block = []
flux_cont_block = []
for result in results:
flux_norm_block.append(result[0])
flux_cont_block.append(result[1])
return np.array(flux_norm_block), np.array(flux_cont_block)
def get_stable_pixels(pixel_disp, wave_arm=100, frac=0.20):
"""
Parameters
----------
pixel_disp: np.ndarray
dispersion array
wave_arm: int
the arm length in terms of pixels
frac: float
the reserved fraction, between 0.00 and 1.00
Returns
-------
ind_stable
"""
ind_stable = np.zeros_like(pixel_disp, dtype=np.bool)
for i in range(len(ind_stable)):
edge_l = np.max([i - wave_arm, 0])
edge_r = np.min([i + wave_arm, len(pixel_disp)])
if pixel_disp[i] <= \
np.percentile(pixel_disp[edge_l:edge_r], frac * 100.):
ind_stable[i] = True
return ind_stable
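# Illustrative demo (an addition for clarity; random data, so the exact count
# varies) of how get_stable_pixels marks the most stable ~20% of pixels
# within a +/-100-pixel arm:
def _demo_get_stable_pixels():
    pixel_disp = np.random.rand(4000)
    ind_stable = get_stable_pixels(pixel_disp, wave_arm=100, frac=0.20)
    return ind_stable.sum()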
# TODO: this is a generalized version
def normalize_spectra(wave_flux_tuple_list, norm_range, dwave,
p=(1E-6, 1E-6), q=50, n_jobs=1, verbose=False):
""" normalize multiple spectra using the same configuration
Parameters
----------
    wave_flux_tuple_list: list[n_obs]
        a list of (wave, flux) tuples
norm_range: tuple
a tuple consisting (wave_start, wave_stop)
dwave: float
binning width
p: tuple of 2 ps
smoothing parameter between 0 and 1:
0 -> LS-straight line
1 -> cubic spline interpolant
q: float in range of [0, 100]
        percentile, between 0 and 100
n_jobs: int
number of processes launched by joblib
verbose: int / bool
verbose level
Returns
-------
flux_norm: ndarray
normalized flux
"""
pass
def test_normaliza_spectra_block():
import os
os.chdir('/pool/projects/TheKeenan/data/TheCannonData')
from TheCannon import apogee
import matplotlib.pyplot as plt
tr_ID, wl, tr_flux, tr_ivar = apogee.load_spectra("example_DR10/Data")
tr_label = apogee.load_labels("example_DR10/reference_labels.csv")
test_ID = tr_ID
test_flux = tr_flux
test_ivar = tr_ivar
r = normalize_spectra_block(wl, tr_flux, (15200., 16900.), 30., q=0.9,
rsv_frac=0.5,
p=(1E-10, 1E-10), ivar_block=tr_ivar,
n_jobs=10, verbose=10)
flux_norm, flux_cont = r
flux_norm = np.array(flux_norm)
flux_cont = np.array(flux_cont)
flux_ivar = tr_ivar * flux_cont ** 2
fig = plt.figure()
ax = fig.add_subplot(111)
for
|
nturaga/tools-iuc
|
data_managers/data_manager_bowtie_index_builder/data_manager/bowtie_index_builder.py
|
Python
|
mit
| 4,124
| 0.023763
|
#!/usr/bin/env python
import json
import optparse
import os
import subprocess
import sys
import tempfile
CHUNK_SIZE = 2**20
DEFAULT_DATA_TABLE_NAME = "bowtie_indexes"
def get_id_name( params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def build_bowtie_index( data_manager_dict, fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=DEFAULT_DATA_TABLE_NAME, color_space=False ):
# TODO: allow multiple FASTA input files
fasta_base_name = os.path.split( fasta_filename )[-1]
sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
os.symlink( fasta_filename, sym_linked_fasta_filename )
args = [ 'bowtie-build' ]
if color_space:
args.append( '-C' )
    args.append( sym_linked_fasta_filename )
    args.append( fasta_base_name )
tmp_stderr = tempfile.NamedTemporaryFile( prefix="tmp-data-manager-bowtie-index-builder-stderr" )
proc = subprocess.Popen( args=args, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno() )
return_code = proc.wait()
if return_code:
tmp_stderr.flush()
tmp_stderr.seek(0)
print >> sys.stderr, "Error building index:"
while True:
chunk = tmp_stderr.read( CHUNK_SIZE )
if not chunk:
break
sys.stderr.write( chunk )
sys.exit( return_code )
tmp_stderr.close()
data_table_entry = dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name )
_add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
|
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def main():
# Parse Command Line
parser = optparse.OptionParser()
    parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
parser.add_option( '-c', '--color_space', dest='color_space', action='store_true', default=False, help='color_space' )
(options, args) = parser.parse_args()
filename = args[0]
params = json.loads( open( filename ).read() )
target_directory = params[ 'output_data' ][0]['extra_files_path']
os.mkdir( target_directory )
data_manager_dict = {}
dbkey = options.fasta_dbkey
if dbkey in [ None, '', '?' ]:
raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
sequence_id, sequence_name = get_id_name( params, dbkey=dbkey, fasta_description=options.fasta_description )
# build the index
build_bowtie_index( data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME, color_space=options.color_space )
# save info to json file
open( filename, 'wb' ).write( json.dumps( data_manager_dict ) )
if __name__ == "__main__":
main()
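# Illustrative invocation sketch (hypothetical paths; the positional JSON
# parameter file is what Galaxy passes to data managers):
#   python bowtie_index_builder.py -f /data/hg19.fa -d hg19 \
#       -t 'Human (hg19)' -n bowtie_indexes galaxy_params.json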
|
minhphung171093/GreenERP_V9
|
openerp/addons/mail/tests/test_mail_followers.py
|
Python
|
gpl-3.0
| 7,126
| 0.004491
|
# -*- coding: utf-8 -*-
from psycopg2 import IntegrityError
from openerp.addons.mail.tests.common import TestMail
class TestMailFollowers(TestMail):
def setUp(self):
super(TestMailFollowers, self).setUp()
Subtype = self.env['mail.message.subtype']
self.mt_mg_def = Subtype.create({'name': 'mt_mg_def', 'default': True, 'res_model': 'mail.channel'})
self.mt_cl_def = Subtype.create({'name': 'mt_cl_def', 'default': True, 'res_model': 'crm.lead'})
self.mt_al_def = Subtype.create({'name': 'mt_al_def', 'default': True, 'res_model': False})
self.mt_mg_nodef = Subtype.create({'name': 'mt_mg_nodef', 'default': False, 'res_model': 'mail.channel'})
self.mt_al_nodef = Subtype.create({'name': 'mt_al_nodef', 'default': False, 'res_model': False})
self.default_group_subtypes = Subtype.search([('default', '=', True), '|', ('res_model', '=', 'mail.channel'), ('res_model', '=', False)])
def test_m2o_command_new(self):
test_channel = self.env['mail.channel'].create({'name': 'Test'})
groups = self.group_pigs | self.group_public
generic, specific = self.env['mail.followers']._add_follower_command(
'mail.channel', groups.ids,
{self.user_employee.partner_id.id: [self.mt_mg_nodef.id]},
{test_channel.id: [self.mt_al_nodef.id]})
self.assertFalse(specific)
self.assertEqual(len(generic), 2)
self.assertEqual(set([generic[0][2]['res_model'], generic[1][2]['res_model']]),
set(['mail.channel']))
self.assertEqual(set(filter(None, [generic[0][2].get('channel_id'), generic[1][2].get('channel_id')])),
set([test_channel.id]))
self.assertEqual(set(filter(None, [generic[0][2].get('partner_id'), generic[1][2].get('partner_id')])),
set([self.user_employee.partner_id.id]))
self.assertEqual(set(generic[0][2]['subtype_ids'][0][2] + generic[1][2]['subtype_ids'][0][2]),
set([self.mt_mg_nodef.id, self.mt_al_nodef.id]))
    def test_m2o_command_update_selective(self):
test_channel = self.env['mail.channel'].create({'name': 'Test'})
groups = self.group_pigs | self.group_public
self.env['mail.followers'].create({'partner_id': self.user_employee.partner_id.id, 'res_model': 'mail.channel', 'res_id': self.group_pigs.id})
generic, specific = self.env['mail.followers']._add_follower_command(
'mail.channel', groups.ids,
{self.user_employee.partner_id.id: [self.mt_mg_nodef.id]},
{test_channel.id: False},
force=False)
self.assertEqual(len(generic), 1)
self.assertEqual(len(specific), 1)
self.assertEqual(generic[0][2]['res_model'], 'mail.channel')
self.assertEqual(generic[0][2]['channel_id'], test_channel.id)
self.assertEqual(set(generic[0][2]['subtype_ids'][0][2]), set(self.default_group_subtypes.ids))
self.assertEqual(specific.keys(), [self.group_public.id])
self.assertEqual(specific[self.group_public.id][0][2]['res_model'], 'mail.channel')
self.assertEqual(specific[self.group_public.id][0][2]['partner_id'], self.user_employee.partner_id.id)
self.assertEqual(set(specific[self.group_public.id][0][2]['subtype_ids'][0][2]), set([self.mt_mg_nodef.id]))
def test_message_is_follower(self):
self.assertFalse(self.group_pigs.sudo(self.user_employee).message_is_follower)
self.group_pigs.message_subscribe_users(user_ids=[self.user_employee.id])
self.assertTrue(self.group_pigs.sudo(self.user_employee).message_is_follower)
def test_followers_subtypes_default(self):
self.group_pigs.message_subscribe_users(user_ids=[self.user_employee.id])
self.assertEqual(self.group_pigs.message_follower_ids.mapped('partner_id'), self.user_employee.partner_id)
self.assertEqual(self.group_pigs.message_follower_ids.mapped('channel_id'), self.env['mail.channel'])
follower = self.env['mail.followers'].search([
('res_model', '=', 'mail.channel'),
('res_id', '=', self.group_pigs.id),
('partner_id', '=', self.user_employee.partner_id.id)])
self.assertEqual(len(follower), 1)
self.assertEqual(follower.subtype_ids, self.default_group_subtypes)
def test_followers_subtypes_specified(self):
self.group_pigs.sudo(self.user_employee).message_subscribe_users(subtype_ids=[self.mt_mg_nodef.id])
self.assertEqual(self.group_pigs.message_follower_ids.mapped('partner_id'), self.user_employee.partner_id)
self.assertEqual(self.group_pigs.message_follower_ids.mapped('channel_id'), self.env['mail.channel'])
follower = self.env['mail.followers'].search([
('res_model', '=', 'mail.channel'),
('res_id', '=', self.group_pigs.id),
('partner_id', '=', self.user_employee.partner_id.id)])
self.assertEqual(len(follower), 1)
self.assertEqual(follower.subtype_ids, self.mt_mg_nodef)
def test_followers_multiple_subscription(self):
self.group_pigs.sudo(self.user_employee).message_subscribe_users(subtype_ids=[self.mt_mg_nodef.id])
self.assertEqual(self.group_pigs.message_follower_ids.mapped('partner_id'), self.user_employee.partner_id)
self.assertEqual(self.group_pigs.message_follower_ids.mapped('channel_id'), self.env['mail.channel'])
follower = self.env['mail.followers'].search([
('res_model', '=', 'mail.channel'),
('res_id', '=', self.group_pigs.id),
('partner_id', '=', self.user_employee.partner_id.id)])
self.assertEqual(len(follower), 1)
self.assertEqual(follower.subtype_ids, self.mt_mg_nodef)
self.group_pigs.sudo(self.user_employee).message_subscribe_users(subtype_ids=[self.mt_mg_nodef.id, self.mt_al_nodef.id])
self.assertEqual(self.group_pigs.message_follower_ids.mapped('partner_id'), self.user_employee.partner_id)
self.assertEqual(self.group_pigs.message_follower_ids.mapped('channel_id'), self.env['mail.channel'])
follower = self.env['mail.followers'].search([
('res_model', '=', 'mail.channel'),
('res_id', '=', self.group_pigs.id),
('partner_id', '=', self.user_employee.partner_id.id)])
self.assertEqual(len(follower), 1)
self.assertEqual(follower.subtype_ids, self.mt_mg_nodef | self.mt_al_nodef)
def test_no_DID(self):
"""Test that a follower cannot suffer from dissociative identity disorder.
It cannot be both a partner and a channel.
"""
test_channel = self.env['mail.channel'].create({
'name': 'I used to be schizo, but now we are alright.'
})
with self.assertRaises(IntegrityError):
self.env['mail.followers'].create({
'res_model': 'mail.channel',
'res_id': test_channel.id,
'partner_id': self.user_employee.partner_id.id,
'channel_id': self.group_pigs.id,
})
|
google/clusterfuzz
|
src/clusterfuzz/_internal/bot/minimizer/html_minimizer.py
|
Python
|
apache-2.0
| 7,964
| 0.005399
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimizer that attempts to use specialized minimizers on different parts of
an HTML test case."""
import functools
from clusterfuzz._internal.bot.tokenizer.antlr_tokenizer import AntlrTokenizer
from clusterfuzz._internal.bot.tokenizer.grammars.HTMLLexer import HTMLLexer
from clusterfuzz._internal.bot.tokenizer.grammars.JavaScriptLexer import \
JavaScriptLexer
from . import chunk_minimizer
from . import delta_minimizer
from . import js_minimizer
from . import minimizer
from . import utils
SCRIPT_START_STRING = b'<script'
SCRIPT_END_STRING = b'</script>'
class HTMLMinimizer(minimizer.Minimizer): # pylint:disable=abstract-method
"""Specialized HTML minimizer.
Note that this will not work properly with normal tokenizers. It simply
acts as a wrapper around other minimizers and passes pieces of the HTML
file to those."""
class Token(object):
"""Helper class to represent a single token."""
TYPE_HTML = 0
TYPE_SCRIPT = 1
def __init__(self, data, token_type):
self.data = data
self.token_type = token_type
class TokenizerState(object):
"""Enum for tokenizer states."""
SEARCHING_FOR_SCRIPT = 0
SEARCHING_FOR_TAG_END = 1
SEARCHING_FOR_CLOSE_SCRIPT = 2
HTMLTOKENIZER = AntlrTokenizer(HTMLLexer).tokenize
JSTOKENIZER = AntlrTokenizer(JavaScriptLexer).tokenize
TOKENIZER_MAP = {
Token.TYPE_HTML: [HTMLTOKENIZER, HTMLTOKENIZER, HTMLTOKENIZER],
Token.TYPE_SCRIPT: [JSTOKENIZER, JSTOKENIZER],
}
CHUNK_SIZES = [
[400, 100, 20, 5],
[400, 100, 20, 5, 2],
[400, 100, 20, 5, 1],
]
def __init__(self, test_function, *args, **kwargs):
# The HTML minimizer will not be used directly. Instead, preserve its
# arguments and pass them along when creating subminimizers.
super(HTMLMinimizer, self).__init__(lambda: False)
assert not args, 'Positional arguments not supported.'
assert 'tokenizer' not in kwargs, 'Custom tokenizers not supported.'
assert 'token_combiner' not in kwargs, 'Custom tokenizers not supported.'
self.test_function = test_function
self.kwargs = kwargs
def minimize(self, data):
"""Wrapper to perform common tasks and call |_execute|."""
# Do an initial line-by-line minimization to filter out noise.
line_minimizer = delta_minimizer.DeltaMinimizer(self.test_function,
**self.kwargs)
    # Do two line minimizations to make up for the fact that minimizations on
    # bots don't always minimize as much as they can.
for _ in range(2):
data = line_minimizer.minimize(data)
tokens = self.get_tokens_and_metadata(data)
for index, token in enumerate(tokens):
current_tokenizers = self.TOKENIZER_MAP[token.token_type]
prefix = self.combine_tokens(tokens[:index])
suffix = self.combine_tokens(tokens[index + 1:])
token_combiner = functools.partial(
self.combine_worker_tokens, prefix=prefix, suffix=suffix)
for level, current_tokenizer in enumerate(current_tokenizers):
# We need to preserve the parts of the test case that are not currently
# being minimized. Create a special token combiner that adds these
# portions of the test to the combined tokens.
if token.token_type == HTMLMinimizer.Token.TYPE_HTML:
current_minimizer = chunk_minimizer.ChunkMinimizer(
self.test_function,
chunk_sizes=HTMLMinimizer.CHUNK_SIZES[level],
token_combiner=token_combiner,
tokenizer=current_tokenizer,
**self.kwargs)
else:
current_minimizer = js_minimizer.JSMinimizer(
self.test_function,
token_combiner=token_combiner,
tokenizer=current_tokenizer,
**self.kwargs)
result_data = current_minimizer.minimize(token.data)
start = len(prefix)
end = len(result_data) - len(suffix)
token.data = result_data[start:end]
# TODO(mbarbella): Remove this once other minimizers are improved.
# Do a final line-by-line minimization pass.
data = self.combine_tokens(tokens)
return line_minimizer.minimize(data)
@staticmethod
def get_tokens_and_metadata(data):
"""Get the token list with associated metadata."""
tokens = []
state = HTMLMinimizer.TokenizerState.SEARCHING_FOR_SCRIPT
current_token_start = 0
current_token_type = HTMLMinimizer.Token.TYPE_HTML
index = 0
while 0 <= index < len(data):
if state == HTMLMinimizer.TokenizerState.SEARCHING_FOR_SCRIPT:
# In this case, we are simply searching for the next script tag.
index = data.find(SCRIPT_START_STRING, index)
state = HTMLMinimizer.TokenizerState.SEARCHING_FOR_TAG_END
elif state == HTMLMinimizer.TokenizerState.SEARCHING_FOR_TAG_END:
# Make sure that this really looks like a script tag.
next_newline = data.find(b'\n', index)
tag_end = data.find(b'>', index)
if 0 <= tag_end < next_newline or next_newline < 0 <= tag_end:
# The end of the script tag is before the next newline, so it should
# be safe to attempt to split this.
index = tag_end + 1
token = HTMLMinimizer.Token(data[current_token_start:index],
current_token_type)
tokens.append(token)
# Update state.
current_token_type = HTMLMinimizer.Token.TYPE_SCRIPT
current_token_start = index
state = HTMLMinimizer.TokenizerState.SEARCHING_FOR_CLOSE_SCRIPT
else:
# We found a newline before the end of tag or did not find the end
# of the tag, so something seems wrong. Skip this one.
index += len(SCRIPT_START_STRING)
elif state == HTMLMinimizer.TokenizerState.SEARCHING_FOR_CLOSE_SCRIPT:
# Simply look for the end of this script.
index = data.find(SCRIPT_END_STRING, index)
if index < 0:
break
# TODO(mbarbella): Optimize for empty script case (e.g. for "src=").
token = HTMLMinimizer.Token(data[current_token_start:index],
current_token_type)
tokens.append(token)
        current_token_start = index
        current_token_type = HTMLMinimizer.Token.TYPE_HTML
        state = HTMLMinimizer.TokenizerState.SEARCHING_FOR_SCRIPT
token = HTMLMinimizer.Token(data[current_token_start:], current_token_type)
tokens.append(token)
return tokens
@staticmethod
def combine_worker_tokens(tokens, prefix=b'', suffix=b''):
"""Combine tokens for a worker minimizer."""
# The Antlr tokenizer decodes the bytes objects we originally pass to it.
encoded_tokens = [
t if isinstance(t, bytes) else t.encode('utf-8') for t in tokens
]
return prefix + b''.join(encoded_tokens) + suffix
@staticmethod
def combine_tokens(tokens):
"""Combine tokens into a usable format, stripping metadata."""
return b''.join([t.data for t in tokens])
@staticmethod
def run(data,
thread_count=minimizer.DEFAULT_THREAD_COUNT,
file_extension='.html'):
"""Attempt to minimize an html test case."""
html_minimizer = HTMLMinimizer(
utils.test, max_threads=thread_count, file_extension=file_extension)
return html_minimizer.minimize(data)
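# A hedged, minimal demo (not part of the original module): exercises the
# split/recombine round trip described in the class docstring. Assumes this
# module is importable; the sample HTML is made up.
if __name__ == '__main__':
  sample = b'<body><script>var x = 1;</script></body>'
  parts = HTMLMinimizer.get_tokens_and_metadata(sample)
  # Expected split: b'<body><script>' (HTML), b'var x = 1;' (script),
  # b'</script></body>' (HTML); recombining must be lossless.
  assert HTMLMinimizer.combine_tokens(parts) == sample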
|
setten/pymatgen
|
pymatgen/analysis/chemenv/coordination_environments/tests/test_coordination_geometries.py
|
Python
|
mit
| 14,930
| 0.004488
|
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import ExplicitPermutationsAlgorithm
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import SeparationPlane
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import CoordinationGeometry
allcg = AllCoordinationGeometries()
class FakeSite(object):
def __init__(self, coords):
self.coords = coords
class CoordinationGeometriesTest(PymatgenTest):
def test_algorithms(self):
expl_algo = ExplicitPermutationsAlgorithm(permutations=[[0, 1, 2], [1, 2, 3]])
expl_algo2 = ExplicitPermutationsAlgorithm.from_dict(expl_algo.as_dict)
self.assertEqual(expl_algo.permutations, expl_algo2.permutations)
sepplane_algos_oct = allcg['O:6'].algorithms
self.assertEqual(len(sepplane_algos_oct[0].safe_separation_permutations()), 24)
self.assertEqual(len(sepplane_algos_oct[1].safe_separation_permutations()), 36)
sepplane_algos_oct_0 = SeparationPlane.from_dict(sepplane_algos_oct[0].as_dict)
self.assertEqual(sepplane_algos_oct[0].plane_points, sepplane_algos_oct_0.plane_points)
self.assertEqual(sepplane_algos_oct[0].mirror_plane, sepplane_algos_oct_0.mirror_plane)
self.assertEqual(sepplane_algos_oct[0].ordered_plane, sepplane_algos_oct_0.ordered_plane)
self.assertEqual(sepplane_algos_oct[0].point_groups, sepplane_algos_oct_0.point_groups)
self.assertEqual(sepplane_algos_oct[0].ordered_point_groups, sepplane_algos_oct_0.ordered_point_groups)
self.assertEqual(sepplane_algos_oct[0].explicit_optimized_permutations,
sepplane_algos_oct_0.explicit_optimized_permutations)
self.assertEqual(sepplane_algos_oct[0].__str__(),
'Separation plane algorithm with the following reference separation :\n'
'[[4]] | [[0, 2, 1, 3]] | [[5]]')
def test_hints(self):
hints = CoordinationGeometry.NeighborsSetsHints(hints_type='single_cap',
options={'cap_index': 2, 'csm_max': 8})
myhints = hints.hints({'csm': 12.0})
self.assertEqual(myhints, [])
hints2 = CoordinationGeometry.NeighborsSetsHints.from_dict(hints.as_dict())
self.assertEqual(hints.hints_type, hints2.hints_type)
self.assertEqual(hints.options, hints2.options)
def test_coordination_geometry(self):
cg_oct = allcg['O:6']
cg_oct2 = CoordinationGeometry.from_dict(cg_oct.as_dict())
self.assertArrayAlmostEqual(cg_oct.central_site, cg_oct2.central_site)
self.assertArrayAlmostEqual(cg_oct.points, cg_oct2.points)
self.assertEqual(cg_oct.__str__(), 'Coordination geometry type : Octahedron (IUPAC: OC-6 || IUCr: [6o])\n'
'\n'
' - coordination number : 6\n'
' - list of points :\n'
' - [0.0, 0.0, 1.0]\n'
' - [0.0, 0.0, -1.0]\n'
' - [1.0, 0.0, 0.0]\n'
' - [-1.0, 0.0, 0.0]\n'
' - [0.0, 1.0, 0.0]\n'
' - [0.0, -1.0, 0.0]\n'
'------------------------------------------------------------\n')
self.assertEqual(cg_oct.__len__(), 6)
self.assertEqual(cg_oct.ce_symbol, cg_oct.mp_symbol)
self.assertTrue(cg_oct.is_implemented())
self.assertEqual(cg_oct.get_name(), 'Octahedron')
self.assertEqual(cg_oct.IUPAC_symbol, 'OC-6')
self.assertEqual(cg_oct.IUPAC_symbol_str, 'OC-6')
self.assertEqual(cg_oct.IUCr_symbol, '[6o]')
self.assertEqual(cg_oct.IUCr_symbol_str, '[6o]')
cg_oct.permutations_safe_override = True
self.assertEqual(cg_oct.number_of_permutations, 720.0)
self.assertEqual(cg_oct.ref_permutation([0, 3, 2, 4, 5, 1]), (0, 3, 1, 5, 2, 4))
sites = [FakeSite(coords=pp) for pp in cg_oct.points]
faces = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
self.assertArrayAlmostEqual(cg_oct.faces(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), faces)
faces = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
                 [[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
self.assertArrayAlmostEqual(cg_oct.faces(sites=sites), faces)
edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]],
[[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, 0.0, -1.0]],
[[0.0, 1.0, 0.0], [0.0, -1.0, 0.0]],
[[0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]]
self.assertArrayAlmostEqual(cg_oct.edges(sites=sites, permutation=[0, 3, 2, 4, 5, 1]), edges)
edges = [[[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [-1.0, 0.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, 1.0], [0.0, -1.0, 0.0]],
[[0.0, 0.0, -1.0], [1.0, 0.0, 0.0]],
[[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
[[0.0, 0.0, -1.0], [0.0, 1.0, 0.0]],
[[0.0, 0.0, -1.0], [0.0, -1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0]]]
self.assertArrayAlmostEqual(cg_oct.edges(sites=sites), edges)
self.assertArrayAlmostEqual(cg_oct.solid_angles(),
[2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.0943951])
pmeshes = cg_oct.get_pmeshes(sites=sites)
self.assertEqual(pmeshes[0]['pmesh_string'],
'14\n 0.00000000 0.00000000 1.00000000\n'
' 0.00000000 0.00000000 -1.00000000\n'
' 1.00000000 0.00000000 0.00000000\n'
' -1.00000000 0.00000000 0.00000000\n'
' 0.00000000 1.00000000 0.00000000\n'
' 0.00000000 -1.00000000 0.00000000\n'
' 0.33333333 0.33333333 0.33333333\n'
' 0.33333333 -0.33333333 0.33333
|
XianwuLin/AsynMongo
|
__init__.py
|
Python
|
mit
| 33
| 0
|
from AsynMongo import Collection
|
rory/osm-find-first
|
tests/test_osm_find_first.py
|
Python
|
gpl-3.0
| 3,556
| 0.002812
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_osm_find_first
----------------------------------
Tests for `osm-find-first` module.
"""
import unittest
import tempfile
import re
import httpretty
import osm_find_first
def v(string):
return string.format(version=osm_find_first.__version__)
class TestOsmFindFirst(unittest.TestCase):
def testInt(self):
for good in [1, '1', 1.0]:
self.assertTrue(osm_find_first.is_int(good), msg=repr(good))
for bad in ['foo']:
self.assertFalse(osm_find_first.is_int(bad), msg=repr(bad))
def testOSMType(self):
for good in ['node', 'way', 'relation']:
self.assertTrue(osm_find_first.is_osm_type(good), msg=repr(good))
for bad in ['foo']:
self.assertFalse(osm_find_first.is_osm_type(bad), msg=repr(bad))
def testWriteResultsToCSV(self):
outputfile = tempfile.NamedTemporaryFile()
outputfilename = outputfile.name
results = [{'osm_type': 'relation', 'osm_id': 2227344, 'osm_user':
'brianh', 'osm_uid': 19612, 'osm_timestamp': '2012-06-12 15:24:49+01'}]
osm_find_first.write_to_csv(outputfilename, results)
outputfile.seek(0, 0)
self.assertEqual(outputfile.read().decode("utf8"),
'osm_type,osm_id,osm_user,osm_uid,osm_timestamp\nrelation,2227344,brianh,19612,2012-06-12 15:24:49+01\n')
outputfile.close()
def testReadMissingFromCSV(self):
csv_content = 'osm_type,osm_id\nrelation,123\n'
outputfile = tempfile.NamedTemporaryFile()
outputfilename = outputfile.name
outputfile.write(csv_content.encode("utf8"))
outputfile.seek(0)
missing = osm_find_first.read_missing_from_csv(outputfilename)
self.assertEqual(missing, [{'osm_type': 'relation', 'osm_id': '123'}])
outputfile.close()
@httpretty.activate
def testGettingResult(self):
httpretty.register_uri(httpretty.GET,
re.compile(
"http://api.openstreetmap.org/api/0.6/(node|way|relation)/[0-9]+/1"),
                               body='<osm><relation id="1" uid="123" user="testuser" timestamp="2000-01-01 115:24:02"></relation></osm>',
                               content_type="text/xml")
result = osm_find_first.find_first(
[], [{'osm_type': 'relation', 'osm_id': '1'}])
self.assertEqual(
result, [{'osm_timestamp': '2000-01-01 115:24:02', 'osm_type': 'relation', 'osm_uid': '123', 'osm_user': 'testuser', 'osm_id': '1'}])
self.assertEqual(httpretty.last_request().method, "GET")
self.assertEqual(
httpretty.last_request().path, "/api/0.6/relation/1/1")
self.assertEqual(httpretty.last_request().headers[
'user-agent'], v("osm-find-first/{version}"))
@httpretty.activate
def testEmptyDoesNothing(self):
httpretty.register_uri(httpretty.GET,
re.compile(
"http://api.openstreetmap.org/api/0.6/(node|way|relation)/[0-9]+/1"),
body='<osm><relation id="1" uid="123" user="testuser" timestamp="2000-01-01 115:24:02"></relation></osm>',
content_type="text/xml")
result = osm_find_first.find_first([], [])
self.assertEqual(result, [])
self.assertEqual(
httpretty.last_request().__class__, httpretty.core.HTTPrettyRequestEmpty)
if __name__ == '__main__':
unittest.main()
|
RevansChen/online-judge
|
Codewars/8kyu/find-out-whether-the-shape-is-a-cube/Python/test.py
|
Python
|
mit
| 579
| 0.01209
|
# Python - 3.6.0
Test.assert_equals(cube_checker(-12,2), False)
Test.assert_e
|
quals(cube_checker(8, 3), False)
Test.assert_equals(cube_checker(8, 2), True)
Test
|
.assert_equals(cube_checker(-8,-2), False)
Test.assert_equals(cube_checker(0, 0), False)
Test.assert_equals(cube_checker(27, 3), True)
Test.assert_equals(cube_checker(1, 5), False)
Test.assert_equals(cube_checker(125, 5),True)
Test.assert_equals(cube_checker(125,-5),False)
Test.assert_equals(cube_checker(0, 12), False)
Test.assert_equals(cube_checker(12, -1),False)
Test.assert_equals(cube_checker(1, 1), True)
|
Francis-Liu/animated-broccoli
|
nova/scheduler/host_manager.py
|
Python
|
apache-2.0
| 32,388
| 0.000494
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
import collections
import functools
import time
try:
from collections import UserDict as IterableUserDict # Python 3
except ImportError:
from UserDict import IterableUserDict # Python 2
import iso8601
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests
import six
from nova import context as context_module
from nova import exception
from nova.i18n import _LI, _LW
from nova import objects
from nova.pci import stats as pci_stats
from nova.scheduler import filters
from nova.scheduler import weights
from nova import utils
from nova.virt import hardware
host_manager_opts = [
cfg.MultiStrOpt('scheduler_available_filters',
default=['nova.scheduler.filters.all_filters'],
help='Filter classes available to the scheduler which may '
'be specified more than once. An entry of '
'"nova.scheduler.filters.all_filters" '
'maps to all filters included with nova.'),
cfg.ListOpt('scheduler_default_filters',
default=[
'RetryFilter',
'AvailabilityZoneFilter',
'RamFilter',
'DiskFilter',
'ComputeFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter',
'ServerGroupAntiAffinityFilter',
'ServerGroupAffinityFilter',
],
help='Which filter class names to use for filtering hosts '
'when not specified in the request.'),
cfg.ListOpt('scheduler_weight_classes',
default=['nova.scheduler.weights.all_weighers'],
help='Which weight class names to use for weighing hosts'),
cfg.BoolOpt('scheduler_tracks_instance_changes',
default=True,
help='Determines if the Scheduler tracks changes to instances '
'to help with its filtering decisions.'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
HOST_INSTANCE_SEMAPHORE = "host_instance"
class ReadOnlyDict(IterableUserDict):
"""A read-only dict."""
def __init__(self, source=None):
self.data = {}
if source:
self.data.update(source)
def __setitem__(self, key, item):
raise TypeError()
def __delitem__(self, key):
raise TypeError()
def clear(self):
raise TypeError()
def pop(self, key, *args):
raise TypeError()
def popitem(self):
raise TypeError()
def update(self):
raise TypeError()
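# Hedged illustration (not part of nova): ReadOnlyDict snapshots its source at
# construction time and rejects every mutation afterwards, e.g.:
#     service = ReadOnlyDict({'topic': 'compute', 'disabled': False})
#     service['topic']             # -> 'compute'
#     service['topic'] = 'other'   # -> TypeError
#     service.pop('topic')         # -> TypeError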
@utils.expects_func_args('self', 'instance')
def set_update_time_on_success(function):
"""Set updated time of HostState when consuming succeed."""
@functools.wraps(function)
def decorated_function(self, instance):
return_value = None
try:
return_value = function(self, instance)
except Exception as e:
# Ignores exception raised from consume_from_instance() so that
# booting instance would fail in the resource claim of compute
# node, other suitable node may be chosen during scheduling retry.
LOG.warning(_LW("Selected host: %(host)s failed to consume from "
"instance. Error: %(error)s"),
{'host': self.host, 'error': e},
instance=instance)
else:
now = timeutils.utcnow()
# NOTE(sbauza): Objects are UTC tz-aware by default
self.updated = now.replace(tzinfo=iso8601.iso8601.Utc())
return return_value
return decorated_function
class HostState(object):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
def __init__(self, host, node, compute=None):
self.host = host
self.nodename = node
# Mutable available resources.
# These will change as resources are virtually "consumed".
self.total_usable_ram_mb = 0
self.total_usable_disk_gb = 0
self.disk_mb_used = 0
self.free_ram_mb = 0
self.free_disk_mb = 0
self.vcpus_total = 0
self.vcpus_used = 0
self.pci_stats = None
self.numa_topology = None
# Additional host information from the compute node stats:
self.num_instances = 0
self.num_io_ops = 0
# Other information
self.host_ip = None
self.hypervisor_type = None
self.hypervisor_version = None
self.hypervisor_hostname = None
self.cpu_info = None
self.supported_instances = None
# Resource oversubscription values for the compute host:
self.limits = {}
# Generic metrics from compute nodes
self.metrics = None
# List of aggregates the host belongs to
self.aggregates = []
# Instances on this host
self.instances = {}
# Allocation ratios for this host
self.ram_allocation_ratio = None
self.cpu_allocation_ratio = None
self.updated = None
if compute:
self.update_from_compute_node(compute)
def update_service(self, service):
self.service = ReadOnlyDict(service)
def update_from_compute_node(self, compute):
"
|
""Update information about a host from a ComputeNode object.
|
"""
if (self.updated and compute.updated_at
and self.updated > compute.updated_at):
return
all_ram_mb = compute.memory_mb
# Assume virtual size is all consumed by instances if use qcow2 disk.
free_gb = compute.free_disk_gb
least_gb = compute.disk_available_least
if least_gb is not None:
if least_gb > free_gb:
# can occur when an instance in database is not on host
LOG.warning(_LW("Host %(hostname)s has more disk space than "
"database expected "
"(%(physical)sgb > %(database)sgb)"),
{'physical': least_gb, 'database': free_gb,
'hostname': compute.hypervisor_hostname})
free_gb = min(least_gb, free_gb)
free_disk_mb = free_gb * 1024
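        # Illustrative numbers (hypothetical): free_disk_gb=100 with
        # disk_available_least=80 schedules against min(80, 100) = 80 GB,
        # i.e. free_disk_mb = 81920.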
self.disk_mb_used = compute.local_gb_used * 1024
# NOTE(jogo) free_ram_mb can be negative
self.free_ram_mb = compute.free_ram_mb
self.total_usable_ram_mb = all_ram_mb
self.total_usable_disk_gb = compute.local_gb
self.free_disk_mb = free_disk_mb
self.vcpus_total = compute.vcpus
self.vcpus_used = compute.vcpus_used
self.updated = compute.updated_at
self.numa_topology = compute.numa_topology
self.pci_stats = pci_stats.PciDeviceStats(
compute.pci_device_pools)
# All virt drivers report host_ip
self.host_ip = compute.host_ip
self.hypervisor_type = compute.hypervisor_type
self.hypervisor_version = compute.hypervisor_version
self.hypervisor_hostname = compute.hypervisor_hostname
self.cpu_info = compute.cpu_info
if compute.supported_hv_specs:
            self.supported_instances = [spec.to_list() for spec
                                        in compute.supported_hv_specs]
|
skeledrew/web-dl
|
cookies.py
|
Python
|
agpl-3.0
| 10,859
| 0.000829
|
# -*- coding: utf-8 -*-
"""
Cookie handling module. (Ripped from coursera-dl)
TODO: Convert to Hy and make generic.
"""
import logging
import os
import ssl
import tempfile
import getpass
import requests
from requests.adapters import HTTPAdapter
try: # Workaround for broken Debian/Ubuntu packages? (See issue #331)
from requests.packages.urllib3.poolmanager import PoolManager
except ImportError:
from urllib3.poolmanager import PoolManager
from six.moves import StringIO
from six.moves import http_cookiejar as cookielib
#from .define import CLASS_URL, AUTH_REDIRECT_URL, PATH_COOKIES, AUTH_URL_V3
#from .utils import mkdir_p
# Monkey patch cookielib.Cookie.__init__.
# Reason: The expires value may be a decimal string,
# but the Cookie class uses int() ...
__orginal_init__ = cookielib.Cookie.__init__
if os.name == "posix": # pragma: no cover
import pwd
_USER = pwd.getpwuid(os.getuid())[0]
else:
_USER = getpass.getuser()
PATH_CACHE = os.path.join(tempfile.gettempdir(), _USER + "_populi_dl_cache") # from define.py
PATH_COOKIES = os.path.join(PATH_CACHE, 'cookies') # ^
def __fixed_init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False):
if expires is not None:
expires = float(expires)
__orginal_init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False)
cookielib.Cookie.__init__ = __fixed_init__
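# Hedged illustration of why the patch is needed (values are made up): servers
# may send a decimal "expires" attribute, which int() rejects but float()
# accepts:
#     float('1456789000')     # -> 1456789000.0
#     float('1456789000.25')  # -> 1456789000.25 (int('1456789000.25') raises)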
class ClassNotFound(BaseException):
"""
Raised if a course is not found in Coursera's site.
"""
class AuthenticationFailed(BaseException):
"""
Raised if we cannot authenticate on Coursera's site.
"""
def login(session, username, password, class_name=None):
"""
Login on coursera.org with the given credentials.
This adds the following cookies to the session:
sessionid, maestro_login, maestro_login_flag
"""
logging.debug('Initiating login.')
try:
session.cookies.clear('.coursera.org')
logging.debug('Cleared .coursera.org cookies.')
except KeyError:
logging.debug('There were no .coursera.org cookies to be cleared.')
"""
# Hit class url
if class_name is not None:
class_url = CLASS_URL.format(class_name=class_name)
r = requests.get(class_url, allow_redirects=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
logging.error(e)
raise ClassNotFound(class_name)
# csrftoken is simply a 20 char random string.
csrftoken = random_string(20)
# Now make a call to the authenticator url.
csrf2cookie = 'csrf2_token_%s' % random_string(8)
csrf2token = random_string(24)
cookie = "csrftoken=%s; %s=%s" % (csrftoken, csrf2cookie, csrf2token)
logging.debug('Forging cookie header: %s.', cookie)
headers = {
'Cookie': cookie,
'X-CSRFToken': csrftoken,
'X-CSRF2-Cookie': csrf2cookie,
'X-CSRF2-Token': csrf2token,
}
"""
data = {
'email': username,
'password': password,
'webrequest': 'true'
}
# Auth API V3
r = session.post(AUTH_URL_V3, data=data,
headers=headers, allow_redirects=False)
try:
r.raise_for_status()
        # Somehow the order of the cookie parameters is important
        # for coursera!!!
v = session.cookies.pop('CAUTH')
session.cookies.set('CAUTH', v)
except requests.exceptions.HTTPError:
raise AuthenticationFailed('Cannot login on coursera.org.')
logging.info('Logged in on coursera.org.')
def down_the_wabbit_hole(session, class_name):
"""
Authenticate on class.coursera.org
"""
auth_redirector_url = AUTH_REDIRECT_URL.format(class_name=class_name)
r = session.get(auth_redirector_url)
logging.debug('Following %s to authenticate on class.coursera.org.',
auth_redirector_url)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
raise AuthenticationFailed('Cannot login on class.coursera.org.')
logging.debug('Exiting "deep" authentication.')
def get_authentication_cookies(session, class_name, username, password):
"""
Get the necessary cookies to authenticate on class.coursera.org.
To access the class pages we need two cookies on class.coursera.org:
csrf_token, session
"""
# First, check if we already have the .coursera.org cookies.
if session.cookies.get('CAUTH', domain=".coursera.org"):
logging.debug('Already logged in on accounts.coursera.org.')
else:
login(session, username, password, class_name=class_name)
try:
session.cookies.clear('class.coursera.org', '/' + class_name)
except KeyError:
pass
down_the_wabbit_hole(session, class_name)
enough = do_we_have_enough_cookies(session.cookies, class_name)
    if not enough:
        raise AuthenticationFailed('Did not find necessary cookies.')
    logging.info('Found authentication cookies.')
def do_we_have_enough_cookies(cj, class_name):
"""
Check whether we have all the required cookies
to authenticate on class.coursera.org.
"""
domain = 'class.coursera.org'
path = "/" + class_name
return cj.get('csrf_token', domain=domain, path=path) is not None
def validate_cookies(session, class_name):
"""
Checks whether we have all the required cookies
to authenticate on class.coursera.org. Also check for and remove
stale session.
"""
if not do_we_have_enough_cookies(session.cookies, class_name):
return False
url = CLASS_URL.format(class_name=class_name) + '/class'
r = session.head(url, allow_redirects=False)
if r.status_code == 200:
return True
else:
logging.debug('Stale session.')
try:
session.cookies.clear('.coursera.org')
except KeyError:
pass
return False
def make_cookie_values(cj, class_name):
"""
Makes a string of cookie keys and values.
Can be used to set a Cookie header.
"""
path = "/" + class_name
cookies = [c.name + '=' + c.value
for c in cj
if c.domain == class_name]
return '; '.join(cookies)
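# Hedged usage sketch (names and values are made up): a jar holding
# csrf_token=abc and session=xyz for the matching domain yields a ready-made
# Cookie header value:
#     make_cookie_values(cj, 'some_class')  # -> 'csrf_token=abc; session=xyz'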
def find_cookies_for_class(cookies_file, class_name):
"""
Return a RequestsCookieJar containing the cookies for
.coursera.org and class.coursera.org found in the given cookies_file.
TODO: Purge coursera specific code.
"""
path = "/" + class_name
def cookies_filter(c):
# return c.domain == ".coursera.org" \
# or (c.domain == "class.coursera.org" and c.path == path)
return c.domain == class_name
cj = get_cookie_jar(cookies_file)
new_cj = requests.cookies.RequestsCookieJar()
for c in filter(cookies_filter, cj):
new_cj.set_cookie(c)
return new_cj
def load_cookies_file(cookies_file):
"""
Load cookies file.
We pre-pend the file with the special Netscape header because the cookie
loader is very particular about this string.
"""
logging.debug('Loading cookie file %s into memory.', cookies_file)
cookies = StringIO()
cookies.write('# Netscape HTTP Cookie File')
cookies.write(open(cookies_file, 'rU').read())
cookies.flush()
cookies.seek(0)
return cookies
def get_cookie_jar(cookies_file):
cj = cookielib.MozillaCookieJar()
cookies = load_cookies_file(cookies_file)
# nasty hack: cj.load() requires a filename not a file, but if I use
|
persepolisdm/translation-API
|
pdm_api/settings/__init__.py
|
Python
|
gpl-3.0
| 688
| 0.001453
|
from .settings import info
class get_settings:
def __init__(self):
self.settings = info()
self.repo = self.settings['repo']
self.repo_path = self.repo['repo_path']
        self.repo_url = self.repo['repo_url']
self.key = self.repo['ssh_key']
self.slack = self.settings['slack']
self.slack_token = self.slack['token']
def repo(self):
return(self.repo)
def repo_url(self):
return(self.repo_url)
def repo_path(self):
return(self.repo_path)
def ssh_key(self):
return(self.key)
def slack(self):
return(self.slack)
def slack_token(self):
return(self.slack_token)
|
adrifloresm/sssweep
|
setup.py
|
Python
|
bsd-3-clause
| 2,611
| 0.003064
|
"""
* Copyright (c) 2012-2017, Nic McDonald and Adriana Flores
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of prim nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
import codecs
import re
import os
import sys
try:
from setuptools import setup
except:
print('please install setuptools via pip:')
print(' pip3 install setuptools')
sys.exit(-1)
def find_version(*file_paths):
version_file = codecs.open(os.path.join(os.path.abspath(
os.path.dirname(__file__)), *file_paths), 'r').read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
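# Hedged illustration (hypothetical file contents): if sssweep/__init__.py
# contains a line such as
#     __version__ = '1.2.3'
# then find_version('sssweep', '__init__.py') returns '1.2.3'; re.M lets the
# ^ anchor match at the start of that line rather than the whole file.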
setup(
name='sssweep',
version=find_version('sssweep', '__init__.py'),
description='Automatic task generation for SuperSim sweeps and plot web viewer',
author='Nic McDonald and Adriana Flores',
author_email='nicci02@hotmail.com and adrifloresm@gmail.com',
license='BSD',
url='http://github.com/nicmcd/sssweep',
packages=['sssweep'],
install_requires=['taskrun >= 3.0.0',
'ssplot >= 0.1.0'],
)
|
vovanbo/aiohttp_json_api
|
punch_version.py
|
Python
|
mit
| 31
| 0
|
major = 0
minor = 37
patch = 0
|
Bioto/Huuey-python
|
huuey/requester.py
|
Python
|
mit
| 809
| 0.001236
|
import json
from requests import Request, Session
class Requester:
@staticmethod
def verifyconnection(url="http://google.com"):
return Requester.request(url, method='GET', decode=False)
@staticmethod
def request(url, method=None, data=None, decode=True):
if not url.startswith('http://') and not url.startswith('https://'):
url = 'http://' + url
request = Request(method, url)
if data:
request.data = json.dumps(data)
with Session() as session:
prepped = session.prepare_request(request)
try:
response = session.send(prepped)
except:
return False
if decode:
return json.loads(response.text)
else:
return response
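# Hedged usage sketch (the endpoint below is a placeholder, not a real API):
if __name__ == '__main__':
    # Returns the raw Response if reachable, False on connection failure.
    print(Requester.verifyconnection())
    # Prepends http:// when missing and JSON-decodes the reply body.
    print(Requester.request('api.example.com/v1/thing', method='POST',
                            data={'a': 1}))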
|
stefanseefeld/synopsis
|
Synopsis/Formatters/HTML/Views/FileListing.py
|
Python
|
lgpl-2.1
| 2,722
| 0.019471
|
#
# Copyright (C) 2000 Stephen Davies
# Copyright (C) 2000 Stefan Seefeld
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
from Synopsis.Processor import Parameter
from Synopsis import FileTree
from Synopsis.Formatters.HTML.View import View
from Synopsis.Formatters.HTML.Tags import *
import os
class FileListing(View):
"""A view that creates an index of files, and an index for each file.
First the index of files is created, intended for the top-left frame.
Second a view is created for each file, listing the major declarations for
that file, eg: classes, global functions, namespaces, etc."""
def filename(self):
if self.main:
return self.directory_layout.index()
else:
return self.directory_layout.special('FileListing')
def title(self):
return 'Files'
    def root(self):
return self.filename(), self.title()
def register_filenames(self):
"""Registers a view for each file indexed."""
self.processor.register_filename(self.filename(), self, None)
def process(self):
"""Creates the listing using the recursive process_file_tree_node method"""
# Start the file
self.start_file()
self.write_navigation_bar()
self.write('<ul class="tree">')
        # recursively visit all nodes
self.process_file_tree_node(self.processor.file_tree.root())
self.write('</ul>')
self.end_file()
def _node_sorter(self, a, b):
"""Compares file nodes a and b depending on whether they are leaves
or not"""
a_leaf = isinstance(a, FileTree.File)
b_leaf = isinstance(b, FileTree.File)
if a_leaf != b_leaf:
return cmp(b_leaf, a_leaf)
return cmp(a.path.upper(), b.path.upper())
def process_file_tree_node(self, node):
"""Creates a portion of the tree for the given file node. This method
assumes that the file is already in progress, and just appends to
it. This method is recursive, calling itself for each child of node
(file or directory)."""
if isinstance(node, FileTree.File):
# Leaf node
ref = rel(self.filename(), self.directory_layout.file_index(node.path))
text = href(ref, node.filename, target='detail')
self.write('<li>%s</li>'%text)
return
# Non-leaf node
children = node.children
children.sort(self._node_sorter)
if len(node.path):
            self.write('<li>%s<ul class="tree">' % (node.filename + os.sep))
if len(children):
for child in children:
self.process_file_tree_node(child)
if len(node.path):
self.write('</ul></li>')
|
theapricot/oppapp2
|
app.py
|
Python
|
mit
| 13,357
| 0.013626
|
mustBeAdmin = ['You must be the webadmin to access this page.','danger']
mustBeStudentCoord = ['You must be a student coordinator to access this page.','danger']
from sockdefs import *
print("forming routes...")
monkey.patch_all()
# MAIN EVENTS PAGE #
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
item = Meta.query.get(1)
return render_template('index.html', ip = request.environ['REMOTE_ADDR'], item = item)
email = request.form['email'].split('@')[0] + "@gcc.edu"
password = request.form['password']
remember_me = False
if 'remember_me' in request.form:
remember_me = True
registered_user = Users.query.filter(Users.email.ilike(email)).first()
if registered_user is None:
flash('Username is invalid' , 'danger')
return redirect(url_for('index'))
if not registered_user.check_password(password):
flash('Password is invalid','danger')
return redirect(url_for('index'))
login_user(registered_user, remember = remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.route('/admin', methods=['GET', 'POST'])
@login_required
def admin():
if not g.user.is_editor():
flash(mustBeStudentCoord[0],mustBeStudentCoord[1])
return redirect(url_for('index'))
#allusers = Users.query.order_by(Users.lname.asc())
if request.method == 'POST': # happens when form is submitted (a button is clicked on the page)
def getuserevent(addorremove):
# parses string sent by form (add user to/remove user from event)
# returns the user and opp described by that string
[userid,eventid] = request.form[addorremove].split(',')
usr = Users.query.get(int(userid))
opp = Opps.query.get(int(eventid))
return [usr, opp]
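        # Illustrative form value (ids are made up): 'addtoevent' == '12,34'
        # resolves to (Users id 12, Opps id 34).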
if 'addtoevent' in request.form: # if "add tech to event" button was clicked
[usr, opp] = getuserevent('addtoevent')
usr.opps.append(opp)
if 'lockevent' in request.form:
opp = Opps.query.get(int(request.form['lockevent']))
if opp.locked:
opp.locked = False
else:
opp.locked = True
if 'removefromevent' in request.form: # if tech is removed from an event
[usr, opp] = getuserevent('removefromevent')
if usr in opp.usersPreferred:
usr.preferredOpps.remove(opp)
else:
usr.opps.remove(opp)
#flash(usr.fname + ' ' + usr.lname + ' removed from "' + opp.name + '"', 'success')
if 'movetoevent' in request.form: # if tech is moved to different event
            [usrid, frmid, toid] = request.form['movetoevent'].split(',')
usr = Users.query.get(int(usrid))
frm = Opps.query.get(int(frmid))
to = Opps.query.get(int(toid))
if usr in frm.usersPreferred:
usr.preferredOpps.remove(frm)
else:
usr.opps.remove(frm)
usr.opps.append(to)
#flash(usr.fname + ' ' + usr.lname + ' moved from "' + frm.name + '" to "' + to.name + '"', 'success')
if 'togglesignup' in request.form: # if "sign-ups open/closed" button is clicked
if allcansignup():
for user in allusers:
user.cansignup = False
#flash('Sign-Ups are now closed.')
else:
for user in allusers:
user.cansignup = True
#flash('Sign-Ups are now open.')
db.session.commit()
allusers = Users.query.order_by(Users.lname.asc())
events = Opps.query.filter(Opps.date > datetime.now()).order_by(asc(Opps.date)).all()
def renderAdmin():
for event in events:
#time.sleep(.01)
yield event
g.user = current_user
return Response(stream_with_context(stream_template('adminevent.html', eventiter = renderAdmin(), events=events, allusers=allusers)))
@app.route('/past', methods=['GET'])
@login_required
def past():
# render past events
if not g.user.is_editor():
flash(mustBeStudentCoord[0],mustBeStudentCoord[1])
return redirect(url_for('index'))
allusers = Users.query.order_by(Users.lname.asc())
events = Opps.query.filter(Opps.date < datetime.now()).order_by(desc(Opps.date)).all()
def render_past():
for event in events:
#time.sleep(.01)
yield event
g.user = current_user
return Response(stream_with_context(stream_template('past_event.html', eventiter = render_past(), events=events, allusers=allusers)))
@app.route('/events', methods=['GET', 'POST'])
@login_required
def events():
allEvents = db.session.query(Opps).filter(Opps.date > datetime.now()).order_by(asc(Opps.date)).all()
return render_template('events.html',allEvents = allEvents)
@app.route('/pastevents', methods=['GET'])
@login_required
def pastevents():
#allEvents = db.session.query(Opps).filter(Opps.date < datetime.now()).order_by(desc(Opps.date)).all()
return render_template('pastevents.html')
# NEW EVENT PAGE #
@app.route('/new', methods=['GET', 'POST'])
@login_required
def new():
if not g.user.is_editor() and not g.user.is_webadmin():
flash(mustBeStudentCoord[0],mustBeStudentCoord[1])
return redirect(url_for('events'))
if request.method == 'POST': # form was submitted
# do a whole bunch of form verification (is there a better way to do this?)
if not request.form['title']:
flash('Title is required', 'danger')
elif not request.form['location']:
flash('Location is required', 'danger')
elif not request.form['time']:
flash('Start time is requried', 'danger')
elif not request.form['endtime']:
flash('End time is required', 'danger')
elif not request.form['ntechs']:
flash('Number of techs is required', 'danger')
else: # finally, if we pass inspection, add the event to the database
title = request.form['title']
todo = Opps(title, request.form['location'])
todo.date = datetime.strptime(request.form['date']+request.form['time'],'%m/%d/%Y%I:%M %p')
todo.enddate = datetime.strptime(request.form['date']+request.form['endtime'],'%m/%d/%Y%I:%M %p')
todo.user = g.user
todo.techsneeded = int(request.form['ntechs'])
todo.info = request.form['info']
db.session.add(todo)
db.session.commit()
flash('"' + title + '" was successfully created', 'success')
#flash(todo.uuid,'info')
return redirect(url_for('admin'))
return render_template('new.html') # page was loaded
# EDIT EVENT PAGE #
@app.route('/events/<int:eventID>', methods = ['GET' , 'POST'])
@login_required
def show_or_update(eventID):
if not g.user.is_editor() and not g.user.is_webadmin():
flash(mustBeStudentCoord[0],mustBeStudentCoord[1])
return redirect(url_for('index'))
todo_item = Opps.query.get(eventID)
if request.method == 'GET':
return render_template('view.html',todo=todo_item)
if request.form['submit'] == 'submit':
todo_item.name = request.form['title']
todo_item.desc = request.form['location']
todo_item.date = datetime.strptime(request.form['date']+request.form['time'],'%m/%d/%Y%I:%M %p')
todo_item.enddate = datetime.strptime(request.form['date']+request.form['endtime'],'%m/%d/%Y%I:%M %p')
todo_item.techsneeded = request.form['ntechs']
todo_item.info = request.form['info']
flash('Event updated.', 'info')
else:
db.session.delete(todo_item)
flash('Event deleted.', 'info')
db.session.commit()
return redirect(url_for('admin'))
flash('You are not authorized to edit this todo item','danger')
return redirect(u
|
MarcosCommunity/odoo
|
comunity_modules/costing_method_settings/__openerp__.py
|
Python
|
agpl-3.0
| 1,761
| 0
|
#!/usr/bin/python
###############################################################################
# Module Written to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://www.vauxoo.com>).
# All Rights Reserved
###############################################################################
# Credits:
# Coded by: Katherine Zaoral <kathy@vauxoo.com>
# Planified by: Katherine Zaoral <kathy@vauxoo.com>
# Audited by: Katherine Zaoral <kathy@vauxoo.com>
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
{
"name": "Costing Method Settings",
"summary": "Set product default cost method",
"version": "1.6",
"author": "Vauxoo",
"website": "http://www.vauxoo.com/",
"category": "Settings",
"depends": [
"purchase",
],
"data": [
"wizard/res_config_view.xml"
],
"demo": [],
"test": [],
"qweb": [],
"js": [],
"css": [],
"installable": True,
}
|
googleapis/python-error-reporting
|
google/cloud/errorreporting_v1beta1/services/error_group_service/client.py
|
Python
|
apache-2.0
| 25,457
| 0.001453
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.errorreporting_v1beta1.types import common
from google.cloud.errorreporting_v1beta1.types import error_group_service
from .transports.base import ErrorGroupServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ErrorGroupServiceGrpcTransport
from .transports.grpc_asyncio import ErrorGroupServiceGrpcAsyncIOTransport
class ErrorGroupServiceClientMeta(type):
"""Metaclass for the ErrorGroupService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ErrorGroupServiceTransport]]
_transport_registry["grpc"] = ErrorGroupServiceGrpcTransport
_transport_registry["grpc_asyncio"] = ErrorGroupServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ErrorGroupServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
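# Hedged illustration of the registry above (default is the first entry):
#     ErrorGroupServiceClient.get_transport_class()                # grpc
#     ErrorGroupServiceClient.get_transport_class("grpc_asyncio")  # asyncio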
class ErrorGroupServiceClient(metaclass=ErrorGroupServiceClientMeta):
"""Service for retrieving and updating individual error groups."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
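    # Hedged examples of the conversion above (hostnames are illustrative):
    #     "foo.googleapis.com"          -> "foo.mtls.googleapis.com"
    #     "foo.sandbox.googleapis.com"  -> "foo.mtls.sandbox.googleapis.com"
    #     "foo.mtls.googleapis.com"     -> unchanged (already mTLS)
    #     "example.com"                 -> unchanged (not a googleapis domain)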
DEFAULT_ENDPOINT = "clouderrorreporting.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ErrorGroupServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ErrorGroupServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ErrorGroupServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ErrorGroupServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def error_group_path(project: str, group: str,) -> str:
"""Returns a fully-qualified error_group string."""
return "projects/{project}/groups/{group}".format(project=project, group=group,)
@staticmethod
def parse_error_group_path(path: str) -> Dict[str, str]:
"""Parses a error_group path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/groups/(?P<group>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{proje
|
standage/tag
|
tag/__init__.py
|
Python
|
bsd-3-clause
| 1,438
| 0
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (C) 2015 Daniel Standage <daniel.standage@gmail.com>
#
# This file is part of tag (http://github.com/standage/tag) and is licensed
# under the BSD 3-clause license: see LICENSE.
# -----------------------------------------------------------------------------
"""Package-wide configuration"""
try:
import __builtin__ as builtins
except ImportError: # pragma: no cover
import builtins
from tag.comment import Comment
from tag.directive import Directive
from tag.feature import Feature
from tag.sequence import Sequence
from tag.range import Range
from tag.reader import GFF3Reader
from tag.writer import GFF3Writer
from tag.score import Score
from tag import bae
from tag import cli
from tag import index
from tag import locus
from tag import select
from tag import transcript
from gzip import open as gzopen
import sys
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def open(filename, mode):
if mode not in ['r', 'w']:
raise ValueError('invalid mode "{}"'.format(mode))
if filename in ['-', None]: # pragma: no cover
filehandle = sys.stdin if mode == 'r' else sys.stdout
return filehandle
openfunc = builtins.open
if filename.endswith('.gz'):
openfunc = gzopen
mode += 't'
return openfunc(filename, mode)
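# Illustrative usage (editor's sketch; file names are hypothetical):
#     fh = open('annotations.gff3.gz', 'r')   # transparently gunzips
#     out = open('-', 'w')                    # '-' maps to stdout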
|
jobec/django-auth-adfs
|
tests/settings.py
|
Python
|
bsd-2-clause
| 2,308
| 0
|
SECRET_KEY = 'secret'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
        'DIRS': ['templates'],
},
]
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_auth_adfs.middleware.LoginRequiredMiddleware',
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
    'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_auth_adfs',
'tests',
)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
'django_auth_adfs.backend.AdfsAuthCodeBackend',
'django_auth_adfs.backend.AdfsAccessTokenBackend',
)
ROOT_URLCONF = 'tests.urls'
STATIC_ROOT = '/tmp/' # Dummy
STATIC_URL = '/static/'
AUTH_ADFS = {
"SERVER": "adfs.example.com",
"CLIENT_ID": "your-configured-client-id",
"RELYING_PARTY_ID": "your-adfs-RPT-name",
"AUDIENCE": "microsoft:identityserver:your-RelyingPartyTrust-identifier",
"CA_BUNDLE": "/path/to/ca-bundle.pem",
"CLAIM_MAPPING": {"first_name": "given_name",
"last_name": "family_name",
"email": "email"},
"BOOLEAN_CLAIM_MAPPING": {"is_staff": "user_is_staff",
"is_superuser": "user_is_superuser"},
"CONFIG_RELOAD_INTERVAL": 0, # Always reload settings
}
LOGIN_URL = "django_auth_adfs:login"
LOGIN_REDIRECT_URL = "/"
|
zhangyage/Python-oldboy
|
day14/BBS/web/models.py
|
Python
|
apache-2.0
| 1,693
| 0.022011
|
# -*- coding:utf-8 -*-
from django.db import models
# Create your models here.
class UserType(models.Model):
display = models.CharField(max_length=50)
def __unicode__(self):
return self.display
class Admin(models.Model):
username = models.CharField(max_length=50)
password = models.CharField(max_length=50)
email = models.EmailField()
CreateDate = models.DateTimeField(auto_now_add = True)
user_type = models.ForeignKey('UserType')
def __unicode__(self):
return self.username
class Chat(models.Model):
content = models.TextField()
user = models.ForeignKey('Admin')
create_date = models.DateTimeField(auto_now_add = True)
def __unicode__(self):
return self.content
class NewType(models.Model):
display = models.CharField(max_length=50)
def __unicode__(self):
return self.display
class News(models.Model):
title = models.CharField(max_length=30)
    summary = models.CharField(max_length=256)
url = models.URLField()
favor_count = models.IntegerField(default=0)
    # number of likes
replay_count = models.IntegerField(default=0)
    # number of comments
news_type = models.ForeignKey('NewType')
    user = models.ForeignKey('Admin')
create_date = models.DateTimeField(auto_now_add = True)
def __unicode__(self):
return self.title
class Reply(models.Model):
content = models.TextField()
user = models.ForeignKey('Admin')
new = models.ForeignKey('News')
create_date = models.DateTimeField(auto_now_add = True)
def __unicode__(self):
return self.content
|
Darkkey/hartinsecurity
|
hart_change_longtag.py
|
Python
|
bsd-2-clause
| 1,068
| 0.015918
|
import serial,time,socket
import hart_protocol
import sys
port = 3
if len(sys.argv) < 4:
print "Error, usage " + sys.argv[0] + " port long_address new_longtag"
print "Usage hex string (5 hex digits) as address and LATIN-1 string as new long tag"
quit()
address = sys.argv[2].decode('hex')
if len(address) != 5:
print "Error
|
, address should be 5 bytes long!"
longtag = sys.argv[3]
if len(longtag) != 32:
print "Error, long tag should be 32 bytes long!"
port = int(sys.argv[1]) - 1
print "Opening COM" + str(port + 1) + "..."
preambles = 10
delimiter = '\x82' # master command with long address
command = '\x16' # write long tag
ln = 32 # long tag exact length
pack = delimiter + address + command + chr(ln) + longtag
packet = '\xff' * preambles + pack + hart_protocol.get_checksum(pack)
ser = serial.Serial(port, 1200)
print "writing: " + hart_protocol.dump_hex(packet)
ser.write(packet)
print "packet sent succesfully!"
|
tortib/nzbToMedia
|
nzbtomedia/linktastic/linktastic.py
|
Python
|
gpl-3.0
| 4,260
| 0.00493
|
# Linktastic Module
# - A python2/3 compatible module that can create hardlinks/symlinks on windows-based systems
#
# Linktastic is distributed under the MIT License. The follow are the terms and conditions of using Linktastic.
#
# The MIT License (MIT)
# Copyright (c) 2012 Solipsis Development
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import subprocess
from subprocess import CalledProcessError
import os
if os.name == 'nt':
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Prevent spaces from messing with us!
def _escape_param(param):
return '"%s"' % param
# Private function to create link on nt-based systems
def _link_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink /H %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT, startupinfo=info)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
def _symlink_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT, startupinfo=info)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
def _dirlink_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink /J %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT, startupinfo=info)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
def _junctionlink_windows(src, dest):
try:
subprocess.check_output(
'cmd /C mklink /D %s %s' % (_escape_param(dest), _escape_param(src)),
stderr=subprocess.STDOUT, startupinfo=info)
except CalledProcessError as err:
raise IOError(err.output.decode('utf-8'))
# TODO, find out what kind of messages Windows sends us from mklink
# print(stdout)
# assume if they ret-coded 0 we're good
# Create a hard link to src named as dest
# This version of link, unlike os.link, supports nt systems as well
def link(src, dest):
if os.name == 'nt':
_link_windows(src, dest)
else:
os.link(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def symlink(src, dest):
if os.name == 'nt':
_symlink_windows(src, dest)
else:
os.symlink(src, dest)
# Create a directory junction to src named as dest on nt (mklink /J); falls back to os.symlink elsewhere
def dirlink(src, dest):
if os.name == 'nt':
_dirlink_windows(src, dest)
else:
os.symlink(src, dest)
# Create a directory symlink to src named as dest on nt (mklink /D); falls back to os.symlink elsewhere
def junctionlink(src, dest):
if os.name == 'nt':
_junctionlink_windows(src, dest)
else:
os.symlink(src, dest)
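# Illustrative usage (editor's sketch; paths are hypothetical):
#     link('movie.mkv', 'movie-hard.mkv')    # hard link on NT and POSIX alike
#     symlink('movie.mkv', 'movie-sym.mkv')  # plain symlink
#     junctionlink('media', 'media-link')    # mklink /D on NT, os.symlink elsewhere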
|
angr/angr
|
angr/analyses/decompiler/peephole_optimizations/remove_redundant_nots.py
|
Python
|
bsd-2-clause
| 531
| 0
|
from ailment.expression import UnaryOp
from .base import PeepholeOptimizationExprBase
class RemoveRedundantNots(PeepholeOptimizationExprBase):
__slots__ = ()
name = "Remove redundant Nots"
    expr_classes = (UnaryOp, ) # only unary expressions are examined
def optimize(self, expr: UnaryOp):
# Not(Not(expr)) ==> expr
if expr.op == "Not" \
                and isinstance(expr.operand, UnaryOp) \
and expr.operand.op == "Not":
return expr.operand.operand
return None
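    # Illustrative effect (editor's sketch; `v` stands for any AIL expression):
    #     Not(Not(v))  -->  v
    #     Not(v)       -->  left unchanged (optimize() returns None)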
|
RudolfCardinal/crate
|
crate_anon/nlp_webserver/views.py
|
Python
|
gpl-3.0
| 30,270
| 0
|
#!/usr/bin/env python
r"""
crate_anon/nlp_webserver/views.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
Pyramid views making up the CRATE NLPRP web server.
"""
from contextlib import contextmanager
import datetime
import logging
import json
from typing import Dict, Generator, List, Optional, Tuple, Any
import redis
from cardinal_pythonlib.httpconst import HttpStatus
from cardinal_pythonlib.json.typing_helpers import (
JsonArrayType,
JsonObjectType,
JsonValueType,
)
from cardinal_pythonlib.sqlalchemy.core_query import fetch_all_first_values
from celery.result import AsyncResult, ResultSet
from pyramid.view import view_config, view_defaults
from pyramid.request import Request
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.sql.expression import and_, ClauseElement, select
import transaction
from crate_anon.common.constants import JSON_SEPARATORS_COMPACT
from crate_anon.nlp_webserver.security import (
check_password,
get_auth_credentials,
encrypt_password,
)
# from crate_anon.common.profiling import do_cprofile
from crate_anon.nlprp.api import (
json_get_array,
json_get_array_of_str,
json_get_bool,
json_get_str,
json_get_toplevel_args,
json_get_value,
pendulum_to_nlprp_datetime,
)
from crate_anon.nlprp.constants import (
NlprpCommands,
NlprpKeys as NKeys,
NlprpValues,
)
from crate_anon.nlprp.errors import (
BAD_REQUEST,
INTERNAL_SERVER_ERROR,
key_missing_error,
NlprpError,
mkerror,
NOT_FOUND,
UNAUTHORIZED,
)
from crate_anon.nlprp.version import NLPRP_VERSION_STRING
from crate_anon.nlp_webserver.manage_users import get_users
from crate_anon.nlp_webserver.models import (
dbsession,
Document,
DocProcRequest,
make_unique_id,
)
from crate_anon.nlp_webserver.server_processor import ServerProcessor
from crate_anon.nlp_webserver.constants import (
SERVER_NAME,
SERVER_VERSION,
NlpServerConfigKeys,
)
from crate_anon.nlp_webserver.tasks import (
celery_app,
process_nlp_text,
process_nlp_text_immediate,
TaskSession,
start_task_session,
)
from crate_anon.nlp_webserver.settings import SETTINGS
log = logging.getLogger(__name__)
# =============================================================================
# Debugging settings
# =============================================================================
DEBUG_SHOW_REQUESTS = False
if DEBUG_SHOW_REQUESTS:
log.warning("Debugging options enabled! Turn off for production.")
# =============================================================================
# Constants
# =============================================================================
COOKIE_SESSION_TOKEN = 'session_token'
DEFAULT_REDIS_HOST = "localhost"
DEFAULT_REDIS_PORT = 6379 # https://redis.io/topics/quickstart
DEFAULT_REDIS_DB_NUMBER = 0 # https://redis.io/commands/select
REDIS_HOST = SETTINGS.get(NlpServerConfigKeys.REDIS_HOST, DEFAULT_REDIS_HOST)
REDIS_PORT = SETTINGS.get(NlpServerConfigKeys.REDIS_PORT, DEFAULT_REDIS_PORT)
REDIS_DB_NUMBER = SETTINGS.get(NlpServerConfigKeys.REDIS_DB_NUMBER,
DEFAULT_REDIS_DB_NUMBER)
REDIS_PASSWORD = SETTINGS.get(NlpServerConfigKeys.REDIS_PASSWORD, None)
# If the redis server doesn't require a password, it's fine to pass
# 'password=None' to StrictRedis.
REDIS_SESSIONS = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT,
db=REDIS_DB_NUMBER,
password=REDIS_PASSWORD)
SESSION_TOKEN_EXPIRY_S = 300
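# Illustrative pattern (editor's sketch, not the server's actual call sites):
#     REDIS_SESSIONS.set(token, username, ex=SESSION_TOKEN_EXPIRY_S)  # store with TTL
#     REDIS_SESSIONS.get(token)  # returns None once the token has expired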
# =============================================================================
# SQLAlchemy context
# =============================================================================
@contextmanager
def sqla_transaction_commit():
try:
yield
transaction.commit()
except SQLAlchemyError as e:
log.critical(f"SQLAlchemy error: {e}")
dbsession.rollback()
raise INTERNAL_SERVER_ERROR
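# Illustrative usage (editor's sketch; `doc` is a hypothetical ORM object):
#     with sqla_transaction_commit():
#         dbsession.add(doc)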
# =============================================================================
# NlprpProcessRequest
# =============================================================================
class NlprpProcessRequest(object):
"""
Represents an NLPRP :ref:`process <nlprp_process>` command. Takes the
request JSON, and offers efficient views on it.
Uses the global :class:`crate_anon.nlp_server.procs.Processors` class to
find processors.
"""
def __init__(self, nlprp_request: JsonObjectType) -> None:
"""
Args:
nlprp_request: dictionary from the (entire) JSON NLPRP request
Raises:
:exc:`NlprpError` for malformed requests
"""
self.nlprp_request = nlprp_request
args = json_get_toplevel_args(nlprp_request)
# The processors being requested. We fetch all of them now, so they
# can be iterated through fast for each document.
requested_processors = json_get_array(args, NKeys.PROCESSORS,
required=True)
self.processors = [ServerProcessor.get_processor_nlprp(d)
for d in requested_processors]
# Queue?
self.queue = json_get_bool(args, NKeys.QUEUE, default=False)
# Client job ID
self.client_job_id = json_get_str(args, NKeys.CLIENT_JOB_ID,
default="")
# Include the source text in the reply?
self.include_text = json_get_bool(args, NKeys.INCLUDE_TEXT)
# Content: list of objects (each with text and metadata)
self.content = json_get_array(args, NKeys.CONTENT, required=True)
def processor_ids(self) -> List[str]:
"""
Return the IDs of all processors.
"""
return [p.processor_id for p in self.processors]
def processor_ids_jsonstr(self) -> str:
"""
Returns the IDs of all processors as a string of JSON-encoded IDs.
"""
return json.dumps(self.processor_ids(),
separators=JSON_SEPARATORS_COMPACT)
def gen_text_metadataobj(self) -> Generator[Tuple[str, JsonValueType],
None, None]:
"""
Generates text and metadata pairs from the request, with the metadata
in JSON object (Python dictionary) format.
Yields:
tuple: ``(text, metadata)``, as above
"""
for document in self.content:
text = json_get_str(document, NKeys.TEXT, required=True)
metadata = json_get_value(document, NKeys.METADATA,
default=None, required=False)
yield text, metadata
def gen_text_metadatastr(self) -> Generator[Tuple[str, str],
None, None]:
"""
Generates text and metadata pairs from the request, with the metadata
in string (serialized JSON) format.
Yields:
tuple: ``(text, metadata)``, as above
"""
try:
for document in self.content:
text = json_get_str(document, NKeys.TEXT, required=True)
metadata = json_get_value(document, NKeys.METADATA,
default=None, required=False)
|
scheib/chromium
|
third_party/blink/web_tests/external/wpt/resource-timing/resources/multi_redirect.py
|
Python
|
bsd-3-clause
| 2,682
| 0.003356
|
import urllib.parse
from wptserve.utils import isomorphic_encode
def main(request, response):
"""Handler that causes multiple redirections. Redirect chain is as follows:
1. Initial URL containing multi-redirect.py
2. Redirect to cross-origin URL
3. Redirect to same-origin URL
4. Final URL containing the final same-origin resource.
Mandatory parameters:
page_origin - The page origin, used for redirection and to set TAO. This is a mandatory parameter.
cross_origin - The cross origin used to make this a cross-origin redirect. This is a mandatory parameter.
final_resource - Path of the final resource, without origin. This is a mandatory parameter.
Optional parameters:
tao_steps - Number of redirects for which the TAO header will be present (a number 0 - 3 makes the most sense). Default value is 0.
tao_value - The value of the TAO header, when present. Default value is "*".
Note that |step| is a parameter used internally for the multi-redirect. It's the step we're at in the redirect chain.
"""
step = 1
if b"step" in request.GET:
try:
step = int(request.GET.first(b"step"))
except ValueError:
pass
page_origin = request.GET.first(b"page_origin")
cross_origin = request.GET.first(b"cross_origin")
final_resource = request.GET.first(b"final_resource")
tao_value = b"
|
*"
if b"tao_value" in request.GET:
tao_value = request.GET.first(b"tao_value")
tao_steps = 0
if b"tao_steps" in request.GET:
tao_steps = int(request.GET.first(b"tao_steps"))
next_tao_steps = tao_steps - 1
redirect_url_path = b"/resource-timing/resources/multi_redirect.py?"
redirect_url_path += b"page_origin=" + page_origin
redirect_url_path += b"&cross_origin=" + cross_origin
redirect_url_path += b"&final_resource=" + urllib.parse.quote(final_resource).encode('ascii')
redirect_url_path += b"&tao_value=" + tao_value
redirect_url_path += b"&tao_steps=" + isomorphic_encode(str(next_tao_steps))
redirect_url_path += b"&step="
if tao_steps > 0:
response.headers.set(b"timing-allow-origin", tao_value)
if step == 1:
# On the first request, redirect to a cross origin URL
redirect_url = cross_origin + redirect_url_path + b"2"
elif step == 2:
# On the second request, redirect to a same origin URL
redirect_url = page_origin + redirect_url_path + b"3"
else:
# On the third request, redirect to a static response
redirect_url = page_origin + final_resource
response.status = 302
response.headers.set(b"Location", redirect_url)
|
Ideabin/Ideabin
|
server/login.py
|
Python
|
gpl-3.0
| 197
| 0
|
from flask_login import LoginManager
from server.users.models import User
login_manager = LoginManager()
@login_manager.user_loader
def load_user(user_id):
return User.get(user_id=user_id)
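# Illustrative wiring (editor's sketch; `app` is a hypothetical Flask instance):
#     login_manager.init_app(app)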
|
indictranstech/reciphergroup-frappe
|
frappe/permissions.py
|
Python
|
mit
| 13,272
| 0.026597
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, copy, json
from frappe import _, msgprint
from frappe.utils import cint
import frappe.share
rights = ("read", "write", "create", "delete", "submit", "cancel", "amend",
"print", "email", "report", "import", "export", "set_user_permissions", "share")
def check_admin_or_system_manager(user=None):
if not user: user = frappe.session.user
if ("System Manager" not in frappe.get_roles(user)) and (user!="Administrator"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
def has_permission(doctype, ptype="read", doc=None, verbose=False, user=None):
"""Returns True if user has permission `ptype` for given `doctype`.
If `doc` is passed, it also checks user, share and owner permissions.
Note: if Table DocType is passed, it always returns True.
"""
if not user: user = frappe.session.user
if frappe.is_table(doctype):
if verbose: print "Table type, always true"
return True
meta = frappe.get_meta(doctype)
if ptype=="submit" and not cint(meta.is_submittable):
if verbose: print "Not submittable"
return False
if ptype=="import" and not cint(meta.allow_import):
if verbose: print "Not importable"
return False
if user=="Administrator":
if verbose: print "Administrator"
return True
def false_if_not_shared():
if ptype in ("read", "write", "share", "email", "print"):
shared = frappe.share.get_shared(doctype, user,
["read" if ptype in ("email", "print") else ptype])
if doc:
doc_name = doc if isinstance(doc, basestring) else doc.name
            if doc_name in shared:
if verbose: print "Shared"
if ptype in ("read", "write", "share") or meta.permissions[0].get(ptype):
return True
elif shared:
            # if at least one shared doc of that type, then return True
# this is used in db_query to check if permission on DocType
if verbose: print "Has a shared document"
return True
return False
role_permissions = get_role_permissions(meta, user=user, verbose=verbose)
if not role_permissions.get(ptype):
return false_if_not_shared()
if doc:
if isinstance(doc, basestring):
doc = frappe.get_doc(meta.name, doc)
# if owner match, then return True
if doc.owner == frappe.session.user and role_permissions["if_owner"].get(ptype) and ptype!="create":
return True
# check if user permission
if role_permissions["apply_user_permissions"].get(ptype):
if not user_has_permission(doc, verbose=verbose, user=user,
user_permission_doctypes=role_permissions.get("user_permission_doctypes", {}).get(ptype) or []):
if verbose: print "No user permission"
return false_if_not_shared()
if not has_controller_permissions(doc, ptype, user=user):
if verbose: print "No controller permission"
return false_if_not_shared()
if verbose:
print "Has Role"
return True
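# Illustrative call (editor's sketch; doctype and user are made up):
#     has_permission("ToDo", ptype="write", user="test@example.com")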
def get_doc_permissions(doc, verbose=False, user=None):
"""Returns a dict of evaluated permissions for given `doc` like `{"read":1, "write":1}`"""
if not user: user = frappe.session.user
if frappe.is_table(doc.doctype):
return {"read":1, "write":1}
meta = frappe.get_meta(doc.doctype)
role_permissions = copy.deepcopy(get_role_permissions(meta, user=user, verbose=verbose))
if not cint(meta.is_submittable):
role_permissions["submit"] = 0
if not cint(meta.allow_import):
role_permissions["import"] = 0
if role_permissions.get("apply_user_permissions"):
# no user permissions, switch off all user-level permissions
for ptype in role_permissions:
if role_permissions["apply_user_permissions"].get(ptype) and not user_has_permission(doc, verbose=verbose, user=user,
user_permission_doctypes=role_permissions.get("user_permission_doctypes", {}).get(ptype) or []):
role_permissions[ptype] = 0
# apply owner permissions on top of existing permissions
if doc.owner == frappe.session.user:
role_permissions.update(role_permissions.if_owner)
update_share_permissions(role_permissions, doc, user)
return role_permissions
def update_share_permissions(role_permissions, doc, user):
"""Updates share permissions on `role_permissions` for given doc, if shared"""
share_ptypes = ("read", "write", "share")
permissions_by_share = frappe.db.get_value("DocShare",
{"share_doctype": doc.doctype, "share_name": doc.name, "user": user},
share_ptypes, as_dict=True)
if permissions_by_share:
for ptype in share_ptypes:
if ptype:
role_permissions[ptype] = 1
def get_role_permissions(meta, user=None, verbose=False):
"""Returns dict of evaluated role permissions like `{"read": True, "write":False}`
If user permissions are applicable, it adds a dict of user permissions like
{
// user permissions will apply on these rights
"apply_user_permissions": {"read": 1, "write": 1},
// doctypes that will be applicable for each right
"user_permission_doctypes": {
"read": [
// AND between "DocType 1" and "DocType 2"
["DocType 1", "DocType 2"],
// OR
["DocType 3"]
]
}
"if_owner": {"read": 1, "write": 1}
}
"""
if not user: user = frappe.session.user
cache_key = (meta.name, user)
if not frappe.local.role_permissions.get(cache_key):
perms = frappe._dict({ "apply_user_permissions": {}, "user_permission_doctypes": {}, "if_owner": {} })
user_roles = frappe.get_roles(user)
dont_match = []
for p in meta.permissions:
if cint(p.permlevel)==0 and (p.role in user_roles):
# apply only for level 0
for ptype in rights:
# build if_owner dict if applicable for this right
perms[ptype] = perms.get(ptype, 0) or cint(p.get(ptype))
if ptype != "set_user_permissions" and p.get(ptype):
perms["apply_user_permissions"][ptype] = (perms["apply_user_permissions"].get(ptype, 1)
and p.get("apply_user_permissions"))
if p.if_owner and p.get(ptype):
perms["if_owner"][ptype] = 1
if p.get(ptype) and not p.if_owner and not p.get("apply_user_permissions"):
dont_match.append(ptype)
if p.apply_user_permissions:
if p.user_permission_doctypes:
# set user_permission_doctypes in perms
user_permission_doctypes = json.loads(p.user_permission_doctypes)
if user_permission_doctypes:
# perms["user_permission_doctypes"][ptype] would be a list of list like [["User", "Blog Post"], ["User"]]
for ptype in rights:
if p.get(ptype):
perms["user_permission_doctypes"].setdefault(ptype, []).append(user_permission_doctypes)
else:
user_permission_doctypes = get_linked_doctypes(meta.name)
        # if at least one record having both Apply User Permission and If Owner unchecked is found,
# don't match for those rights
for ptype in rights:
if ptype in dont_match:
if perms["apply_user_permissions"].get(ptype):
del perms["apply_user_permissions"][ptype]
if perms["if_owner"].get(ptype):
del perms["if_owner"][ptype]
# if one row has only "Apply User Permissions" checked and another has only "If Owner" checked,
# set Apply User Permissions as checked
for ptype in rights:
if perms["if_owner"].get(ptype) and perms["apply_user_permissions"].get(ptype)==0:
perms["apply_user_permissions"][ptype] = 1
# delete 0 values
for key, value in perms.get("apply_user_permissions").items():
if not value:
del perms["apply_user_permissions"][key]
frappe.local.role_permissions[cache_key] = perms
return frappe.local.role_permissions[cache_key]
def user_has_permission(doc, verbose=True, user=None, user_permission_doctypes=None):
from frappe.defaults import get_user_permissions
user_permissions = get_user_permissions(user)
user_permission_doctypes = get_user_permission_doctypes(user_permission_doctypes, user_permissions)
def check_user_permission(d):
meta = frappe.get_meta(d.get("doctype"))
end_result = False
messages = {}
# check multiple sets of user_permission_doctypes using OR condition
for doctypes in user_permission_doctypes:
result = True
for df in meta.get_fields_to_check_permissions(doctypes):
if (df.options in user_permissions and d.get(df.fieldname)
|
JeffHoogland/qAndora
|
playerGst/__init__.py
|
Python
|
bsd-3-clause
| 24
| 0
|
from playerGst import *
|
VISTAS-IVES/pyvistas
|
source/vistas/ui/windows/legend.py
|
Python
|
bsd-3-clause
| 8,666
| 0.001154
|
import wx
from PIL import Image
from vistas.core.threading import Thread
from vistas.ui.controls.static_image import StaticImage
from vistas.ui.utils import make_window_transparent
LegendRenderEvent, EVT_LEGEND_RENDERED = wx.lib.newevent.NewEvent()
class LegendWindow(wx.Frame):
""" A window for showing a rendered image derived from a Legend. """
class RenderThread(Thread):
def __init__(self, plugin, size, handler):
super().__init__()
self.plugin = plugin
self.size = size
self.handler = handler
def run(self):
wx.PostEvent(self.handler, LegendRenderEvent(image=self.plugin.get_legend(*self.size)))
RESET_LEGEND = 0
def __init__(self, parent, id):
super().__init__(parent, id, size=wx.Size(140, 300), style=wx.FRAME_NO_TASKBAR | wx.FRAME_FLOAT_ON_PARENT)
self.max_size = self.GetSize()
make_window_transparent(self)
self.canvas = parent.gl_canvas
self.mouse_pos = wx.DefaultPosition
self.start_pos = wx.DefaultPosition
self.visualization = None
self.width = 1
self.height = 1
self.dragging = False
self.translucent_background = wx.Frame(
parent, wx.ID_ANY, pos=self.GetScreenPosition(), size=self.GetSize(),
style=wx.FRAME_NO_TASKBAR | wx.FRAME_FLOAT_ON_PARENT
)
self.translucent_background.SetTransparent(150)
self.translucent_background.SetBackgroundColour(wx.BLACK)
self.legend_image = StaticImage(self, wx.ID_ANY, Image.new("RGBA", self.GetSize().Get()))
self.legend_image.SetSize(self.GetSize())
self.legend_image.fit = False
main_sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(main_sizer)
main_sizer.Add(self.legend_image, 0, wx.EXPAND | wx.BOTTOM, 0)
self.legend_image.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.legend_image.Bind(wx.EVT_MOTION, self.OnMotion)
self.legend_image.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.legend_image.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.OnCaptureLost)
self.legend_image.Bind(wx.EVT_RIGHT_DOWN, self.OnRightClick)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
self.Bind(EVT_LEGEND_RENDERED, self.OnLegendRendered)
self.translucent_background.Bind(wx.EVT_LEFT_DOWN, self.OnBackgroundFocus)
self.translucent_background.Bind(wx.EVT_RIGHT_DOWN, self.OnBackgroundFocus)
parent = self.GetParent()
while parent is not None:
parent.Bind(wx.EVT_MOVE, self.OnMove)
parent.Bind(wx.EVT_PAINT, self.OnPaintParent)
parent = parent.GetParent()
self.reset = True
def OnBackgroundFocus(self, event: wx.MouseEvent):
self.legend_image.SetFocus()
wx.PostEvent(self.legend_image, event)
def OnDestroy(self, event):
parent = self.GetParent()
while parent is not None:
parent.Unbind(wx.EVT_MOVE)
parent.Unbind(wx.EVT_PAINT)
parent = parent.GetParent()
event.Skip()
def OnLegendRendered(self, event: LegendRenderEvent):
self.legend_image.image = event.image
self.Refresh()
def CalculateProportions(self):
canvas_size = self.canvas.GetSize()
size = self.GetSize()
        center = wx.Point(self.start_pos.x + size.x / 2, self.start_pos.y + size.y / 2)
min_x = (size.x / 2) / canvas_size.x
min_y = (size.y / 2) / canvas_size.y
max_x = (canvas_size.x - size.x / 2) / canvas_size.x
max_y = (canvas_size.y - size.y / 2) / canvas_size.y
self.width = center.x / canvas_size.x
if self.width <= min_x:
self.width = 0.0
elif self.width >= max_x:
self.width = 1.0
self.height = center.y / canvas_size.y
if self.height <= min_y:
self.height = 0.0
elif self.height >= max_y:
self.height = 1.0
def RepaintLegend(self):
canvas_pos = self.canvas.GetScreenPosition()
canvas_size = self.canvas.GetSize()
size = self.GetSize()
if self.reset and self.IsShown():
self.start_pos = wx.Point(0, canvas_size.y - size.y)
self.CalculateProportions()
self.reset = False
x = canvas_pos.x + canvas_size.x * self.width - size.x / 2
y = canvas_pos.y + canvas_size.y * self.height - size.y / 2
if x < canvas_pos.x:
x = canvas_pos.x
elif x + size.x > canvas_pos.x + canvas_size.x:
x = canvas_pos.x + canvas_size.x - size.x
if y < canvas_pos.y:
y = canvas_pos.y
elif y + size.y > canvas_pos.y + canvas_size.y:
y = canvas_pos.y + canvas_size.y - size.y
new_pos = wx.Point(x, y)
self.SetPosition(new_pos)
self.translucent_background.SetPosition(new_pos)
new_size = wx.Size(self.max_size)
if canvas_size.x < self.max_size.x:
new_size.SetWidth(canvas_size.x)
if canvas_size.y < self.max_size.y:
new_size.SetHeight(canvas_size.y)
self.legend_image.SetSize(new_size)
self.SetSize(new_size)
self.translucent_background.SetSize(new_size)
self.translucent_background.Refresh()
self.legend_image.Refresh()
def OnMove(self, event):
self.RepaintLegend()
event.Skip()
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self)
dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0), wx.BRUSHSTYLE_TRANSPARENT))
dc.Clear()
trans_dc = wx.BufferedPaintDC(self.translucent_background)
trans_dc.Clear()
trans_dc.SetBrush(wx.BLACK_BRUSH)
trans_dc.DrawRectangle(0, 0, *self.GetSize().Get())
self.RepaintLegend()
event.Skip()
def OnPaintParent(self, event):
self.Refresh()
event.Skip()
def OnLeftDown(self, event):
self.dragging = True
self.legend_image.CaptureMouse()
def OnMotion(self, event: wx.MouseEvent):
canvas_pos = self.canvas.GetScreenPosition()
canvas_size = self.canvas.GetSize()
if self.dragging and event.LeftIsDown():
if self.mouse_pos.x != -1 and self.mouse_pos.y != -1:
pos = self.GetPosition()
new_pos = wx.Point(pos.x + event.GetX() - self.mouse_pos.x, pos.y + event.GetY() - self.mouse_pos.y)
size = self.GetSize()
if new_pos.x < canvas_pos.x:
new_pos.x = canvas_pos.x
if new_pos.y < canvas_pos.y:
new_pos.y = canvas_pos.y
if new_pos.x + size.x > canvas_pos.x + canvas_size.x:
new_pos.x = canvas_pos.x + canvas_size.x - size.x
if new_pos.y + size.y > canvas_pos.y + canvas_size.y:
new_pos.y = canvas_pos.y + canvas_size.y - size.y
self.SetPosition(new_pos)
self.translucent_background.SetPosition(new_pos)
else:
self.mouse_pos = event.GetPosition()
else:
self.mouse_pos = wx.DefaultPosition
def OnLeftUp(self, event):
if self.legend_image.HasCapture():
self.dragging = False
self.legend_image.ReleaseMouse()
current_pos = self.GetPosition()
canvas_pos = self.canvas.GetScreenPosition()
self.start_pos = wx.Point(current_pos.x - canvas_pos.x, current_pos.y - canvas_pos.y)
self.CalculateProportions()
self.RepaintLegend()
def OnCaptureLost(self, event):
self.dragging = False
def OnRightClick(self, event):
menu = wx.Menu()
menu.Append(self.RESET_LEGEND, "Reset Legend")
menu.Bind(wx.EVT_MENU, self.OnPopupMenu)
self.PopupMenu(menu, event.GetPosition())
def OnPopupMenu(self, event: wx.MenuEvent):
id = event.GetId()
if id == self.RESET_LEGEND:
self.reset = True
self.RepaintLegend()
def ShowWindow(self):
self.mouse_pos = wx.Default
|
VRaviTheja/SDN-policy
|
flowgenerator/random_priority.py
|
Python
|
apache-2.0
| 180
| 0.005556
|
import random
def prio():
    action_lst = []
lim = 1000
for _ in range(lim):
k = random.randint(1, 201)
action_lst.append(k)
    return action_lst
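# Illustrative usage (editor's sketch):
#     priorities = prio()  # 1000 random priorities, each in [1, 201]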
|
tb-animator/tbtools
|
updater.py
|
Python
|
mit
| 5,495
| 0.008735
|
__author__ = 'Tom'
import pickle
import urllib2
import os
import pymel.core as pm
import project_data as prj
reload(prj)
class updater():
def __init__(self):
self.master_url = 'https://raw.githubusercontent.com/tb-animator/tbtools/master/'
self.realPath = os.path.realpath(__file__)
self.basename = os.path.basename(__file__)
self.base_dir = os.path.normpath(os.path.dirname(__file__))
self.data_file = "prj_files.poo"
self.out_files = []
self.local_project_info = self.load_project_data_from_local()
self.version = pm.optionVar.get('tb_version', self.local_project_info.version )
self.project_info = self.load_project_data_from_git()
def check_version(self):
if self.project_info.version > self.version:
updaterWindow().showUI()
print "where's the window"
def get_url_dir(self, dir):
print "in", dir
print self.base_dir
out = dir.replace(self.base_dir,self.master_url).replace("\\","/")
return out
def load_project_data(self):
data = pickle.load(open(os.path.join(self.base_dir,self.data_file), "rb" ))
return data
def load_project_data_from_git(self):
url = self.master_url + self.data_file
print url
data = pickle.load(urllib2.urlopen(url, "rb"))
return data
def load_project_data_from_local(self):
file_location = os.path.join(self.base_dir+"\\",self.data_file)
print file_location
data = pickle.load(open(file_location, "rb"))
return data
def create_url(self, item):
url = (self.master_url + item).replace("\\","/")
return url
def read_from_url(self, url):
lines = []
data = urllib2.urlopen(url)
for line in data:
lines.append(line)
return lines
def copy_from_url(self, url, fileName):
if fileName:
dirName = os.path.split(fileName)[0]
if not os.path.isdir(dirName):
print "making folder", dirName
os.mkdir(dirName)
'''
# read the target script from git
        file_data = self.read_from_url(url)
print "downloading:: ", fileName
if file_data:
# nukes the current file
f = open(fileName,"w")
# writes into the file from the url
f.writelines(file_data)
f.close()
'''
print "dowloading file:", fileName
remote_file = urllib2.urlopen(url)
localFile = open(fileName, 'wb')
localFile.write(remote_file.read())
localFile.close()
else:
print "no fileName"
def download_project_files(self, win, *args):
files = self.project_info.scripts
print "downloading module to ", self.base_dir
for fileName in files:
# print self.base_dir, fileName
local_dir = '%s\%s' % (self.base_dir,fileName)
url = self.create_url(fileName)
# increment progress bar
win.step_bar()
# set current downloading label
win.set_label(fileName)
try:
self.copy_from_url(url, local_dir)
except:
print "skipping", url
win.finish_bar()
pm.optionVar(floatValue=('tb_version', self.project_info.version) )
class updaterWindow():
def __init__(self):
self.project_data = updater().project_info
def set_label(self, text=""):
pm.text(self.file_text, edit=True, label=text)
def step_bar(self,):
pm.progressBar(self.progress_bar, edit=True, step=1)
def finish_bar(self):
max_value = pm.progressBar(self.progress_bar, query=True, maxValue=True)
        pm.progressBar(self.progress_bar, edit=True, progress=max_value)
pm.text(self.file_text, edit=True, label="Complete")
def showUI(self):
if pm.window("update", exists=True):
pm.deleteUI("update")
window = pm.window("update", title="tb tools update")
layout = pm.columnLayout(adjustableColumn=True )
pm.text(font="boldLabelFont",label="There's a new version")
pm.text(label=self.project_data.version)
pm.text(label="release notes")
pm.scrollField( editable=True, wordWrap=True, text=self.project_data.relaseNotes )
'''
for items in self.command_list:
self.command_widget(command_name=items, parent=layout)
'''
self.file_text = pm.text(label="")
self.progress_bar = pm.progressBar(maxValue=len(self.project_data.scripts)-1)
# pm.button( label='Delete all', parent=layout)
pm.button( label='Update',
command=lambda *args : updater().download_project_files(self),
parent=layout)
pm.button( label='Ignore this version', command=('cmds.deleteUI(\"' + window + '\", window=True)') ,
parent=layout)
pm.button( label='Close', command=('cmds.deleteUI(\"' + window + '\", window=True)') , parent=layout)
pm.setParent( '..' )
pm.showWindow(window)
def update_hotkeys():
try:
import tb_keyCommands as tb_hotKeys
reload(tb_hotKeys)
tb_hotKeys.hotkey_tool().update_commands()
except:
print "warning, hotkey update failed, please restart maya"
|
oicr-ibc/cssscl
|
cssscl/configure.py
|
Python
|
gpl-3.0
| 1,425
| 0.006316
|
import pymongo
import getpass
import os
import base64
import ConfigParser
import sys
from database import *
from pymongo.errors import DuplicateKeyError
db, logger = None, None
def setup_config(args):
'''Saves MongoDB settings to a configuration file'''
config = ConfigParser.SafeConfigParser()
config.add_section('MongoDB')
print 'Please enter the settings for your MongoDB server:'
config.set('MongoDB', 'host', args.host or raw_input('Host [localhost]: ') or 'localhost')
config.set('MongoDB', 'port', args.port or raw_input('Port [27017]: ') or '27017')
config.set('MongoDB', 'database', args.database or raw_input('Database [cssscl]: ') or 'cssscl')
    #config.set('MongoDB', 'username', args.username or raw_input('Username [none]: '))
#config.set('MongoDB', 'password', args.password or getpass.getpass('Password [none]: '))
# Writing our configuration file
with open(os.path.expanduser('~/.cssscl/cssscl.cfg'), 'wb') as configfile:
config.write(configfile)
def main(args):
'''Setup MongoDB for use by cssscl'''
global db, logger
logger = args.logging.getLogger(__name__)
# Setup config files
setup_config(args)
db = connect(args)
logger.info('Done!')
if __name__ == '__main__':
print 'This program should be run as part of the cssscl package:\n\t$ cssscl configure -h\n\tor\n\t$ /path/to/cssscl/bin/cssscl configure -h'
|
Mellthas/quodlibet
|
quodlibet/ext/songsmenu/website_search.py
|
Python
|
gpl-2.0
| 6,154
| 0.000163
|
# Copyright 2011-2018 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from urllib.parse import quote_plus
from gi.repository import Gtk
import quodlibet
from quodlibet import _
from quodlibet import qltk
from quodlibet.formats import AudioFile
from quodlibet.pattern import Pattern
from quodlibet.plugins.songsmenu import SongsMenuPlugin
from quodlibet.qltk import Icons
from quodlibet.qltk.cbes import StandaloneEditor
from quodlibet.qltk.x import SeparatorMenuItem
from quodlibet.util import connect_obj, print_w, print_d
from quodlibet.util import website
from quodlibet.util.path import uri_is_valid
from quodlibet.util.tags import USER_TAGS, MACHINE_TAGS
class WebsiteSearch(SongsMenuPlugin):
"""Loads a browser with a URL designed to search on tags of the song.
This may include a standard web search engine, eg Google, or a more
specific site look-up. The URLs are customisable using tag patterns.
"""
PLUGIN_ICON = Icons.APPLICATION_INTERNET
PLUGIN_ID = "Website Search"
PLUGIN_NAME = _("Website Search")
PLUGIN_DESC_MARKUP = (_(
"Searches your choice of website using any song tags.\n"
"Supports patterns e.g. <tt>%(pattern-example)s</tt>.")
% {"pattern-example": "https://duckduckgo.com?q=<~artist~title>"}
)
# Here are some starters...
DEFAULT_URL_PATS = [
("Google song search",
"https://google.com/search?q=<artist~title>"),
("Wikipedia (en) artist entry",
"https://wikipedia.org/wiki/<albumartist|<albumartist>|<artist>>"),
("Musicbrainz album listing",
"https://musicbrainz.org/<musicbrainz_albumid|release/"
"<musicbrainz_albumid>|search?query=<album>&type=release>"),
("Discogs album search",
"https://www.discogs.com/search?type=release&artist="
"<albumartist|<albumartist>|<artist>>&title=<album>"),
("Youtube video search",
"https://www.youtube.com/results?search_query=<artist~title>"),
("Go to ~website", "<website>"),
]
PATTERNS_FILE = os.path.join(
quodlibet.get_user_dir(), 'lists', 'searchsites')
_no_launch = False
def __set_site(self, name):
self.chosen_site = name
def get_url_pattern(self, key):
"""Gets the pattern for a given key"""
return dict(self._url_pats).get(key, self.DEFAULT_URL_PATS[0][1])
@classmethod
def edit_patterns(cls, button):
def valid_uri(s):
# TODO: some pattern validation too (that isn't slow)
try:
p = Pattern(s)
return (p and uri_is_valid(s))
except ValueError:
return False
win = StandaloneEditor(filename=cls.PATTERNS_FILE,
title=_("Search URL patterns"),
initial=cls.DEFAULT_URL_PATS,
validator=valid_uri)
win.show()
@classmethod
def PluginPreferences(cls, parent):
hb = Gtk.HBox(spacing=3)
hb.set_border_width(0)
button = qltk.Button(_("Edit search URLs"), Icons.EDIT)
button.connect("clicked", cls.edit_patterns)
hb.pack_start(button, True, True, 0)
hb.show_all()
return hb
def _get_saved_searches(self):
filename = self.PATTERNS_FILE + ".saved"
self._url_pats = StandaloneEditor.load_values(filename)
# Failing all else...
if not len(self._url_pats):
print_d("No saved searches found in %s. Using defaults." %
filename)
self._url_pats = self.DEFAULT_URL_PATS
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.chosen_site = None
self._url_pats = []
submenu = Gtk.Menu()
self._get_saved_searches()
for name, url_pat in self._url_pats:
item = Gtk.MenuItem(label=name)
connect_obj(item, 'activate', self.__set_site, name)
submenu.append(item)
# Add link to editor
configure = Gtk.MenuItem(label=_(u"Configure Searches…"))
connect_obj(configure, 'activate', self.edit_patterns, configure)
submenu.append(SeparatorMenuItem())
submenu.append(configure)
if submenu.get_children():
self.set_submenu(submenu)
else:
self.set_sensitive(False)
def plugin_songs(self, songs):
# Check this is a launch, not a configure
if self.chosen_site:
url_pat = self.get_url_pattern(self.chosen_site)
pat = Pattern(url_pat)
# Remove Nones, and de-duplicate collection
urls = set(filter(None, (website_for(pat, s) for s in songs)))
if not urls:
print_w("Couldn't build URLs using \"%s\"."
"Check your pattern?" % url_pat)
return False
print_d("Got %d websites from %d songs" % (len(urls), len(songs)))
if not self._no_launch:
for url in urls:
website(url)
return True
def website_for(pat, song):
"""Gets a utf-8 encoded string for a website from the given pattern"""
# Generate a sanitised AudioFile; allow through most tags
subs = AudioFile()
# See issue 2762
for k in (USER_TAGS + MACHINE_TAGS + ['~filename']):
vals = song.comma(k)
if vals:
try:
# Escaping ~filename stops ~dirname ~basename etc working
# But not escaping means ? % & will cause problems.
# Who knows what user wants to do with /, seems better raw.
subs[k] = (vals if k in ['website', '~filename']
else quote_plus(vals))
except KeyError:
print_d("Problem with %s tag values: %r" % (k, vals))
return pat.format(subs) or None
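# Illustrative result (editor's sketch; the song's tag values are made up):
#     pat = Pattern("https://google.com/search?q=<artist~title>")
#     website_for(pat, song)  # -> e.g. "https://google.com/search?q=Some+Artist+-+A+Title"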
|
cbartz/git-lfs-swift-server
|
git_lfs_swift_server/__init__.py
|
Python
|
apache-2.0
| 637
| 0
|
# coding=utf-8
# Copyright 2017 Christopher Bartz <bartz@dkrz.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .server import app
|
guiandmag/scrum-django
|
board/forms.py
|
Python
|
apache-2.0
| 1,021
| 0
|
import django_filters
from django.contrib.auth import get_user_model
from .models import Sprint, Task
User = get_user_model()
class NullFilter(django_filters.BooleanFilter):
"""Filter on a field set as null or not."""
def filter(self, qs, value):
if value is not None:
return qs.filter(**{'%s__isnull' % self.name: value})
        return qs
class SprintFilter(django_filters.FilterSet):
end_min = django_filters.DateFilter(name='end', lookup_type='gte')
end_max = django_filters.DateFilter(name='end', lookup_type='lte')
class Meta:
model = Sprint
fields = ('end_min', 'end_max', )
class TaskFilter(django_filters.FilterSet):
backlog = NullFilter(name='sprint')
class Meta:
model = Task
fields = ('sprint', 'status', 'assigned', 'backlog', )
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filters['assigned'].extra.update(
{'to_field_name': User.USERNAME_FIELD})
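# Illustrative usage (editor's sketch; the queryset is hypothetical):
#     TaskFilter({'backlog': 'True'}, queryset=Task.objects.all()).qs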
|
Ezetowers/AppEngine_EventsManagement
|
load_tests/QueryGuest_Case/query_guests.py
|
Python
|
mit
| 1,268
| 0.002366
|
from lxml import etree
import sys
REQUEST_BODY_PART_1 = '<![CDATA[actualEvent='
REQUEST_BODY_PART_2 = '&queryEmail='
REQUEST_BODY_PART_3 = ']]>'
CONTENT_TYPE = 'Content-type: application/x-www-form-urlencoded'
def usage():
print "python create_test_case [URL]"\
" [EVENT_NAME] [AMOUNT_CASES] [TEST_CASE_FILENAME]"
def main():
if len(sys.argv) != 5:
        usage()
        return
root = etree.Element('testcases')
url = sys.argv[1]
event = sys.argv[2]
amount_cases = int(sys.argv[3])
test_case_filename = sys.argv[4]
for case in range(1,amount_cases):
case_node = etree.Element('case')
etree.SubElement(case_node, 'url').text = url + "/query_guest"
        etree.SubElement(case_node, 'method').text = 'POST'
body = REQUEST_BODY_PART_1 + event + REQUEST_BODY_PART_2 + "Email" + str(case) + REQUEST_BODY_PART_3
etree.SubElement(case_node, 'body').text = body
etree.SubElement(case_node, 'add_header').text = CONTENT_TYPE
root.append(case_node)
etree.ElementTree(root).write(test_case_filename,
pretty_print=True,
                                  encoding='iso-8859-1')
# Line to indicate that this is the main
if __name__ == "__main__":
main()
|
inspectorbean/spat
|
reqs/apps.py
|
Python
|
gpl-3.0
| 83
| 0
|
from django.apps import AppConfig
class ReqsConfig(AppConfig):
name = 'reqs'
|
jeffbuttars/pcm
|
pcmpy/cmds/__init__.py
|
Python
|
mit
| 876
| 0.001142
|
import sys
import os
import importlib
import glob
# Import and instantiate each Cmd object.
_this_dir = os.path.dirname(__file__)
_this_mod = os.path.basename(_this_dir)
def build_cmds(sub_parser):
cmd_objs = {}
imlist = glob.glob(os.path.join(_this_dir, "*.py"))
imlist.remove(os.path.join(_this_dir, "__init__.py"))
imlist.remove(os.path.join(_this_dir, "base.py"))
imlist = [os.path.basename(x) for x in imlist]
imlist = [os.path.splitext(x)[0] for x in imlist]
for im in imlist:
# print(im)
        mod = importlib.import_module("pcm." + _this_mod + '.' + im)
if hasattr(mod, 'Cmd'):
# print("Found Command: ", mod.Cmd.name)
cmd_objs[mod.Cmd.name] = mod.Cmd(sub_parser)
cmd_objs[mod.Cmd.name].build()
# end for im in imlist
# print(cmd_objs)
return cmd_objs
#build_cmds()
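# Illustrative usage (editor's sketch; `parser` is a hypothetical argparse.ArgumentParser):
#     cmds = build_cmds(parser.add_subparsers())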
|
nateprewitt/werkzeug
|
werkzeug/serving.py
|
Python
|
bsd-3-clause
| 25,464
| 0.000589
|
# -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
changed and each time you kill the server using ``^C`` you get an
`KeyboardInterrupt` error. While the latter is easy to solve the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it an `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import ssl
import signal
def _get_openssl_crypto_module():
try:
from OpenSSL import crypto
except ImportError:
raise TypeError('Using ad-hoc certificates requires the pyOpenSSL '
'library.')
else:
return crypto
try:
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from socketserver import ThreadingMixIn, ForkingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug._compat import reraise, wsgi_encoding_dance
from werkzeug.urls import url_parse, url_unquote
from werkzeug.exceptions import InternalServerError
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = self.server.ssl_context is None and 'http' or 'https'
path_info = url_unquote(request_url.path)
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'werkzeug.server.shutdown': shutdown_server,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': wsgi_encoding_dance(path_info),
'QUERY_STRING': wsgi_encoding_dance(request_url.query),
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.client_address[0],
'REMOTE_PORT': self.client_address[1],
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
if request_url.netloc:
environ['HTTP_HOST'] = request_url.netloc
return environ
def run_wsgi(self):
if self.headers.get('Expect', '').lower().strip() == '100-continue':
self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
self.environ = environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert isinstance(data, bytes), 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b'')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e)
except Exception:
if self.server.ssl_context is None or not is_ssl_error():
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
# reloader activ
|
tu-darmstadt-ros-pkg/hector_diagnostics
|
hector_computer_monitor/scripts/cpu_monitor.py
|
Python
|
bsd-3-clause
| 31,998
| 0.013938
|
#!/usr/bin/env python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##\author Kevin Watts
from __future__ import with_statement
import roslib
#roslib.load_manifest('pr2_computer_monitor')
import rospy
import traceback
import threading
from threading import Timer
import sys, os, time
from time import sleep
import subprocess
import string
import socket
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
##### monkey-patch to suppress threading error message in python 2.7.3
##### See http://stackoverflow.com/questions/13193278/understand-python-threading-bug
if sys.version_info[:3] == (2, 7, 3):
import threading
threading._DummyThread._Thread__stop = lambda x: 42
#####
stat_dict = { 0: 'OK', 1: 'Warning', 2: 'Error' }
# Output entire IPMI data set
# Thresholds are in degrees C; the defaults mirror the MB/BP/FP limits used below.
def check_ipmi(warm_threshold=60.0, hot_threshold=75.0):
diag_vals = []
diag_msgs = []
diag_level = DiagnosticStatus.OK
try:
p = subprocess.Popen('sudo ipmitool sdr',
stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
diag_level = DiagnosticStatus.ERROR
diag_msgs = [ 'ipmitool Error' ]
diag_vals = [ KeyValue(key = 'IPMI Error', value = stderr) ]
return diag_vals, diag_msgs, diag_level
lines = stdout.split('\n')
if len(lines) < 2:
diag_vals = [ KeyValue(key = 'ipmitool status', value = 'No output') ]
diag_msgs = [ 'No ipmitool response' ]
diag_level = DiagnosticStatus.ERROR
return diag_vals, diag_msgs, diag_level
for ln in lines:
if len(ln) < 3:
continue
words = ln.split('|')
if len(words) < 3:
continue
name = words[0].strip()
ipmi_val = words[1].strip()
stat_byte = words[2].strip()
# CPU temps
if words[0].startswith('CPU') and words[0].strip().endswith('Temp'):
if words[1].strip().endswith('degrees C'):
tmp = ipmi_val.rstrip(' degrees C').lstrip()
if unicode(tmp).isnumeric():
temperature = float(tmp)
diag_vals.append(KeyValue(key = name + ' (C)', value = tmp))
cpu_name = name.split()[0]
                        if temperature >= warm_threshold and temperature < hot_threshold:
diag_level = max(diag_level, DiagnosticStatus.WARN)
if diag_msgs.count('CPU Hot') == 0:
diag_msgs.append('CPU Warm')
                        if temperature >= hot_threshold: # CPU should shut down here
diag_level = max(diag_level, DiagnosticStatus.ERROR)
diag_msgs.append('CPU Hot')
# Don't keep CPU Warm in list if CPU is hot
if diag_msgs.count('CPU Warm') > 0:
idx = diag_msgs.index('CPU Warm')
diag_msgs.pop(idx)
else:
diag_vals.append(KeyValue(key = name, value = words[1]))
# MP, BP, FP temps
if name == 'MB Temp' or name == 'BP Temp' or name == 'FP Temp':
if ipmi_val.endswith('degrees C'):
tmp = ipmi_val.rstrip(' degrees C').lstrip()
diag_vals.append(KeyValue(key = name + ' (C)', value = tmp))
# Give temp warning
dev_name = name.split()[0]
if unicode(tmp).isnumeric():
temperature = float(tmp)
if temperature >= 60 and temperature < 75:
diag_level = max(diag_level, DiagnosticStatus.WARN)
diag_msgs.append('%s Warm' % dev_name)
if temperature >= 75:
diag_level = max(diag_level, DiagnosticStatus.ERROR)
diag_msgs.append('%s Hot' % dev_name)
else:
diag_level = max(diag_level, DiagnosticStatus.ERROR)
diag_msgs.append('%s Error' % dev_name)
else:
diag_vals.append(KeyValue(key = name, value = ipmi_val))
# CPU fan speeds
if (name.startswith('CPU') and name.endswith('Fan')) or name == 'MB Fan':
if ipmi_val.endswith('RPM'):
rpm = ipmi_val.rstrip(' RPM').lstrip()
if unicode(rpm).isnumeric():
if int(rpm) == 0:
diag_level = max(diag_level, DiagnosticStatus.ERROR)
diag_msgs.append('CPU Fan Off')
diag_vals.append(KeyValue(key = name + ' RPM', value = rpm))
else:
diag_vals.append(KeyValue(key = name, value = ipmi_val))
# If CPU is hot we get an alarm from ipmitool, report that too
# CPU should shut down if we get a hot alarm, so report as error
if name.startswith('CPU') and name.endswith('hot'):
if ipmi_val == '0x01':
diag_vals.append(KeyValue(key = name, value = 'OK'))
else:
diag_vals.append(KeyValue(key = name, value = 'Hot'))
diag_level = max(diag_level, DiagnosticStatus.ERROR)
diag_msgs.append('CPU Hot Alarm')
except Exception, e:
diag_vals.append(KeyValue(key = 'Exception', value = traceback.format_exc()))
diag_level = DiagnosticStatus.ERROR
diag_msgs.append('Exception')
return diag_vals, diag_msgs, diag_level
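# Illustrative 'ipmitool sdr' row as parsed above (exact format assumed):
#   CPU1 Temp        | 46 degrees C      | ok
# yields words[0] = 'CPU1 Temp', words[1] = '46 degrees C', words[2] = 'ok'.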
##\brief Check CPU core temps
##
## Use 'find /sys -name temp1_input' to find cores
## Read from every core, divide by 1000
def check_core_temps(sys_temp_strings, warm_threshold, hot_threshold):
diag_vals = []
diag_level = 0
diag_msgs = []
for index, temp_str in enumerate(sys_temp_strings):
if len(temp_str) < 5:
continue
cmd = 'cat %s' % temp_str
p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
stderr = subprocess.PIPE, shell = True)
stdout, stderr = p.communicate()
retcode = p.returncode
if retcode != 0:
            diag_level = DiagnosticStatus.ERROR
|
SickGear/SickGear
|
lib/simplejson/__init__.py
|
Python
|
gpl-3.0
| 24,480
| 0.001348
|
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility back to Python 2.5 and (currently) has significant performance
advantages, even without using the optional C extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print(json.dumps("\"foo\bar"))
"\"foo\bar"
>>> print(json.dumps(u'\u1234'))
"\u1234"
>>> print(json.dumps('\\'))
"\\"
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
{"a": 0, "b": 0, "c": 0}
>>> from simplejson.compat import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> obj = [1,2,3,{'4': 5, '6': 7}]
>>> json.dumps(obj, separators=(',',':'), sort_keys=True)
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' '))
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from simplejson.compat import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
    (1+2j)
    >>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError('Object of type %s is not JSON serializable' %
... obj.__class__.__name__)
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 3 (char 2)
Parsing multiple documents serialized as JSON lines (newline-delimited JSON)::
>>> import simplejson as json
>>> def loads_lines(docs):
... for doc in docs.splitlines():
... yield json.loads(doc)
...
>>> sum(doc["count"] for doc in loads_lines('{"count":1}\n{"count":2}\n{"count":3}\n'))
6
Serializing multiple objects to JSON lines (newline-delimited JSON)::
>>> import simplejson as json
>>> def dumps_lines(objs):
... for obj in objs:
... yield json.dumps(obj, separators=(',',':')) + '\n'
...
>>> ''.join(dumps_lines([{'count': 1}, {'count': 2}, {'count': 3}]))
'{"count":1}\n{"count":2}\n{"count":3}\n'
"""
from __future__ import absolute_import
__version__ = '3.16.1'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict', 'simple_first', 'RawJSON'
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from .errors import JSONDecodeError
from .raw_json import RawJSON
from .decoder import JSONDecoder
from .encoder import JSONEncoder, JSONEncoderForHTML
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
from . import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from ._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
iterable_as_array=False,
bigint_as_string=False,
item_sort_key=None,
for_json=False,
ignore_nan=False,
int_as_string_bitcount=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, ignore_nan=False, int_as_string_bitcount=None,
iterable_as_array=False, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If *skipkeys* is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If *ensure_ascii* is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If *check_circular* is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If *allow_nan* is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the original JSON specification, instead of using
the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). See
*ignore_nan* for ECMA-262 compliant behavior.
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, *separators* should be an
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
compact JSON representation, you should specify ``(',', ':')`` to eliminate
whitespace.
*encoding* is the character encoding for str instances, default is UTF-8.
*default(obj)* is a function that should return a serializable version
of obj or raise ``TypeError``. The default simply raises ``TypeError``.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *iterable_as_array* is true (default: ``False``),
    any object not in the above table that implements ``__iter__()``
    will be encoded as a JSON array.
|
jabber-at/hp
|
hp/core/tests/base.py
|
Python
|
gpl-3.0
| 8,383
| 0.002863
|
# -*- coding: utf-8 -*-
#
# This file is part of the jabber.at homepage (https://github.com/jabber-at/hp).
#
# This project is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along with this project. If not, see
# <http://www.gnu.org/licenses/>.
import os
import re
from contextlib import contextmanager
from unittest import mock
from celery import task
from pyvirtualdisplay import Display
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import TestCase as DjangoTestCase
VIRTUAL_DISPLAY = os.environ.get('VIRTUAL_DISPLAY', 'y').lower().strip() == 'y'
class HomepageTestCaseMixin(object):
def assertIsTask(self, t, expected):
self.assertEqual(t, task(expected))
    def assertTaskCount(self, mocked, count):
        """Assert that `count` Celery tasks have been called."""
self.assertEqual(mocked.call_count, count)
def assertNoTasks(self, mocked):
self.assertTaskCount(mocked, 0)
def assertTaskCall(self, mocked, task, *args, **kwargs):
self.assertTrue(mocked.called)
a, k = mocked.call_args
self.assertEqual(k, {}) # apply_async receives task args/kwargs as tuple/dict arg
instance, called_args, called_kwargs = a
self.assertIsTask(instance, task)
self.assertEqual(args, called_args)
self.assertEqual(kwargs, called_kwargs)
@contextmanager
def mock_celery(self):
def run(self, args, kwargs):
return self.run(*args, **kwargs)
with mock.patch('celery.app.task.Task.apply_async', side_effect=run, autospec=True) as mocked:
yield mocked
class SeleniumMixin(object):
@classmethod
def setUpClass(cls):
super().setUpClass()
if VIRTUAL_DISPLAY:
cls.vdisplay = Display(visible=0, size=(1024, 768))
cls.vdisplay.start()
cls.selenium = WebDriver(executable_path=settings.GECKODRIVER_PATH)
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
if VIRTUAL_DISPLAY:
cls.vdisplay.stop()
super().tearDownClass()
class wait_for_css_property(object):
def __init__(self, elem, prop, value):
self.elem = elem
self.prop = prop
self.value = value
def __call__(self, driver):
if self.elem.value_of_css_property(self.prop) == self.value:
return self.elem
else:
return False
def wait_for_display(self, elem, wait=2):
WebDriverWait(self.selenium, wait).until(lambda d: elem.is_displayed())
def wait_for_page_load(self, wait=2):
WebDriverWait(self.selenium, wait).until(lambda driver: driver.find_element_by_tag_name('body'))
def wait_for_valid_form(self, form=None, wait=2):
"""Wait until a form becomes valid according to HTML5 form validation.
The registration form becomes valid only after a split second, for some reason.
"""
if form is None:
form = self.find('form')
WebDriverWait(self.selenium, wait).until(
lambda driver: self.selenium.execute_script('return arguments[0].checkValidity() === true', form))
def wait_for_focus(self, elem):
# when an element gets focus, it turns blue:
wait = WebDriverWait(self.selenium, 10)
wait.until(self.wait_for_css_property(elem, 'border-top-color', 'rgb(128, 189, 255)'))
def wait_for_invalid(self, elem):
wait = WebDriverWait(self.selenium, 10)
wait.until(self.wait_for_css_property(elem, 'border-top-color', 'rgb(220, 53, 69)'))
def wait_for_valid(self, elem):
wait = WebDriverWait(self.selenium, 10)
wait.until(self.wait_for_css_property(elem, 'border-top-color', 'rgb(40, 167, 69)'))
def find(self, selector):
"""Find an element by CSS selector."""
return self.selenium.find_element_by_css_selector(selector)
def get_classes(self, elem):
"""Get CSS classes from the passed Element."""
return re.split(r'\s+', elem.get_attribute('class').strip())
def get_validity(self, elem):
"""Get validity object from a HTML5 form field."""
return self.selenium.execute_script('return arguments[0].validity', elem)
def get_valid(self, elem):
val = self.get_validity(elem)
return val['valid']
def assertNoElementExists(self, selector, wait=0):
"""Assert that no element with the passed selector is present on the page."""
if wait:
with self.assertRaises(TimeoutException):
WebDriverWait(self.selenium, wait).until(lambda d: self.find(selector))
else:
with self.assertRaises(NoSuchElementException):
self.find(selector)
def assertDisplayed(self, elem):
if isinstance(elem, str):
elem = self.find(elem)
self.assertTrue(elem.is_displayed())
def assertNotDisplayed(self, elem):
if isinstance(elem, str):
elem = self.find(elem)
self.assertFalse(elem.is_displayed())
def assertClass(self, elem, cls):
"""Assert that an element has a CSS class."""
self.assertIn(cls, self.get_classes(elem))
def assertNotClass(self, elem, cls):
"""Assert that an element does **not** have a CSS class."""
self.assertNotIn(cls, self.get_classes(elem))
def assertCSSBorderColor(self, elem, color):
"""Assert that an element has a given border color."""
self.assertEqual(elem.value_of_css_property('border-right-color'), color)
self.assertEqual(elem.value_of_css_property('border-left-color'), color)
self.assertEqual(elem.value_of_css_property('border-top-color'), color)
self.assertEqual(elem.value_of_css_property('border-bottom-color'), color)
def assertNotValidated(self, fg, elem):
"""Assert that a Bootstrap input element is not validated."""
self.assertNotClass(fg, 'was-validated')
for feedback in fg.find_elements_by_css_selector('.invalid-feedback'):
self.assertFalse(feedback.is_displayed())
if self.selenium.switch_to.active_element != elem: # passed element is not currently active
self.assertCSSBorderColor(elem, 'rgb(206, 212, 218)')
else:
self.assertCSSBorderColor(elem, 'rgb(128, 189, 255)')
def assertInvalid(self, fg, elem, *errors):
"""Assert that a Bootstrap input element validates as invalid."""
self.assertClass(fg, 'was-validated')
errors = set(['invalid-%s' % e for e in errors])
for feedback in fg.find_elements_by_css_selector('.invalid-feedback'):
classes = set(self.get_classes(feedback))
if errors & classes:
self.assertTrue(feedback.is_displayed(), '.%s is not displayed' % ('.'.join(classes)))
else:
self.assertFalse(feedback.is_displayed(), '.%s is displayed' % ('.'.join(classes)))
self.wait_for_invalid(elem)
self.assertFalse(self.get_valid(elem))
def assertValid(self, fg, elem):
"""Assert that a Bootstrap input element validates as valid."""
self.assertClass(fg, 'was-validated')
for feedback in fg.find_elements_by_css_selector('.invalid-feedback'):
self.assertFalse(feedback.is_displayed())
self.wait_for_valid(elem)
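# Sketch of how these mixins are typically combined (test body hypothetical):
# class FrontPageTest(SeleniumMixin, HomepageTestCaseMixin, StaticLiveServerTestCase):
#     def test_front_page(self):
#         self.selenium.get(self.live_server_url)
#         self.wait_for_page_load()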
|
devoid/nova
|
nova/tests/integrated/v3/test_lock_server.py
|
Python
|
apache-2.0
| 1,568
| 0
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import test_servers
class LockServerSamplesJsonTest(test_servers.ServersSampleBase):
extension_name = "os-lock-server"
def setUp(self):
"""setUp Method for LockServer api samples extension
        This method creates the server that will be used in each test.
"""
super(LockServerSamplesJsonTest, self).setUp()
self.uuid = self._post_server()
def test_post_lock_server(self):
# Get api samples to lock server request.
response = self._do_post('servers/%s/action' % self.uuid,
'lock-server', {})
self.assertEqual(response.status, 202)
def test_post_unlock_server(self):
# Get api samples to unlock server request.
self.test_post_lock_server()
response = self._do_post('servers/%s/action' % self.uuid,
'unlock-server', {})
self.assertEqual(response.status, 202)
|
noxora/flask-base
|
flask/lib/python3.4/site-packages/Crypto/__init__.py
|
Python
|
mit
| 1,836
| 0.001089
|
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Python Cryptography Toolkit
A collection of cryptographic modules implementing various algorithms
and protocols.
Subpackages:
Crypto.Cipher
Secret-key (AES, TDES, Salsa20, ChaCha20, CAST, Blowfish, ARC4) and public-key encryption (RSA PKCS#1) algorithms
Crypto.Hash
Hashing algorithms (SHA-1, SHA-2, SHA-3, BLAKE2, HMAC, MD5)
Crypto.IO
Encodings useful for cryptographic data (PEM, PKCS#8)
Crypto.Protocol
Cryptographic protocols (key derivation functions, Shamir's Secret Sharing scheme)
Crypto.PublicKey
Public-key generation, import, export (RSA, DSA, ECC)
Crypto.Signature
Public-key signature algorithms (RSA PKCS#1, DSA, ECDSA)
Crypto.Util
Various useful modules and functions (padding, ASN.1, XOR)
"""
__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util', 'Signature',
'IO', 'Math']
version_info = (3, 4, 5)
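# Minimal illustration of one subpackage (PyCryptodome-style API):
# from Crypto.Hash import SHA256
# print(SHA256.new(b'message').hexdigest())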
|
vileopratama/vitech
|
src/addons/hr_recruitment/models/hr_job.py
|
Python
|
mit
| 5,037
| 0.003574
|
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import _, api, fields, models
class Job(models.Model):
_inherit = "hr.job"
_name = "hr.job"
_inherits = {'mail.alias': 'alias_id'}
@api.model
def _default_address_id(self):
return self.env.user.company_id.partner_id
address_id = fields.Many2one(
'res.partner', "Job Location", default=_default_address_id,
help="Address where employees are working")
application_ids = fields.One2many('hr.applicant', 'job_id', "Applications")
application_count = fields.Integer(compute='_compute_application_count', string="Applications")
manager_id = fields.Many2one(
'hr.employee', related='department_id.manager_id', string="Department Manager",
readonly=True, store=True)
user_id = fields.Many2one('res.users', "Recruitment Responsible", track_visibility='onchange')
stage_ids = fields.Many2many(
'hr.recruitment.stage', 'job_stage_rel', 'job_id', 'stage_id',
'Job Stages',
default=[(0, 0, {'name': _('New')})])
document_ids = fields.One2many('ir.attachment', compute='_compute_document_ids', string="Applications")
documents_count = fields.Integer(compute='_compute_document_ids', string="Documents")
survey_id = fields.Many2one(
'survey.survey', "Interview Form",
help="Choose an interview form for this job position and you will be able to print/answer this interview from all applicants who apply for this job")
alias_id = fields.Many2one(
'mail.alias', "Alias", ondelete="restrict", required=True,
help="Email alias for this job position. New emails will automatically create new applicants for this job position.")
color = fields.Integer("Color Index")
def _compute_document_ids(self):
applicants = self.mapped('application_ids').filtered(lambda self: not self.emp_id)
app_to_job = dict((applicant.id, applicant.job_id.id) for applicant in applicants)
attachments = self.env['ir.attachment'].search([
'|',
'&', ('res_model', '=', 'hr.job'), ('res_id', 'in', self.ids),
'&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', applicants.ids)])
result = dict.fromkeys(self.ids, self.env['ir.attachment'])
for attachment in attachments:
if attachment.res_model == 'hr.applicant':
result[app_to_job[attachment.res_id]] |= attachment
else:
result[attachment.res_id] |= attachment
for job in self:
job.document_ids = result[job.id]
job.documents_count = len(job.document_ids)
@api.multi
def _compute_application_count(self):
        read_group_result = self.env['hr.applicant'].read_group([('job_id', 'in', self.ids)], ['job_id'], ['job_id'])
        result = dict((data['job_id'][0], data['job_id_count']) for data in read_group_result)
for job in self:
job.application_count = result.get(job.id, 0)
@api.model
def create(self, vals):
job = super(Job, self.with_context(alias_model_name='hr.applicant',
                                           mail_create_nolog=True,
                                           alias_parent_model_name=self._name)).create(vals)
job.alias_id.write({'alias_parent_thread_id': job.id, "alias_defaults": {'job_id': job.id}})
return job
@api.multi
def unlink(self):
# Cascade-delete mail aliases as well, as they should not exist without the job position.
aliases = self.mapped('alias_id')
res = super(Job, self).unlink()
aliases.unlink()
return res
def _auto_init(self, cr, context=None):
"""Installation hook to create aliases for all jobs and avoid constraint errors."""
return self.pool.get('mail.alias').migrate_to_alias(
cr, self._name, self._table, super(Job, self)._auto_init,
'hr.applicant', self._columns['alias_id'], 'name',
alias_prefix='job+', alias_defaults={'job_id': 'id'}, context=context)
@api.multi
def _track_subtype(self, init_values):
if 'state' in init_values and self.state == 'open':
return 'hr_recruitment.mt_job_new'
return super(Job, self)._track_subtype(init_values)
@api.multi
def action_print_survey(self):
return self.survey_id.action_print_survey()
@api.multi
def action_get_attachment_tree_view(self):
action = self.env.ref('base.action_attachment').read()[0]
action['context'] = {
'default_res_model': self._name,
'default_res_id': self.ids[0]
}
action['domain'] = ['|', '&', ('res_model', '=', 'hr.job'), ('res_id', 'in', self.ids), '&', ('res_model', '=', 'hr.applicant'), ('res_id', 'in', self.mapped('application_ids').ids)]
return action
@api.multi
def action_set_no_of_recruitment(self, value):
return self.write({'no_of_recruitment': value})
|
spigwitmer/mysqlproxy
|
mysqlproxy/packet.py
|
Python
|
bsd-3-clause
| 7,817
| 0.001407
|
"""
Wire-level packet handling
"""
from mysqlproxy.types import FixedLengthInteger, \
FixedLengthString, LengthEncodedInteger, \
RestOfPacketString
from mysqlproxy import capabilities
from StringIO import StringIO
__all__ = [
'PacketMeta', 'IncomingPacketChain', 'OutgoingPacketChain',
'Packet', 'OKPacket', 'ERRPacket', 'EOFPacket'
]
class PacketMeta(object):
"""
Useful packet metadata for chains
"""
def __init__(self, length, seq_id):
self.length = length
self.seq_id = seq_id
class IncomingPacketChain(object):
"""
List of packets containing one payload
"""
def __init__(self):
self.packet_meta = []
self.seq_id = 0
self.payload = None
def read_in(self, fde):
"""
Read in full payload
"""
total_read = 0
packet_length = FixedLengthInteger(3, 0xffffff)
seq_id = FixedLengthInteger(1)
self.payload = StringIO()
while packet_length.val == 0xffffff:
packet_length.read_in(fde, label=None)
seq_id.read_in(fde, label=None)
cur_payload = FixedLengthString(packet_length.val)
cur_payload.read_in(fde, label=None)
self.payload.write(cur_payload.val)
self.packet_meta.append(PacketMeta(packet_length.val, seq_id.val))
total_read += packet_length.val
self.seq_id = seq_id.val
self.payload.seek(0)
return total_read
@property
    def chain_length(self):
"""
Amount of packets needed to be read to retrieve
the entire payload
"""
return len(self.packet_meta)
@property
def total_length(self):
"""
Total payload length
"""
return sum([x.length for x in self.packet_meta])
class OutgoingPacketChain(object):
def __init__(self, start_seq_id=0):
self.fields = []
        self.start_seq_id = start_seq_id
def add_field(self, field, label='<unlabeled>'):
"""
Add field to payload
"""
self.fields.append((label, field))
def _write_packet_header(self, length, seq, fde):
"""
Write out packet header with given length
and sequence id to file-like fde
"""
length_field = FixedLengthInteger(3, length)
seq_field = FixedLengthInteger(1, seq)
length_field.write_out(fde, label=None)
seq_field.write_out(fde, label=None)
def write_out(self, fde):
"""
Write out full packet chain
"""
# TODO: impl is just outright terrible.
# Fix it in any way shape or form i don't care
sio = StringIO()
seq_id = self.start_seq_id
net_total_written = 0
total_written = 0
last_total_written = 0xffffff
for label, field in self.fields:
written = field.write_out(sio, label='\t%s' % label)
total_written += written
net_total_written += written
if total_written >= 0xffffff:
                self._write_packet_header(0xffffff, seq_id, fde)
                sio.seek(0)  # rewind before draining the first 0xffffff bytes
                fde.write(sio.read(0xffffff))
remaining_bytes = sio.read()
                sio.close()
                sio = StringIO(remaining_bytes)
                sio.seek(0, 2)  # append subsequent fields after the carried-over bytes
last_total_written = total_written
total_written -= 0xffffff
seq_id += 1
if last_total_written == 0xffffff:
self._write_packet_header(total_written, seq_id, fde)
sio.seek(0)
fde.write(sio.read(total_written))
net_total_written += total_written
return (net_total_written, seq_id)
class Packet(object):
"""
Interface class for extracting fields expected out of a single packet
or writing them out in order.
"""
def __init__(self, capabilities, **kwargs):
self.capabilities = capabilities
self.fields = []
self.seq_id = kwargs.pop('seq_id', 0)
def read_in(self, fde):
"""
Generic read-in of all fields
"""
ipc = IncomingPacketChain()
ipc.read_in(fde)
self.seq_id = ipc.seq_id
return self.read_in_internal(ipc.payload, ipc.total_length)
def read_in_internal(self, pl_fd, packet_size):
"""
This is what you actually want to extend to
do custom payload reading
"""
read_length = 0
for label, field in self.fields:
read_length += field.read_in(pl_fd, label='\t%s' % label)
return read_length
def write_out(self, fde):
"""
Generic write-out of all fields
"""
opc = OutgoingPacketChain(start_seq_id=self.seq_id)
for label, field in self.fields:
opc.add_field(field, label=label)
return opc.write_out(fde)
def get_field(self, field_of_interest):
"""
Return first field going by name `field_of_interest`
"""
for field_name, field in self.fields:
if field_name == field_of_interest:
return field
raise ValueError('field name %s does not exist' % field_of_interest)
class OKPacket(Packet):
"""
Generic OK packet, will most likely not be read in
"""
def __init__(self, capability_flags, affected_rows, last_insert_id, **kwargs):
super(OKPacket, self).__init__(capability_flags, **kwargs)
self.affected_rows = affected_rows
self.last_insert_id = last_insert_id
use_41 = capability_flags & capabilities.PROTOCOL_41
transactions = capability_flags & capabilities.TRANSACTIONS
if use_41 or transactions:
self.status_flags = kwargs.pop('status_flags', 0)
self.warnings = kwargs.pop('warnings', 0)
self.fields = [
('ok_header', FixedLengthInteger(1, 0)), # OK header
('affected_rows', LengthEncodedInteger(affected_rows)),
('last_insert_id', LengthEncodedInteger(last_insert_id))
]
if use_41:
self.fields += [
('status_flags', FixedLengthInteger(2, self.status_flags)),
('warnings', FixedLengthInteger(2, self.warnings))
]
elif transactions:
self.fields.append(('status_flags', FixedLengthInteger(2, self.status_flags)))
ok_message = kwargs.pop('info', 'k thanks')
self.fields.append(('ok_message', RestOfPacketString(ok_message)))
class ERRPacket(Packet):
"""
Error packet
"""
def __init__(self, capability_flags, error_code, error_msg, **kwargs):
super(ERRPacket, self).__init__(capability_flags, **kwargs)
self.error_code = error_code
self.error_msg = error_msg
self.fields = [
('err_header', FixedLengthInteger(1, 0xff)), # ERR header
('error_code', FixedLengthInteger(2, error_code))
]
if capability_flags & capabilities.PROTOCOL_41:
self.fields += [
('sql_state_flag', FixedLengthString(1, '#')),
('sql_state', FixedLengthString(5, kwargs.pop('sql_state', 'HY000')))
]
self.fields.append(('error_msg', RestOfPacketString(self.error_msg)))
class EOFPacket(Packet):
"""
EOF Packet
"""
def __init__(self, capability_flags, **kwargs):
super(EOFPacket, self).__init__(capability_flags, **kwargs)
self.fields = [
('eof_header', FixedLengthInteger(1, 0xfe)) # EOF header
]
if capability_flags & capabilities.PROTOCOL_41:
self.fields += [
('warnings', FixedLengthInteger(2, kwargs.pop('warnings', 0))),
('status_flags', FixedLengthInteger(2, kwargs.pop('status_flags', 0)))
]
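# Usage sketch (fde is any writable socket/file object; flag values assumed):
# pkt = OKPacket(capabilities.PROTOCOL_41, affected_rows=1, last_insert_id=0, seq_id=1)
# pkt.write_out(fde)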
|
openergy/openergy
|
ovbpclient/rest_client.py
|
Python
|
mit
| 4,045
| 0.002472
|
import time
import requests
from .exceptions import HttpError
from .json import json_loads
def check_rep(rep):
if (rep.status_code // 100) != 2:
raise HttpError(rep.text, rep.status_code)
def rep_to_json(rep):
check_rep(rep)
# we use our json loads for date parsing
return json_loads(rep.text)
class RestClient:
MAX_ITERATIONS = 100
def __init__(
self,
url,
login,
password,
verify_ssl=True
):
self.base_url = url.strip("/")
self.session = requests.Session()
self.session.auth = (login, password)
self.verify_ssl = verify_ssl
def list(self, path, params=None):
rep = self.session.get(
f"{self.base_url}/{path}/",
params=params,
verify=self.verify_ssl)
return rep_to_json(rep)
def retrieve(self, path, resource_id):
rep = self.session.get(
f"{self.base_url}/{path}/{resource_id}/",
verify=self.verify_ssl)
return rep_to_json(rep)
def create(self, path, data):
rep = self.session.post(
f"{self.base_url}/{path}/",
json=data,
verify=self.verify_ssl)
return rep_to_json(rep)
def partial_update(self, path, resource_id, data):
rep = self.session.patch(
f"{self.base_url}/{path}/{resource_id}/",
json=data,
verify=self.verify_ssl)
return rep_to_json(rep)
def update(self, path, resource_id, data):
rep = self.session.put(
f"{self.base_url}/{path}/{resource_id}/",
json=data,
verify=self.verify_ssl)
return rep_to_json(rep)
def detail_action(
self,
path,
resource_id,
http_method,
action_name,
params=None,
data=None,
return_json=True,
send_json=True):
rep = getattr(self.session, http_method.lower())(
f"{self.base_url}/{path}/{resource_id}/{action_name}/",
params=params,
json=data if send_json else None,
data=None if send_json else data,
verify=self.verify_ssl
)
if rep.status_code == 204:
return
if return_json:
return rep_to_json(rep)
check_rep(rep)
return rep.content
def list_action(
self,
path,
http_method,
action_name,
params=None,
data=None,
return_json=True,
send_json=True):
rep = getattr(self.session, http_method.lower())(
f"{self.base_url}/{path}/{action_name}/",
params=params,
json=data if send_json else None,
data=None if send_json else data,
verify=self.verify_ssl
)
if rep.status_code == 204:
return
if return_json:
return rep_to_json(rep)
        check_rep(rep)
return rep.content
def destroy(self, path, resource_id, params=None):
rep = self.session.delete(
f"{self.base_url}/{path}/{resource_id}/",
params=params,
verify=self.verify_ssl)
if rep.status_code == 204:
return
        return rep_to_json(rep)
def wait_for_on(self, timeout=10, freq=1):
start = time.time()
if timeout <= 0:
raise ValueError
while True:
if (time.time() - start) > timeout:
raise TimeoutError
try:
rep = self.session.get(
f"{self.base_url}/oteams/projects/",
params=dict(empty=True),
verify=self.verify_ssl)
if rep.status_code == 503:
raise TimeoutError
break
except (requests.exceptions.ConnectionError, TimeoutError):
pass
time.sleep(freq)
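# Usage sketch (URL and credentials are hypothetical):
# client = RestClient("https://api.example.com", "login", "password")
# client.wait_for_on(timeout=30)
# projects = client.list("oteams/projects")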
|
marratj/ansible
|
lib/ansible/modules/packaging/language/maven_artifact.py
|
Python
|
gpl-3.0
| 18,975
| 0.005586
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
#
# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
# as a reference and starting point.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: maven_artifact
short_description: Downloads an Artifact from a Maven Repository
version_added: "2.0"
description:
- Downloads an artifact from a maven repository given the maven coordinates provided to the module.
- Can retrieve snapshots or release versions of the artifact and will resolve the latest available
version if one is not available.
author: "Chris Schmidt (@chrisisbeef)"
requirements:
- "python >= 2.6"
- lxml
- boto if using a S3 repository (s3://...)
options:
group_id:
description:
- The Maven groupId coordinate
required: true
artifact_id:
description:
- The maven artifactId coordinate
required: true
version:
description:
- The maven version coordinate
required: false
default: latest
classifier:
description:
- The maven classifier coordinate
required: false
default: null
extension:
description:
- The maven type/extension coordinate
required: false
default: jar
repository_url:
description:
- The URL of the Maven Repository to download from.
- Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
required: false
default: http://repo1.maven.org/maven2
username:
description:
            - The username to authenticate as to the Maven Repository. Use the AWS secret key if the repository is hosted on S3.
required: false
default: null
aliases: [ "aws_secret_key" ]
password:
description:
            - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3.
required: false
default: null
aliases: [ "aws_secret_access_key" ]
dest:
description:
- The path where the artifact should be written to
- If file mode or ownerships are specified and destination path already exists, they affect the downloaded file
required: true
default: false
state:
description:
- The desired state of the artifact
required: true
default: present
choices: [present,absent]
timeout:
description:
- Specifies a timeout in seconds for the connection attempt
required: false
default: 10
version_added: "2.3"
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: "1.9.3"
keep_name:
description:
- If C(yes), the downloaded artifact's name is preserved, i.e the version number remains part of it.
- This option only has effect when C(dest) is a directory and C(version) is set to C(latest).
required: false
default: 'no'
choices: ['yes', 'no']
        version_added: "2.4"
extends_documentation_fragment:
- files
'''
EXAMPLES = '''
# Download the latest version of the JUnit framework artifact from Maven Central
- maven_artifact:
group_id: junit
artifact_id: junit
dest: /tmp/junit-latest.jar
# Download JUnit 4.11 from Maven Central
- maven_artifact:
group_id: junit
artifact_id: junit
version: 4.11
    dest: /tmp/junit-4.11.jar
# Download an artifact from a private repository requiring authentication
- maven_artifact:
group_id: com.company
artifact_id: library-name
repository_url: 'https://repo.company.com/maven'
username: user
password: pass
dest: /tmp/library-name-latest.jar
# Download a WAR File to the Tomcat webapps directory to be deployed
- maven_artifact:
group_id: com.company
artifact_id: web-app
extension: war
repository_url: 'https://repo.company.com/maven'
dest: /var/lib/tomcat7/webapps/web-app.war
# Keep a downloaded artifact's name, i.e. retain the version
- maven_artifact:
version: latest
artifact_id: spring-core
group_id: org.springframework
dest: /tmp/
keep_name: yes
'''
import hashlib
import os
import posixpath
import sys
from lxml import etree
try:
import boto3
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes
def split_pre_existing_dir(dirname):
'''
Return the first pre-existing directory and a list of the new directories that will be created.
'''
head, tail = os.path.split(dirname)
b_head = to_bytes(head, errors='surrogate_or_strict')
if not os.path.exists(b_head):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
else:
return head, [tail]
new_directory_list.append(tail)
return pre_existing_dir, new_directory_list
def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
'''
Walk the new directories list and make sure that permissions are as we would expect
'''
if new_directory_list:
working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
directory_args['path'] = working_dir
changed = module.set_fs_attributes_if_different(directory_args, changed)
changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
return changed
class Artifact(object):
def __init__(self, group_id, artifact_id, version, classifier='', extension='jar'):
if not group_id:
raise ValueError("group_id must be set")
if not artifact_id:
raise ValueError("artifact_id must be set")
self.group_id = group_id
self.artifact_id = artifact_id
self.version = version
self.classifier = classifier
if not extension:
self.extension = "jar"
else:
self.extension = extension
def is_snapshot(self):
return self.version and self.version.endswith("SNAPSHOT")
def path(self, with_version=True):
base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
if with_version and self.version:
base = posixpath.join(base, self.version)
return base
def _generate_filename(self):
filename = self.artifact_id + "-" + self.classifier + "." + self.extension
if not self.classifier:
filename = self.artifact_id + "." + self.extension
return filename
def get_filename(self, filename=None):
if not filename:
filename = self._generate_filename()
elif os.path.isdir(filename):
filename = os.path.join(filename, self._generate_filename())
return filename
def __str__(self):
result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
if self.classifier:
result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
elif self.extension != "jar":
result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
return result
@staticmethod
def parse(input):
parts = input.split(":")
if len(parts) >= 3:
|
Enether/python_wow
|
models/creatures/creature_template.py
|
Python
|
mit
| 3,514
| 0.002846
|
from sqlalchemy import Column, Integer, String, Text, Boolean, ForeignKey
from sqlalchemy.orm import relationship
from database.main import Base
class CreatureTemplateSchema(Base):
"""
This table holds the information about each creature in the game
entry - the unique ID of this creature
creature_name - the name of this creature
type - the type of creature. Currently supported:
monster - hostile creature
fnpc - friendly npc
vendor - vendor npc
level - the level of this creature
hp - the health points of this creature
mana - the mana points of this creature
armor - the armor points of this creature NOTE: If not specified, the creature will take the default armor for
his level from the creature_defaults table
min_dmg - the minimum damage this creature does per swing
max_dmg - the maximum damage this creature does per swing
quest_relation_id - the id of the quest this creature is related with (if applicable)
loot_table_id - the id of the loot this creature drops (in the loot_table table)
gossip - the text this creature says when he is talked to (if applicable)
respawnable - whether this creature will respawn on different game starts. Ex: Some special creatures should not be
killed more than once
Example:
    entry, creature name, type, level, hp, mana, armor, min_dmg, max_dmg, quest_relation_ID, loot_table_ID, gossip, respawnable
    1, Zimbab, "monster", 1, 10, 10, 50, 2, 4, 1, 1, "Hey there", False
    type is "monster", meaning this is a hostile NPC
Creature Level: 1 Zimbab, HP: 10, MANA: 10, Damage: 2-4.
He is needed to complete quest with ID 1 and the loot he drops is from the row in the loot_table DB table with
entry 1. If talking to him is enabled, he would say "Hey there".
"""
__tablename__ = 'creature_template'
entry = Column(Integer, primary_key=True)
name = Column(String(60))
type = Column(String(60))
level = Column(Integer)
health = Column(Integer)
mana = Column(Integer)
armor = Column(Integer)
min_dmg = Column(Integer)
max_dmg = Column(Integer)
quest_relation_id = Column(Integer, ForeignKey('quest_template.entry'))
loot_table_id = Column(Integer, ForeignKey('loot_table.entry'))
loot_table = relationship('LootTableSchema', foreign_keys=[loot_table_id])
vendor_inventory = relationship('NpcVendorSchema', uselist=True)
gossip = Column(Text)
respawnable = Column(Boolean)
def build_vendor_inventory(self):
"""
This function loads all the items that a certain vendor should sell.
We take them from the self.vendor_inventory list, which holds NpcVendor objects
:return: A dictionary of Key: "Item Name", Value: Tuple(1,2)
1 - Item object of class Item from items.py
2 - The count of the item
"""
vendor_inventory: {str: ('Item', int)} = {}
for product in self.vendor_inventory:
item: 'Item' = product.item.convert_to_item_object()
item_count: int = product.item_count
if product.price: # check if there is anything set to price that'll make us override
item.buy_price = product.price
vendor_inventory[item.name] = (item, item_count)
return vendor_inventory
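# Illustrative use (session and the item name 'Linen Cloth' are hypothetical):
# vendor = session.query(CreatureTemplateSchema).filter_by(type='vendor').first()
# item, count = vendor.build_vendor_inventory()['Linen Cloth']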
|
commontk/ctk-cli-indexer
|
ctk_cli_indexer/indexer.py
|
Python
|
apache-2.0
| 2,583
| 0.02168
|
import sys, datetime
import elasticsearch
INDEX = 'cli'
DOC_TYPE = 'cli'
def create_elasticsearch_index(es):
"""es should be an elasticsearch.Elasticsearch instance"""
es.indices.create(index = INDEX, ignore = 400) # ignore already existing index
es.indices.put_mapping(index = INDEX, doc_type = DOC_TYPE, body = {
DOC_TYPE : {
"_timestamp" : { "enabled" : True },
"properties" : {
"contributor": {
"type" : "string",
"index" : "not_analyzed"
},
"authors": {
"type" : "string",
"index" : "not_analyzed"
},
"license": {
"type" : "string",
"index" : "not_analyzed"
},
}
}})
# es.indices.delete('cli')
def update_elasticsearch_index(es, docs, source):
# retrieve existing documents
try:
existing = [doc['_id'] for doc in
es.search(INDEX, DOC_TYPE, body = dict(
query = dict(
term = dict(
source = source)
)),
fields = ['_id'],
size = 10000)['hits']['hits']]
except elasticsearch.exceptions.NotFoundError:
existing = []
# now update changed / add new documents:
for timestamp, doc in docs:
doc['source'] = source
doc_id = '%s:%s' % (source, doc['name'])
timestamp = datetime.datetime.fromtimestamp(timestamp)
try:
old = es.get(INDEX, doc_id, DOC_TYPE) # FIXME: with elasticsearch-2.1.1, this produces 404 warnings
except elasticsearch.exceptions.NotFoundError:
es.index(INDEX, DOC_TYPE, body = doc, id = doc_id, timestamp = timestamp)
            sys.stdout.write("added new document '%s'.\n" % doc_id)
else:
existing.remove(old['_id'])
if old['_source'] != doc:
es.index(INDEX, DOC_TYPE, body = doc, id = doc_id, timestamp = timestamp)
                sys.stdout.write("changed document '%s'.\n" % doc_id)
else:
sys.stdout.write("leaving '%s' alone, no change...\n" % doc_id)
# finally, remove existing documents that were not contained in `docs`:
for doc_id in existing:
sys.stdout.write("removing '%s', which is no longer in the '%s' JSON...\n" % (doc_id, source))
es.delete(INDEX, DOC_TYPE, doc_id)
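# Usage sketch (host and document payload are hypothetical):
# es = elasticsearch.Elasticsearch(['localhost:9200'])
# create_elasticsearch_index(es)
# update_elasticsearch_index(es, [(1450000000, {'name': 'MyCLI'})], source='example')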
|
FibercorpLabs/FibercorpDevops
|
cisco/aci/addUserDomain.py
|
Python
|
gpl-3.0
| 1,791
| 0.003908
|
from cobra.model.aaa import User, UserDomain
from createLocalUser import input_key_args as input_local_user
from createMo import *
def input_key_args(msg='\nPlease Specify User Domain:'):
print msg
return input_raw_input("User Domain Name", required=True)
def add_user_domain(parent_mo, user_domain):
"""The AAA domain to which the user belongs. """
aaa_userdomain = UserDomain(parent_mo, user_domain)
return aaa_userdomain
class AddSecurityDomain(CreateMo):
def __init__(self):
self.description = 'The AAA domain to which the user belongs. '
self.local_user = None
self.user_domain = None
super(AddSecurityDomain, self).__init__()
def set_cli_mode(self):
super(AddSecurityDomain, self).set_cli_mode()
self.parser_cli.add_argument('local_user', help='The name of a locally-authenticated user account.')
self.parser_cli.add_argument('user_domain', help='The name of the user domain')
def read_key_args(self):
self.local_user = self.args.pop('local_user')
self.user_domain = self.args.pop('user_domain')
def wizard_mode_input_args(self):
self.args['local_user'] = input_local_user('\nPlease Specify User Domain:', user_only=True, delete_function=self.delete)[0]
self.args['user_domain'] = input_key_args('')
def delete_mo(self):
        self.check_if_mo_exist('uni/userext/user-' + self.local_user + '/userdomain-', self.user_domain, UserDomain, description='User Domain')
super(AddSecurityDomain, self).delete_mo()
def main_function(self):
self.check_if_mo_exist('uni/userext/user-', self.local_user, User, 'User')
add_user_domain(self.mo, self.user_domain)
if __name__ == '__main__':
user_domain = AddSecurityDomain()
|
mozilla-services/FindMyDevice
|
test/buildAssert.py
|
Python
|
mpl-2.0
| 303
| 0
|
#! /usr/bin/python
import base64
import sys
f = open(sys.argv[1], "r")
items = []
for line in f.readlines():
if len(line.strip()) == 0:
continue
if line[0] == "{":
items.append(base64.b64encode(line.strip()))
else:
items.append(line.strip())
print ".".join(items)
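# Example: lines starting with '{' are base64-encoded, then all parts are
# joined with '.' to form a JWT/BrowserID-style assertion string.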
|
glennmatthews/cot
|
COT/tests/test_doctests.py
|
Python
|
mit
| 1,224
| 0
|
#!/usr/bin/env python
#
# test_doctests.py - test runner for COT doctests
#
# July 2016, Glenn F. Matthews
# Copyright (c) 2016-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Test runner for COT doctest tests."""
import logging
from logging import NullHandler
from doctest import DocTestSuite
from unittest import TestSuite
logging.getLogger('COT').addHandler(NullHandler())
def load_tests(*_):
"""Load doctests as unittest test suite.

    For the parameters, see :mod:`unittest`. The parameters are unused here.
"""
suite = TestSuite()
suite.addTests(DocTestSuite('COT.data_validation'))
suite.addTests(DocTestSuite('COT.utilities'))
return suite
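# unittest discovery picks this up via the load_tests protocol, e.g.:
#   python -m unittest COT.tests.test_doctests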
|
enthought/traitsgui
|
enthought/pyface/timer/do_later.py
|
Python
|
bsd-3-clause
| 2,116
| 0.014178
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought util package component>
#------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#
# Provides a simple function for scheduling some code to run at some time in
# the future (assumes application is wxPython based).
#
# Written by: David C. Morrill
#
# Date: 05/18/2005
#
# (c) Copyright 2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
# Import the toolkit specific version.
from enthought.pyface.toolkit import toolkit_object
DoLaterTimer = toolkit_object('timer.do_later:DoLaterTimer')
#-------------------------------------------------------------------------------
# Does something 50 milliseconds from now:
#-------------------------------------------------------------------------------
def do_later ( callable, *args, **kw_args ):
""" Does something 50 milliseconds from now.
"""
DoLaterTimer( 50, callable, args, kw_args )
#-------------------------------------------------------------------------------
# Does something after some specified time interval:
#-------------------------------------------------------------------------------
def do_after ( interval, callable, *args, **kw_args ):
""" Does something after some specified time interval.
"""
DoLaterTimer( interval, callable, args, kw_args )
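#-------------------------------------------------------------------------------
#  Usage sketch (requires a running wxPython event loop; callback hypothetical):
#
#  do_after( 1000, my_callback, 'arg', key = 'value' )
#-------------------------------------------------------------------------------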
|
damoxc/vsmtpd
|
plugins/ident/geoip.py
|
Python
|
gpl-3.0
| 1,203
| 0.000831
|
#
# ident/geoip.py
#
# Copyright (C) 2011 Damien Churchill <damoxc@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
import GeoIP
import logging
from vsmtpd.hooks import hook
from vsmtpd.plugins.plugin import PluginBase
log = logging.getLogger(__name__)
class Plugin(PluginBase):
def __init__(self):
self.gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE)
@hook
    def connect(self, connection):
country = self.gi.country_code_by_addr(connection.remote_ip)
connection.notes['geoip_country'] = country
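# Downstream plugins can then read the stored code, e.g.:
# country = connection.notes.get('geoip_country')  # e.g. 'DE', or None if unresolved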
|
Thoronador/ArxLibertatis
|
plugins/blender/arx_addon/dataFts.py
|
Python
|
gpl-3.0
| 10,486
| 0.005531
|
# Copyright 2014-2020 Arx Libertatis Team (see the AUTHORS file)
#
# This file is part of Arx Libertatis.
#
# Arx Libertatis is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Arx Libertatis is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Arx Libertatis. If not, see <http://www.gnu.org/licenses/>.
from ctypes import (
LittleEndianStructure,
c_char,
c_uint32,
c_int16,
c_int32,
c_float
)
from .dataCommon import SavedVec3, PolyTypeFlag
class UNIQUE_HEADER(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("path", c_char * 256),
("count", c_int32),
("version", c_float),
("uncompressedsize", c_int32),
("pad", c_int32 * 3)
]
class UNIQUE_HEADER3(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("path", c_char * 256), # In the c code this is currently in a separate struct
("check", c_char * 512)
]
class FAST_SCENE_HEADER(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("version", c_float),
("sizex", c_int32),
("sizez", c_int32),
("nb_textures", c_int32),
("nb_polys", c_int32),
("nb_anchors", c_int32),
("playerpos", SavedVec3),
("Mscenepos", SavedVec3),
("nb_portals", c_int32),
("nb_rooms", c_int32)
]
class FAST_TEXTURE_CONTAINER(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("tc", c_int32),
("temp", c_int32),
("fic", c_char * 256)
]
class FAST_SCENE_INFO(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("nbpoly", c_int32),
("nbianchors", c_int32),
]
class FAST_VERTEX(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("sy", c_float),
("ssx", c_float),
("ssz", c_float),
("stu", c_float),
("stv", c_float)
]
class FAST_EERIEPOLY(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("v", FAST_VERTEX * 4),
("tex", c_int32),
("norm", SavedVec3),
("norm2", SavedVec3),
("nrml", SavedVec3 * 4),
("transval", c_float),
("area", c_float),
("type", PolyTypeFlag),
("room", c_int16),
("paddy", c_int16)
]
class FAST_ANCHOR_DATA(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("pos", SavedVec3),
("radius", c_float),
("height", c_float),
("nb_linked", c_int16),
("flags", c_int16)
]
class SavedTextureVertex(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("pos", SavedVec3),
("rhw", c_float),
("color", c_uint32),
("specular", c_uint32),
("tu", c_float),
("tv", c_float)
]
class SAVE_EERIEPOLY(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("type", c_int32),
("min", SavedVec3),
("max", SavedVec3),
("norm", SavedVec3),
("norm2", SavedVec3),
("v", SavedTextureVertex * 4),
("tv", SavedTextureVertex * 4),
("nrml", SavedVec3 * 4),
("tex", c_int32),
("center", SavedVec3),
("transval", c_float),
("area", c_float),
("room", c_int16),
("misc", c_int16)
]
class EERIE_SAVE_PORTALS(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("poly", SAVE_EERIEPOLY),
("room_1", c_int32),
("room_2", c_int32),
("useportal", c_int16),
("paddy", c_int16)
]
class EERIE_SAVE_ROOM_DATA(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("nb_portals", c_int32),
("nb_polys", c_int32),
("padd", c_int32 * 6)
]
class FAST_EP_DATA(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("px", c_int16),
("py", c_int16),
("idx", c_int16),
("padd", c_int16)
]
class ROOM_DIST_DATA_SAVE(LittleEndianStructure):
_pack_ = 1
_fields_ = [
("distance", c_float),
("startpos", SavedVec3),
("endpos", SavedVec3),
]
from collections import namedtuple
FtsData = namedtuple('FtsData', ['sceneOffset', 'textures', 'cells', 'anchors', 'portals'])
import logging
from ctypes import sizeof
from .lib import ArxIO
class FtsSerializer(object):
def __init__(self, ioLib):
self.log = logging.getLogger('FtsSerializer')
self.ioLib = ioLib
def read_fts(self, data) -> FtsData:
"""If you want to read a fts file use read_fts_container"""
pos = 0
ftsHeader = FAST_SCENE_HEADER.from_buffer_copy(data, pos)
pos += sizeof(FAST_SCENE_HEADER)
self.log.debug("Fts Header version: %f" % ftsHeader.version)
self.log.debug("Fts Header size x,z: %i,%i" % (ftsHeader.sizex, ftsHeader.sizez))
self.log.debug("Fts Header playerpos: %f,%f,%f" % (ftsHeader.playerpos.x, ftsHead
|
er.playerpos.y, ftsHeader.playerpos.z))
self.log.debug("Fts Header Mscenepos: %f,%f,%f" % (ftsHeader.Mscenepos.x, ftsHeader.Mscenepos.y, ftsHeader.Mscenepos.z))
sceneOffset = (ftsHeader.Mscenepos.x, ftsHeader.Mscenepos.y, ftsHeader.Mscenepos.z)
texturesType = FAST_TEXTURE_CONTAINER * ftsHeader.nb_textures
textures = texturesType.from_buffer_copy(data, pos)
pos += sizeof(texturesType)
self.log.debug("Loaded %i textures" % len(te
|
xtures))
#for i in textures:
# log.info(i.fic.decode('iso-8859-1'))
        cells = [[None for _ in range(ftsHeader.sizex)] for _ in range(ftsHeader.sizez)]
for z in range(ftsHeader.sizez):
for x in range(ftsHeader.sizex):
cellHeader = FAST_SCENE_INFO.from_buffer_copy(data, pos)
pos += sizeof(FAST_SCENE_INFO)
try:
if cellHeader.nbpoly <= 0:
cells[z][x] = None
else:
polysType = FAST_EERIEPOLY * cellHeader.nbpoly
polys = polysType.from_buffer_copy(data, pos)
pos += sizeof(polysType)
cells[z][x] = polys
except ValueError as e:
print("Failed reading cell data, x:%i z:%i polys:%i" % (x, z, cellHeader.nbpoly))
raise e
if cellHeader.nbianchors > 0:
AnchorsArrayType = c_int32 * cellHeader.nbianchors
anchors = AnchorsArrayType.from_buffer_copy(data, pos)
pos += sizeof(AnchorsArrayType)
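                    # Note: the per-cell anchor indices read above only advance
                    # the cursor ('pos'); the variable is overwritten below when
                    # the global anchor list is rebuilt from FAST_ANCHOR_DATA.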
anchors = []
for i in range(ftsHeader.nb_anchors):
anchor = FAST_ANCHOR_DATA.from_buffer_copy(data, pos)
pos += sizeof(FAST_ANCHOR_DATA)
if anchor.nb_linked > 0:
LinkedAnchorsArrayType = c_int32 * anchor.nb_linked
linked = LinkedAnchorsArrayType.from_buffer_copy(data, pos)
pos += sizeof(LinkedAnchorsArrayType)
anchors.append( ((anchor.pos.x, anchor.pos.y, anchor.pos.z), linked) )
else:
anchors.append( ((anchor.pos.x, anchor.pos.y, anchor.pos.z), []) )
portals = []
for i in range(ftsHeader.nb_portals):
portal = EERIE_SAVE_PORTALS.from_buffer_copy(data, pos)
pos += sizeof(EERIE_SAVE_PORTALS)
portals.append(portal)
for i in range(ftsHeader.nb_rooms + 1): # Off by one in data
            room = EERIE_SAVE_ROOM_DATA.from_buffer_copy(data, pos)
|
Armored-Dragon/goldmine
|
default_cogs/web.py
|
Python
|
mit
| 1,742
| 0.003444
|
"""Web dashboard."""
import os
import sys
import json
import aiohttp
from discord.ext import commands
import util.dynaimport as di
from .cog import Cog
japronto = di.load('japronto')
sanic = di.load('sanic')
response = di.load('sanic.response')
root_dir = os.path.dirname(os.path.abspath(sys.modules['__main__'].core_file))
web_root = os.path.join(root_dir, 'assets', 'web')
def webroot(f):
return os.path.join(web_root, *f.split('/'))
class Web(Cog):
"""The awesome web dashboard."""
def __init__(self, bot):
super().__init__(bot)
self.logger = self.logger.getChild('web')
self.port = 8085
self.host = '127.0.0.1'
self.app = None
self.server = None
self.server_task = None
if bot.user:
self.loop.create_task(self.start())
def __unload(self):
        if self.server_task:
            self.server_task.cancel()
async def on_ready(self):
await self.start()
async def start(self):
        self.logger.info('Starting web server on %s:%s!', self.host, str(self.port))
app = sanic.Sanic()
await self.init_app(app)
self.app = app
        self.server = app.create_server(host=self.host, port=self.port)
self.server_task = self.loop.create_task(self.server)
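        # Note: in the Sanic versions this code appears to target,
        # create_server() returns a coroutine intended for manual asyncio
        # integration, hence the create_task() scheduling above.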
async def init_app(self, app):
self.logger.info('Initializing app...')
@app.route('/')
async def test(req):
self.logger.info('Got request at /')
            return response.text('hello')
            # Unreachable as written; presumably the intended final handler:
            # return response.file(webroot('index.html'))
def setup(bot):
bot.add_cog(Web(bot))
'''
async def hello(request):
return request.Response(text='Hello world!')
app = japronto.Application()
app.router.add_route('/', hello)
app.run()'''
|
JulienMcJay/eclock
|
windows/Python27/Lib/Cookie.py
|
Python
|
gpl-2.0
| 25,844
| 0.010447
|
#!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create a Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it falls back to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
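# Illustrative sketch (not the module's real _quote/_unquote helpers): a
# value made entirely of _LegalChars passes through unchanged; anything
# else is double-quoted with special characters mapped via _Translator.
def _demo_quote(value):
    # Table lookups happen at call time, so _Translator (defined below)
    # is available by the time this runs.
    if all(c in _LegalChars for c in value):
        return value
    return '"' + ''.join(_Translator.get(c, c) for c in value) + '"'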
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' :
|
ESGF/esgf-drslib
|
drslib/exceptions.py
|
Python
|
bsd-3-clause
| 73
| 0.027397
|
"""
drslib exceptions
"""
class TranslationError(Exception):
pass
|
cloudbase/maas
|
src/maasserver/models/node.py
|
Python
|
agpl-3.0
| 32,077
| 0.000717
|
# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Node objects."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = [
"NODE_TRANSITIONS",
"Node",
]
from itertools import (
imap,
islice,
repeat,
)
import random
from string import whitespace
from uuid import uuid1
from django.contrib.auth.models import User
from django.core.exceptions import (
PermissionDenied,
ValidationError,
)
from django.db.models import (
BooleanField,
CharField,
ForeignKey,
IntegerField,
Manager,
ManyToManyField,
Q,
)
from django.shortcuts import get_object_or_404
import djorm_pgarray.fields
from maasserver import DefaultMeta
from maasserver.enum import (
ARCHITECTURE,
ARCHITECTURE_CHOICES,
DISTRO_SERIES,
DISTRO_SERIES_CHOICES,
NODE_AFTER_COMMISSIONING_ACTION,
NODE_AFTER_COMMISSIONING_ACTION_CHOICES,
NODE_PERMISSION,
NODE_STATUS,
NODE_STATUS_CHOICES,
NODE_STATUS_CHOICES_DICT,
)
from maasserver.exceptions import NodeStateViolation
from maasserver.fields import (
JSONObjectField,
MAC,
)
from maasserver.models.cleansave import CleanSave
from maasserver.models.config import Config
from maasserver.models.dhcplease import DHCPLease
from maasserver.models.tag import Tag
from maasserver.models.zone import Zone
from maasserver.models.timestampedmodel import TimestampedModel
from maasserver.utils import (
get_db_state,
strip_domain,
)
from piston.models import Token
from provisioningserver.enum import (
POWER_TYPE,
POWER_TYPE_CHOICES,
)
from provisioningserver.tasks import (
power_off,
power_on,
remove_dhcp_host_map,
)
def generate_node_system_id():
return 'node-%s' % uuid1()
# Information about valid node status transitions.
# The format is:
# {
# old_status1: [
# new_status11,
# new_status12,
# new_status13,
# ],
# ...
# }
#
NODE_TRANSITIONS = {
None: [
NODE_STATUS.DECLARED,
NODE_STATUS.MISSING,
NODE_STATUS.RETIRED,
],
NODE_STATUS.DECLARED: [
NODE_STATUS.COMMISSIONING,
NODE_STATUS.MISSING,
NODE_STATUS.READY,
NODE_STATUS.RETIRED,
],
NODE_STATUS.COMMISSIONING: [
NODE_STATUS.FAILED_TESTS,
NODE_STATUS.READY,
NODE_STATUS.RETIRED,
NODE_STATUS.MISSING,
],
NODE_STATUS.FAILED_TESTS: [
NODE_STATUS.COMMISSIONING,
NODE_STATUS.MISSING,
NODE_STATUS.RETIRED,
],
NODE_STATUS.READY: [
NODE_STATUS.COMMISSIONING,
NODE_STATUS.ALLOCATED,
NODE_STATUS.RESERVED,
NODE_STATUS.RETIRED,
NODE_STATUS.MISSING,
],
NODE_STATUS.RESERVED: [
NODE_STATUS.READY,
NODE_STATUS.ALLOCATED,
NODE_STATUS.RETIRED,
NODE_STATUS.MISSING,
],
NODE_STATUS.ALLOCATED: [
NODE_STATUS.READY,
NODE_STATUS.RETIRED,
NODE_STATUS.MISSING,
],
NODE_STATUS.MISSING: [
NODE_STATUS.DECLARED,
NODE_STATUS.READY,
NODE_STATUS.ALLOCATED,
NODE_STATUS.COMMISSIONING,
],
NODE_STATUS.RETIRED: [
NODE_STATUS.DECLARED,
NODE_STATUS.READY,
NODE_STATUS.MISSING,
],
}
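# Illustrative helper (hypothetical, not part of MAAS): the table above
# could gate a status change like this.
def is_valid_transition(old_status, new_status):
    return new_status in NODE_TRANSITIONS.get(old_status, [])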
class NodeManager(Manager):
"""A utility to manage the collection of Nodes."""
def filter_by_ids(self, query, ids=None):
"""Filter `query` result set by system_id values.
:param query: A QuerySet of Nodes.
:type query: django.db.models.query.QuerySet_
:param ids: Optional set of ids to filter by. If given, nodes whose
system_ids are not in `ids` will be ignored.
:type param_ids: Sequence
:return: A filtered version of `query`.
.. _django.db.models.query.QuerySet: https://docs.djangoproject.com/
en/dev/ref/models/querysets/
"""
if ids is None:
return query
else:
return query.filter(system_id__in=ids)
def _filter_visible_nodes(self, nodes, user, perm):
"""Filter a `Node` query depending on user permissions.
:param nodes: A `Node` query set.
:param user: The user making the request; the filtering is based on
their privileges.
:param perm: Type of access requested. For example, a user may be
allowed to view some nodes that they are not allowed to edit.
:type perm: `NODE_PERMISSION`
:return: A version of `node` that is filtered to include only those
nodes that `user` is allowed to access.
"""
if user.is_superuser:
# Admin is allowed to see all nodes.
return nodes
elif perm == NODE_PERMISSION.VIEW:
return nodes.filter(Q(owner__isnull=True) | Q(owner=user))
elif perm == NODE_PERMISSION.EDIT:
return nodes.filter(owner=user)
elif perm == NODE_PERMISSION.ADMIN:
return nodes.none()
else:
raise NotImplementedError(
"Invalid permission check (invalid permission name: %s)." %
perm)
def get_nodes(self, user, perm, ids=None, from_nodes=None):
"""Fetch Nodes on which the User_ has the given permission.
Warning: there could be a lot of nodes! Keep scale in mind when
        calling this, and watch performance in general. Prefetch related
data where appropriate.
:param user: The user that should be used in the permission check.
:type user: User_
:param perm: The permission to check.
:type perm: a permission string from NODE_PERMISSION
:param ids: If given, limit result to nodes with these system_ids.
:type ids: Sequence.
:param from_nodes: Optionally, restrict the answer to these nodes.
:type from_nodes: Query set of `Node`.
.. _User: https://
docs.djangoproject.com/en/dev/topics/auth/
#django.contrib.auth.models.User
"""
if from_nodes is None:
from_nodes = self.all()
nodes = self._filter_visible_nodes(from_nodes, user, perm)
return self.filter_by_ids(nodes, ids)
def get_allocated_visible_nodes(self, token, ids):
"""Fetch Nodes that were allocated to the User_/oauth token.
:param user: The user whose nodes to fetch
:type user: User_
:param token: The OAuth token associated with the Nodes.
:type token: piston.models.Token.
:param ids: Optional set of IDs to filter by. If given, nodes whose
system_ids are not in `ids` will be ignored.
:type param_ids: Sequence
.. _User: https://
docs.djangoproject.com/en/dev/topics/auth/
#django.contrib.auth.models.User
"""
if ids is None:
nodes = self.filter(token=token)
else:
nodes = self.filter(token=token, system_id__in=ids)
return nodes
def get_node_or_404(self, system_id, user, perm):
"""Fetch a `Node` by system_id. Raise exceptions if no `Node` with
        this system_id exists or if the provided user does not have the
        required permission on this `Node`.
:param name: The system_id.
:type name: string
:param user: The user that should be used in the permission check.
:type user: django.contrib.auth.models.User
:param perm: The permission to assert that the user has on the node.
:type perm: unicode
:raises: django.http.Http404_,
:class:`maasserver.exceptions.PermissionDenied`.
.. _django.http.Http404: https://
docs.djangoproject.com/en/dev/topics/http/views/
#the-http404-exception
"""
node = get_object_or_404(Node, system_id=system_id)
if user.has_perm(perm, node):
return node
else:
raise PermissionDenied()
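    # Typical call site (hypothetical view code):
    #   node = Node.objects.get_node_or_404(
    #       system_id, request.user, NODE_PERMISSION.VIEW)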
def get_available_nodes_for_acqui
|
dealien/Red-Magician
|
cogs/survey.py
|
Python
|
gpl-3.0
| 27,575
| 0.000544
|
import asyncio
from collections import defaultdict
from datetime import datetime, timedelta
from itertools import zip_longest
import os
from typing import Any, Dict, List
try:
from dateutil import parser as dp
dateutil_available = True
except:
dateutil_available = False
import discord
from discord.ext import commands
try:
import pytz
pytz_available = True
except:
pytz_available = False
try:
from tabulate import tabulate
tabulate_available = True
except:
tabulate_available = False
from .utils.dataIO import dataIO
from .utils import checks, chat_formatting as cf
Option = Dict[str, Any]
Options = Dict[str, Option]
class PastDeadlineError(Exception):
pass
class Survey:
"""Runs surveys for a specific role of people via DM,
and prints real-time results to a given text channel.
Supports changing responses, answer option quotas,
and reminders based on initial answer.
"""
def __init__(self, bot: commands.Bot):
self.bot = bot
self.surveys_path = "data/survey/surveys.json"
self.surveys = dataIO.load_json(self.surveys_path)
self.tasks = defaultdict(list)
self.bot.loop.create_task(self._resume_running_surveys())
async def _resume_running_surveys(self):
await self.bot.wait_until_ready()
closed = self.surveys["closed"]
for server_id in self.surveys:
if server_id not in ["closed", "next_id"]:
server = self.bot.get_server(server_id)
for survey_id in self.surveys[server_id]:
if survey_id not in closed:
self._setup_reprompts(server_id, survey_id)
self._schedule_close(
server_id, survey_id, self._get_timeout(
self._deadline_string_to_datetime(
self.surveys[server_id][survey_id]
["deadline"], adjust=False)))
await self._update_answers_message(
server_id, survey_id)
for uid in self.surveys[server_id][survey_id]["asked"]:
user = server.get_member(uid)
new_task = self.bot.loop.create_task(
self._send_message_and_wait_for_message(
server_id, survey_id, user,
send_question=False))
self.tasks[survey_id].append(new_task)
def _member_has_role(self, member: discord.Member, role: discord.Role):
return role in member.roles
def _get_users_with_role(self, server: discord.Server,
role: discord.Role) -> List[discord.User]:
roled = []
for member in server.members:
if (not member.bot) and self._member_has_role(member, role):
roled.append(member)
return roled
def _deadline_string_to_datetime(self, deadline: str,
adjust: bool = True) -> datetime:
dl = dp.parse(deadline, tzinfos=tzd)
if dl.tzinfo is None:
dl = dl.replace(tzinfo=pytz.utc)
to = self._get_timeout(dl)
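        # A deadline parsed up to 24 h in the past is taken to mean
        # "tomorrow" and rolled forward a day; anything older raises
        # PastDeadlineError.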
if adjust and -86400 < to < 0:
dl += timedelta(days=1)
elif to < -86400:
raise PastDeadlineError()
return dl
def _get_timeout(self, deadline: datetime) -> int:
return (deadline - datetime.utcnow().replace(
tzinfo=pytz.utc)).total_seconds()
def _mark_as_closed(self, survey_id: str):
if not self.surveys["closed"]:
self.surveys["closed"] = []
closed = self.surveys["closed"]
if survey_id not in closed:
closed.append(survey_id)
dataIO.save_json(self.surveys_path, self.surveys)
async def _parse_options(self, options: str) -> Options:
opts_list = None if options == "*" else [
r.lower().strip() for r in options.split(";")]
opts = {}
if opts_list:
opt_names = [o[0] for o in [op.split(":") for op in opts_list]]
for opt in opts_list:
opt_s = opt.split(":")
                if len(opt_s) == 1:
opts[opt_s[0]] = {
"limit": None, "reprompt": None, "link": None}
elif len(opt_s) > 1:
if opt_s[1] == "":
opts[opt_s[0]] = {"limit": None}
else:
try:
int(opt_s[1])
except ValueError:
await self.bot.reply(cf.error(
"A limit you provided was not a number."))
return "return"
opts[opt_s[0]] = {"limit": opt_s[1]}
if len(opt_s) > 2:
if opt_s[2] == "":
opts[opt_s[0]]["reprompt"] = None
else:
try:
int(opt_s[2])
except ValueError:
await self.bot.reply(cf.error(
"A reprompt value you provided was"
" not a number."))
return "return"
opts[opt_s[0]]["reprompt"] = int(opt_s[2]) * 60
else:
opts[opt_s[0]]["reprompt"] = None
if len(opt_s) == 4:
if opts[opt_s[0]]["reprompt"] is None:
await self.bot.reply(cf.error(
"You cannot link an option without giving a"
" reprompt value. Please try again."))
return "return"
if opt_s[3] == "":
opts[opt_s[0]]["link"] = None
else:
if opt_s[3] not in opt_names:
await self.bot.reply(cf.error(
"A link that you gave is not the name of"
" an option. Please try again."))
return "return"
opts[opt_s[0]]["link"] = opt_s[3]
else:
opts[opt_s[0]]["link"] = None
else:
opts = None
return opts
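    # Example (hypothetical input): "yes:10:30:no;no;maybe" parses to
    #   {'yes':   {'limit': '10', 'reprompt': 1800, 'link': 'no'},
    #    'no':    {'limit': None, 'reprompt': None, 'link': None},
    #    'maybe': {'limit': None, 'reprompt': None, 'link': None}}
    # (limits are kept as strings; reprompt minutes are converted to seconds).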
def _save_deadline(self, server_id: str, survey_id: str, deadline: str):
self.surveys[server_id][survey_id]["deadline"] = deadline
dataIO.save_json(self.surveys_path, self.surveys)
def _save_channel(self, server_id: str, survey_id: str, channel_id: str):
self.surveys[server_id][survey_id]["channel"] = channel_id
dataIO.save_json(self.surveys_path, self.surveys)
def _save_question(self, server_id: str, survey_id: str, question: str):
self.surveys[server_id][survey_id]["question"] = question
dataIO.save_json(self.surveys_path, self.surveys)
def _save_options(self, server_id: str, survey_id: str, options: Options):
self.surveys[server_id][survey_id]["options"] = options
self.surveys[server_id][survey_id]["answers"] = {}
dataIO.save_json(self.surveys_path, self.surveys)
if options != "any":
for opt in options:
self.surveys[server_id][survey_id]["answers"][opt] = []
dataIO.save_json(self.surveys_path, self.surveys)
def _save_asked(self, server_id: str, survey_id: str,
users: List[discord.User]):
asked = [u.id for u in users]
self.surveys[server_id][survey_id]["asked"] = asked
dataIO.save_json(self.surveys_path, self.surveys)
def _save_prefix(self, server_id: str, survey_id: str, prefix: str):
self.surveys[server_id][survey_id]["prefix"] = prefix
dataIO.save_json(self.surveys_path, self.surveys)
def _save_answer(self, server_id: str, survey_id: str, user: discord.User,
answer: str, change: bool) -> bool:
|
AlexStarov/Shop
|
applications/discount/migrations/0006_auto_20160517_2147.py
|
Python
|
apache-2.0
| 1,256
| 0.002389
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import applications.discount.models
class Migration(migrations.Migration):
dependencies = [
('discount', '0005_auto_20160507_2145'),
]
operations = [
migrations.AlterField(
model_name='action',
name='datetime_end',
field=models.DateTimeField(default=applications.discount.models.default_datetime_end, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u043a\u043e\u043d\u0447\u0430\u043d\u0438\u044f \u0430\u043a\u0446\u0438\u0438'),
),
migrations.AlterField(
model_name='action',
name='datetime_start',
field=models.DateTimeField(default=datetime.datetime.now, verbose_name='\u0414\u0430\u0442\u0430 \u043d\u0430\u0447\u0430\u043b\u0430 \u0430\u043a\u0446\u0438\u0438'),
),
migrations.AlterField(
            model_name='action',
name='name',
field=models.CharField(default=applications.discount.models.default_action_name, max_length=256, verbose_name='\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435 \u0430\u043a\u0446\u0438\u0438'),
),
]
|
darthcloud/cube64-dx
|
notes/poll.py
|
Python
|
gpl-2.0
| 6,079
| 0.023195
|
#!/usr/bin/env python3
#
# Script for polling N64/GC SI bus devices
#
# This script uses the serial bridge and polls in a loop
# for the button status.
#
# It currently supports N64 controllers, N64 mice & GameCube controllers.
#
# --Jacques Gagnon <darthcloud@gmail.com>
#
from bus import Bus
from collections import namedtuple, OrderedDict
import struct, time, os, sys
bmap = namedtuple('status', 'name mask color')
identity_req = namedtuple('identity_req', 'cmd')
status_req = namedtuple('status_req', 'cmd')
read_req = namedtuple('read_req', 'cmd address')
write_req = namedtuple('write_req', 'cmd address data')
dol_status_req = namedtuple('dol_status_req', 'cmd unknown rumble')
dol_wb_assoc_req = namedtuple('dol_wb_assoc_req', 'cmd id')
identity_resp = namedtuple('identity_resp', 'id info')
status_resp = namedtuple('status_resp', 'buttons x_axis y_axis')
dol_status_resp = namedtuple('dol_status_resp', 'buttons x_axis y_axis cx_axis cy_axis l_trigger r_trigger')
RED='\x1b[1;91m'
GREEN='\x1b[1;92m'
YELLOW='\x1b[1;93m'
BLUE='\x1b[1;94m'
MAGENTA='\x1b[1;95m'
CYAN='\x1b[1;96m'
LGRAY='\x1b[1;37m'
DGRAY='\x1b[1;90m'
END='\x1b[0m'
IDENTIFY = 0x00
STATUS = 0x01
READ = 0x02
WRITE = 0x03
DOL_STATUS = 0x40
WB_INIT = 0x4E
MOUSE = 0x02
NUS = 0x05
DOL = 0x09
WB_DOWN = 0xA8
WB_AUTH = 0xE9
WB_ASSOC = 0xEB
EMPTY = 0x00
OCCUPY = 0x01
EMPTIED = 0x02
INSERT = 0x03
BUTTON = {NUS:OrderedDict(
[('buttons',[bmap('A',0x8000,BLUE),
bmap('B',0x4000,GREEN),
bmap('Z',0x2000,LGRAY),
bmap('St',0x1000,RED),
bmap('Up',0x0800,LGRAY),
bmap('Dn',0x0400,LGRAY),
bmap('Lt',0x0200,LGRAY),
bmap('Rt',0x0100,LGRAY),
bmap('L',0x0020,LGRAY),
bmap('R',0x0010,LGRAY),
bmap('CUp',0x0008,YELLOW),
bmap('CDn',0x0004,YELLOW),
bmap('CLt',0x0002,YELLOW),
bmap('CRt',0x0001,YELLOW)]),
('x_axis', [bmap('X',0xFF,LGRAY)]),
('y_axis', [bmap('Y',0xFF,LGRAY)])]),
DOL:OrderedDict(
[('buttons',[bmap('St',0x1000,LGRAY),
bmap('Y',0x0800,LGRAY),
bmap('X',0x0400,LGRAY),
bmap('B',0x0200,RED),
bmap('A',0x0100,CYAN),
bmap('L',0x0040,LGRAY),
bmap('R',0x0020,LGRAY),
bmap('Z',0x0010,MAGENTA),
bmap('Up',0x0008,LGRAY),
bmap('Dn',0x0004,LGRAY),
bmap('Rt',0x0002,LGRAY),
bmap('Lt',0x0001,LGRAY)]),
('x_axis', [bmap('X',0xFF,LGRAY)]),
('y_axis', [bmap('Y',0xFF,LGRAY)]),
('cx_axis', [bmap('CX',0xFF,YELLOW)]),
('cy_axis', [bmap('CY',0xFF,YELLOW)]),
('l_trigger', [bmap('AL',0xFF,LGRAY)]),
('r_trigger', [bmap('AR',0xFF,LGRAY)])])}
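# Illustrative helper (not in the original script): list the pressed
# buttons for a raw 16-bit N64 status word against the map above.
def pressed_n64(buttons_word):
    return [b.name for b in BUTTON[NUS]['buttons'] if buttons_word & b.mask]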
class Bus(Bus):
def identify(self):
reply = self.bridge.write(bytes([IDENTIFY]), 3)[1]
if reply[0] == MOUSE:
return {'system':NUS, 'type':'mouse'}
elif reply[0] == NUS:
if reply[2] == EMPTY:
return {'system':NUS, 'type':'controller', 'slot':'Empty '}
elif reply[2] == OCCUPY:
return {'system':NUS, 'type':'controller', 'slot':'Occupy '}
elif reply[2] == EMPTIED:
return {'system':NUS, 'type':'controller', 'slot':'Emptied'}
elif reply[2] == INSERT:
return {'system':NUS, 'type':'controller', 'slot':'Insert '}
else:
print("Unknown N64 controller slot state: {}".format(reply))
sys.exit()
elif reply[0] == DOL:
return {'system':DOL, 'type':'controller'}
elif reply[0] == WB_DOWN:
return {'system':WB_DOWN, 'type':'wavebird'}
elif reply[0] == WB_AUTH:
return {'system':WB_AUTH, 'type':'wavebird', 'id':reply[-2:]}
elif reply[0] == WB_ASSOC:
return {'system':DOL, 'type':'wavebird'}
else:
print("Unknown device identity: {}".format(reply))
sys.exit()
    def status(self, system):
if system == NUS:
reply = self.bridge.write(bytes([STATUS]), 4)[1]
return status_resp._make(struct.unpack('>H2b', reply))
elif system == DOL:
reply = self.bridge.write(struct.pack(">BH", DOL_STATUS, 0x0300), 8)[1]
return dol_status_resp._make(struct.unpack('>H6B', reply))
else:
print("Unknown system ID: {}".format(system))
sys.exit()
def wavebird_init(self, id):
        return self.bridge.write(struct.pack(">BBB", WB_INIT, (id[0] | 0x20) & 0x10, id[1]), 3)[1]
def poll():
os.system('setterm -cursor off')
interface = Bus()
device = interface.identify()
time.sleep(0.02)
while device['system'] == WB_DOWN:
device = interface.identify()
time.sleep(1)
if device['system'] == WB_AUTH:
interface.wavebird_init(device['id'])
try:
while 1:
device = interface.identify()
time.sleep(0.02)
status = interface.status(device['system'])
for field, values in BUTTON[device['system']].items():
for value in values:
if value.mask != 0xFF:
print("{}{}{} ".format(value.color if getattr(status, field) & value.mask else DGRAY, value.name, END), end='')
else:
print("{}{}:{:+03X}{} ".format(value.color, value.name, getattr(status, field), END), end='')
if 'slot' in device:
print("slot:{}".format(device['slot']), end='')
print("\r", end='')
time.sleep(0.02)
except KeyboardInterrupt:
pass
os.system('setterm -cursor on')
print("")
if __name__ == "__main__":
poll()
### The End ###
|