repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
riggsd/guano-py | guano.py | Python | mit | 19,292 | 0.003421 | """
This is the Python reference implementation for reading and writing GUANO metadata.
GUANO is the "Grand Unified Acoustic Notation Ontology", an extensible metadata format
for representing bat acoustics data.
Import this Python module as::
import guano
This module utilizes the Python :mod:`logging` framework for issuing warnings and debug messages.
Application code may wish to enable logging with the :func:`logging.basicConfig` function.
"""
import os
import sys
import wave
import struct
import os.path
import shutil
from datetime import datetime, tzinfo, timedelta
from contextlib import closing
from tempfile import NamedTemporaryFile
from collections import OrderedDict, namedtuple
from base64 import standard_b64encode as base64encode
from base64 import standard_b64decode as base64decode

import logging
# Use the logging hierarchy so that application-level configuration (e.g.
# `logging.basicConfig`, as suggested in the module docstring) actually
# affects this library. Instantiating `logging.Logger(...)` directly would
# create a detached logger that ignores root handlers and levels.
log = logging.getLogger(__name__)

if sys.version_info[0] > 2:
    # Python 3 compatibility aliases (this module also supports Python 2)
    unicode = str
    basestring = str
# Library version (PEP 440); '.dev0' marks an unreleased development build
__version__ = '1.0.15.dev0'

# Public API: the GuanoFile class is the only supported entry point
__all__ = 'GuanoFile',

# Whitespace characters stripped from metadata values; includes the NUL byte
# sometimes used as padding by recorders
WHITESPACE = ' \t\n\x0b\x0c\r\0'

# Mirrors the parameter tuple returned by `wave.Wave_read.getparams()`
wavparams = namedtuple('wavparams', 'nchannels, sampwidth, framerate, nframes, comptype, compname')

# Shared zero offset used by the timezone classes below
_ZERO = timedelta(0)
class tzutc(tzinfo):
    """Concrete :class:`datetime.tzinfo` for Coordinated Universal Time.

    Zero offset from UTC and no daylight saving time, ever.
    """

    def utcoffset(self, dt):
        # UTC is the reference timezone: offset is always zero
        return timedelta(0)

    def tzname(self, dt):
        return 'UTC'

    def dst(self, dt):
        # UTC never observes daylight saving time
        return timedelta(0)

    def __repr__(self):
        return 'UTC'


# Module-level singleton used when parsing "zulu" ('Z'-suffixed) timestamps
utc = tzutc()
class tzoffset(tzinfo):
    """
    Fixed-offset concrete timezone implementation.
    `offset` should be numeric hours or ISO format string like '-07:00'.
    """

    def __init__(self, offset):
        # Accept either a numeric hour offset (e.g. -7, 5.5) or an ISO 8601
        # style UTC-offset string; strings are normalized to numeric hours.
        if isinstance(offset, basestring):
            # offset as ISO string '-07:00', '-0700', or '-07' format
            if len(offset) < 4:
                vals = offset, '00'  # eg '-07'
            elif ':' in offset:
                vals = offset.split(':')  # '-07:00'
            else:
                vals = offset[:-2], offset[-2:]  # '-0700'
            # The minutes component always moves the offset away from zero,
            # so it is subtracted for negative hours ('-07:30' -> -7.5 hours)
            if vals[0].startswith('-'):
                offset = int(vals[0]) - int(vals[1])/60.0
            else:
                offset = int(vals[0]) + int(vals[1])/60.0
        # Numeric hours kept for tzname()/__repr__; timedelta used for math
        self._offset_hours = offset
        self._offset = timedelta(hours=offset)

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        # Fixed offsets never observe daylight saving time
        return _ZERO

    def tzname(self, dt):
        # e.g. 'UTC-7.5'; a human-readable label, not a strict ISO rendering
        return 'UTC'+str(self._offset_hours)

    def __repr__(self):
        return self.tzname(None)
def parse_timestamp(s):
    """
    Parse a string in supported subset of ISO 8601 / RFC 3339 format to :class:`datetime.datetime`.
    The timestamp will be timezone-aware if a TZ is specified, or timezone-naive if in "local" fmt.

    :param s: timestamp string, or None
    :rtype: datetime or None
    """
    # Python's standard library does an awful job of parsing ISO timestamps, so we do it manually
    if s is None or not s.strip():
        return None
    timestamp, tz = None, None
    s = s.replace(' ', 'T', 1)  # support using space rather than 'T' as date/time delimiter
    if s[-1] == 'Z':  # UTC "zulu" time
        tz = utc
        s = s[:-1]
    elif '+' in s or s.count('-') == 3:  # UTC offset provided (a bare date has exactly two '-')
        i = s.index('+') if '+' in s else s.rfind('-')
        s, offset = s[:i], s[i:]
        tz = tzoffset(offset)
    # Detect fractional seconds by the decimal point rather than the previous
    # `len(s) > 22` heuristic, which rejected short fractions like '...:59.5'
    if '.' in s:  # fractional seconds included
        timestamp = datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
    else:
        timestamp = datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
    return timestamp.replace(tzinfo=tz) if tz else timestamp
# RIFF/WAVE chunk header structs: a chunk ID is four raw bytes (byte order is
# irrelevant for a 's' field), and a chunk size is a little-endian uint32
_chunkid = struct.Struct('> 4s')
_chunksz = struct.Struct('< L')
class GuanoFile(object):
"""
An abstraction of a .WAV file with GUANO metadata.
A `GuanoFile` object behaves like a normal Python :class:`dict`, where keys can either be
well-known metadata keys, namespaced keys, or a tuple of (namespace, key).
Well-known keys will have their values coerced into the correct data type. The parser may be
configured to coerce new namespaced keys with the :func:`register()` function.
Example usage::
gfile = GuanoFile('myfile.wav')
print gfile['GUANO|Version']
>>> '1.0'
gfile['Species Manual ID'] = 'Mylu'
gfile['Note'] = 'I love GUANO!'
gfile.write()
Though reading, writing, and editing .WAV files is the target usage, this class may also be
used independent from the .WAV file format. GUANO metadata can be written into an
Anabat-format file or to a sidecar file, for example, by populating a `GuanoFile` object and
then using the :func:`serialize()` method to produce correctly formatted UTF-8 encoded metadata.
:ivar str filename: path to the file which this object represents, or `None` if a "new" file
:ivar bool strict_mode: whether the GUANO parser is configured for strict or lenient parsing
:ivar bytes wav_data: the `data` subchunk of a .WAV file consisting of its actual audio data,
lazily-loaded and cached for performance
:ivar wavparams wav_params: namedtuple of .WAV parameters (nchannels, sampwidth, framerate, nframes, comptype, compname)
"""
_coersion_rules = {
'Filter HP': float, 'Length': float, 'Loc Elevation': float,
'Loc Accuracy': int, 'Samplerate': int,
'TE': lambda value: int(value) if value else 1,
'Loc Position': lambda value: tuple(float(v) for v in value.split()),
'Timestamp': parse_timestamp,
'Note': lambda value: value.replace('\\n', '\n'),
}
_serialization_rules = {
'Loc Position': lambda value: '%f %f' % value,
'Timestamp': lambda value: value.isoformat() if value else '',
'Length': lambda value: '%.2f' % value,
'Note': lambda value: value.replace('\n', '\\n')
}
def __init__(self, filename=None, strict=True):
"""
Create a GuanoFile instance which represents a single file's GUANO metadata.
If the file already contains GUANO metadata, it will be parsed immediately. If not, then
this object will be initialized as "new" metadata.
:param filename: path to an existing .WAV file with GUANO metadata; if the path does not
exist or is `None` then this instance represents a "new" file
:type filename: str or None
:param bool strict: whether the parser should be strict and raise exceptions when
encountering bad metadata values, or whether it should be as lenient
as possible (default: True); if in lenient mode, bad values will
remain in their UTF-8 string form as found persisted in the file
:raises ValueError: if the specified file doesn't represent a valid .WAV or if its
existing GUANO metadata is broken
"""
self.filename = filename
self.strict_mode = strict
self.wav_params = None
self._md = OrderedDict() # metadata storage - map of maps: namespace->key->val
self._wav_data = None # lazily-loaded and cached
self._wav_data_offset = 0
self._wav_data_size = 0
if filename is not None and os.path.isfile(filename):
self._load()
def _coerce(self, key, value):
"""Coerce a value from its Unicode representation to a specific data type"""
if key in self._coersion_rules:
try:
return self._coersion_rules[key](value)
except (ValueError, TypeError) as e:
if self.strict_mode:
raise
else:
log.warning('Failed coercing "%s": %s', key, e)
return value # default should already be a Unicode string
def _serialize(self, key, value):
"""Serialize a value from its real representation to GUANO Unicode representation"""
serialize = self._serialization_rules.get(key, unicode)
try:
return serialize(value)
except (ValueError, TypeError) as e:
if self.strict_mode:
raise
else:
log.warning('Failed serializing "%s": %s', key, e)
def |
pgrm/project-euler | 0001-0050/35-Circular_primes.py | Python | apache-2.0 | 831 | 0.00361 | """
The number, 197, is called a circular prime because all rotations of the digits:
197, 971, and 719, are themselves prime.
There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
How many circular primes are there below one million?
"""
from collections import deque
import primes
import permutations
allPrimes = primes.create_primes(1000*1000)

# Membership tests against a list are O(n) per lookup; a set makes each
# rotation check O(1), which matters since every rotation of every prime
# below one million is tested.
primeSet = set(allPrimes)

countOfCircularPrimes = 0
for prime in allPrimes:
    isCircularPrime = True
    # Rotate the digit sequence through every position (the final rotation
    # restores the original number) and require each rotation to be prime.
    digits = deque(permutations.split_into_digits(prime))
    for i in range(0, len(digits)):
        digits.rotate(1)
        newPrime = permutations.combine_numbers(digits)
        if newPrime not in primeSet:
            isCircularPrime = False
            break
    if isCircularPrime:
        countOfCircularPrimes += 1

# print(...) with a single argument works identically on Python 2 and 3
print(str(countOfCircularPrimes))
thomas-hori/Repuge-NG | prelevula/SimpleDungeonLevel.py | Python | mpl-2.0 | 6,565 | 0.037928 | from ludicrous.GeneratedLevel import GeneratedLevel
import random,math
__copying__="""
Written by Thomas Hori
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/."""
class SimpleDungeonLevel(GeneratedLevel):
"""A level generator for a simple rogue-like dungeon.
Simple in that it will always contain rooms connected in a ring.
"""
list_of_symbols={"g":"wall_corner_nw","G":"wall_corner_ne","j":"wall_corner_sw","J":"wall_corner_se","d":"vwall","o":"hwall",":":"vfeature","*":"vfeature"," ":"space",".":"floor1",",":"floor2","/":"floor3","$":"floor4","#":"floor5","P":"hfeature","l":"hfeature","v":"wall_TeeJnc_dn","^":"wall_TeeJnc_up",">":"wall_TeeJnc_rt","<":"wall_TeeJnc_lt","+":"wall_cross",}
def _add_blocks_x(self,block1r,block2r,block3r):
block1,gamut1=block1r
b1w=len(block1.split("\n",1)[0])
block2,gamut2=block2r
b2w=len(block2.split("\n",1)[0])
block3,gamut3=block3r
block4=[]
for line1,line2,line3 in zip(block1.split("\n"),block2.split("\n"),block3.split("\n")):
block4.append(line1+line2+line3)
return "\n".join(block4),gamut1+tuple((i[0]+b1w,i[1]) for i in gamut2)+tuple((i[0]+b1w+b2w,i[1]) for i in gamut3)
def _add_blocks_y(self,block1,block2):
b1h=block1[0].count("\n")+1
return block1[0]+"\n"+block2[0],block1[1]+tuple((i[0],i[1]+b1h) for i in block2[1])
def _make_block(self,joint,joinr,joinb,joinl,room=True,
#brid: bug report id (should be a nonzero hashable or None for no bug report entry)
#*_override: debugging overrides
brid=None,xoffset_override=0,yoffset_override=0):
max_width=16
max_height=9
xmiddle=8
ymiddle=5
max_iwidth=max_width-4
max_iheight=max_height-4
min_iwidth=2
min_iheight=2
if room:
iwidth=random.randint(min_iwidth,max_iwidth)
iheight=random.randint(min_iheight,max_iheight)
else:
iwidth=iheight=-2
width=iwidth+2
height=iheight+2
max_xoffset=max_width-width-1
max_yoffset=max_height-height-1
xoffset=xoffset_override or random.randint(1,max_xoffset)
yoffset=yoffset_override or random.randint(1,max_yoffset)
if room:
topdoor=random.randrange(iwidth)+xoffset+1
botdoor=random.randrange(iwidth)+xoffset+1
leftdoor=random.randrange(iheight)+yoffset+1
rightdoor=random.randrange(iheight)+yoffset+1
#
block=""
for j in range(max_height):
if j<yoffset:
block+=" "*max_width+"\n"
elif j==yoffset:
line=list(" "*xoffset+"g"+"o"*iwidth+"G"+" "*(max_width-(iwidth+xoffset+2)))
if joint:
line[topdoor]="."
block+="".join(line)+"\n"
elif j<(yoffset+1+iheight):
line=list(" "*xoffset+"d"+"."*iwidth+"d"+" "*(max_width-(iwidth+xoffset+2)))
if joinl and j==leftdoor:
line[xoffset]="."
if joinr and j==rightdoor:
line[xoffset+iwidth+1]="."
block+="".join(line)+"\n"
elif j==(yoffset+1+iheight):
line=list(" "*xoffset+"j"+"o"*iwidth+"J"+" "*(max_width-(iwidth+xoffset+2)))
if joinb:
line[botdoor]="."
block+="".join(line)+"\n"
elif j>(yoffset+iheight):
block+=" "*max_width+"\n"
else:
topdoor=botdoor=xoffset
leftdoor=rightdoor=yoffset
block=(" "*max_width+"\n")*max_height
block=block.strip("\n")
#
block=[list(i) for i in block.split("\n")]
if not room:
#Fix for bug ID 1428490448.
#Corridors join to just outside room, not to doorways themselves
# so as to avoid possible width-two doorways.
#Accordingly, junctions in corridor blocks can end up with the
# two passages lacking any orthogonal connection. With the
# present lack of diagonal traversal, and the intent of never
# making that critical, this is bad.
#Fix this by ensuring that there is always a corridor cell on the
# site of the pseudo-room.
block[yoffset][xoffset]="."
#
doors=[(joint,[xmiddle,0],[topdoor,yoffset-1]), (joinb,[xmiddle,max_height-1],[botdoor,yoffset+1+iheight+1]), (joinl,[0,ymiddle],[xoffset-1,leftdoor]), (joinr,[max_width-1,ymiddle],[xoffset+1+iwidth+1,rightdoor])]
for conditional,to,from_ in doors:
if conditional:
while 1:
block[int(from_[1])][int(from_[0])]="."
if from_[0]==to[0] and from_[1]==to[1]:
break
vector_angle=math.atan2(abs(from_[1]-to[1]),abs(fr | om_[0]-to[0]))
vector_angle*=180
vector_angle/=math.pi
vector_angle=int(vector_angle+0.5)
if vector_angle>45:
from_[1]-=(from_[1]-to[1])/abs(from_[1]-to[1])
else:
| from_[0]-=(from_[0]-to[0])/abs(from_[0]-to[0])
block="\n".join(["".join(i) for i in block])
#
self._ag=(xoffset+1,yoffset+1)
gamut=[]
gamutx=range(xoffset+1,xoffset+1+iwidth)
gamuty=range(yoffset+1,yoffset+1+iheight)
for x in gamutx:
for y in gamuty:
gamut.append((x,y))
return block,tuple(gamut)
def genmap(self):
roomyes=[True,True,True,True,True,True]
for i in range(random.randrange(3)+1):
roomyes[random.randrange(6)]=False
self.coded_grid,self.gamut=self._add_blocks_y(self._add_blocks_x(self._make_block(0,1,1,0,roomyes[0],1),self._make_block(0,1,0,1,roomyes[1],2),self._make_block(0,0,1,1,roomyes[2],3)),self._add_blocks_x(self._make_block(1,1,0,0,roomyes[3],4),self._make_block(0,1,0,1,roomyes[4],5),self._make_block(1,0,0,1,roomyes[5],6)))
self.gamut=list(self.gamut)
self.readmap()
|
keras-team/keras | keras/models/__init__.py | Python | apache-2.0 | 1,704 | 0.000587 | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras models API."""
# pylint: disable=g-bad-import-order
from keras.engine.functional import Functional
from keras.engine.sequential import Sequential
from keras.engine.training import Model
from keras.models.cloning import clone_and_build_model
from keras.models.cloning import clone_model
from keras.models.cloning import share_weights
from keras.models.sharpness_aware_minimization import SharpnessAwareMinimization
from keras.saving.model_config import model_from_config
from keras.saving.model_config import model_from_json
from keras.saving.model_config import model_from_yaml
from keras.saving.save import load_model
from keras.saving.save import save_model

# Private symbols that are used in tests.
# TODO(b/221261361): Clean up private symbols usage and remove these imports.
from keras.models.cloning import _clone_functional_model
from keras.models.cloning import _clone_layer
from keras.models.cloning import _clone_layers_and_model_config
from keras.models.cloning import _clone_sequential_model
wangming28/syzygy | syzygy/scripts/test_bot/log_helper.py | Python | apache-2.0 | 4,510 | 0.008426 | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A helper module to set up logging for this library."""
__author__ = 'rogerm@google.com (Roger McFarlane)'
import logging
import optparse
import os
import sys
# Root name of every logger handed out by this module
ROOT_NAME = 'syzygy'

# Named logging levels used throughout this module. These must be numeric
# level constants: QUIET previously pointed at the `logging.warning` function
# instead of the `logging.WARNING` level, which would break level comparisons.
VERBOSE = logging.DEBUG
NORMAL = logging.INFO
QUIET = logging.WARNING
def GetLogger(name=None):
  """Retrieves a logger object for this library, or a named submodule.

  Args:
    name: The (optional) name of the submodule. If not given, the logger
        for the top level module/library is returned.

  Returns:
    A logger object.
  """
  if not name:
    return logging.getLogger(ROOT_NAME)
  # Strip the directory and extension so a __file__ path yields a clean name
  stem = os.path.splitext(os.path.basename(name))[0]
  return logging.getLogger('%s.%s' % (ROOT_NAME, stem))
def AddCommandLineOptions(option_parser):
  """Adds the group of logging related options to the given option_parser.

  Args:
    option_parser: the option parser object to update. This is expected
        to be an instance of optparse.OptionParser.
  """
  group = optparse.OptionGroup(option_parser, 'Logging Options')
  group.add_option('--log-verbose', action='store_true', default=False,
                   help='Log verbosely')
  group.add_option('--log-file', help='Write the log to this file')
  group.add_option('--log-append', action='store_true', default=False,
                   help='Append to log file')
  group.add_option('--log-no-console', action='store_true', default=False,
                   help='No logging to stderr')
  option_parser.add_option_group(group)
def AddStreamHandler(stream, level, logger=None):
"""Maps log entries occuring at level or higher to an output stream.
Args:
stream: The stream to write to. Any object capable of acting like
a File object can be used.
level: The logging level at which to bind the handler.
logger: The logger to which to attach the stream. By default it
if will be the top level logger for this package.
"""
if not logger:
logger = GetLogger()
handler = logging.StreamHandler(stream)
handler.setLevel(level)
handler.setFormatter(logging.Formatter(
'[%(asctime)s] %(levelname)-5s %(message)s','%Y-%m-%dT%H:%M:%S'))
logger.addHandler(handler)
def AddFileHandler(file_path, level, mode, logger=None):
  """Maps log entries occuring at level or higher to a named file.

  Args:
    file_path: The file to write to.
    level: The logging level at which to bind the handler.
    mode: The mode with which to open the log file ('a': append, 'w': write).
    logger: The logger to which to attach the stream. By default it
        if will be the top level logger for this package.
  """
  target = logger if logger else GetLogger()
  formatter = logging.Formatter(
      '[%(asctime)s:%(filename)s:%(lineno)s] %(levelname)-5s %(message)s',
      '%Y-%m-%dT%H:%M:%S')
  handler = logging.FileHandler(file_path, mode)
  handler.setLevel(level)
  handler.setFormatter(formatter)
  target.addHandler(handler)
def InitLogger(options):
  """Initialize the logging subsystem.

  Args:
    options: A command-line options object, typically generated by an
        optparse.OptionParser instance with a call to its parse_args()
        method.
  """
  level = VERBOSE if options.log_verbose else NORMAL
  logger = GetLogger()
  logger.setLevel(level)
  if not options.log_no_console:
    AddStreamHandler(sys.stderr, level, logger=logger)
  if options.log_file:
    AddFileHandler(options.log_file, level,
                   'a' if options.log_append else 'w', logger=logger)
class NullHandler(logging.Handler):
  """Logging handler that silently discards every record.

  Attaching this no-op handler keeps the package's logging calls working
  even if logging isn't specifically enabled/configured for the package.

  See: http://docs.python.org/library/logging.html#configuring-logging-for-a-library
  """

  # pylint: disable=C0103
  # -> our pylintrc expects Emit()
  def emit(self, record):
    # Intentionally do nothing: every record is dropped.
    return
# Attach the no-op handler to the package root logger by default so library
# logging calls never trigger "No handlers could be found" warnings.
GetLogger().addHandler(NullHandler())
|
jchuang1977/openwrt | package/lean/mt/drivers/wifi-l1profile/make-l1profile.py | Python | gpl-2.0 | 3,812 | 0.033578 | #!/usr/bin/env python
# Hua Shao <hua.shao@mediatek.com>
import sys
import re
import random
l1conf = []
def parseconfig(conf):
global l1conf
l1conf.extend([[],[],[]])
with open(conf, "r") as fp:
for line in fp:
if line.startswith("CONFIG_first_card_"):
kv = line.split("=")
l1conf[0].append((kv[0][len("CONFIG_first_card_"):], kv[1].strip("\"\'\r\n\t")))
elif line.startswith("CONFIG_second_card_"):
kv = line.split("=")
l1conf[1].append((kv[0][len("CONFIG_second_card_"):], kv[1].strip("\"\'\r\n\t")))
elif line.startswith("CONFIG_third_card_"):
kv = line.split("=")
l1conf[2].append((kv[0][len("CONFIG_third_card_"):], kv[1].strip("\"\'\r\n\t")))
else:
continue
def validate():
global l1conf
d1 = dict(l1conf[0]) if len(l1conf) > 0 else {}
d2 = dict(l1conf[1]) if len(l1conf) > 1 else {}
d3 = dict(l1conf[2]) if len(l1conf) > 2 else {}
# make sure no empty value
for dx in [d1,d2,d3]:
for k,v in dx.items():
assert v
# make sure these configs are unique
for name in ["main_ifname", "ext_ifname", "wds_ifname",
"apcli_name", "mesh_ifname", "nvram_zone",
"profile_path"]:
if1 = d1.get(name, random.random())
if2 = d2.get(nam | e, random. | random())
if3 = d3.get(name, random.random())
assert len(set([if1, if2, if3])) == 3, "duplication found in "+name
# main_ifname should end with "0"
if1 = [ x.strip() for x in d1.get("main_ifname","").split(";") if x]
if2 = [ x.strip() for x in d2.get("main_ifname","").split(";") if x]
if3 = [ x.strip() for x in d3.get("main_ifname","").split(";") if x]
for each in if1:
assert not each or each.endswith("0"), "1st main_ifname {0} does not ends with 0".format(each)
for each in if2:
assert not each or each.endswith("0"), "2nd main_ifname {0} does not ends with 0".format(each)
for each in if3:
assert not each or each.endswith("0"), "3rd main_ifname {0} does not ends with 0".format(each)
# main_ifname should start with ext_ifname
if1ext = [ x.strip() for x in d1.get("ext_ifname","").split(";") if x]
if2ext = [ x.strip() for x in d2.get("ext_ifname","").split(";") if x]
if3ext = [ x.strip() for x in d3.get("ext_ifname","").split(";") if x]
assert len(if1) == len(if1ext), "number of 1st main_ifname does not equal to 1st ext_ifname"
assert len(if2) == len(if2ext), "number of 2nd main_ifname does not equal to 2nd ext_ifname"
assert len(if3) == len(if3ext), "number of 3rd main_ifname does not equal to 3rd ext_ifname"
for i,each in enumerate(if1ext):
assert if1[i].startswith(each), "1st main_ifname {0} does not start with its ext_ifname {1}".format(if1[i], each)
for i,each in enumerate(if2ext):
assert if2[i].startswith(each), "2nd main_ifname {0} does not start with its ext_ifname {1}".format(if2[i], each)
for i,each in enumerate(if3ext):
assert if3[i].startswith(each), "3rd main_ifname {0} does not start with its ext_ifname {1}".format(if3[i], each)
# assertion failure or returning any python non-true value will terminate the build procedure.
# if you need more validations, feel free to add you code below.
return True
def genfile(dest):
global l1conf
with open(dest, "w") as fp:
print("Default")
fp.write("Default\n")
for i,lst in enumerate(l1conf):
for (k,v) in lst:
if k == "name":
line = "INDEX{0}={1}".format(i, v)
else:
line = "INDEX{0}_{1}={2}".format(i, k, v)
print(line)
fp.write(line+"\n")
fp.write("\n") # extra line-end to make drivers happy
if __name__ == "__main__":
if len(sys.argv) < 3:
print("arguments missing!")
print("usage: make-l1profile.py <.config> <l1profile.dat>!")
sys.exit(-1)
conf = sys.argv[1]
dest = sys.argv[2]
parseconfig(conf)
if validate():
genfile(dest)
else:
print("something is wrong with your l1profile configurations!")
sys.exit(-1)
|
digitalfox/MAGE | ref/templatetags/filter.py | Python | apache-2.0 | 2,371 | 0.005905 | # coding: utf-8
from django import template
from django.db import models
from ref.models import ExtendedParameterDict
from django.utils.safestring import mark_safe
register = template.Library()
@r | egister.filter
def verbose_name(value):
return value._meta.verbose_name
@register.filter
def ksh_protect_and_quote(value):
if isinstance(value, bool) and value:
return "1"
elif isinstance(value, bool) and not value:
return "0"
if isinstance(value, int):
return value
if isinstance(value, ExtendedParameterDict):
return '"%s"' % value
if type(value).__name__ == 'ManyRelatedManager':
return '"' + ', | '.join([a.name for a in value.all()]) + '"'
if value is None:
return '""'
if isinstance(value, models.Model):
return '"%s"' % value.pk
res = ("%s" % value).replace('"', '\\"').replace('$', '\$')
return ('"%s"' % res)
@register.filter
def apply_field_template(component_instance, computed_field):
return computed_field.resolve(component_instance)
''' Returns (field_descr, field_value_or_None). Single pass method. Both lists must be sorted beforehand. '''
@register.filter
def project_ci_fields(descriptions, instances):
i = instances.__iter__()
n = next(i, None)
for field_descr in descriptions:
if n is not None and n.field_id == field_descr.pk:
yield (field_descr, n.value)
n = next(i, None)
else:
yield (field_descr, None)
@register.filter()
def urlify(value):
if (isinstance(value, str) or isinstance(value, unicode)) and value.startswith('http'):
if len(value.split('|')) == 2:
link = value.split('|')[1]
value = value.split('|')[0]
else:
link = 'cliquez ici'
return mark_safe(("<a href='%s'>%s</a>" % (value, link)))
elif (isinstance(value, str) or isinstance(value, unicode)) and value == 'True':
return mark_safe("<span class='glyphicon glyphicon-ok' aria-hidden='true'></span>")
elif (isinstance(value, str) or isinstance(value, unicode)) and value == 'False':
return mark_safe("<span class='glyphicon glyphicon-remove' aria-hidden='true'></span>")
elif value is None:
return ''
else:
return value
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
|
hungpham2511/toppra | tests/tests/constraint/test_joint_acceleration.py | Python | mit | 3,595 | 0.002782 | import pytest
import numpy as np
import numpy.testing as npt
import toppra as ta
import toppra.constraint as constraint
| from toppra.constants import JACC_MAXU
@pytest.fixture(params=[1, 2, 6, '6d'], name='accel_constraint_setup')
def create_acceleration_pc | _fixtures(request):
""" Parameterized Acceleration path constraint.
Return:
-------
data: A tuple. Contains path, ss, alim.
pc: A `PathConstraint`.
"""
dof = request.param
if dof == 1: # Scalar
pi = ta.PolynomialPath([1, 2, 3]) # 1 + 2s + 3s^2
ss = np.linspace(0, 1, 3)
alim = (np.r_[-1., 1]).reshape(1, 2) # Scalar case
accel_const = constraint.JointAccelerationConstraint(alim, constraint.DiscretizationType.Collocation)
data = (pi, ss, alim)
return data, accel_const
if dof == 2:
coeff = [[1., 2, 3], [-2., -3., 4., 5.]]
pi = ta.PolynomialPath(coeff)
ss = np.linspace(0, 0.75, 4)
alim = np.array([[-1., 2], [-2., 2]])
accel_const = constraint.JointAccelerationConstraint(alim, constraint.DiscretizationType.Collocation)
data = (pi, ss, alim)
return data, accel_const
if dof == 6:
np.random.seed(10)
N = 20
way_pts = np.random.randn(10, 6)
pi = ta.SplineInterpolator(np.linspace(0, 1, 10), way_pts)
ss = np.linspace(0, 1, N + 1)
vlim_ = np.random.rand(6)
alim = np.vstack((-vlim_, vlim_)).T
accel_const = constraint.JointAccelerationConstraint(alim, constraint.DiscretizationType.Collocation)
data = (pi, ss, alim)
return data, accel_const
if dof == '6d':
np.random.seed(10)
N = 20
way_pts = np.random.randn(10, 6)
pi = ta.SplineInterpolator(np.linspace(0, 1, 10), way_pts)
ss = np.linspace(0, 1, N + 1)
alim_s = np.random.rand(6)
alim = np.vstack((-alim_s, alim_s)).T
accel_const = constraint.JointAccelerationConstraint(alim_s, constraint.DiscretizationType.Collocation)
data = (pi, ss, alim)
return data, accel_const
def test_constraint_type(accel_constraint_setup):
""" Syntactic correct.
"""
data, pc = accel_constraint_setup
assert pc.get_constraint_type() == constraint.ConstraintType.CanonicalLinear
def test_constraint_params(accel_constraint_setup):
""" Test constraint satisfaction with cvxpy.
"""
(path, ss, alim), accel_const = accel_constraint_setup
# An user of the class
a, b, c, F, g, ubound, xbound = accel_const.compute_constraint_params(path, ss)
assert xbound is None
N = ss.shape[0] - 1
dof = path.dof
ps = path(ss, 1)
pss = path(ss, 2)
F_actual = np.vstack((np.eye(dof), - np.eye(dof)))
g_actual = np.hstack((alim[:, 1], - alim[:, 0]))
npt.assert_allclose(F, F_actual)
npt.assert_allclose(g, g_actual)
for i in range(0, N + 1):
npt.assert_allclose(a[i], ps[i])
npt.assert_allclose(b[i], pss[i])
npt.assert_allclose(c[i], np.zeros_like(ps[i]))
assert ubound is None
assert xbound is None
def test_wrong_dimension(accel_constraint_setup):
_, path_constraint = accel_constraint_setup
path_wrongdim = ta.SplineInterpolator(np.linspace(0, 1, 5), np.random.randn(5, 10))
with pytest.raises(ValueError) as e_info:
path_constraint.compute_constraint_params(path_wrongdim, np.r_[0, 0.5, 1], 1.0)
assert e_info.value.args[0] == "Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})".format(
path_constraint.get_dof(), 10
)
|
simplecrypto/powerpool | powerpool/main.py | Python | bsd-2-clause | 15,154 | 0.001254 | import yaml
import socket
import argparse
import datetime
import setproctitle
import gevent
import gevent.hub
import signal
import subprocess
import powerpool
import time
import logging
import sys
from gevent_helpers import BlockingDetector
from gevent import sleep
from gevent.monkey import patch_all
from gevent.server import DatagramServer
patch_all()
from .utils import import_helper, recursive_update
from .lib import MinuteStatManager, SecondStatManager, Component
from .jobmanagers import Jobmanager
from .reporters import Reporter
from .stratum_server import StratumServer
def main():
    """Command-line entry point: parse args, load the YAML config, start the pool."""
    parser = argparse.ArgumentParser(description='Run powerpool!')
    parser.add_argument('config', type=argparse.FileType('r'),
                        help='yaml configuration file to run with')
    parser.add_argument('-d', '--dump-config', action="store_true",
                        help='print the result of the YAML configuration file and exit')
    parser.add_argument('-s', '--server-number', type=int, default=0,
                        help='increase the configued server_number by this much')
    args = parser.parse_args()

    # override those defaults with a loaded yaml config
    # NOTE(review): yaml.load() without an explicit Loader can construct
    # arbitrary Python objects and is an error on PyYAML >= 6. Consider
    # yaml.safe_load() -- verify first that no deployed configs rely on
    # python-specific YAML tags.
    raw_config = yaml.load(args.config) or {}

    if args.dump_config:
        import pprint
        pprint.pprint(raw_config)
        exit(0)

    # Build the component graph from the config and hand control to PowerPool
    PowerPool.from_raw_config(raw_config, vars(args)).start()
class PowerPool(Component, DatagramServer):
""" This is a singelton class that manages starting/stopping of the server,
along with all statistical counters rotation schedules. It takes the | raw
config and distributes it to each module, as well as loading dynamic | modules.
It also handles logging facilities by being the central logging registry.
Each module can "register" a logger with the main object, which attaches
it to configured handlers.
"""
manager = None
gl_methods = ['_tick_stats']
defaults = dict(procname="powerpool",
term_timeout=10,
extranonce_serv_size=4,
extranonce_size=4,
default_component_log_level='INFO',
loggers=[{'type': 'StreamHandler', 'level': 'NOTSET'}],
events=dict(enabled=False, port=8125, host="127.0.0.1"),
datagram=dict(enabled=False, port=6855, host="127.0.0.1"),
server_number=0,
algorithms=dict(
x11={"module": "drk_hash.getPoWHash",
"hashes_per_share": 4294967296}, # 2^32
scrypt={"module": "ltc_scrypt.getPoWHash",
"hashes_per_share": 65536}, # 2^16
scryptn={"module": "vtc_scrypt.getPoWHash",
"hashes_per_share": 65536},
blake256={"module": "blake_hash.getPoWHash",
"hashes_per_share": 65536},
sha256={"module": "cryptokit.sha256d",
"hashes_per_share": 4294967296},
lyra2re={"module": "lyra2re_hash.getPoWHash",
"hashes_per_share": 33554432} # 2^25
))
    @classmethod
    def from_raw_config(self, raw_config, args):
        """Build the full component graph from a parsed YAML configuration.

        :param raw_config: dict parsed from the YAML config file; each
            top-level key names a component and its value must contain a
            ``type`` dotted path. An optional ``defaults`` key overrides
            class-level defaults before any component is instantiated.
        :param args: dict of parsed command-line arguments, forwarded to the
            single PowerPool manager component.
        :returns: the PowerPool instance, with ``components`` and
            ``component_types`` registries attached.
        """
        components = {}
        # Categories used to classify components; order matters for the
        # isinstance() checks in the loop below.
        types = [PowerPool, Reporter, Jobmanager, StratumServer]
        component_types = {cls.__name__: [] for cls in types}
        component_types['other'] = []

        # Apply global default overrides *before* instantiating anything.
        # NOTE(review): recursive_update mutates the class-level `defaults`
        # dicts in place, so overrides persist for the process lifetime.
        new_defaults = raw_config.pop('defaults', None)
        if new_defaults is not None:
            for typ, new_defaults in new_defaults.iteritems():
                # Lookup the class that we're changing the defaults for
                cls = import_helper(typ)
                recursive_update(cls.defaults, new_defaults)

        # For each component configured in the config
        for key, config in raw_config.iteritems():
            typ = import_helper(config['type'])
            # Pass the commandline arguments to the 'manager' component, or
            # highest level component that controls all others
            if issubclass(typ, PowerPool):
                config['args'] = args
            # Create a new instance of our Component given the config
            obj = typ(config)
            obj.key = key
            for typ in types:
                # Create a lookup system for use later, categorizing each
                # component by it's type (reporter, startum, jobmanager, etc)
                if isinstance(obj, typ):
                    component_types[typ.__name__].append(obj)
                    break
            else:
                component_types['other'].append(obj)
            components[key] = obj

        # Exactly one PowerPool manager must be configured; it owns the
        # component registries used throughout the server.
        pp = component_types['PowerPool'][0]
        assert len(component_types['PowerPool']) == 1
        pp.components = components
        pp.component_types = component_types
        return pp
def __init__(self, config):
self._configure(config)
self._log_handlers = []
# Parse command line args
self.config['server_number'] += self.config['args']['server_number']
self.config['procname'] += "_{}".format(self.config['server_number'])
# setup all our log handlers
for log_cfg in self.config['loggers']:
if log_cfg['type'] == "StreamHandler":
kwargs = dict(stream=sys.stdout)
else:
kwargs = dict()
handler = getattr(logging, log_cfg['type'])(**kwargs)
log_level = getattr(logging, log_cfg['level'].upper())
handler.setLevel(log_level)
fmt = log_cfg.get('format', '%(asctime)s [%(name)s] [%(levelname)s] %(message)s')
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
self._log_handlers.append((log_cfg.get('listen'), handler))
self.logger = self.register_logger(self.__class__.__name__)
setproctitle.setproctitle(self.config['procname'])
self.version = powerpool.__version__
self.version_info = powerpool.__version_info__
self.sha = getattr(powerpool, '__sha__', "unknown")
self.rev_date = getattr(powerpool, '__rev_date__', "unknown")
if self.sha == "unknown":
# try and fetch the git version information
try:
output = subprocess.check_output("git show -s --format='%ci %h'",
shell=True).strip().rsplit(" ", 1)
self.sha = output[1]
self.rev_date = output[0]
# celery won't work with this, so set some default
except Exception as e:
self.logger.info("Unable to fetch git hash info: {}".format(e))
self.algos = {}
self.server_start = datetime.datetime.utcnow()
self.logger.info("=" * 80)
self.logger.info("PowerPool stratum server ({}) starting up..."
.format(self.config['procname']))
if __debug__:
self.logger.warn(
"Python not running in optimized mode. For better performance "
"set enviroment variable PYTHONOPTIMIZE=2")
# Only try to detect blocking if running in debug mode.
# NOTE: BlockingDetector can cause (rare) PowerPool crashes
gevent.spawn(BlockingDetector(raise_exc=False))
# Detect and load all the hash functions we can find
for name, algo_data in self.config['algorithms'].iteritems():
self.algos[name] = algo_data.copy()
self.algos[name]['name'] = name
mod = algo_data['module']
try:
self.algos[name]['module'] = import_helper(mod)
except ImportError:
self.algos[name]['module'] = None
else:
self.logger.info("Enabling {} hashing algorithm from module {}"
.format(name, mod))
self.event_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.events_enabled = self.config['events']['enabled']
if |
AdminTL/gestion_personnage_TL | src/web/py_class/config.py | Python | gpl-3.0 | 2,676 | 0.001868 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from sys import stderr
class Config(object):
    """Loads and serves the application's JSON configuration.

    Values are addressed with dotted keys ("a.b.c") that are resolved
    through nested dictionaries.
    """

    def __init__(self, parser):
        """Read the JSON config file located at ``parser.db_config_path``.

        Parse errors and a missing file are reported on stderr/stdout and
        leave the configuration empty instead of raising.
        """
        self._db_config_path = parser.db_config_path
        self._keys = {}
        try:
            with open(self._db_config_path, encoding='utf-8') as keys_file:
                self._keys = json.load(keys_file)
        except json.decoder.JSONDecodeError as exception:
            print("ERROR: %s isn't formatted properly. \nDetails: %s" % (self._db_config_path, exception),
                  file=stderr)
        except FileNotFoundError:
            print("ERROR: file %s not exist. Please create it or read installation file." % self._db_config_path)

    def get(self, key, default=None):
        """
        Get the value of the key.

        Use dot to generate a key to navigate in the dictionary.
        Example: test1.test2.test3

        :param key: dotted path of the value to fetch.
        :param default: value returned when the path cannot be resolved.
        :return: the stored value, or ``default`` if not found.
        """
        node = self._keys
        for a_key in key.split("."):
            if type(node) is not dict:
                # An intermediate segment resolved to a non-dict value.
                print("Error to get key %s in file %s" % (key, self._db_config_path), file=stderr)
                return default
            node = node.get(a_key)
        return default if node is None else node

    def update(self, key, value, save=False):
        """
        Update set of key with value.

        Intermediate dictionaries are created as needed.

        :param key: string of value separate by dot
        :param value: The value to insert.
        :param save: Option to save on file
        :return:
        """
        lst_key = key.split(".")
        node = self._keys
        # Walk (and create) the intermediate dictionaries.
        for a_key in lst_key[:-1]:
            child = node.get(a_key)
            if child is None:
                child = {}
                node[a_key] = child
            elif type(child) is not dict:
                # Refuse to overwrite a scalar value with a sub-tree.
                print("Error to get key %s in file %s" % (key, self._db_config_path), file=stderr)
                return
            node = child
        # Fix: the previous implementation never assigned the value for
        # top-level (single-segment) keys because the final-segment branch
        # was an unreachable elif of the first-iteration branch.
        node[lst_key[-1]] = value
        # Save on file
        if save:
            with open(self._db_config_path, mode="w", encoding='utf-8') as txt_file:
                json.dump(self._keys, txt_file, indent=2)
|
strava/thrift | tutorial/py.twisted/PythonServer.py | Python | apache-2.0 | 2,652 | 0.00264 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import glob
import sys
sys.path.append('gen-py.twisted')
sys.path.insert(0, glob.glob('../../lib/py/build/lib*')[0])
from tutorial import Calculator
from tutorial.ttypes import InvalidOperation, Operation
from shared.ttypes import SharedStruct
from zope.interface import implements
from twisted.internet import reactor
from thrift.transport import TTwisted
from thrift.protocol import TBinaryProtocol
class CalculatorHandler:
    """Twisted implementation of the tutorial Calculator service."""
    implements(Calculator.Iface)

    def __init__(self):
        # Maps logid -> SharedStruct recording a past calculation result.
        self.log = {}

    def ping(self):
        print('ping()')

    def add(self, n1, n2):
        print('add(%d,%d)' % (n1, n2))
        return n1 + n2

    def calculate(self, logid, work):
        """Perform ``work`` and record the result under ``logid``.

        :raises InvalidOperation: on divide-by-zero or an unknown op code.
        """
        print('calculate(%d, %r)' % (logid, work))

        if work.op == Operation.ADD:
            val = work.num1 + work.num2
        elif work.op == Operation.SUBTRACT:
            val = work.num1 - work.num2
        elif work.op == Operation.MULTIPLY:
            val = work.num1 * work.num2
        elif work.op == Operation.DIVIDE:
            if work.num2 == 0:
                raise InvalidOperation(work.op, 'Cannot divide by 0')
            val = work.num1 / work.num2
        else:
            raise InvalidOperation(work.op, 'Invalid operation')

        # Remember the result so getStruct() can return it later.
        log = SharedStruct()
        log.key = logid
        log.value = '%d' % (val)
        self.log[logid] = log
        return val

    def getStruct(self, key):
        print('getStruct(%d)' % (key))
        return self.log[key]

    def zip(self):
        print('zip()')
if __name__ == '__main__':
    # Wire the handler into a Thrift processor and serve it with Twisted
    # on localhost:9090 using the binary protocol.
    handler = CalculatorHandler()
    processor = Calculator.Processor(handler)
    pfactory = TBinaryProtocol.TBinaryProtocolFactory()
    server = reactor.listenTCP(
        9090,
        TTwisted.ThriftServerFactory(processor, pfactory),
        interface="127.0.0.1")
    # Blocks until the reactor is stopped.
    reactor.run()
|
CloverHealth/airflow | airflow/contrib/hooks/bigquery_hook.py | Python | apache-2.0 | 63,861 | 0.00119 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import time
from builtins import range
from past.builtins import basestring
from airflow import AirflowException
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.utils.log.logging_mixin import LoggingMixin
from apiclient.discovery import HttpError, build
from googleapiclient import errors
from pandas_gbq.gbq import \
_check_google_client_version as gbq_check_google_client_version
from pandas_gbq import read_gbq
from pandas_gbq.gbq import \
_test_google_api_imports as gbq_test_google_api_imports
from pandas_gbq.gbq import GbqConnector
class BigQueryHook(GoogleCloudBaseHook, DbApiHook, LoggingMixin):
    """
    Interact with BigQuery. This hook uses the Google Cloud Platform
    connection.
    """
    conn_name_attr = 'bigquery_conn_id'

    def __init__(self,
                 bigquery_conn_id='bigquery_default',
                 delegate_to=None,
                 use_legacy_sql=True):
        super(BigQueryHook, self).__init__(
            gcp_conn_id=bigquery_conn_id, delegate_to=delegate_to)
        self.use_legacy_sql = use_legacy_sql

    def get_conn(self):
        """Return a BigQuery PEP 249 connection object."""
        return BigQueryConnection(
            service=self.get_service(),
            project_id=self._get_field('project'),
            use_legacy_sql=self.use_legacy_sql)

    def get_service(self):
        """Return an authorized BigQuery service object."""
        return build(
            'bigquery', 'v2', http=self._authorize(), cache_discovery=False)

    def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
        """
        Insertion is currently unsupported. Theoretically, you could use
        BigQuery's streaming API to insert rows into a table, but this hasn't
        been implemented.
        """
        raise NotImplementedError()

    def get_pandas_df(self, sql, parameters=None, dialect=None):
        """
        Return a Pandas DataFrame with the results of a BigQuery query.

        The DbApiHook method must be overridden because Pandas doesn't
        support PEP 249 connections, except for SQLite. See:
        https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
        https://github.com/pydata/pandas/issues/6900

        :param sql: The BigQuery SQL to execute.
        :type sql: string
        :param parameters: The parameters to render the SQL query with (not
            used, leave to override superclass method)
        :type parameters: mapping or iterable
        :param dialect: Dialect of BigQuery SQL ('legacy' or 'standard');
            falls back on ``self.use_legacy_sql`` when not specified.
        :type dialect: string in {'legacy', 'standard'}
        """
        if dialect is None:
            dialect = 'legacy' if self.use_legacy_sql else 'standard'
        return read_gbq(
            sql,
            project_id=self._get_field('project'),
            dialect=dialect,
            verbose=False)

    def table_exists(self, project_id, dataset_id, table_id):
        """
        Check for the existence of a table in Google BigQuery.

        :param project_id: The Google cloud project in which to look for the
            table. The connection supplied to the hook must provide access to
            the specified project.
        :type project_id: string
        :param dataset_id: The name of the dataset in which to look for the
            table.
        :type dataset_id: string
        :param table_id: The name of the table to check the existence of.
        :type table_id: string
        """
        try:
            self.get_service().tables().get(
                projectId=project_id, datasetId=dataset_id,
                tableId=table_id).execute()
        except errors.HttpError as e:
            # A 404 simply means "no such table"; anything else is an error.
            if e.resp['status'] == '404':
                return False
            raise
        return True
class BigQueryPandasConnector(GbqConnector):
    """
    This connector behaves identically to GbqConnector (from Pandas), except
    that it allows the service to be injected, and disables a call to
    self.get_credentials(). This allows Airflow to use BigQuery with Pandas
    without forcing a three legged OAuth connection. Instead, we can inject
    service account credentials into the binding.
    """

    def __init__(self,
                 project_id,
                 service,
                 reauth=False,
                 verbose=False,
                 dialect='legacy'):
        super(BigQueryPandasConnector, self).__init__(project_id)
        # Fail fast if the installed google-api-client / pandas-gbq
        # combination is unsupported.
        gbq_check_google_client_version()
        gbq_test_google_api_imports()
        self.project_id = project_id
        self.reauth = reauth
        # Injected, pre-authorized service object (replaces the OAuth flow).
        self.service = service
        self.verbose = verbose
        self.dialect = dialect
class BigQueryConnection(object):
    """
    BigQuery has no notion of a persistent connection, so this class is a
    small stateless factory for cursors, which do all of the real work.
    """

    def __init__(self, *args, **kwargs):
        # Stashed and forwarded verbatim to every cursor we create.
        self._args = args
        self._kwargs = kwargs

    def close(self):
        """BigQueryConnection does not have anything to close."""

    def commit(self):
        """BigQueryConnection does not support transactions."""

    def cursor(self):
        """ Return a new :py:class:`Cursor` object using the connection. """
        return BigQueryCursor(*self._args, **self._kwargs)

    def rollback(self):
        raise NotImplementedError(
            "BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
    def __init__(self, service, project_id, use_legacy_sql=True):
        # Authorized googleapiclient service object used for all requests.
        self.service = service
        self.project_id = project_id
        self.use_legacy_sql = use_legacy_sql
        # Set while a job is in flight so it can be polled or cancelled.
        self.running_job_id = None
def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning={}
):
"""
Creates a new, empty table in the dataset.
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER" |
mittya/duoclub | duoclub/accounts/admin.py | Python | mit | 528 | 0 | # -*- coding: utf-8 -*-
from django.contrib im | port admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from .models import Profile
class ProfileInline(admin.StackedInline):
    # Edit the user's Profile rows inline on the Django User admin page.
    model = Profile
    verbose_name = '用户'
    verbose_name_plural = '用户扩展'
class UserAdmin(BaseUserAdmin):
    # Extend the stock UserAdmin with the inline Profile editor and a
    # compact changelist.
    inlines = (ProfileInline, )
    list_display = ('username', 'email', 'is_active', 'is_staff')
# Swap Django's default User admin for the Profile-aware version above.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| |
shaggytwodope/rtv | rtv/objects.py | Python | mit | 24,783 | 0.000161 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import os
import time
import signal
import inspect
import weakref
import logging
import threading
import curses
import curses.ascii
from contextlib import contextmanager
import six
import praw
import requests
from . import exceptions
_logger = logging.getLogger(__name__)
@contextmanager
def curses_session():
    """
    Setup terminal and initialize curses. Most of this copied from
    curses.wrapper in order to convert the wrapper into a context manager.

    Yields the curses standard screen (``stdscr``); terminal state is
    restored in the ``finally`` block even if the body raises.
    """
    try:
        # Curses must wait for some time after the Escape key is pressed to
        # check if it is the beginning of an escape sequence indicating a
        # special key. The default wait time is 1 second, which means that
        # http://stackoverflow.com/questions/27372068
        os.environ['ESCDELAY'] = '25'

        # Initialize curses
        stdscr = curses.initscr()

        # Turn off echoing of keys, and enter cbreak mode, where no buffering
        # is performed on keyboard input
        curses.noecho()
        curses.cbreak()

        # In keypad mode, escape sequences for special keys (like the cursor
        # keys) will be interpreted and a special value like curses.KEY_LEFT
        # will be returned
        stdscr.keypad(1)

        # Start color, too. Harmless if the terminal doesn't have color; user
        # can test with has_color() later on. The try/catch works around a
        # minor bit of over-conscientiousness in the curses module -- the error
        # return from C start_color() is ignorable.
        try:
            curses.start_color()
        except:
            pass

        # Hide the blinking cursor
        curses.curs_set(0)

        # Assign the terminal's default (background) color to code -1
        curses.use_default_colors()
        yield stdscr
    finally:
        # Only restore the terminal if initscr() actually ran; teardown
        # order mirrors curses.wrapper (keypad off, echo, nocbreak, endwin).
        if 'stdscr' in locals():
            stdscr.keypad(0)
            curses.echo()
            curses.nocbreak()
            curses.endwin()
class LoadScreen(object):
"""
Display a loading dialog while waiting for a blocking action to complete.
This class spins off a separate thread to animate the loading screen in the
background. The loading thread also takes control of stdscr.getch(). If
an exception occurs in the main thread while the loader is active, the
exception will be caught, attached to the loader object, and displayed as
a notification. The attached exception can be used to trigger context
sensitive actions. For example, if the connection hangs while opening a
submission, the user may press ctrl-c to raise a KeyboardInterrupt. In this
case we would *not* want to refresh the current page.
>>> with self.terminal.loader(...) as loader:
>>> # Perform a blocking request to load content
>>> blocking_request(...)
>>>
>>> if loader.exception is None:
>>> # Only run this if the load was successful
>>> self.refresh_content()
When a loader is nested inside of itself, the outermost loader takes
priority and all of the nested loaders become no-ops. Call arguments given
to nested loaders will be ignored, and errors will propagate to the parent.
>>> with self.terminal.loader(...) as loader:
>>>
>>> # Additional loaders will be ignored
>>> with self.terminal.loader(...):
>>> raise KeyboardInterrupt()
>>>
>>> # This code will not be executed because the inner loader doesn't
>>> # catch the exception
>>> assert False
>>>
>>> # The exception is finally caught by the outer loader
>>> assert isinstance(terminal.loader.exception, KeyboardInterrupt)
"""
EXCEPTION_MESSAGES = [
(exceptions.RTVError, '{0}'),
(praw.errors.OAuthException, 'OAuth Error'),
(praw.errors.OAuthScopeRequired, 'Not logged in'),
(praw.errors.LoginRequired, 'Not logged in'),
(praw.errors.InvalidCaptcha, 'Error, captcha required'),
(praw.errors.InvalidSubreddit, '{0.args[0]}'),
(praw.errors.PRAWException, '{0.__class__.__name__}'),
(requests.exceptions.RequestException, '{0.__class__.__name__}'),
]
def __init__(self, terminal):
self.exception = None
self.catch_exception = None
self.depth = 0
self._terminal = weakref.proxy(terminal)
self._args = None
self._animator = None
self._is_running = None
    def __call__(
            self,
            message='Downloading',
            trail='...',
            delay=0.5,
            interval=0.4,
            catch_exception=True):
        """
        Configure the next animation and return self for use as a context
        manager. Calls made while already nested inside an active loader
        are no-ops: the outermost configuration stays in effect.

        Params:
            delay (float): Length of time that the loader will wait before
                printing on the screen. Used to prevent flicker on pages that
                load very fast.
            interval (float): Length of time between each animation frame.
            message (str): Message to display
            trail (str): Trail of characters that will be animated by the
                loading screen.
            catch_exception (bool): If an exception occurs while the loader is
                active, this flag determines whether it is caught or allowed to
                bubble up.
        """
        if self.depth > 0:
            return self

        self.exception = None
        self.catch_exception = catch_exception
        self._args = (delay, interval, message, trail)
        return self
def __enter__(self):
self.depth += 1
if self.depth > 1:
return self
self._animator = threading.Thread(target=self.animate, args=self._args)
self._animator.daemon = True
self._is_running = True
self._animator.start()
return self
    def __exit__(self, exc_type, e, exc_tb):
        self.depth -= 1
        if self.depth > 0:
            # Still inside a nested loader; the outermost one cleans up.
            return

        # Stop the animation and wait for the thread to finish drawing.
        self._is_running = False
        self._animator.join()

        if e is None or not self.catch_exception:
            # Skip exception handling
            return

        # Stash the exception so callers can inspect it after the block.
        self.exception = e
        exc_name = type(e).__name__
        _logger.info('Loader caught: %s - %s', exc_name, e)

        if isinstance(e, KeyboardInterrupt):
            # Don't need to print anything for this one, just swallow it
            return True

        for e_type, message in self.EXCEPTION_MESSAGES:
            # Some exceptions we want to swallow and display a notification
            if isinstance(e, e_type):
                self._terminal.show_notification(message.format(e))
                # Returning True suppresses the exception.
                return True
def animate(self, delay, interval, message, trail):
# The animation starts with a configurable delay before drawing on the
# screen. This is to prevent very short loading sections from
# flickering on the screen before immediately disappearing.
with self._terminal.no_delay():
start = time.time()
while (time.time() - start) < delay:
# Pressing escape triggers a keyboard interrupt
if self._terminal.getch() == self._terminal.ESCAPE:
os.kill(os.getpid(), signal.SIGINT)
self._is_running = False
if not self._is_running:
return
time.sleep(0.01)
# Build the notification | window
message_len = len(message) + len(trail)
n_rows, n_cols = self._terminal.stdscr.getmaxyx()
s_row = (n_rows - 3) // 2
s_col = (n_cols - message_le | n - 1) // 2
window = curses.newwin(3, message_len + 2, s_row, s_col)
# Animate the loading prompt until the stopping condition is triggered
# when the context manager exits.
with self._terminal.no_delay():
while True:
for i in range(len(trail) + 1):
if not self._is_running:
window.erase()
del window
self._terminal.stdscr.touchwin()
self._terminal.stdscr.refresh()
return
|
piohhmy/euler | p003.py | Python | mit | 448 | 0.037946 | import math
def find_factors(num):
    """Return the set of all positive divisors of *num*."""
    factors = set()
    # Each divisor <= sqrt(num) pairs with its cofactor num // divisor.
    for candidate in range(1, int(math.sqrt(num) + 1)):
        if num % candidate == 0:
            factors.add(candidate)
            factors.add(num // candidate)
    return factors
def is_prime(num):
    """Return True if *num* is prime, using trial division up to sqrt(num).

    Numbers below 2 (0, 1 and negatives) are not prime; the previous
    version incorrectly reported them as prime because the loop body
    never executed for them.
    """
    if num < 2:
        return False
    for x in range(2, int(math.sqrt(num)) + 1):
        if num % x == 0:
            return False
    return True
def find_prime_factors(num):
    """Return the factors of *num* that are prime, as a list."""
    return [factor for factor in find_factors(num) if is_prime(factor)]
def solve_p3():
    # Project Euler problem 3: largest prime factor of 600851475143.
    return max(find_prime_factors(600851475143))
|
if __name__ == '__main__':
    # Print the answer to Project Euler problem 3.
    print(solve_p3())
|
ardi69/pyload-0.4.10 | tests/test_json.py | Python | gpl-3.0 | 1,322 | 0.006051 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import logging
import urllib
import urllib2
# Base URL of the local pyLoad JSON API; %s is replaced by the method name.
url = "http://localhost:8001/api/%s"
class TestJson(object):
def call(self, name, post=None):
if not post: post = {}
post['session'] = self.key
u = urllib2.urlopen | (url % name, data=urlencode(post))
return json.loads(u.read())
def setUp(self):
u = urllib2.urlopen(url % "login", data=urlencode({"username": "TestUser", "password": "pwhere"}))
self.key = json.loads(u.read())
assert self.key is not False
def test_wronglogin(self):
u = urllib2.urlopen(url % "login", data=urlencode | ({"username": "crap", "password": "wrongpw"}))
assert json.loads(u.read()) is False
def test_access(self):
try:
urllib2.urlopen(url % "getServerVersion")
except urllib2.HTTPError, e:
assert e.code == 403
else:
assert False
def test_status(self):
ret = self.call("statusServer")
logging.log(1, str(ret))
assert "pause" in ret
assert "queue" in ret
def test_unknown_method(self):
try:
self.call("notExisting")
except urllib2.HTTPError, e:
assert e.code == 404
else:
assert False
|
tcalmant/ipopo | tests/shell/test_report.py | Python | apache-2.0 | 7,965 | 0 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the shell report module
:author: Thomas Calmant
"""
# Standard library
import json
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# Tests
try:
import unittest2 as unittest
except ImportError:
import unittest
# Pelix
from pelix.framework import FrameworkFactory, create_framework
# Shell constants
from pelix.shell import SERVICE_SHELL, SERVICE_SHELL_REPORT
import pelix.shell.beans as beans
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
class ShellReportTest(unittest.TestCase):
    """
    Tests the shell report commands
    """
    def setUp(self):
        """
        Starts a framework and installs the shell and report bundles
        """
        # Start the framework
        self.framework = create_framework(
            ['pelix.shell.core', 'pelix.shell.report'])
        self.framework.start()
        self.context = self.framework.get_bundle_context()

        # Shell service
        svc_ref = self.context.get_service_reference(SERVICE_SHELL)
        self.shell = self.context.get_service(svc_ref)

        # Report service
        svc_ref = self.context.get_service_reference(SERVICE_SHELL_REPORT)
        self.report = self.context.get_service(svc_ref)

        # Output file, removed if left over by a previous run
        self.out_file = 'report_output.js'
        if os.path.exists(self.out_file):
            os.remove(self.out_file)

    def tearDown(self):
        """
        Cleans up the framework
        """
        self.framework.stop()
        FrameworkFactory.delete_framework()

        # Remove the output file
        if os.path.exists(self.out_file):
            os.remove(self.out_file)

        self.report = None
        self.shell = None
        self.context = None
        self.framework = None

    def _make_session(self):
        """
        Prepares a ShellSession object for _run_command
        """
        # String output
        str_output = StringIO()

        # Session bean
        session = beans.ShellSession(beans.IOHandler(None, str_output))
        return session, str_output

    def _run_command(self, command, *args, **kwargs):
        """
        Runs the given command and returns the output stream. A keyword
        argument 'session' can be given to use a custom ShellSession.
        """
        # Format command
        if args:
            command = command.format(*args)

        try:
            # Get the given session (and reset its output buffer)
            session = kwargs['session']
            str_output = kwargs['output']
            str_output.truncate(0)
            str_output.seek(0)
        except KeyError:
            # No session given
            str_output = StringIO()
            session = beans.ShellSession(beans.IOHandler(None, str_output))

        # Run command
        self.shell.execute(command, session)
        return str_output.getvalue()

    def test_levels(self):
        """
        Checks if the primordial levels are correctly listed
        """
        # Check if all levels are accessible
        all_levels = set(self.report.get_levels())
        self.assertTrue(
            all_levels.issuperset(('minimal', 'standard', 'debug', 'full')))

        # Assert that all levels are shown in the 'levels' command
        output = self._run_command('report.levels')
        for level in all_levels:
            self.assertIn(level, output)

    def test_bad_levels(self):
        """
        Check what happens when using bad levels
        """
        for bad_level in (12, "bad level", "'<some unknown level>'"):
            for command in ('make', 'show'):
                output = self._run_command('report.{0} {1}'
                                           .format(command, bad_level))
                self.assertIn("Unknown report level", output)

    def test_report_info(self):
        """
        Check if the report description is stored for every level
        """
        for level in self.report.get_levels():
            # Run the 'show' command, to get the output
            output = self._run_command('report.show {0}'.format(level))
            parsed = json.loads(output)

            # Check mandatory keys
            self.assertEqual(parsed['report']['report.levels'], [level])
            for report_key in ('time.stamp', 'time.local', 'time.utc'):
                self.assertIn(report_key, parsed['report'])

    def test_full_report(self):
        """
        Simplest way to test all methods: run the full report
        """
        # Run the 'show' command, to get the output
        output = self._run_command('report.show full')
        parsed = json.loads(output)

        # Check if iPOPO entries are valid (iPOPO not yet installed)
        ipopo_keys = [key for key in parsed if key.startswith('ipopo')]
        for key in ipopo_keys:
            self.assertIsNone(parsed[key])

        # Rerun with iPOPO
        self.context.install_bundle('pelix.ipopo.core').start()

        # Run the 'show' command, to get the output
        output = self._run_command('report.show full')
        parsed = json.loads(output)
        for key in ipopo_keys:
            self.assertIsNotNone(parsed[key])

    def test_write(self):
        """
        Tests the 'write' command
        """
        self.assertFalse(os.path.exists(self.out_file))

        # Run the command without any report
        output = self._run_command('report.write {0}'.format(self.out_file))
        self.assertIn("No report", output)
        self.assertFalse(os.path.exists(self.out_file))

        # Make a full report
        report_content = self._run_command('report.show full')

        # Write it down
        self._run_command('report.write {0}'.format(self.out_file))
        self.assertTrue(os.path.exists(self.out_file))

        # Check content
        with open(self.out_file, 'r') as report_file:
            self.assertEqual(report_content, report_file.read())

    def test_clear(self):
        """
        Tests the clear command
        """
        self.assertFalse(os.path.exists(self.out_file))

        # Make a report and write it down
        self._run_command('report.make minimal')
        self._run_command('report.write {0}'.format(self.out_file))

        # Assert it's there
        self.assertTrue(os.path.exists(self.out_file))
        os.remove(self.out_file)

        # Clear report
        self._run_command("report.clear")

        # Run the command without any report
        output = self._run_command('report.write {0}'.format(self.out_file))
        self.assertIn("No report", output)
        self.assertFalse(os.path.exists(self.out_file))

    def test_show_last(self):
        """
        Checks that the "show" command prints the previous report when given no
        argument
        """
        # Clear report (just in case)
        self._run_command("report.clear")

        # First try: no report to show
        output = self._run_command('report.show')
        self.assertIn("No report", output)

        # Make a report
        output = self._run_command('report.show full')

        # Next call without argument must have the exact same output
        output_2 = self._run_command('report.show')
        self.assertEqual(output, output_2)

    def test_default_report(self):
        """
        Checks that the 'full' report is generated by default
        """
        # Clear report (just in case)
        self._run_command("report.clear")

        # Make the default report
        self._run_command('report.make')

        # Show it
        output = self._run_command('report.show')

        # Check its level
        parsed = json.loads(output)
        self.assertEqual(parsed['report']['report.levels'], ['full'])
|
def function1():
    """Return the constant 1."""
    return 1
def function2():
    """Return the constant 2."""
    # Fix: a stray trailing '|' token after the return value made the
    # original line a syntax error.
    return 2
theboocock/fine_mapping_pipeline | tests/test_run_finemap.py | Python | mit | 1,755 | 0.009687 | # Copyright (c) 2015 Boocock James <james.boocock@otago.ac.nz>
# Author: Boocock James <james.boocock@otago.ac.nz>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), | to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit pers | ons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from fine_mapping_pipeline.finemap.finemap import run_finemap, remove_surrogates, _write_matrix, _write_zscores
import logging
logging.basicConfig(level=logging.INFO)
def test_remove_surrogate(tmpdir):
    # Exercises remove_surrogates() on the checked-in fixture data and
    # writes the results next to it (the tmpdir fixture is unused here).
    input_matrix = 'tests/finemap_data/test.matrix'
    input_zscore = 'tests/finemap_data/test.Z'
    surrogates_out = 'tests/finemap_data/out.surro'
    (matrix, zscores) = remove_surrogates(input_matrix,input_zscore, surrogates_out)
    _write_matrix(matrix, "tests/finemap_data/out.matrix")
    _write_zscores(zscores, "tests/finemap_data/out.zscores")
    # NOTE(review): deliberately failing assertion — looks like a debugging
    # leftover used to force pytest to run the dump above; confirm intent
    # before removing.
    assert 1 == 2
|
kreopt/aioweb | wyrm/modules/help/orator/types.py | Python | mit | 2,556 | 0.00431 | import os, sys
import shutil
brief = "output field types"
def execute(argv, argv0, engine):
print("""
table.big_increments('id') Incrementing ID using a “big integer” equivalent
table.big_integer('votes') BIGINT equivalent to the table
table.binary('data') BLOB equivalent to the table
table.boolean('confirmed') BOOLEAN equivalent to the table
table.char('name', 4) CHAR equivalent with a length
table.date('created_on') DATE equivalent to the table
table.datetime('created_at') DATETIME equivalent to the table
table.dec | imal('amount', 5, 2) DECIMAL equivalent to the table with a precision and scale
table.double('column', 15, 8) DOUBLE equivalent to the table with precision, 15 digits in total and 8 after the decimal point
table.enum('choices', ['fo | o', 'bar']) ENUM equivalent to the table
table.float('amount') FLOAT equivalent to the table
table.increments('id') Incrementing ID to the table (primary key)
table.integer('votes') INTEGER equivalent to the table
table.json('options') JSON equivalent to the table
table.long_text('description') LONGTEXT equivalent to the table
table.medium_integer('votes') MEDIUMINT equivalent to the table
table.medium_text('description') MEDIUMTEXT equivalent to the table
table.morphs('taggable') Adds INTEGER taggable_id and STRING taggable_type
table.nullable_timestamps() Same as timestamps(), except allows NULLs
table.small_integer('votes') SMALLINT equivalent to the table
table.soft_deletes() Adds deleted_at column for soft deletes
table.string('email') VARCHAR equivalent column
table.string('votes', 100) VARCHAR equivalent with a length
table.text('description') TEXT equivalent to the table
table.time('sunrise') TIME equivalent to the table
table.timestamp('added_at') TIMESTAMP equivalent to the table
table.timestamps() Adds created_at and updated_at columns
.nullable() Designate that the column allows NULL values
.default(value) Declare a default value for a column
.unsigned() Set INTEGER to UNSIGNED
""")
|
marcosbontempo/inatelos | poky-daisy/scripts/lib/mic/3rdparty/pykickstart/commands/mouse.py | Python | mit | 2,610 | 0.002299 | #
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2005, 2006, 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import *
from pykickstart.errors import *
from pykickstart.options im | port *
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
class RHEL3_Mouse(KickstartCommand):
removedKeywords = KickstartCommand.removedKe | ywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.device = kwargs.get("device", "")
self.emulthree = kwargs.get("emulthree", False)
self.mouse = kwargs.get("mouse", "")
def __str__(self):
retval = KickstartCommand.__str__(self)
opts = ""
if self.device:
opts += "--device=%s " % self.device
if self.emulthree:
opts += "--emulthree "
if self.mouse:
retval += "# System mouse\nmouse %s%s\n" % (opts, self.mouse)
return retval
def _getParser(self):
op = KSOptionParser()
op.add_option("--device", dest="device", default="")
op.add_option("--emulthree", dest="emulthree", default=False, action="store_true")
return op
def parse(self, args):
(opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
self._setToSelf(self.op, opts)
if len(extra) != 1:
raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Kickstart command %s requires one argument") % "mouse")
self.mouse = extra[0]
return self
class FC3_Mouse(DeprecatedCommand):
def __init__(self):
DeprecatedCommand.__init__(self)
|
Tomin1/sudoku-solver | sudoku/solver.py | Python | gpl-3.0 | 7,465 | 0.007905 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Solver object
# Copyright (C) 2011-2012, Tomi Leppänen (aka Tomin)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Solver object"""
class Solver():
"""Solver object
This object solves sudokus. It can be used with tools to create sudoku
solver application or combined with Runner object to make life easier.
See Runner object in sudoku.runner for more information about it.
"""
def __init__(self, sudoku):
"""Constructor
sudoku parameter is an list created by parse_sudoku in sudoko.tools.
"""
self.sudoku = sudoku
self.done = False # if Solver should be stopped
self.good = False # if sudoku is completed
self.split_mode = False # if split mode is on or not :)
self.split_numbers = 10
self.split_request = False # if split is requested or not
def __str__(self):
s = None
for row in self.sudoku:
for col in row:
if s == None:
s = str(col)
else:
s = s+","+str(col)
return s
def get_grid(self,row,col):
"""checks which grid is being procecced"""
return [int((row+3)/3),int((col+3)/3)]
def isgood_final(self):
"""Checks if sudoku is completed correctly
Use only for completed sudokus
"""
for a in range(0,9):
suma = 0
sumb = 0
for b in range(0,9):
suma = suma+self.sudoku[a][b]
sumb = sumb+self.sudoku[b][a]
if suma != 45 or sumb != 45:
return False
for r in range(1,4):
for c in range(1,4):
sumc = 0
for r_n in range(r*3-3,r*3):
for c_n in range(c*3-3,c*3):
sumc = sumc+self.sud | oku[r_n][c_n]
if sumc != 45:
return False
return True
def isgood(self):
"""Checks if a partial (or complete) sudoku is correct
This is slower than isgood_final
"""
for a in range(0,9):
numbersa = []
numbersb = []
for b in range(0,9):
if self.sudoku[a][b] != "":
try:
| numbersa.index(self.sudoku[a][b])
except ValueError:
numbersa.append(self.sudoku[a][b])
else:
return False
if self.sudoku[b][a] != "":
try:
numbersb.index(self.sudoku[b][a])
except ValueError:
numbersb.append(self.sudoku[b][a])
else:
return False
for r in range(1,4):
for c in range(1,4):
numbersc = []
for r_n in range(r*3-3,r*3):
for c_n in range(c*3-3,c*3):
if self.sudoku[r_n][c_n] != "":
try:
numbersc.index(self.sudoku[r_n][c_n])
except ValueError:
numbersc.append(self.sudoku[r_n][c_n])
else:
return False
return True
def isready(self):
"""Checks if all cells are filled"""
for row in self.sudoku:
try:
row.index("")
except ValueError:
pass
else:
return False
return True
def get_numbers(self,row,col):
"""Returns numbers that can be filled into a cell"""
numbers = []
numbers.append(self.sudoku[row][col])
numbers = list(range(1,10))
for i in range(0,9):
try:
numbers.remove(self.sudoku[row][i])
except ValueError:
pass
try:
numbers.remove(self.sudoku[i][col])
except ValueError:
pass
x,y = self.get_grid(row,col)
for r in range(int(x*3-3),int(x*3)):
for c in range(int(y*3-3),int(y*3)):
if self.sudoku[r][c] != "":
try:
numbers.remove(self.sudoku[r][c])
except ValueError:
pass
return numbers
def run(self):
"""Solves the sudoku
This solves some of the sudoku and should be called until the sudoku
is ready. The status can be monitored using Sudoku objects good, done
and split_request attributes. Also returns False if something is wrong
otherwise returns True.
"""
changed = False
if self.isready():
if self.isgood_final():
self.done = True
self.good = True
return True
else:
self.done = True
self.good = False
return False
for row in range(0,9):
for col in range(0,9):
if self.sudoku[row][col] == "":
numbers = self.get_numbers(row,col)
if len(numbers) == 1:
changed = True # changed!
self.sudoku[row][col] = numbers[0]
elif len(numbers) == 0: # got into deadlock
self.done = True
self.good = False
return False
elif self.split_mode != False and len(numbers) >= 2:
changed = True # changed!
if self.split_mode == 1 and \
len(numbers) < self.split_numbers:
self.split_numbers = len(numbers)
elif self.split_mode == 2 and \
len(numbers) == self.split_numbers:
# prepare for splitting
self.numbers = numbers
self.row = row
self.col = col
self.done = True
self.good = False
self.split_request = True
return True
if self.split_mode == 1:
self.split_mode = 2
if changed == False: # if nothing has been solved in this round
if self.isgood():
self.split_mode = 1 # turns split mode on
else: # give up if sudoku is faulty
self.done = True
self.good = False
return False
return True
|
OCA/purchase-workflow | purchase_order_line_deep_sort/models/res_company.py | Python | agpl-3.0 | 948 | 0 | # Copyright 2018 Tecnativa - Vicent Cubells <vicent.cubells@tecnativa.com>
| # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3
from odoo import fields, models
SORTING_CRITERIA = [
("name", "By name"),
("product_id.name", "By product name"),
("product_id.default_code", "By product reference"),
("date_planned", "By date planned"),
("price_unit", "By price"),
("product_qty", "By quantity"),
]
SORTING | _DIRECTION = [
("asc", "Ascending"),
("desc", "Descending"),
]
class ResCompany(models.Model):
_inherit = "res.company"
default_po_line_order = fields.Selection(
selection=SORTING_CRITERIA,
string="Line Order",
help="Select a sorting criteria for purchase order lines.",
)
default_po_line_direction = fields.Selection(
selection=SORTING_DIRECTION,
string="Sort Direction",
help="Select a sorting direction for purchase order lines.",
)
|
palaniyappanBala/rekall | rekall-core/rekall/plugins/linux/address_resolver.py | Python | gpl-2.0 | 2,567 | 0.00039 | # Rekall Memory Forensics
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""The module implements the linux specific address resolution plugin."""
__author__ = "Michael Cohen <scudette@gmail.com>"
import re
from rekall import obj
from rekall.plugins.common import address_resolver
from rekall.plugins.linux import common
class LKMModule(address_resolver.Module):
"""A Linux kernel module."""
def __init__(self, module, **kwargs):
self.module = module
super(LKMModule, self).__init__(
name=unicode(module.name),
start=module.base,
end=module.end,
**kwargs)
class KernelModule(address_resolver.Module):
"""A Fake object which makes the kernel look like a module.
This removes the need to treat kernel addresses any different from module
addresses, and allows them to be resolved by this module.
"""
def __init__(self, session=None, **kwargs):
super(KernelModule, self).__init__(
# Check if the address appears in the kernel binary.
start=obj.Pointer.integer_to_address(
session.profile.get_constant("_text")),
end=session.profile.get_constant("_etext"),
name="linux",
profile=session.profile,
session=session, **kwargs)
class LinuxAddressResolver(address_resolver.AddressResolverMixin,
| common.LinuxPlugin):
"""A Linux specific address resolver plugin."""
def _EnsureInitialized(self):
if self._initialized:
return
# Insert a psuedo module for the kernel
self.AddModule(KernelModule(session=self.session))
# Add LKMs.
| for kmod in self.session.plugins.lsmod().get_module_list():
self.AddModule(LKMModule(kmod, session=self.session))
self._initialized = True
|
therve/twotp | twotp/test/util.py | Python | mit | 320 | 0.003125 | # Copyright (c) 2007-2009 Thomas Herve <therve@free.fr>.
# See LICENSE for details.
"""
Test utilities.
"""
from twisted.trial.unitte | st import TestCase as TrialTestCase
class TestCase(TrialTestCase):
"""
Specific TestCase class to add some specific functionalities or backpor | t
recent additions.
"""
|
kyleaoman/simfiles | simfiles/_simfiles.py | Python | gpl-3.0 | 10,409 | 0 | import warnings
from importlib.util import spec_from_file_location, module_from_spec
from os.path import expanduser, dirname, join
from ._hdf5_io import hdf5_get
# SimFiles is a dict with added features, notably __getattr__ and __setattr__,
# and automatic loading of data from simulation files as defined using a config
# file.
def dealias(func):
def dealias_wrapper(self, key, *args, **kwargs):
if not key.startswith('_') and hasattr(self, '_aliases'):
key = self._aliases.get(key, key)
return func(self, key, *args, **kwargs)
return dealias_wrapper
class SimFiles(dict):
"""
Provides a generic interface to simulation hdf5 files.
SimFiles is a dict with added features, notably __getattr__ and
__setattr__, and automatic loading of data from simulation files based on a
configuration file.
Parameters
----------
snap_id : index
An identifier for a specific simulation snapshot. The exact format is
defined in the configuration for the simulation in question.
configfile : str
Path to the configuration file to use (default: None).
ncpu : int
Number of processors on which to run (default: 2).
share_mode : bool
Setting 'True' disables the __delitem__ method (default: False) and
suppresses warnings for repeated loading of the same keys.
single_file : int
Specify to load from only a specific hdf5 file 'piece' of the snapshot.
Assumes 'pieces' end in '.X.hdf5' where X is an integer.
Returns
-------
out : SimFiles
A SimFiles object configured using the file provided.
Examples
--------
The following example sets up a SimFiles instance, loads a few keys, and
accesses the loaded data, for APOSTLE simulation data on the cavi system::
from simfiles import SimFiles
from simfiles.configs.APOSTLE_cavi import __file__ as configfile
import namedtuple
snap_id = namedtuple('snap_id', ['res', 'phys', 'vol', 'snap'])
mysnap = snap_id(res=3, phys='hydro', vol=1, snap=127)
SF = SimFiles(mysnap, configfile=configfile)
SF.load(keys=('m_s', 'xyz_s', 'vxyz_s'))
# print mass and coordinates of one particle
# both dict-like and attribute-like access are supported
# this config file supports units via astropy.units
print(SF.m_s[0], SF['xyz_s'][0])
"""
def __init__(self, snap_id, configfile=None, ncpu=2, share_mode=False,
single_file=None):
self.snap_id = snap_id
self.configfile = expanduser(configfile)
self.ncpu = ncpu
self.share_mode = share_mode
self.single_file = single_file
self._read_config()
return
def _read_config(self):
try:
spec = spec_from_file_location('config', self.configfile)
config = module_from_spec(spec)
spec.loader.exec_module(config)
except FileNotFoundError:
raise FileNotFoundError("SimFiles: configfile '{:s}' not found."
.format(self.configfile))
try:
snapshots = config.snapshots
except AttributeError:
raise ValueError("SimFiles: configfile missing 'snapshots' "
"definition.")
try:
self._snapshot = snapshots[self.snap_id]
except KeyError:
raise ValueError("SimFiles: unknown snapshot (not defined in "
"configfile).")
try:
self._extractors = config.extractors
except AttributeError:
raise ValueError("Simfiles: configfile missing 'extractors' "
"definition.")
self._aliases = dict()
self._dealiases = dict()
try:
aliaspath = dirname(config.__file__)
aliasfile = join(aliaspath, config.aliasfile)
except AttributeError:
pass
else:
with open(aliasfile) as f:
lines = f.readlines()
for line in lines:
v, k = line.strip().split()
if k.startswith('_'):
raise ValueError("Aliases may not start with '_'.")
self._aliases[k] = v
self._dealiases[v] = k
if not set(self._aliases.values()).issubset(
self._extractors.keys()
):
unknown = set(self._aliases.values()) - \
set(self._extractors.keys())
warnings.warn( |
'Aliases exist for unknown keys:\n {:s}.'.format(
'\n '.join(unknown)
),
| RuntimeWarning
)
return
@dealias
def __setattr__(self, key, value):
return self.__setitem__(key, value)
@dealias
def __getattr__(self, key):
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("'SimFiles' object has no attribute '{:s}'."
.format(key))
__getitem__ = dealias(dict.__getitem__)
__setitem__ = dealias(dict.__setitem__)
@dealias
def __delitem__(self, key):
if not self.share_mode:
return super().__delitem__(key)
else:
return
@dealias
def __delattr__(self, key):
if key in self.keys():
if not self.share_mode:
return super().__delitem__(key)
else:
return super().__delattr__(key)
def load(self, keys=tuple(), filetype=None, intervals=None, verbose=True):
"""
Load data for a set of keys.
Parameters
----------
keys : iterable
List of keys to load (default: tuple()).
filetype : str
Advanced use only, override filetype defined in config file
(default: None).
intervals : iterable
List containing lists of 2-tuples, one for each key. Each 2-tuple
represents an interval of indices from the underlying data table
to load (default: None).
verbose : bool
Setting 'True' prints messages upon loading each key (default:
True).
"""
keys = [self._aliases.get(key, key) for key in keys]
loaded_keys = set()
try:
keys = tuple(keys)
except TypeError:
raise TypeError("SimFiles.load: keys must be iterable.")
if intervals is None:
intervals = (None, ) * len(keys)
for key, interval in zip(keys, intervals):
loaded_keys.update(self._load_key(
key,
filetype=filetype,
interval=interval,
verbose=verbose
))
return loaded_keys
def fields(self, keytype='all', aliases=True):
"""
Return a list of available keys, optionally for a specific keytype.
Parameters
----------
keytype : str
Specify which type of keys to include. This can be one of the
keytypes defined in the extractors, or 'all', or 'aliased'
(default: 'all').
aliases : bool
If True, the keys will be replaced by their aliases in the list
(default: True).
"""
if keytype == 'all':
retval = [k for k in self._extractors.keys()]
elif keytype == 'aliased':
retval = list(self._aliases.values())
else:
retval = [k for k, E in self._extractors.items()
if E.keytype == keytype]
if aliases:
retval = [self._dealiases.get(k, k) for k in retval]
return retval
def _dependencies(self, _dependencies_list, filetype=None, interval=None,
verbose=True):
loaded_keys = set()
for k in _dependencies_list:
if k not in self:
loaded_keys.update(self._load_key(
k,
filetype=filetype,
|
clebergnu/autotest | client/profilers/perf/perf.py | Python | gpl-2.0 | 2,846 | 0.001757 | """
perf is a tool included in the linux kernel tree that
supports functionality similar to oprofile and more.
@see: http://lwn.net/Articles/310260/
"""
import time, os, stat, subprocess, signal
i | mport logging
from autotest_lib.client.bin import profiler, os_dep, utils
|
class perf(profiler.profiler):
version = 1
def initialize(self, events=["cycles","instructions"], trace=False):
if type(events) == str:
self.events = [events]
else:
self.events = events
self.trace = trace
self.perf_bin = os_dep.command('perf')
perf_help = utils.run('%s report help' % self.perf_bin,
ignore_status=True).stderr
self.sort_keys = None
for line in perf_help.split('\n'):
a = "sort by key(s):"
if a in line:
line = line.replace(a, "")
self.sort_keys = [k.rstrip(",") for k in line.split() if
k.rstrip(",") != 'dso']
if not self.sort_keys:
self.sort_keys = ['comm', 'cpu']
def start(self, test):
self.logfile = os.path.join(test.profdir, "perf")
cmd = ("exec %s record -a -o %s" %
(self.perf_bin, self.logfile))
if "parent" in self.sort_keys:
cmd += " -g"
if self.trace:
cmd += " -R"
for event in self.events:
cmd += " -e %s" % event
self._process = subprocess.Popen(cmd, shell=True,
stderr=subprocess.STDOUT)
def stop(self, test):
os.kill(self._process.pid, signal.SIGINT)
self._process.wait()
def report(self, test):
for key in self.sort_keys:
reportfile = os.path.join(test.profdir, '%s.comm' % key)
cmd = ("%s report -i %s --sort %s,dso" % (self.perf_bin,
self.logfile,
key))
outfile = open(reportfile, 'w')
p = subprocess.Popen(cmd, shell=True, stdout=outfile,
stderr=subprocess.STDOUT)
p.wait()
if self.trace:
tracefile = os.path.join(test.profdir, 'trace')
cmd = ("%s script -i %s" % (self.perf_bin, self.logfile,))
outfile = open(tracefile, 'w')
p = subprocess.Popen(cmd, shell=True, stdout=outfile,
stderr=subprocess.STDOUT)
p.wait()
# The raw detailed perf output is HUGE. We cannot store it by default.
perf_log_size = os.stat(self.logfile)[stat.ST_SIZE]
logging.info('Removing %s after generating reports (saving %s bytes).',
self.logfile, perf_log_size)
os.unlink(self.logfile)
|
zyga/plainbox | plainbox/impl/__init__.py | Python | gpl-3.0 | 4,500 | 0 | # This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.impl
=============
* THIS MODULE DOES NOT HAVE STABLE PUBLIC API *
"""
from functools import wraps
from inspect import getabsfile
import os.path
import plainbox
def public(import_path, introduced=None, deprecated=None):
"""
Public API decorator generator.
This decorator serves multiple uses:
* It clearly documents all public APIs. This is visible to
both developers reading the source code directly and to people
reading code documentation (by adjusting __doc__)
* It provides a stable import location while allowing to move the
implementation around as the code evolves. This unbinds the name and
documentation of the symbol from the code.
* It documents when each function was introduced. This is also visible
in the generated documentation.
* It documents when ea | ch function will be decommissioned. This is
visible in the generated documentation and at runtime. Each ini | tial
call to a deprecated function will cause a PendingDeprecationWarnings
to be logged.
The actual implementation of the function must be in in a module specified
by import_path. It can be a module name or a module name and a function
name, when separated by a colon.
"""
# Create a forwarding decorator for the shim fuction The shim argument is
# the actual empty function from the public module that serves as
# documentation carrier.
def decorator(shim):
# Allow to override function name by specifying it in the import path
# after a colon. If missing it defaults to the name of the shim
try:
module_name, func_name = import_path.split(":", 1)
except ValueError:
module_name, func_name = import_path, shim.__name__
# Import the module with the implementation and extract the function
module = __import__(module_name, fromlist=[''])
try:
impl = getattr(module, func_name)
except AttributeError:
raise NotImplementedError(
"%s.%s does not exist" % (module_name, func_name))
@wraps(shim)
def call_impl(*args, **kwargs):
return impl(*args, **kwargs)
# Document the public nature of the function
call_impl.__doc__ += "\n".join([
"",
" This function is a part of the public API",
" The private implementation is in {}:{}".format(
import_path, shim.__name__)
])
if introduced is None:
call_impl.__doc__ += "\n".join([
"",
" This function was introduced in the initial version of"
" plainbox",
])
else:
call_impl.__doc__ += "\n".join([
"",
" This function was introduced in version: {}".format(
introduced)
])
# Document deprecation status, if any
if deprecated is not None:
call_impl.__doc__ += "\n".join([
" warn:",
" This function is deprecated",
" It will be removed in version: {}".format(deprecated),
])
# Add implementation docs, if any
if impl.__doc__ is not None:
call_impl.__doc__ += "\n".join([
" Additional documentation from the private"
" implementation:"])
call_impl.__doc__ += impl.__doc__
return call_impl
return decorator
def get_plainbox_dir():
"""
Return the root directory of the plainbox package.
"""
return os.path.dirname(getabsfile(plainbox))
|
wuga214/Django-Wuga | env/bin/player.py | Python | apache-2.0 | 2,120 | 0.000472 | #!/Users/wuga/Documents/website/wuga/env/bin/python2.7
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
# --------------------------------------------------------------------
# an image animation player
class UI(tkinter.Label):
def __init__(self, master, im):
| if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
duration = im.inf | o.get("duration", 100)
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
duration = im.info.get("duration", 100)
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
|
andymckay/zamboni | mkt/api/v2/urls.py | Python | bsd-3-clause | 2,541 | 0.000787 | from django.conf.urls import include, patterns, url
from rest_framework.routers import SimpleRouter
import mkt.feed.views as views
from mkt.api.base import SubRouterWithFormat
from mkt. | api.v1.urls import urlpatterns as v1_urls
from mkt.api.views import endpoint_removed
from mkt.search.views import RocketbarViewV2
feed = SimpleRouter()
feed.register(r'apps', views.FeedAppViewSet, base_name='feedapps')
feed.register(r'brands', views.FeedBrandViewSet, base_name='feedbrands')
feed.register(r'collections', views.FeedCollectionViewSet,
base_name='feedcollections')
feed.register(r'shelves', views.FeedShelfViewSet, base_name='feedshelves')
f | eed.register(r'items', views.FeedItemViewSet, base_name='feeditems')
subfeedapp = SubRouterWithFormat()
subfeedapp.register('image', views.FeedAppImageViewSet,
base_name='feed-app-image')
subfeedcollection = SubRouterWithFormat()
subfeedcollection.register('image', views.FeedCollectionImageViewSet,
base_name='feed-collection-image')
subfeedshelf = SubRouterWithFormat()
subfeedshelf.register('image', views.FeedShelfImageViewSet,
base_name='feed-shelf-image')
urlpatterns = patterns('',
url(r'^apps/search/rocketbar/', RocketbarViewV2.as_view(),
name='rocketbar-search-api'),
url(r'^rocketfuel/collections/.*', endpoint_removed),
url(r'^feed/builder/$', views.FeedBuilderView.as_view(),
name='feed.builder'),
url(r'^feed/elements/search/$', views.FeedElementSearchView.as_view(),
name='feed.element-search'),
url(r'^feed/get/', views.FeedView.as_view(), name='feed.get'),
url(r'^feed/', include(feed.urls)),
url(r'^feed/apps/', include(subfeedapp.urls)),
url(r'^feed/collections/', include(subfeedcollection.urls)),
url(r'^feed/shelves/', include(subfeedshelf.urls)),
url(r'^feed/shelves/(?P<pk>[^/.]+)/publish/$',
views.FeedShelfPublishView.as_view(),
name='feed-shelf-publish'),
url(r'^consumer/feed/(?P<item_type>[\w]+)/(?P<slug>[^/.]+)/$',
views.FeedElementGetView.as_view(), name='feed.feed_element_get'),
# Remove fireplace version once fireplace has been updated to use
# consumer/feed/ with ?app_serializer=fireplace.
url(r'^fireplace/feed/(?P<item_type>[\w]+)/(?P<slug>[^/.]+)/$',
views.FeedElementGetView.as_view(), name='feed.fire_feed_element_get'),
url(r'^transonic/feed/(?P<item_type>[\w]+)/$',
views.FeedElementListView.as_view(), name='feed.feed_element_list'),
) + v1_urls
|
emilydolson/forestcat | pyrobot/plugins/brains/BraitenbergVehicle2b.py | Python | agpl-3.0 | 789 | 0.020279 | """
Braitenberg Vehicle2b
The more light sensed on the left side the faster the right motor moves.
The more light sensed on the right side the faster the left motor moves.
This causes the robot to turn towards a light source.
"""
from pyrobot.brain import Brain, avg
class Vehicle(Brain):
def setup(self):
self.robot.ligh | t[0].units = "SCALED"
def step(self):
leftSpeed = max([s.value for s in self.robot.light[0]["right"]])
rightSpeed = max([s.value fo | r s in self.robot.light[0]["left"]])
print "leftSpeed, rightSpeed:", leftSpeed, rightSpeed
self.motors(leftSpeed, rightSpeed)
def INIT(engine):
if engine.robot.type not in ['K-Team', 'Pyrobot']:
raise "Robot should have light sensors!"
return Vehicle('Braitenberg2a', engine)
|
initios/hackvg-cityreport-backend | core/serializers.py | Python | gpl-2.0 | 798 | 0.002506 | from rest_framework import serializers, fields
from . import models
class TypeSerializer(serializers.ModelSerializer):
class Meta:
model = models.Type
class IssueSerializer(serializers.ModelSerializer):
class Meta:
model = models.Issue
address = fields.CharField(read_only=True)
postal_code = fields.CharField(read_onl | y=True)
city = fields.CharField(read_only=True)
state = fields.CharField(read_only=True)
county = fi | elds.CharField(read_only=True)
country = fields.CharField(read_only=True)
type = serializers.PrimaryKeyRelatedField(queryset=models.Type.objects.all())
type_nested = TypeSerializer(read_only=True, source='type')
class PopulateExternalSerializer(serializers.Serializer):
city = fields.CharField(write_only=True)
|
procamora/Wiki-Personal | pelican-plugins/liquid_tags/test_data/pelicanconf.py | Python | gpl-3.0 | 892 | 0.001121 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = 'The Tester'
SITENAME = 'Testing site'
SITEURL = 'http://example.com/test'
# to make the test suite portable
TIMEZONE = 'UTC'
PATH = 'content'
READERS = {'html': None}
# Generate only one feed
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Disable unn | ecessary pages
CATEGORY_SAVE_AS = ''
TAG_SAVE_AS = ''
AUTHOR_SAVE_AS = ''
ARCHIVES_SAVE_AS = ''
AUTHORS_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''
TAGS_SAVE_AS = ''
PLUGIN_PATHS = ['../../']
PLUGINS = ['liquid_tags.notebook', 'liquid_tags.generic']
NOTEBOOK_DIR = 'notebooks'
LIQUID_CONFIGS = (('PATH', '.', "The default path"), ('THEME', '', 'The theme in use'), ('SITENAME', 'Default Sitename', 'The name of the site'), ('AUTH | OR', '', 'Name of the blog author'))
|
mannion9/Intro-to-Python | ListComprehension.py | Python | mit | 430 | 0.013953 | ''' Exp | lains list comprehension which is much faster than looping through list '''
''' The general method is this ' mylist = [ SOME_EXPRESION_WITH_i for i in ITERABLE_LIST ] '
'''
# Loop (slow)
iterer = list(range(10))
double = list(range(10))
mylist = []
for i in iterer:
mylist.append(i+1)
print('Loop result',mylist)
# List Comprehension | (fast)
mylist = [ i+1 for i in iterer]
print('List comprehension result',mylist)
|
geodashio/geodash-server | geodashserver/data.py | Python | bsd-3-clause | 1,085 | 0.003687 | import errno
import psycopg2
from socket import error as socket_error
#from jenks import jenks
from django.conf import settings
from django.template.loader import get_template
from geodash.enumerations import MONTHS_SHORT3
from geodash.cache import provision_memcached_client
from geodash.data import data_local_country
class data_local_country_admin(data_local_country):
key = None
def _build_key(self, *args, **kwargs):
return "data/local/country/{iso_alpha3}/admin/{level}/geojson".format(**kwargs)
def _build_data(self, *args, **kwargs):
cursor = kwargs.get('cursor', None)
iso_alpha3 = kwargs.get('iso_alpha3', None)
level = kwargs.get('level', None)
results = None
if level == 2:
q = get_template("geodashserver/sql/_admin2_polygons.sql").render({
'toler | ance': '.01',
'iso_alpha3': iso_alpha3})
cursor.execute(q)
res = cursor.fetchone()
results = json.loads(res[0]) if (type | (res[0]) is not dict) else res[0]
return results
|
GregMilway/Exercism | python/circular-buffer/circular_buffer_test.py | Python | gpl-3.0 | 3,084 | 0 | import unittest
from circular_buffer import (
CircularBuffer,
BufferFullException,
BufferEmptyException
)
class CircularBufferTest(unittest.TestCase):
def test_read_empty_buffer(self):
buf = CircularBuffer(1)
with self.assertRaises(BufferEmptyException):
buf.read()
def test_write_and_read_back_one_item(self):
buf = CircularBuffer(1)
buf.write('1')
self.assertEqual('1', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
def test_write_and_read_back_multiple_items(self):
buf = CircularBuffer(2)
buf.write('1')
buf.write('2')
self.assertEqual('1', buf.read())
self.assertEqual('2', buf.read())
with self.as | sertRaises(BufferEmptyException):
buf.read()
def test_clearing_buffer(self):
buf = CircularBuffer(3)
for c in '123':
buf.write(c)
buf.clear()
with self.assertR | aises(BufferEmptyException):
buf.read()
buf.write('1')
buf.write('2')
self.assertEqual('1', buf.read())
buf.write('3')
self.assertEqual('2', buf.read())
def test_alternate_write_and_read(self):
buf = CircularBuffer(2)
buf.write('1')
self.assertEqual('1', buf.read())
buf.write('2')
self.assertEqual('2', buf.read())
def test_read_back_oldest_item(self):
buf = CircularBuffer(3)
buf.write('1')
buf.write('2')
buf.read()
buf.write('3')
buf.read()
self.assertEqual('3', buf.read())
def test_write_full_buffer(self):
buf = CircularBuffer(2)
buf.write('1')
buf.write('2')
with self.assertRaises(BufferFullException):
buf.write('A')
def test_overwrite_full_buffer(self):
buf = CircularBuffer(2)
buf.write('1')
buf.write('2')
buf.overwrite('A')
self.assertEqual('2', buf.read())
self.assertEqual('A', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
def test_overwrite_non_full_buffer(self):
buf = CircularBuffer(2)
buf.overwrite('1')
buf.overwrite('2')
self.assertEqual('1', buf.read())
self.assertEqual('2', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
def test_alternate_read_and_overwrite(self):
buf = CircularBuffer(5)
for c in '123':
buf.write(c)
buf.read()
buf.read()
buf.write('4')
buf.read()
for c in '5678':
buf.write(c)
buf.overwrite('A')
buf.overwrite('B')
self.assertEqual('6', buf.read())
self.assertEqual('7', buf.read())
self.assertEqual('8', buf.read())
self.assertEqual('A', buf.read())
self.assertEqual('B', buf.read())
with self.assertRaises(BufferEmptyException):
buf.read()
if __name__ == '__main__':
unittest.main()
|
joneskoo/sikteeri | services/admin.py | Python | mit | 1,213 | 0 | from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.utils.translation import ugettext_lazy as _
from services.models import Service, ServiceType, Alias
# See
# <http://docs.djangoproject.com/en/dev/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter>
# for documentation
class StartsWithListFilter(SimpleListFilter):
title = _('Starts with')
parameter_name = 'starts_with'
def lookups(self, request, model_ | admin):
def first_two(s):
s = unicode(s)
if len(s) < 2:
return s
else:
return s[:2]
prefixes = [first_two(alias.name)
for alias in model_admin.model.objects.only('name')]
prefixes = sorted(set(prefixes))
| return [(prefix, prefix) for prefix in prefixes]
def queryset(self, request, queryset):
if self.value():
return queryset.filter(name__istartswith=self.value())
else:
return queryset
class AliasAdmin(admin.ModelAdmin):
list_filter = (StartsWithListFilter,)
admin.site.register(Service)
admin.site.register(ServiceType)
admin.site.register(Alias, AliasAdmin)
|
UTSA-ICS/keystone-SID | keystone/common/dependency.py | Python | apache-2.0 | 10,620 | 0.000188 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module provides support for dependency injection.
Providers are registered via the 'provider' decorator, and dependencies on them
are registered with 'requires' or 'optional'. Providers are available to their
consumers via an attribute. See the documentation for the individual functions
for more detail.
See also:
https://en.wikipedia.org/wiki/Dependency_injection
"""
import six
from keystone import notifications
from keystone.openstack.common.gettextutils import _ # flake8: noqa
REGISTRY = {}
_future_dependencies = {}
_future_optionals = {}
_factories = {}
class UnresolvableDependencyException(Exception):
"""An UnresolvableDependencyException is raised when a required dependency
is not resolvable; see 'resolve_future_dependencies'.
"""
def __init__(self, name):
msg = 'Unregistered dependency: %s' % name
super(UnresolvableDependencyException, self).__init__(msg)
def provider(name):
"""'provider' is a class decorator used to register providers.
When 'provider' is used to decorate a class, members of that class will
register themselves as providers for the named dependency. As an example,
In the code fragment::
@dependency.provider('foo_api')
class Foo:
def __init__(self):
...
...
foo = Foo()
The object 'foo' will be registered as a provider for 'foo_api'. No more
than one such instance should be created; additional instances will replace
the previous ones, possibly resulting in different instances being used by
different consumers.
"""
def wrapper(cls):
def wrapped(init):
def register_event_callbacks(self):
# NOTE(morganfainberg): A provider who has an implicit
# dependency on other providers may utilize the event callback
# mechanism to react to any changes in those providers. This is
# performed at the .provider() mechanism so that we can ensure
# that the callback is only ever called once and guaranteed
# to be on the properly configured and instantiated backend.
if not hasattr(self, 'event_callbacks'):
return
if not isinstance(self.event_callbacks, dict):
msg = _('event_callbacks must be a dict')
raise ValueError(msg)
for event in self.event_callbacks:
if not isinstance(self.event_callbacks[event], dict):
msg = _('event_callbacks[%s] must be a dict') % event
raise ValueError(msg)
for resource_type in self.event_callbacks[event]:
# Make sure we register the provider for each event it
# cares to call back.
callbacks = self.event_callbacks[event][resource_type]
if not callbacks:
continue
if not hasattr(callbacks, '__iter__'):
# ensure the callback information is a list
# allowing multiple callbacks to exist
callbacks = [callbacks]
notifications.register_event_callback(event,
resource_type,
callbacks)
def __wrapped_init__(self, *args, **kwargs):
"""Initialize the wrapped object and add it to the registry."""
init(self, *args, **kwargs)
REGISTRY[name] = self
register_event_callbacks(self)
resolve_future_dependencies(name)
return __wrapped_init__
cls.__init__ = wrapped(cls.__init__)
_factories[name] = cls
return cls
return wrapper
def _process_dependencies(obj):
# Any dependencies that can be resolved immediately are resolved.
# Dependencies that cannot be resolved immediately are stored for
# resolution in resolve_future_dependencies.
def process(obj, attr_name, unresolved_in_out):
for dependency in getattr(obj, attr_name, []):
if dependency not in REGISTRY:
# We don't know about this dependency, so save it for later.
unresolved_in_out.setdefault(dependency, []).append(obj)
continue
setattr(obj, dependency, REGISTRY[dependency])
process(obj, '_dependencies', _future_dependencies)
process(obj, '_optionals', _future_optionals)
def requires(*dependencies):
"""'requires' is a class decorator used to inject providers into consumers.
The required providers will be made available to instances of the decorated
class via an attribute with the same name as the provider. For example,
in the code fragment::
@dependency.requires('foo_api', 'bar_api')
class FooBarClient:
def __init__(self):
...
...
client = FooBarClient()
The object 'client' will have attributes named 'foo_api' and 'bar_api',
which are instances of the named providers.
Objects must not rely on the existence of these attributes until after
'resolve_future_dependencies' has been called; they may not exist
beforehand.
Dependencies registered via 'required' must have providers - if not, an
exception will be raised when 'resolve_future_dependencies' is called.
"""
def wrapper(self, *args, **kwargs):
"""Inject each dependency from the registry."""
self.__wrapped_init__(*args, **kwargs)
_process_dependencies(self)
def wrapped(cls):
"""Note the required dependencies on the object for later injection.
The dependencies of the parent class are combined with that of the
child class to create a new set of dependencies.
"""
existing_dependencies = getattr(cls, '_dependencies', set())
cls._dependencies = existing_dependencies.union(dependencies)
if not hasattr(cls, '__wrapped_init__'):
cls.__wrapped_init__ = cls.__init__
cls.__init__ = wrapper
return cls
return wrapped
def optional(*dependencies):
"""'optional' is the same as 'requires', except that the dependencies are
optional - if no provider is available, the attributes will be set to None.
"""
def wrapper(self, *args, **kwargs):
"""Inject each dependency from the registry."""
self.__wrapped_init__(*args, **kwargs)
_process_dependencies(self)
def wrapped(cls):
"""Note the optio | nal dependencies on the object for later injection.
The dependencies of the parent class are combined with that of the
child class to create a new set of dependencies.
"""
existing_optionals = getattr(cls, '_optionals', set())
cls._optionals = existing_optionals.union(dependencies)
if not hasattr(cls, '__wrapped_init__'):
cls.__wrapped_init__ = cls.__ini | t__
cls.__init__ = wrapper
return cls
return wrapped
def resolve_future_dependencies(provider_name=None):
"""'resolve_future_dependencies' forces injection of all dependencies.
Before this function is called, circular dependencies may not have been
injected. This function should be called only once, after all global
providers are registered. If an object n |
diging/tethne-tests | tests/test_persistence_hdf5_tapmodel.py | Python | gpl-3.0 | 4,806 | 0.000624 | from settings import *
import unittest
from tethne.model.social import tapmodel
from tethne.persistence.hdf5.tapmodel import HDF5TAPModel, from_hdf5, to_hdf5
from tethne.persistence.hdf5.graphcollection import HDF5Graph
import numpy
import networkx
import os
import cPickle as pickle
with open(picklepath + '/test_TAPModel.pickle', 'r') as f:
T_ = pickle.load(f)
class TestH5F5TAPModel(unittest.TestCase):
def setUp(self):
self.h5name = 'test_HDF5TAPModel.h5'
self.h5path = temppath+'/'+self.h5name
self.T = HDF5TAPModel(T_, self.h5path)
def test_test(self):
for i in T_.theta.keys():
self.assertGreater(1e-07, abs(self.T.theta[i][0]-T_.theta[i][0]))
for i in T_.a.keys():
self.assertGreater(1e-07, abs(self.T.a[i][0][0]-T_.a[i][0][0]))
for i in T_.b.keys():
self.assertGreater(1e-07, abs(self.T.b[i][0][0]-T_.b[i][0][0]))
for i in T_.r.keys():
self.assertGreater(1e-07, abs(self.T.r[i][0][0]-T_.r[i][0][0]))
for i in T_.g.keys():
self.assertGreater(1e-07, abs(self.T.g[i][0][0]-T_.g[i][0][0]))
def test_from_hdf5_object(self):
tmodel = from_hdf5(self.T)
self.assertIsInstance(tmodel, tapmodel.TAPModel)
for i in tmodel.a.keys():
self.assertEqual(tmodel.a[i].all(), self.T.a[i].all())
for i in tmodel.r.keys():
self.assertEqual(tmodel.r[i].all(), self.T.r[i].all())
for i in tmodel.g.keys():
self.assertEqual(tmodel.g[i].all(), self.T.g[i].all())
for i in tmodel.b.keys():
self.assertEqual(tmodel.b[i].all(), self.T.b[i].all())
for i in tmodel.theta.keys():
self.assertEqual(tmodel.theta[i].all(), self.T.theta[i].all())
self.assertEqual(tmodel.N_d, self.T.N_d)
self.assertIsInstance(tmodel.G, networkx.Graph)
self.assertEqual(tmodel.G.nodes(data=True), self.T.G.nodes(data=True))
self.assertEqual(tmodel.G.edges(data=True), self.T.G.edges(data=True))
def test_from_hdf5_datapath(self):
tmodel = from_hdf5(self.h5path)
self.assertIsInstance(tmodel, tapmodel.TAPModel)
for i in tmodel.a.keys():
self.assertEqual(tmodel.a[i].all(), self.T.a[i].all())
for i in tmodel.r.keys():
self.assertEqual(tmodel.r[i].all(), self.T.r[i].all())
for i in tmodel.g.keys():
self.assertEqual(tmodel.g[i].all(), self.T.g[i].all())
for i in tmodel.b.keys():
self.assertEqual(tmodel.b[i].all(), self.T.b[i].all())
for i in tmodel.theta.keys():
self.assertEqual(tmodel.theta[i].all(), self.T.theta[i].all())
self.assertEqual(tmodel.N_d, self.T.N_d)
self.assertIsInstance(tmodel.G, networkx.Graph)
self.assertEqual(tmodel.G.nodes(data=True), self.T.G.nodes(data=True))
self.assertEqual(tmodel.G.edges(data=True), self.T.G.edges(data=True))
def test_to_hdf5(self):
hmodel = to_hdf5(T_)
self.assertIsInstance(hmodel, HDF5TAPModel)
for i in hmodel.a.keys():
self.assertEqual(hmodel.a[i].all(), T_.a[i].all())
for i in hmodel.r.keys():
self.assertEqual(hmodel.r[i].all(), T_.r[i].all())
for i in hmodel.g.ke | ys():
self.assertEqual(hmodel.g[i].all(), T_.g[i].all())
for i in hmodel.b.keys():
self.assertEqual(hmodel.b[i] | .all(), T_.b[i].all())
for i in hmodel.theta.keys():
self.assertEqual(hmodel.theta[i].all(), T_.theta[i].all())
self.assertEqual(hmodel.N_d, T_.N_d)
self.assertIsInstance(hmodel.G, HDF5Graph)
self.assertEqual(hmodel.G.nodes(data=True), T_.G.nodes(data=True))
self.assertEqual(hmodel.G.edges(data=True), T_.G.edges(data=True))
def test_from_to_hdf5(self):
tmodel = from_hdf5(self.T)
hmodel = to_hdf5(tmodel)
for i in hmodel.a.keys():
self.assertEqual(hmodel.a[i].all(), tmodel.a[i].all())
for i in hmodel.r.keys():
self.assertEqual(hmodel.r[i].all(), tmodel.r[i].all())
for i in hmodel.g.keys():
self.assertEqual(hmodel.g[i].all(), tmodel.g[i].all())
for i in hmodel.b.keys():
self.assertEqual(hmodel.b[i].all(), tmodel.b[i].all())
for i in hmodel.theta.keys():
self.assertEqual(hmodel.theta[i].all(), tmodel.theta[i].all())
self.assertEqual(hmodel.N_d, tmodel.N_d)
self.assertIsInstance(hmodel.G, HDF5Graph)
self.assertEqual(hmodel.G.nodes(data=True), tmodel.G.nodes(data=True))
self.assertEqual(hmodel.G.edges(data=True), tmodel.G.edges(data=True))
def tearDown(self):
os.remove(self.h5path)
if __name__ == '__main__':
unittest.main() |
laurenbarker/SHARE | share/robot.py | Python | apache-2.0 | 4,632 | 0.001511 | import abc
import json
from django.apps import apps
from django.db import migrations
from django.apps import AppConfig
from django.utils.functional import cached_property
class RobotAppConfig(AppConfig, metaclass=abc.ABCMeta):
disabled = False
@abc.abstractproperty
def version(self):
raise NotImplementedError
@abc.abstractproperty
def task(self):
raise NotImplementedError
@abc.abstractproperty
def task_name(self):
raise NotImplementedError
@abc.abstractproperty
def description(self):
raise NotImplementedError
@abc.abstractproperty
def schedule(self):
raise NotImplementedError
@cached_property
def user(self):
from share.models import ShareUser
return ShareUser.objects.get(robot=self.name)
class AbstractRobotMigration:
def __init__(self, label):
self.config = apps.get_app_config(label)
if not isinstance(self.config, RobotAppConfig):
raise Exception('Found non-robot app, "{}", in a robot migration.'.format(label))
def deconstruct(self):
return ('{}.{}'.format(__name__, self.__class__.__name__), (self.config.label, ), {})
class RobotMigrations:
def __init__(self, app_config):
self.config = app_config
def migrations(self):
return [InitialMigration(self.config).migration()]
class InitialMigration:
def __init__(self, app_config):
self.config = app_config
def ops(self):
return [
migrations.RunPython(
RobotUserMigration(self.config.label),
# RobotUserMigration(self.config.label).reverse,
),
migrations.RunPython(
RobotOauthTokenMigration(self.config.label),
# RobotOauthTokenMigration(self.config.label).reverse,
),
migrations.RunPython(
RobotScheduleMigration(self.config.label),
# RobotScheduleMigration(self.config.label).reverse,
),
]
def dependencies(self):
return [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
def migration(self):
m = migrations.Migration('0001_initial', self.config.label)
m.operations = self.ops()
m.dependencies = self.dependencies()
return m
class RobotUserMigration(AbstractRobotMigration):
def __call__(self, apps, schema_editor):
ShareUser = apps.get_model('share', 'ShareUser')
ShareUser.objects.create_robot_user(
username=self.config.name,
robot=self.config.name,
)
def reverse(self, apps, schema_editor):
ShareUser = apps.get_model('share', 'ShareUser')
try:
ShareUser.objects.get(username=self.config.name, harvester=self.config.name).delete()
except ShareUser.DoesNotExist:
pass
class RobotOauthTokenMigration(AbstractRobotMigration):
def __call__(self, apps, schema_editor):
# OAuth tokens are now created automatically in share.models.core.user_post_save
# Keeping this class so existing migrations don't complain.
pass
def reverse(self, apps, schema_editor):
pass
class RobotScheduleMigration(AbstractRobotMigration):
def __call__(self, apps, schema_editor):
from djcelery.models import PeriodicTask
from djcelery.models import CrontabSchedule
tab = CrontabSchedule.from_schedule(self.config.schedule)
tab.save()
PeriodicTask(
enabled=not self.config.disabled,
name=self.config.task_name,
task=self.config.task,
description=self.config.description,
args=json.dumps([1, self.config.label]), # Note 1 should always be the system user
crontab=tab,
).save()
def reverse(self, apps, schema_editor):
PeriodicTask = apps.get_model('djcelery', 'PeriodicTask')
try:
PeriodicTask.get(
task=self.config.task,
args=json.dumps([1, self.config.label]), # Note 1 should always be the system user
).dele | te()
except PeriodicTask.DoesNotExist:
pass
class DisableRobotScheduleMigration(AbstractRobotMigration):
def __call__(self, apps, schema_editor):
Periodi | cTask = apps.get_model('djcelery', 'PeriodicTask')
PeriodicTask.objects.filter(
task=self.config.task,
args=json.dumps([1, self.config.label]), # Note 1 should always be the system user
).update(enabled=False)
|
Lehych/iktomi | iktomi/unstable/db/sqla/images.py | Python | mit | 5,666 | 0.001059 | import os, logging
from PIL import Image
from sqlalchemy.orm.session import object_session
from sqlalchemy.orm.util import identity_key
from iktomi.unstable.utils.image_resizers import ResizeFit
from iktomi.utils import cached_property
from ..files import TransientFile, PersistentFile
from .files import FileEventHandlers, FileProperty
logger = logging.getLogger(__name__)
class ImageFile(PersistentFile):
def _get_properties(self, properties=['width', 'height']):
if 'width' in properties or 'height' in properties:
image = Image.open(self.path)
self.width, self.height = image.size
@cached_property
def width(self):
self._get_properties(['width'])
return self.width
@cached_property
def height(self):
self._get_properties(['height'])
return self.height
class ImageEventHandlers(FileEventHandlers):
def _2persistent(self, target, transient):
# XXX move this method to file_manager
# XXX Do this check or not?
image = Image.open(transient.path)
assert image.format in Image.SAVE and image.format != 'bmp',\
'Unsupported image format'
if self.prop.image_sizes:
session = object_session(target)
persistent_name = getattr(target, self.prop.attribute_name)
pn, ext = os.path.splitext(persistent_name)
image_crop = self.prop.resize(image, self.prop.image_sizes)
if self.prop.force_rgb and image_crop.mode not in ['RGB', 'RGBA']:
image_crop = image_crop.convert('RGB')
if ext == '.gif':
image_crop.format = 'jpeg'
ext = '.jpeg'
if self.prop.enhancements:
for enhance, factor in self.prop.enhancements:
image_crop = enhance(image_crop).enhance(factor)
if self.prop.filter:
image_crop = image_crop.filter(self.prop.filter)
if not ext:
# set extension if it is not set
ext = '.' + image.format.lower()
if pn + ext != persistent_name:
persistent_name = pn + ext
# XXX hack?
setattr(target, self.prop.attribute_name, persistent_name)
image_attr = getattr(target.__class__, self.prop.key)
file_manager = persistent = session.find_file_manager(image_attr)
persistent = file_manager.get_persistent(persistent_name,
self.prop.persistent_cls)
transient = session.find_file_manager(image_attr).new_transient(ext)
kw = dict(quality=self.prop.quality)
if self.prop.optimize:
kw = dict(kw, optimize=True)
image_crop.save(transient.path, **kw)
session.find_file_manager(image_attr).store(transient, persistent)
return persistent
else:
# Attention! This method can accept PersistentFile.
# In this case one shold NEVER been deleted or rewritten.
assert isinstance(transient, TransientFile), repr(transient)
return FileEventHandlers._2persistent(self, target, transient)
def before_update(self, mapper, connection, target):
FileEventHandlers.before_update(self, mapper, connection, target)
self._fill_img(mapper, connection, target)
def before_insert(self, mapper, connection, target):
FileEventHandlers.before_insert(self, mapper, connection, target)
self._fill_img(mapper, connection, target)
def _fill_img(self, mapper, connection, target):
if self.prop.fill_from:
# XXX Looks hacky
value = getattr(target, self.prop.key)
if value is None:
base = getattr(target, | self.prop.fill_from)
if base is None:
return
if not os.path.isfile(base.path):
logger.warn('Original file is absent %s %s %s',
identity_key(instance=target),
self.prop.fill_from,
base.path)
return
ext = | os.path.splitext(base.name)[1]
session = object_session(target)
image_attr = getattr(target.__class__, self.prop.key)
name = session.find_file_manager(image_attr).new_file_name(
self.prop.name_template, target, ext, '')
setattr(target, self.prop.attribute_name, name)
persistent = self._2persistent(target, base)
setattr(target, self.prop.key, persistent)
class ImageProperty(FileProperty):
event_cls = ImageEventHandlers
def _set_options(self, options):
# XXX rename image_sizes?
options = dict(options)
self.image_sizes = options.pop('image_sizes', None)
self.resize = options.pop('resize', None) or ResizeFit()
# XXX implement
self.fill_from = options.pop('fill_from', None)
self.filter = options.pop('filter', None)
self.enhancements = options.pop('enhancements', [])
self.force_rgb = self.enhancements or \
self.filter or \
options.pop('force_rgb', True)
self.quality = options.pop('quality', 85)
self.optimize = options.pop('optimize', False)
assert self.fill_from is None or self.image_sizes is not None
options.setdefault('persistent_cls', ImageFile)
FileProperty._set_options(self, options)
|
beddit/sleep-musicalization-web | webapp/urls.py | Python | bsd-2-clause | 806 | 0.004963 | from djan | go.conf.urls.defaults import patterns, url
urlpatterns = patterns('webapp',
url(r'^/?$', 'views.home', name='home'),
url(r'^auth_redirect$', 'views.auth_redirect', name='auth_redirect'),
url(r'^nights$', 'views.night_index', name='night_index | '),
url(r'^song$', 'views.song_index', name='song_index'),
url(r'^create_song$', 'views.song_create', name='song_create'),
url(r'^song/(?P<key>[\w\d]+)$', 'views.song', name='song'),
url(r'^song/(?P<key>[\w\d]+).mp3$', 'views.song_mp3', name='song_mp3'),
url(r'^song/(?P<key>[\w\d]+)/edit$', 'views.song_edit', name='song_edit'),
url(r'^song/(?P<key>[\w\d]+)/wait$', 'views.song_wait_finished', name='song_wait_finished'),
url(r'^sign_out$', 'views.sign_out', name='sign_out'),
)
|
vyral/bombardier | networking/grunt.py | Python | mit | 598 | 0.038462 | import socket
import poormanslogging as log
LISTENPORT = 6666
class Grunt(object):
def __init__(self):
try:
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind(('', LISTENPORT))
self.s.listen(1 | )
log.info('Waiting for orders on port {}'.format(LISTENPORT))
(c, a) = self.s.accept()
self._receive_orders(c)
finally:
log.info('Shutting down')
self.s.close()
def _receive_orders(self, sock):
chunks = []
while 1:
try:
chunks.append(self.s.recv(1024))
except OSErro | r:
break
msg = b''.join(chunks)
print("Message:")
print(msg)
|
nemumu/whoispy | whoispy/whoispy.py | Python | gpl-3.0 | 1,198 | 0.013356 | import re
import sys
import whoisSrvDict
import whoispy_sock
import parser_branch
OK = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def query( | domainName):
rawMsg = ""
tldNa | me = ""
whoisSrvAddr = ""
regex = re.compile('.+\..+')
match = regex.search(domainName)
if not match:
# Invalid domain
_display_fail("Invalid domain format")
return None
# Divice TLD
regex = re.compile('\..+')
match = regex.search(domainName)
if match:
tldName = match.group()
else:
_display_fail("Can not parse TLD")
return None
# Get TLD List
if not (tldName in whoisSrvDict.get_whoisSrvDict()):
_display_fail("Not Found TLD whois server")
return None
whoisSrvAddr = whoisSrvDict.get_whoisSrvDict().get(tldName)
rawMsg = whoispy_sock.get_rawMsg(whoisSrvAddr , domainName, 43)
return parser_branch.get_parser(rawMsg, whoisSrvAddr)
# Display method
def _display_fail(msg):
sys.stdout.write( FAIL )
sys.stdout.write("%s\n" % msg)
sys.stdout.write( ENDC )
def _display_safe(msg):
sys.stdout.write( OK )
sys.stdout.write("%s\n" % msg)
sys.stdout.write( ENDC )
|
thelabnyc/django-oscar-cybersource | sandbox/order/migrations/0001_initial.py | Python | isc | 40,289 | 0.000819 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields.autoslugfield
import django.db.models.deletion
import oscar.models.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
("partner", "0001_initial"),
("customer", "0001_initial"),
("address", "0001_initial"),
("basket", "0002_auto_20140827_1705"),
("catalogue", "0001_initial"),
("sites", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="BillingAddress",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(
verbose_name="Title",
max_length=64,
blank=True,
choices=[
("Mr", "Mr"),
("Miss", "Miss"),
("Mrs", "Mrs"),
("Ms", "Ms"),
("Dr", "Dr"),
],
),
),
(
"first_name",
models.CharField(
max_length=255, verbose_name="First name", blank=True
),
),
(
"last_name",
models.CharField(
max_length=255, verbose_name="Last name", blank=True
),
),
(
"line1",
models.CharField(
max_length=255, verbose_name="First line of address"
),
),
(
"line2",
models.CharField(
max_length=255,
verbose_name="Second line of address",
blank=True,
),
),
(
"line3",
models.CharField(
max_length=255, verbose_name="Third line of address", blank=True
),
),
(
"line4",
models.CharField(max_length=255, verbose_name="City", blank=True),
),
(
"state",
models.CharField(
max_length=255, verbose_name="State/County", blank=True
),
),
(
"postcode",
oscar.models.fields.UppercaseCharField(
max_length=64, verbose_name="Post/Zip-code", blank=True
),
),
(
"search_text",
models.TextField(
editable=False,
verbose_name="Search text - used only for searching addresses",
),
),
(
"country",
models.ForeignKey(
verbose_name="Country",
to="address.Country",
on_delete=models.CASCADE,
),
),
],
options={
"verbose_name_plural": "Billing addresses",
"verbose_name": "Billing address",
"abstract": False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name="CommunicationEvent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"date_created",
models.DateTimeField(auto_now_add=True, verbose_name="Date"),
),
(
"event_type",
models.ForeignKey(
verbose_name="Event Type",
to="customer.CommunicationEventType",
on_delete=models.CASCADE,
),
),
],
options={
"ordering": ["-date_created"],
"verbose_name_plural": "Communication Events",
"verbose_name": "Communication Event",
"abstract": False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name="Line",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
| (
"partner_name",
models.CharField(
max_length=128, verbose_name="Partner name", blank=True
),
),
(
"partner_sku",
models.CharField(max_length=128, verbose_name="Partner SKU"),
| ),
(
"partner_line_reference",
models.CharField(
verbose_name="Partner reference",
max_length=128,
help_text="This is the item number that the partner uses within their system",
blank=True,
),
),
(
"partner_line_notes",
models.TextField(verbose_name="Partner Notes", blank=True),
),
("title", models.CharField(max_length=255, verbose_name="Title")),
(
"upc",
models.CharField(
verbose_name="UPC", max_length=128, blank=True, null=True
),
),
(
"quantity",
models.PositiveIntegerField(default=1, verbose_name="Quantity"),
),
(
"line_price_incl_tax",
models.DecimalField(
max_digits=12, decimal_places=2, verbose_name="Price (inc. tax)"
),
),
(
"line_price_excl_tax",
models.DecimalField(
max_digits=12,
decimal_places=2,
verbose_name="Price (excl. tax)",
),
),
(
"line_price_before_discounts_incl_tax",
models.DecimalField(
max_digits=12,
decimal_places=2,
verbose_name="Price before discounts (inc. tax)",
),
),
(
"line_price_before_discounts_excl_tax",
models.DecimalField(
max_digits=12,
decimal_places=2,
verbose_name="Price before discounts (excl. tax)",
),
),
(
"unit_cost_price",
models.DecimalField(
max_digits=12,
dec |
prathamtandon/g4gproblems | Arrays/flip_zeros_to_maximize_ones.py | Python | mit | 3,953 | 0.002024 | import unittest
"""
Given a binary array and an integer m, find the position of zeros flipping which creates the maximum
number of consecutive 1s in the array such that number of zeros is <= m.
Input: 1 0 0 1 1 0 1 0 1 1 1
Output: 5 7
"""
"""
Approach:
1. First, compute number of consecutive zeros to the left and right of each zero.
2. Next, for every consecutive m zeros, compute the number of consecutive 1s that can be obtained.
3. Time complexity is O(n) and Space complexity is O(n).
"""
"""
Approach 2:
1. Use a sliding window for the given array.
2. Let left end of the window be wL and right end be wR.
3. Let number of zeros inside the window be zeroCount.
4. We maintain the window with at most m zeros inside.
5. The main steps are:
a. While zeroCount is at most m, expand the window to the right (wR++) and update zeroCount.
b. While zeroCount exceeds m, shrink the window from left (wL++), update zeroCount.
c. Update the widest window along the way.
"""
def flip_zeros_to_maximize_ones(ones_and_zeros, m):
    """Return the positions of at most ``m`` zeros whose flipping yields the
    longest run of consecutive 1s.

    Approach: precompute, for every zero, the length of the 1-run immediately
    to its left and right.  For every window of ``m`` consecutive zeros the
    resulting run length is::

        ones left of first zero + interior 1-runs + ones right of last zero + m

    Each interior 1-run equals ``left`` of the following zero, so it is
    counted exactly once.  (The previous version summed left+right for every
    zero, double-counting interior runs and omitting the flipped zeros,
    which could pick a suboptimal window.)

    Time O(n * m) worst case, space O(n).
    """
    if m <= 0:
        return []
    n = len(ones_and_zeros)
    ones_left = [0] * n   # 1-run immediately left of each zero position
    ones_right = [0] * n  # 1-run immediately right of each zero position
    run = 0
    for i in range(n):
        if ones_and_zeros[i] == 1:
            run += 1
        else:
            ones_left[i] = run
            run = 0
    run = 0
    for i in range(n - 1, -1, -1):
        if ones_and_zeros[i] == 1:
            run += 1
        else:
            ones_right[i] = run
            run = 0
    zeros = [i for i in range(n) if ones_and_zeros[i] == 0]
    # Fewer zeros than flips available: flip them all.
    if len(zeros) <= m:
        return zeros
    best_length = -1
    best_start = 0
    for i in range(len(zeros) - m + 1):
        window = zeros[i:i + m]
        # exterior runs + the m flipped zeros themselves ...
        length = ones_left[window[0]] + ones_right[window[-1]] + m
        # ... plus each interior 1-run, counted once via the next zero's left run
        for z in window[1:]:
            length += ones_left[z]
        if length > best_length:
            best_length = length
            best_start = i
    return zeros[best_start:best_start + m]
def flip_zeros_sliding_window(ones_and_zeros, m):
    """Sliding-window variant: track the widest window holding at most ``m``
    zeros, then report the indices of up to ``m`` zeros starting at it.
    """
    lo = 0                 # left edge of the window (inclusive)
    hi = 0                 # right edge of the window (exclusive)
    zeros_in_window = 0
    best_size = 0
    best_start = 0
    while hi < len(ones_and_zeros):
        # Grow on the right while the zero budget allows it.
        if zeros_in_window <= m:
            if ones_and_zeros[hi] == 0:
                zeros_in_window += 1
            hi += 1
        # Shrink one step from the left once the budget is exceeded.
        if zeros_in_window > m:
            if ones_and_zeros[lo] == 0:
                zeros_in_window -= 1
            lo += 1
        # Remember the widest window seen so far.
        if hi - lo > best_size:
            best_size = hi - lo
            best_start = lo
    # Collect the indices of up to m zeros scanning from the best window start.
    flips = []
    idx = best_start
    while len(flips) < m and idx < len(ones_and_zeros):
        if ones_and_zeros[idx] == 0:
            flips.append(idx)
        idx += 1
    return flips
class TestFlips(unittest.TestCase):
    """Exercises both flipping strategies against shared fixture data."""

    # (bits, m, expected flip positions) -- both implementations must agree
    CASES = [
        ([1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1], 2, [5, 7]),
        ([1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1], 1, [7]),
        ([0, 0, 0, 1], 4, [0, 1, 2]),
    ]

    def test_flips(self):
        for bits, m, expected in self.CASES:
            self.assertEqual(flip_zeros_to_maximize_ones(list(bits), m), expected)

    def test_flips_sliding_window(self):
        for bits, m, expected in self.CASES:
            self.assertEqual(flip_zeros_sliding_window(list(bits), m), expected)
|
kopringo/django-genealogy | genealogy/urls.py | Python | unlicense | 856 | 0.022196 | #-*- coding: utf-8 -*-
#django
from django.conf.urls import patterns, include, url
from django.views.generic import RedirectView
urlpatterns = patterns('genealogy',

    # person listing and per-person detail (keyed by a free-form handle)
    url(r'^person-list$', 'views.person_list', name='person_list'),
    url(r'^person-view/(?P<handle>.*)/$', 'views.person_view', name='person_view'),
    # family and branch listings
    url(r'^family-list$', 'views.family_list', name='family_list'),
    url(r'^branch-list$', 'views.branch_list', name='branch_list'),
    url(r'^test$', 'views.test'),
    # language switching mechanism (currently disabled)
    #url(r'^lang/(?P<id>[plen]+)', 'views.lang', name='lang'),
    # site home page
    url(r'^$', 'views.home', name='home'),
)
|
LICEF/edx-platform | cms/djangoapps/contentstore/views/course.py | Python | agpl-3.0 | 42,299 | 0.002482 | """
Views related to operations on course objects
"""
import json
import random
import string # pylint: disable=W0402
from django.utils.translation import ugettext as _
import django.utils
from django.contrib.auth.decorators import login_required
from django_future.csrf import ensure_csrf_cookie
from django.conf import settings
from django.views.decorators.http import require_http_methods
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponse
from util.json_request import JsonResponse
from edxmako.shortcuts import render_to_response
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.content import StaticContent
from xmodule.tabs import PDFTextbookTabs
from xmodule.partitions.partitions import UserPartition, Group
from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update
from contentstore.utils import (
get_lms_link_for_item,
add_extra_panel_tab,
remove_extra_panel_tab,
reverse_course_url
)
from models.settings.course_details import CourseDetails, CourseSettingsEncoder
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from util.json_request import expect_json
from util.string_utils import _has_non_ascii_characters
from .access import has_course_access
from .component import (
OPEN_ENDED_COMPONENT_TYPES,
NOTE_COMPONENT_TYPES,
ADVANCED_COMPONENT_POLICY_KEY,
SPLIT_TEST_COMPONENT_TYPE,
)
from django_comment_common.models import assign_default_role
from django_comment_common.utils import seed_permissions_roles
from student.models import CourseEnrollment
from student.roles import CourseRole, UserBasedRole
from opaque_keys.edx.keys import CourseKey
from course_creators.views import get_course_creator_status, add_user_with_status_unrequested
from contentstore import utils
from student.roles import CourseInstructorRole, CourseStaffRole, CourseCreatorRole, GlobalStaff
from student import auth
from microsite_configuration import microsite
__all__ = ['course_info_handler', 'course_handler', 'course_info_update_handler',
'settings_handler',
'grading_handler',
'advanced_settings_handler',
'textbooks_list_handler', 'textbooks_detail_handler',
'group_configurations_list_handler', 'group_configurations_detail_handler']
class AccessListFallback(Exception):
    """
    Raised whenever we need to `fall back` to fetching *all* courses
    available to a user, rather than using a shorter method (i.e. fetching
    by group)
    """
def _get_course_module(course_key, user, depth=0):
    """
    Fetch the course module for *course_key* on behalf of *user*.

    Raises PermissionDenied when the user lacks access to the course.
    """
    if not has_course_access(user, course_key):
        raise PermissionDenied()
    return modulestore().get_course(course_key, depth=depth)
# pylint: disable=unused-argument
@login_required
def course_handler(request, course_key_string=None):
    """
    RESTful handler for course-level requests.

    GET
        html: the course listing page when no course id is given, otherwise
              the overview page for that course
        json: the course branch's index entry plus its child tree
    POST
        json: create a course, returning the resulting json
    PUT
        json: update this course's index entry (not implemented)
    DELETE
        json: delete this branch from this course (not implemented)
    """
    response_format = request.REQUEST.get('format', 'html')
    wants_json = (
        response_format == 'json' or
        'application/json' in request.META.get('HTTP_ACCEPT', 'application/json')
    )
    if wants_json:
        if request.method == 'GET':
            return JsonResponse(_course_json(request, CourseKey.from_string(course_key_string)))
        if request.method == 'POST':
            # creation happens before any access check: the course has no id yet
            return create_new_course(request)
        if not has_course_access(request.user, CourseKey.from_string(course_key_string)):
            raise PermissionDenied()
        if request.method in ('PUT', 'DELETE'):
            raise NotImplementedError()
        return HttpResponseBadRequest()
    if request.method == 'GET':  # assume html
        if course_key_string is None:
            return course_listing(request)
        return course_index(request, CourseKey.from_string(course_key_string))
    return HttpResponseNotFound()
@login_required
def _course_json(request, course_key):
    """
    Return a JSON-serializable overview of the course identified by *course_key*.
    """
    course = _get_course_module(course_key, request.user, depth=None)
    return _xmodule_json(course, course.id)
def _xmodule_json(xmodule, course_id):
    """
    Return a JSON-serializable overview of *xmodule*, recursing into children
    for container modules.
    """
    overview = {
        'display_name': xmodule.display_name,
        'id': unicode(xmodule.location),
        'category': xmodule.category,
        'is_draft': getattr(xmodule, 'is_draft', False),
        'is_container': xmodule.has_children,
    }
    if overview['is_container']:
        overview['children'] = [
            _xmodule_json(child, course_id) for child in xmodule.get_children()
        ]
    return overview
def _accessible_courses_list(request):
    """
    List every course available to the logged-in user by scanning all courses.
    """
    def course_filter(course):
        """
        Predicate: keep the courses this user may access (excluding templates).
        """
        if isinstance(course, ErrorDescriptor):
            return False

        if GlobalStaff().has_user(request.user):
            return course.location.course != 'templates'

        # pylint: disable=fixme
        # TODO remove this condition when templates purged from db
        return (has_course_access(request.user, course.id)
                and course.location.course != 'templates')

    return filter(course_filter, modulestore().get_courses())
def _accessible_courses_list_from_groups(request):
"""
List all courses available to the logged in user by reversing access group names
"""
courses_list = {}
instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()
staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()
all_courses = instructor_courses | staff_courses
for course_access in all_courses:
course_key = course_access.course_id
if course_key is None:
# If the course_access does not have a course_id, it's an org-based role, so we fall back
raise AccessListFallback
if course_key not in courses_list:
try:
course = modulestore().get_course(course_key)
|
FichteFoll/CSScheme | my_sublime_lib/constants.py | Python | mit | 2,230 | 0.028251 | KEY_UP = "up"
KEY_DOWN = "down"
KEY_RIGHT = "right"
KEY_LEFT = "left"
KEY_INSERT = "insert"
KEY_HOME = "home"
KEY_END = "end"
KEY_PAGEUP = "pageup"
KEY_PAGEDOWN = "pagedown"
KEY_BACKSPACE = "backspace"
KEY_DELETE = "delete"
KEY_TAB = "tab"
KEY_ENTER = "enter"
KEY_PAUSE = "pause"
KEY_ESCAPE = "escape"
KEY_SPACE = "space"
KEY_KEYPAD0 = "keypad0"
KEY_KEYPAD1 = "keypad1"
KEY_KEYPAD2 = "keypad2"
KEY_KEYPAD3 = "keypad | 3"
KEY_KEYPAD4 = "keypad4"
KEY_KEYPAD5 = "keypad5"
KEY_KEYPAD6 = "keypad6"
KEY_KEYPAD7 = "keypad7"
KEY_KEYPAD8 = "keypad8"
KEY_KEYPAD9 = "keypad9"
KEY_KEYPAD_PERIOD = "keypad_period"
KEY_KEYPAD_DIVIDE = "keypad_divide"
KEY_KEYPAD_MULTIPLY = "keypad_multiply"
KEY_KEYPAD_MINUS = "keypad_minus"
KEY_KEYPAD_PLUS = "keypad_plus"
KEY_KEYPAD_ENTER = "keypad_enter"
| KEY_CLEAR = "clear"
KEY_F1 = "f1"
KEY_F2 = "f2"
KEY_F3 = "f3"
KEY_F4 = "f4"
KEY_F5 = "f5"
KEY_F6 = "f6"
KEY_F7 = "f7"
KEY_F8 = "f8"
KEY_F9 = "f9"
KEY_F10 = "f10"
KEY_F11 = "f11"
KEY_F12 = "f12"
KEY_F13 = "f13"
KEY_F14 = "f14"
KEY_F15 = "f15"
KEY_F16 = "f16"
KEY_F17 = "f17"
KEY_F18 = "f18"
KEY_F19 = "f19"
KEY_F20 = "f20"
KEY_SYSREQ = "sysreq"
KEY_BREAK = "break"
KEY_CONTEXT_MENU = "context_menu"
KEY_BROWSER_BACK = "browser_back"
KEY_BROWSER_FORWARD = "browser_forward"
KEY_BROWSER_REFRESH = "browser_refresh"
KEY_BROWSER_STOP = "browser_stop"
KEY_BROWSER_SEARCH = "browser_search"
KEY_BROWSER_FAVORITES = "browser_favorites"
KEY_BROWSER_HOME = "browser_home"
|
dsax64/reddit-to-prezi | xml_generator/prezi_xml_helpers.py | Python | mit | 3,141 | 0.000955 |
from lxml import etree
# TODO akos.hochrein think a way of removing side effects
class PreziXmlHelpers(object):
    """Static helpers that append Prezi-specific child elements to an lxml node."""

    @staticmethod
    def generate_autoplay_node(xml_node, delay):
        """Append <autoplay><delay>delay</delay></autoplay> under *xml_node*."""
        autoplay_node = etree.SubElement(xml_node, 'autoplay')
        delay_node = etree.SubElement(autoplay_node, 'delay')
        delay_node.text = str(delay)

    @staticmethod
    def generate_bounds_node(xml_node, bounds, dimensions):
        """Append <bounds x y width height/>; *bounds* is (x, y), *dimensions* is (w, h)."""
        etree.SubElement(xml_node, 'bounds',
                         x=str(bounds[0]), y=str(bounds[1]),
                         width=str(dimensions[0]), height=str(dimensions[1]))

    @staticmethod
    def generate_version_node(xml_node, version):
        """Append <version>version</version> under *xml_node*."""
        version_node = etree.SubElement(xml_node, 'version')
        version_node.text = str(version)

    @staticmethod
    def generate_type_node(xml_node, type):
        """Append <type>type</type>; *type* shadows the builtin but is part of the API."""
        type_node = etree.SubElement(xml_node, 'type')
        type_node.text = type

    @staticmethod
    def generate_size_node(xml_node, dimensions):
        """Append <size><w/><h/></size>; *dimensions* must map 'w' and 'h'."""
        size_node = etree.SubElement(xml_node, 'size')
        w_node = etree.SubElement(size_node, 'w')
        w_node.text = str(dimensions['w'])
        h_node = etree.SubElement(size_node, 'h')
        h_node.text = str(dimensions['h'])

    @staticmethod
    def generate_width_node(xml_node, dimensions):
        """Append <width>w</width> from *dimensions*['w']."""
        w_node = etree.SubElement(xml_node, 'width')
        w_node.text = str(dimensions['w'])

    @staticmethod
    def generate_height_node(xml_node, dimensions):
        """Append <height>h</height> from *dimensions*['h']."""
        h_node = etree.SubElement(xml_node, 'height')
        h_node.text = str(dimensions['h'])

    @staticmethod
    def generate_text_node(xml_node, text, alignment):
        """Append <p align=...><text>text</text></p> under *xml_node*."""
        paragraph_node = etree.SubElement(xml_node, 'p', align=alignment)
        text_node = etree.SubElement(paragraph_node, 'text')
        text_node.text = text

    @staticmethod
    def generate_object_node(xml_node):
        # Not implemented; see the ObjectNode class hierarchy below.
        pass
class ObjectNode(object):
    """Builds an <object> element (id/type/x/y/s attributes) under a parent node.

    NOTE(review): __init__ *returns* the created element instead of storing it
    on self.  Subclasses call it directly via super(...).__init__(...) and use
    the return value, but instantiating ObjectNode(...) itself would raise
    TypeError ("__init__() should return None") -- consider refactoring.
    """
    def __init__(self, parent_node, id, node_type, dimensions):
        # dimensions is expected to provide 'x', 'y' and 's' keys
        return etree.SubElement(parent_node, 'object',
                                id=str(id),
                                type=str(node_type),
                                x=str(dimensions['x']),
                                y=str(dimensions['y']),
                                s=str(dimensions['s']))
class FrameNode(ObjectNode):
    """An <object> extended with <type> and <size> children for frames.

    Relies on ObjectNode.__init__ returning the freshly created element.
    """
    def __init__(self, parent_node, id, node_type, dimensions, frame_type):
        frame_node = super(FrameNode, self).__init__(parent_node, id, node_type, dimensions)
        PreziXmlHelpers.generate_type_node(frame_node, frame_type)
        PreziXmlHelpers.generate_size_node(frame_node, dimensions)
class TextNode(ObjectNode):
    """An <object> extended with <width>, <height> and aligned text content.

    Relies on ObjectNode.__init__ returning the freshly created element.
    """
    def __init__(self, parent_node, id, node_type, dimensions, text, alignment):
        text_node = super(TextNode, self).__init__(parent_node, id, node_type, dimensions)
        PreziXmlHelpers.generate_width_node(text_node, dimensions)
        PreziXmlHelpers.generate_height_node(text_node, dimensions)
        PreziXmlHelpers.generate_text_node(text_node, text, alignment)
class ImageNode(ObjectNode):
    # Placeholder: image-specific children are not implemented yet.
    pass
class Thread(object):
    # Placeholder class; no behaviour yet.
    def __init__(self):
        pass
|
entpy/beauty-and-pics | beauty_and_pics/custom_form_app/forms/delete_user_form.py | Python | mit | 2,518 | 0.00556 | # -*- coding: utf-8 -*-
from django import forms
from datetime import date
from dateutil.relativedelta import *
from django.contrib.auth.models import User
from custom_form_app.forms.base_form_class import *
from account_app.models import *
from website.exceptions import *
import logging, sys

# force utf8 read data (Python 2 only: reload() re-exposes sys.setdefaultencoding)
reload(sys);
sys.setdefaultencoding("utf8")

# Get an instance of a logger
logger = logging.getLogger(__name__)
class DeleteUserForm(forms.Form, FormCommonUtils):
    """Form that deletes a user account after validating the submitted id."""

    # id of the account to delete; must match the logged-in user (enforced in Account.delete_user)
    user_id = forms.IntegerField(label='User id', required=True)

    # list of validator for this form (consumed by FormCommonUtils)
    custom_validation_list = (
        'check_all_fields_valid',
    )

    def __init__(self, *args, **kwargs):
        # parent forms.Form init
        super(DeleteUserForm, self).__init__(*args, **kwargs)
        FormCommonUtils.__init__(self)

        # current form instance, used by the shared validation machinery
        self.validation_form = super(DeleteUserForm, self)

    def clean(self):
        # delegate to the shared validator runner (runs custom_validation_list)
        super(DeleteUserForm, self).clean_form_custom()
        return True

    def delete_user(self):
        """Delete the user identified by the validated ``user_id``.

        Returns True on success; on failure logs and stores a form-level
        error message (user-facing text is in Italian).
        """
        return_var = False
        # request_data / form_validated_data are populated by FormCommonUtils -- TODO confirm
        logged_user_id = self.request_data.user.id
        user_id = self.form_validated_data.get("user_id")
        account_obj = Account()
        try:
            account_obj.delete_user(user_id=user_id, logged_user_id=logged_user_id)
        except UserDeleteDoesNotExistsError:
            logger.error("Errore nell'eliminazione dell'account, l'id utente non esiste: " + str(self.form_validated_data) + " | error code: " + str(UserDeleteDoesNotExistsError.get_error_code))
            self._errors = {"__all__": ["Errore nell'eliminazione dell'account. Sii gentile, segnala il problema (Codice " + str(UserDeleteDoesNotExistsError.get_error_code) + ")"]}
        except UserDeleteIdDoesNotMatchError:
            logger.error("Errore nell'eliminazione dell'account, l'id utente non matcha con l'id utente in sessione: " + str(self.form_validated_data) + " | error code: " + str(UserDeleteIdDoesNotMatchError.get_error_code))
            self._errors = {"__all__": ["Errore nell'eliminazione dell'account. Sii gentile, segnala il problema (Codice " + str(UserDeleteIdDoesNotMatchError.get_error_code) + ")"]}
        else:
            return_var = True

        return return_var

    def form_actions(self):
        """Perform the form's action: delete the user and related data."""
        return_var = False

        # delete user and related data
        if self.delete_user():
            return_var = True

        return return_var
|
alfredodeza/ceph-doctor | ceph_medic/check.py | Python | mit | 2,944 | 0.000679 | import sys
import ceph_medic
import logging
from ceph_medic import runner, collector
from tambo import Transport
logger = logging.getLogger(__name__)
def as_list(string):
    """Split a comma-separated *string* into a list of stripped tokens.

    Empty tokens produced by doubled, leading, or trailing commas are
    dropped (the previous version let them through, e.g.
    ``as_list('a,,b') == ['a', '', 'b']``).  Falsy input yields ``[]``.
    """
    if not string:
        return []
    return [token.strip() for token in string.split(',') if token.strip()]
class Check(object):
    """`check` subcommand: run all checks against every configured node."""

    help = "Run checks for all the configured nodes in a cluster or hosts file"

    long_help = """
check: Run for all the configured nodes in the configuration

Options:
    --ignore    Comma-separated list of errors and warnings to ignore.

Loaded Config Path: {config_path}

Configured Nodes:
{configured_nodes}
    """

    def __init__(self, argv=None, parse=True):
        # *parse* is accepted for interface compatibility; it is not used here
        self.argv = argv or sys.argv

    @property
    def subcommand_args(self):
        """Arguments from `check` onwards."""
        # find where `check` is
        index = self.argv.index('check')
        # slice the args
        return self.argv[index:]

    def _help(self):
        """Render long_help with the configured nodes and config path filled in."""
        node_section = []
        for daemon, nodes in ceph_medic.config.nodes.items():
            header = "\n* %s:\n" % daemon
            body = '\n'.join(["    %s" % n for n in nodes])
            node_section.append(header+body+'\n')
        return self.long_help.format(
            configured_nodes=''.join(node_section),
            config_path=ceph_medic.config.config_path
        )

    def main(self):
        """Parse CLI options, collect node metadata, run the checks, report."""
        options = ['--ignore']
        config_ignores = ceph_medic.config.file.get_list('check', '--ignore')
        parser = Transport(
            self.argv, options=options,
            check_version=False
        )
        parser.catch_help = self._help()
        parser.parse_args()
        ignored_codes = as_list(parser.get('--ignore', ''))
        # fallback to the configuration if nothing is defined in the CLI
        if not ignored_codes:
            ignored_codes = config_ignores

        if len(self.argv) < 1:
            # NOTE(review): argv normally contains at least the program name,
            # so this only triggers when an explicit empty list was passed in
            return parser.print_help()

        # populate the nodes metadata with the configured nodes
        for daemon in ceph_medic.config.nodes.keys():
            ceph_medic.metadata['nodes'][daemon] = []

        for daemon, nodes in ceph_medic.config.nodes.items():
            for node in nodes:
                node_metadata = {'host': node['host']}
                if 'container' in node:
                    node_metadata['container'] = node['container']
                ceph_medic.metadata['nodes'][daemon].append(node_metadata)

        collector.collect()
        test = runner.Runner()
        test.ignore = ignored_codes
        results = test.run()
        runner.report(results)
        #XXX might want to make this configurable to not bark on warnings for
        # example, setting forcefully for now, but the results object doesn't
        # make a distinction between error and warning (!)
        if results.errors or results.warnings:
            sys.exit(1)
|
julie-anderson/python_learning_samples | python_range.py | Python | mit | 219 | 0.013699 | # python_range.py
# J.M. Anderson
# sample of range function
print "range(10): " | , range(10)
print "range(10, 20): ", range(10,20)
print "range(2, 30, 6): ", range(2,30,6)
print "range( | 20, 10, -2): ", range(20, 10, -2)
|
marzique/cs50_finance | sqlquery.py | Python | mit | 382 | 0.007853 | from cs50 import SQL
from flask import session
from helpers import usd

# SQLite database backing the app
db = SQL("sqlite:///finance.db")
def get_cash():
    """Return the logged-in user's cash balance as a float."""
    rows = db.execute("SELECT cash FROM users WHERE id = :id",
                      id=session["user_id"])
    return float(rows[0]["cash"])
def get_username():
    """Return the logged-in user's username."""
    return db.execute("SELECT username FROM users WHERE id = :id", id=session["user_id"] )[0]["username"]
|
fy2462/apollo | modules/tools/car_sound/car_sound.py | Python | apache-2.0 | 2,085 | 0.000959 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Play car sounds
"""
import sys
import rospy
from std_msgs.msg import String
from modules.common.proto import car_sound_pb2
from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient
import time
class CarSound(object):
    """
    car sound class: wraps a sound_play SoundClient and replays incoming
    serialized SoundRequest protobuf messages as speech or beeps.
    """

    def __init__(self):
        # reusable protobuf message for deserializing incoming requests
        self.entity = car_sound_pb2.SoundRequest()
        self.soundhandle = SoundClient()
        # voice name handed to sound_play for text-to-speech
        self.voice = 'voice_kal_diphone'
        # time of the most recently handled request
        self.lasttime = rospy.get_time()

    def callback_sound(self, data):
        """
        new sound request: *data* carries a serialized car_sound_pb2.SoundRequest
        """
        print "New Sound Msg"
        self.entity.ParseFromString(data.data)
        print self.entity
        if self.entity.mode == car_sound_pb2.SoundRequest.SAY:
            # speak the requested words with the configured voice
            self.soundhandle.say(self.entity.words, self.voice)
        elif self.entity.mode == car_sound_pb2.SoundRequest.BEEP:
            # play a builtin sound in response to BEEP requests
            self.soundhandle.play(SoundRequest.NEEDS_PLUGGING)
        self.lasttime = rospy.get_time()
def main():
    """
    Main rosnode: set up the car_sound node and dispatch incoming requests.
    """
    rospy.init_node('car_sound', anonymous=True)
    sound = CarSound()
    # give sound_play a moment to connect before playing the startup sound
    time.sleep(1)
    sound.soundhandle.play(SoundRequest.NEEDS_UNPLUGGING)
    rospy.Subscriber('/apollo/carsound', String, sound.callback_sound)
    rospy.spin()


if __name__ == '__main__':
    main()
|
Kalimaha/simple_flask_blueprint | simple_flask_blueprint/rest/blueprint_rest.py | Python | gpl-2.0 | 299 | 0.003344 | from flask import Blueprint
from simple_flask_blueprint.core.blueprint_core import say_hallo

bp = Blueprint('simple_flask_blueprint', __name__)
@bp.route('/')
def say_hallo_service():
    """Root endpoint: greet with the default name."""
    return say_hallo()


@bp.route('/<name>/')
def say_hallo_to_guest_service(name):
    """Greet the guest named in the URL path."""
    return say_hallo(name)
|
daizhengy/RDS | trove/tests/fakes/__init__.py | Python | apache-2.0 | 752 | 0 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Implements a fake version of the models code so that the server can be stood
up and run under test quickly.
"""
|
ramusus/django-facebook-comments | setup.py | Python | bsd-3-clause | 1,122 | 0 | from setuptools import setup, find_packages
setup(
    name='django-facebook-comments',
    # version comes from the package itself
    version=__import__('facebook_comments').__version__,
    description='Django implementation for Facebook Graph API Comments',
    long_description=open('README.md').read(),
    author='ramusus',
    author_email='ramusus@gmail.com',
    url='https://github.com/ramusus/django-facebook-comments',
    download_url='http://pypi.python.org/pypi/django-facebook-comments',
    license='BSD',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,  # because we're including media that Django needs
    install_requires=[
        'django-facebook-api>=0.5.0',
        'django-facebook-users>=0.3.0',
        'django-m2m-history>=0.1.2',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
Tayamarn/socorro | socorro/signature/__main__.py | Python | mpl-2.0 | 9,386 | 0.002664 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
import argparse
import csv
import logging
import logging.config
import os
import sys
import requests
from socorro.lib.treelib import tree_get
from socorro.signature import SignatureGenerator
DESCRIPTION = """
Given one or more crash ids via command line or stdin (one per line), pulls down information from
Socorro, generates signatures, and prints signature information.
"""
EPILOG = """
Note: In order for the SignatureJitCategory rule to work, you need a valid API token from
Socorro that has "View Personally Identifiable Information" permission.
"""
logger = logging.getLogger('socorro.signature')
# FIXME(willkg): This hits production. We might want it configurable.
API_URL = 'https://crash-stats.mozilla.com/api/'
def setup_logging(logging_level):
    """Configure the 'socorro' logger at *logging_level* with a bare
    '<LEVEL>: <message>' formatter on a StreamHandler (stderr by default)."""
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'bare': {'format': '%(levelname)s: %(message)s'},
        },
        'handlers': {
            'console': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'bare',
            },
        },
        'loggers': {
            'socorro': {
                'propagate': False,
                'handlers': ['console'],
                'level': logging_level,
            },
        },
    })
class OutputBase:
    """Base class for outputter classes.

    Outputters are context managers; subclasses that need begin/end
    semantics override ``__enter__``/``__exit__``, the rest just implement
    ``data``.  The base implementations are no-ops.
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def warning(self, line):
        """Write *line* to stderr prefixed with 'WARNING: '.

        :arg str line: the line to print to stderr
        """
        sys.stderr.write('WARNING: %s\n' % line)

    def data(self, crash_id, old_sig, new_sig, notes):
        """Output one data point; no-op in the base class.

        :arg str crash_id: the crash id for the signature generated
        :arg str old_sig: the old signature retrieved in the processed crash
        :arg str new_sig: the new generated signature
        :arg list notes: any processor notes
        """
        pass
class TextOutput(OutputBase):
    """Outputter that prints one human-readable record per crash to stdout."""

    def data(self, crash_id, old_sig, new_sig, notes):
        print('Crash id: %s' % crash_id)
        print('Original: %s' % old_sig)
        print('New: %s' % new_sig)
        print('Same?: %s' % (old_sig == new_sig))
        if notes:
            print('Notes: (%d)' % len(notes))
            for note in notes:
                print('   %s' % note)
class CSVOutput(OutputBase):
    """Outputter that streams fully-quoted CSV rows (with a header) to stdout."""

    def __enter__(self):
        self.out = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
        self.out.writerow(['crashid', 'old', 'new', 'same?', 'notes'])
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # drop the writer so stray .data() calls after exit fail loudly
        self.out = None

    def data(self, crash_id, old_sig, new_sig, notes):
        self.out.writerow([crash_id, old_sig, new_sig, str(old_sig == new_sig), notes])
def fetch(endpoint, crash_id, api_token=None):
    """GET ``API_URL + endpoint`` for *crash_id*.

    When *api_token* is truthy it is sent as the ``Auth-Token`` header.
    Returns the ``requests`` response object.
    """
    request_kwargs = {'params': {'crash_id': crash_id}}
    if api_token:
        request_kwargs['headers'] = {'Auth-Token': api_token}
    return requests.get(API_URL + endpoint, **request_kwargs)
def main(args):
"""Takes crash data via args and generates a Socorro signature
"""
parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
parser.add_argument(
'-v', '--verbose', help='increase output verbosity', action='store_true'
)
parser.add_argument(
'--format', help='specify output format: csv, text (default)'
)
parser.add_argument(
'--different-only', dest='different', action='store_true',
help='limit output to just the signatures that changed',
)
parser.add_argument(
'crashids', metavar='crashid', nargs='*', help='crash id to generate signatures for'
)
args = parser.parse_args()
if args.format == 'csv':
outputter = CSVOutput
else:
outputter = TextOutput
if args.verbose:
logging_level = 'DEBUG'
else:
logging_level = 'INFO'
api_token = os.environ.get('SOCORRO_API_TOKEN', '')
setup_logging(logging_level)
generator = SignatureGenerator(debug=args.verbose)
crashids_iterable = args.crashids or sys.stdin
with outputter() as out:
for crash_id in crashids_iterable:
crash_id = crash_id.strip()
resp = fetch('/RawCrash/', crash_id, api_token)
if resp.status_code == 404:
out.warning('%s: does not exist.' % crash_id)
continue
if resp.status_code == 429:
out.warning('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
out.warning('HTTP 500: %s' % resp.content)
continue
raw_crash = resp.json()
# If there's an error in the raw crash, then something is wrong--probably with the API
# token. So print that out and exit.
if 'error' in raw_crash:
out.warning('Error fetching raw crash: %s' % raw_crash['error'])
return 1
raw_crash_minimal = {
'JavaStackTrace': raw_crash.get('JavaStackTrace', None),
'OOMAllocationSize': raw_crash.get('OOMAllocationSize', None),
'AbortMessage': raw_crash.get('AbortMessage', None),
'AsyncShutdownTimeout': raw_crash.get('AsyncShutdownTimeout', None),
'ipc_channel_error': raw_crash.get('ipc_channel_error', None),
'additional_minidumps': raw_crash.get('additional_minidumps', None),
'IPCMessageName': raw_crash.get('IPCMessageName', None),
'MozCrashReason': raw_crash.get('MozCrashReason', None),
}
resp = fetch('/ProcessedCrash/', crash_id, api_token)
if resp.status_code == 404:
out.warning('%s: does not have processed crash.' % crash_id)
continue
if resp.status_code == 429:
out.warning('API rate limit reached. %s' % resp.content)
# FIXME(willkg): Maybe there's something better we could do here. Like maybe wait a
# few minutes.
return 1
if resp.status_code == 500:
out.warning('HTTP 500: %s' % resp.content)
continue
processed_crash = resp.json()
# If there's an error in the processed crash, then something is wrong--probably with the
# API token. So print that out and exit.
if 'error' in processed_crash:
out.warning('Error fetching processed crash: %s' % processed_crash['error'])
return 1
old_signature = processed_crash['signature']
processed_crash_minimal = {
'hang_type': processed_crash.get('hang_type', None),
'json_dump': {
'threads': tree_get(processed_crash, 'json_dump.threads', default=[]),
'system_info': {
'os': tree_get(processed_crash, 'json_dump.system_info.os', default=''),
},
'crash_info': {
'crashing_thread': tree_get(
processed_crash, 'json_dump.crash_info.crashing_thread', default=None
),
},
},
# |
MayankAgarwal/euler_py | 002/euler002.py | Python | mit | 236 | 0.021186 | T = int(raw | _input())
for test in xrange(T):
N = int(raw_input())
a, b, result = 0, 1, 0
c = a+b
while c < N:
if c%2 == 0:
result += c
a,b = b,c
c = a+b |
print result |
BryanQuigley/sos | sos/report/plugins/monit.py | Python | gpl-2.0 | 2,464 | 0 | # Copyright (C) 2015 Red Hat, Inc.,
# Pablo Iranzo Gomez <Pablo.Iranzo@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin
from glob import glob
class Monit(Plugin, RedHatPlugin):

    short_desc = 'Monit monitoring daemon'

    packages = ('monit',)
    profiles = ('system',)
    plugin_name = 'monit'

    # Configuration files: every drop-in under /etc/monit.d plus the two
    # well-known top-level config locations.
    # FIXME: direct globs will fail in container environments.
    monit_conf = glob("/etc/monit.d/*") + ["/etc/monit.conf", "/etc/monitrc"]

    # Log files
    monit_log = ["/var/log/monit.log"]

    option_list = []

    def setup(self):
        """Collect `monit status` output plus the logs and config files."""
        self.add_cmd_output("monit status")
        self.add_copy_spec(self.monit_log + self.monit_conf)

    def postproc(self):
        """Scrub credentials from the collected configuration files.

        Obfuscates 'allow user:pass' ACL entries and mail-server
        username/password directives, in both lower- and upper-case forms,
        so usernames and passwords are not disclosed in the report.
        """
        # (pattern, replacement) pairs applied to every config file.  The
        # original issued six near-identical do_file_sub calls and used a
        # loop variable named 'file', shadowing the builtin.
        scrub_patterns = [
            (r"allow (.*):(.*)", r"allow ********:********"),
            (r"ALLOW (.*):(.*)", r"ALLOW ********:********"),
            (r"username (\w)+", r"username ********"),
            (r"password (\w)+", r"password ********"),
            (r"USERNAME (\w)+", r"USERNAME ********"),
            (r"PASSWORD (\w)+", r"PASSWORD ********"),
        ]
        for conf_file in self.monit_conf:
            for pattern, repl in scrub_patterns:
                self.do_file_sub(conf_file, pattern, repl)
# vim: et ts=4 sw=4
|
shennjia/weblte | R12_36211/PRB.py | Python | mit | 4,004 | 0.011489 | __author__ = 'shenojia'
import sys
sys.path.insert(0, '.')
sys.path.insert(0, '..')
from R12_36xxx.HighLayer import *
from R12_36211.REG import REG
from math import floor
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from xlsxwriter.worksheet import Worksheet
class PRB:
    """
    Resource Block

    A physical resource block is defined as N__symb___UL consecutive SC-FDMA symbols
    in the time domain and N__sc___RB_UL consecutive subcarriers in the frequency domain,
    A physical resource block is defined as N__symb___DL consecutive OFDM symbols
    in the time domain and N__sc___RB_DL consecutive subcarriers in the frequency domain
    """
    def __init__(self, n__PRB:int, n__s:int, h:int, w:int, subFrameTypeName:str):
        # n__PRB: physical resource block index; n__s: slot number;
        # h/w: block height (subcarriers) and width (symbols);
        # subFrameTypeName: 'D' (downlink), 'U' (uplink) or 'S' (special).
        self.n__PRB = n__PRB
        self.res = []   # resource elements assigned to this PRB
        self.regs = []  # resource element groups (REGs) in this PRB
        self.h = h #height in subcarriers
        self.w = w #width in symbols
        self.n__s = n__s
        # 'cell' comes from the star import of HighLayer -- border format
        # used when writing the PRB outline into the spreadsheet.
        self.prbFormat = cell.add_format({"bottom":1, "top":1, "left":1, "right":1})
        self.patch, self.codes, self.vertices = self._initPatch(n__s, subFrameTypeName)
    def appendReg(self, reg:REG):
        # Attach one REG to this PRB.
        self.regs.append(reg)
    def __repr__(self):
        return 'prb({})'.format(self.n__PRB)
    def _initPatch(self, n__s:int, subFrameTypeName:str):
        """
        generate path patch for this PRB
        :param n__s: slot number (selects DwPTS/UpPTS geometry for 'S' subframes)
        :param subFrameTypeName: 'D', 'U' or 'S'
        :return: (matplotlib PathPatch outlining the PRB, path codes, rectangle vertices)
        """
        vertices = []
        codes = []
        bottom = 0
        left = 0
        width = 0
        height = 0
        # Rectangle geometry in (symbol, subcarrier) coordinates, taken from
        # the link-direction-specific constants in 'conf'.
        if subFrameTypeName == 'D':
            bottom = self.n__PRB * conf.N__sc___RB_DL
            left = 0
            width = conf.N__symb___DL
            height = conf.N__sc___RB_DL
        if subFrameTypeName == 'U':
            bottom = self.n__PRB * conf.N__sc___RB_UL
            left = 0
            width = conf.N__symb___UL
            height = conf.N__sc___RB_UL
        if subFrameTypeName == 'S':
            # NOTE(review): 'n__s % 1' is always 0, so the UpPTS branch below
            # is unreachable -- this looks like it was meant to be 'n__s % 2'
            # (even slot = DwPTS, odd slot = UpPTS); confirm before changing.
            if n__s %1 == 0:
                bottom = self.n__PRB * conf.N__sc___RB_DL
                left = 0
                width = conf.N__symb___DwPTS
                height = conf.N__sc___RB_DL
            if n__s %1 == 1:
                bottom = self.n__PRB * conf.N__sc___RB_UL
                left = conf.N__symb___UpPTS
                width = conf.N__symb___UL - conf.N__symb___DwPTS
                height = conf.N__sc___RB_UL
        # Closed rectangle: move to origin corner, three lines, close.
        codes = [Path.MOVETO] + [Path.LINETO]*3 + [Path.CLOSEPOLY]
        vertices = [(left,bottom),
                    (left,bottom+height),
                    (left+width,bottom+height),
                    (left+width,bottom),
                    (0,0)]
        path = Path(vertices, codes)
        patch = PathPatch(path, facecolor='white', edgecolor='black', linewidth=2.0, fill='none' )
        patch.set_zorder(80)  # draw the outline above the RE fills
        return patch, codes, vertices
    def write(self, frame:Worksheet):
        """
        set board property of a PRB
        Copies the PRB border format onto the resource elements that sit on
        this PRB's edges, then writes every RE into the worksheet.
        :param frame: target xlsxwriter Worksheet
        :return:
        """
        for re in self.res:
            if re().k % self.h == 0 :
                # bottom --> mirror top
                re().reFormat.top = self.prbFormat.top
                if re().l % self.w == 0:
                    re().reFormat.left = self.prbFormat.left
                elif re().l % self.w == self.w - 1:
                    re().reFormat.right = self.prbFormat.right
            elif re().k % self.h == self.h - 1:
                # top -->mirror bottom
                re().reFormat.bottom = self.prbFormat.bottom
                if re().l % self.w == 0:
                    re().reFormat.left = self.prbFormat.left
                elif re().l % self.w == self.w - 1:
                    re().reFormat.right = self.prbFormat.right
            elif re().l % self.w == 0:
                re().reFormat.left = self.prbFormat.left
            elif re().l % self.w == self.w - 1:
                re().reFormat.right = self.prbFormat.right
            re().write(frame)
thydeyx/LeetCode-Python | Queue_Reconstruction_by_Height.py | Python | mit | 1,637 | 0.0281 | # -*- coding:utf-8 -*-
#
# Author : TangHanYi
# E-mail : thydeyx@163.com
# Create Date : 2016-11-23 04:33:03 PM
# Last modified : 2016-11-23 04:59:02 PM
# File Name : Queue_Reconstruction_by_Height.py
# Desc :
"""
#we have list of persons represented as [height, key] as input
#First fill the answer list with the tallest persons in ascending order of their keys eg: [7,0], [7,1], [7,2]
#Then fill the next tallest persons in the answer list at index same as key. eg: [7,0],[6,1],[7,1],[7,2],[6,5]
#so on
more explanation:
#First sort the input list in descending order of heights and ascending order of keys
#now iterate over the list and insert each person into answer array at index same as key of person.
eg: input : [[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]
sort input: [[7,0], [7,1], [6,1], [5,0], [5,2], [4,4]
iterate over sorted array and insert each person at index same as key of the person
answer array grows like this for each iteration.
"""
class Solution(object):
    def compare(self, pair_a, pair_b):
        """Old-style cmp: taller person first; among equal heights, smaller
        k first.  Kept for backward compatibility with existing callers."""
        if pair_a[0] > pair_b[0]:
            return -1
        elif pair_a[0] < pair_b[0]:
            return 1
        elif pair_a[1] > pair_b[1]:
            return 1
        elif pair_a[1] < pair_b[1]:
            return -1
        else:
            return 0

    def reconstructQueue(self, people):
        """Rebuild the queue from [height, k] pairs.

        Sort tallest-first (ties broken by ascending k), then insert each
        person at index k: people inserted later are shorter, so they never
        disturb the in-front counts of those already placed.

        Bug fix: uses a key function instead of ``sorted(..., cmp=...)`` --
        the ``cmp`` argument is Python-2-only and was removed in Python 3.
        The ordering produced is identical to the old ``compare`` method.

        :param people: list of [height, k] pairs
        :returns: the reconstructed queue as a list of [height, k] pairs
        """
        if not people:
            return []
        ordered = sorted(people, key=lambda p: (-p[0], p[1]))
        queue = []
        for person in ordered:
            queue.insert(person[1], person)
        return queue
if __name__ == "__main__":
    # Smoke test with the classic LeetCode 406 example.
    s = Solution()
    people = [[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]]
    # print(...) with a single argument works identically on Python 2 and 3
    # (the bare print statement is a syntax error on Python 3).
    print(s.reconstructQueue(people))
|
Pabsm94/HyperPlume | testing/tests_SSM/test_SSM.py | Python | mit | 5,920 | 0.050845 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 23 17:18:54 2016
@author: pablo
"""
import os
dir_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import sys
sys.path.append(dir_path) #change to src
from src import np,unittest,SSM,type_parks,type_korsun,type_ashkenazy,Hyperplume
class Test_SSM_plume(unittest.TestCase):
    """Validity tests on class SSM, checking each of its independent methods."""

    def test_SSM__init(self):
        """Test the SSM interface: several calling conventions for class SSM."""
        Plume1 = SSM()  # creation of SSM plume with default input arguments
        self.assertIsInstance(Plume1, SSM)      # categorization of Plume1
        self.assertIsNotNone(Plume1)            # object creation succeeded
        self.assertEqual(Plume1.M_0, 40)        # default attributes in place
        self.assertEqual(Plume1.d_0, 0.2)

        Plasma = Hyperplume().simple_plasma(1, 1, 1, 1)
        Z_span = np.linspace(0, 100, 500)
        eta_0 = np.linspace(0, 40, 500)
        nu = np.exp(-0.047 * eta_0 ** 2)
        # Alternative creation of an SSM plume with user-given inputs.
        Plume3 = SSM(Plasma, 40, 0.2, Z_span, eta_0, nu)
        self.assertIsInstance(Plume3, SSM)
        self.assertIsNotNone(Plume3)

        upsilon = np.ones(nu.size)
        # Only the initial density vector may be passed to SSM: supplying both
        # an initial velocity and a density profile must raise TypeError.
        # (Bug fix: the original passed invalid 'Plume4='/'args=' keywords to
        # assertRaises, so this check never actually executed.)
        self.assertRaises(TypeError, SSM, Plasma, 40, 0.2, Z_span, eta_0, nu, upsilon)

    def test_solver(self):
        """Tests on the SSM.solver() method."""
        Plasma = Hyperplume().simple_plasma(1, 1, 1, 1)
        Z_span = np.linspace(0, 100, 500)
        eta_0 = np.linspace(0, 40, 500)
        C_user = 6.15
        n0 = np.exp(-C_user / 2 * eta_0 ** 2)
        Plume = SSM(Plasma, 40, 0.2, Z_span, eta_0, n0)  # creation of SSM plume object
        Plume.solver()  # good call on SSM method solver

        z, r = np.linspace(0, 10, 5000), np.linspace(0, 3, 5000)
        # solver() takes no positional arguments, so calling it with (z, r)
        # must raise TypeError.  (Bug fix: invalid 'args=' keyword replaced
        # with the proper assertRaises(callable, *args) form.)
        self.assertRaises(TypeError, Plume.solver, z, r)

        self.assertIsNotNone(Plume.nu_prime_interp)  # solver stored plume variables
        self.assertIsNotNone(Plume.h_interp)  # self-similar dilation h/dh tracked
        # Model-calculated dimensioning constant C vs the user-given constant.
        self.assertAlmostEqual(Plume.C, C_user, places=0)

    def test_upsilon_compare(self):
        """Compare the general SSM framework against theoretical plume profiles."""
        P = Hyperplume().simple_plasma(1, 1, 1, 1)
        Z_span = np.linspace(0, 100, 500)
        eta_0 = np.linspace(0, 40, 500)
        n0_parks = np.exp(-np.log(0.05) * eta_0 ** 2)  # density for a Parks-type plume
        upsilon_parks = np.ones(eta_0.size)  # dimensionless axial velocity (Parks)
        Plume = SSM(P, 40, 0.2, Z_span, eta_0, n0_parks)
        Plume.solver()
        # Model-returned upsilon vs the (constant) theoretical profile.
        self.assertAlmostEqual(float(Plume.upsilon_interp(3)),
                               float(np.any(upsilon_parks)), places=0)

    def test_query(self):
        """Assertiveness of method query inside the SSM plume class."""
        P = Hyperplume().simple_plasma(1.6e-19, 2, 1.5, 1.4)
        Z_span = np.linspace(0, 100, 50)
        eta_0 = np.linspace(0, 40, 50)
        n0 = np.exp(-0.05 * eta_0 ** 2)
        Plume = SSM(P, 20, 0.2, Z_span, eta_0, n0)
        Plume.solver()
        z_target = np.array([15, 20, 25, 30])
        r_target = np.array([20, 25, 30, 35])
        Z_target, R_target = np.meshgrid(z_target, r_target)
        n, u_z, u_r, T, phi, error, etaSSM = Plume.query(Z_target, R_target)
        self.assertIsNotNone(n)  # query returned plume variables
        self.assertIsNotNone(T)
        # Returned grids must match the shape of the targeted points.
        self.assertEqual(n.shape, Z_target.shape, R_target.shape)

    def test_types(self):
        """Check theoretical Parks, Ashkenazy and Korsun model plume creation."""
        P = Hyperplume().simple_plasma(1, 1, 1, 1)
        Z_span = np.linspace(0, 100, 500)
        eta_0 = np.linspace(0, 40, 500)
        Plume = type_parks()
        Plume1 = type_parks(P, 30, 0.3, Z_span, eta_0, 0.5)
        self.assertIsNotNone(Plume, Plume1)  # test type_ interface

        P1 = Hyperplume().simple_plasma(1, 1, 1, 1.4)
        Plume2 = type_parks(P1, 30, 0.3, Z_span, eta_0, 0.5)
        Plume3 = type_korsun(P1, 30, 0.3, Z_span, eta_0, 0.5)
        Plume4 = type_ashkenazy(P1, 30, 0.3, Z_span, eta_0, 0.5)
        # Validity of the Gamma value for the different plume types.
        self.assertIsNone(Plume2, Plume4)
        self.assertIsNotNone(Plume3)

        nu = 1 / (1 + 0.047 * eta_0 ** 2)  # (fixed duplicated 'nu = nu =' assignment)
        # For type_ plumes the initial density profile is not an input, so
        # supplying one must raise TypeError.  (Bug fix: invalid 'Plume5='/
        # 'args=' keywords replaced with the proper assertRaises form.)
        self.assertRaises(TypeError, type_parks, P1, 30, 0.3, Z_span, eta_0, 0.5, nu)
if __name__ == '__main__':  # when run directly, for testing only
    # self-test code: run the unittest test runner on this module
    unittest.main()
|
gary-pickens/HouseMonitor | housemonitor/outputs/zigbee/zigbeeoutputstep.py | Python | mit | 2,202 | 0.012262 | '''
Created on 2012-11-06
@author: Gary
'''
from housemonitor.steps.abc_step import abcStep
from housemonitor.lib.constants import Constants
from housemonitor.lib.hmqueue import HMQueue
import random
class ZigBeeOutputStep( abcStep ):
    '''
    Pipeline step that forwards output data to the ZigBee processing thread.

    This object should be started with the output thread and hang around
    forever.
    '''

    queue = None
    ''' A Queue for communicating between threads. '''

    def __init__( self, queue ):
        '''
        Initialize ZigBeeOutputStep.

        :param queue: an object which communicates between threads
        :type queue: HMQueue
        '''
        super( ZigBeeOutputStep, self ).__init__()
        self.queue = queue
        self.logger.debug( "ZigBeeOutputStep started" )

    @property
    def topic_name( self ):
        ''' The topic name to which this routine subscribes. '''
        return Constants.TopicNames.ZigBeeOutput

    @property
    def logger_name( self ):
        ''' Key selecting the logger configuration for this step. '''
        return Constants.LogKeys.outputsZigBee

    def step( self, value, data=None, listeners=None ):
        """
        Forward one value to the ZigBee output thread.

        Tags the packet with a random one-byte ID, attaches the value and the
        list of subscribed listeners, then queues it for the ZigBee thread at
        three-quarters priority.

        Bug fix: the original signature used mutable defaults
        (``data={}, listeners=[]``); because ``data`` is mutated below, the
        shared default dict leaked state between calls.  ``None`` sentinels
        preserve the call-site behavior without the shared-state hazard.

        :param value: The input value to be processed
        :type value: int, float, string, etc
        :param data: a dictionary containing more information about the value.
        :param listeners: a list of the subscribed routines to send the data to
        :returns: value, data, listeners
        :rtype: tuple
        """
        if data is None:
            data = {}
        if listeners is None:
            listeners = []
        data[Constants.DataPacket.ID] = int( random.random() * 255.0 )
        data[Constants.DataPacket.value] = value
        data[Constants.DataPacket.listeners] = listeners
        self.queue.transmit( data, self.queue.THREE_QUARTERS_PRIORITY )
        self.logger.debug( "ZigBee Step data transmitted to ZigBee thread: value = {} data = {}".format( value, data ) )
        return value, data, listeners
|
thunderboltsid/stampman | stampman/tests/test_services.py | Python | mit | 1,776 | 0 | import unittest
import os
from stampman.services import pool, sendgrid, mailgun
from stampman.helpers import config_, mail_
class PoolServiceTest(unittest.TestCase):
    def test_creation(self):
        # Smoke test: the pooled service can be instantiated without arguments.
        pool.PooledService()
class TestSendgridEmailService(unittest.TestCase):
    def setUp(self):
        # API key and mail domain are read from the environment, so these
        # tests require SENDGRID_API_KEY and MAIL_DOMAIN to be set.
        self._config = config_.ServiceConfig("sendgrid",
                                             os.environ.get(
                                                 'SENDGRID_API_KEY'), 1)
        self._domain = os.environ.get('MAIL_DOMAIN')
        self._service = sendgrid.SendgridEmailService(config=self._config,
                                                      domain=self._domain)
        self._email = mail_.Email(sender=("Test", "sid@waveroll.io"),
                                  recipients=["thunderboltsid@gmail.com"],
                                  subject="test",
                                  content="test_sendgrid")

    def test_send_email(self):
        # Sends a real message through the Sendgrid API (network side effect).
        self._service.send_email(self._email)
class TestMailgunEmailService(unittest.TestCase):
    def setUp(self):
        # NOTE(review): the config is created with the name "sendgrid" even
        # though this is the Mailgun test -- looks like a copy/paste slip;
        # confirm whether ServiceConfig's name field matters here.
        self._config = config_.ServiceConfig("sendgrid",
                                             os.environ.get('MAILGUN_API_KEY'),
                                             1)
        self._domain = os.environ.get('MAIL_DOMAIN')
        self._service = mailgun.MailgunEmailService(config=self._config)
        self._email = mail_.Email(sender=("Test", "sid@waveroll.io"),
                                  recipients=["thunderboltsid@gmail.com"],
                                  subject="test",
                                  content="test_mailgun")

    @unittest.expectedFailure
    def test_send_email(self):
        # Sends a real message through the Mailgun API (network side effect);
        # currently expected to fail.
        self._service.send_email(self._email)
|
toastar/Python-Segy-Bandpass | freq.py | Python | bsd-2-clause | 1,915 | 0.031854 | from numpy import sin, linspace, pi, array, empty
from pylab import plot, show, title, xlabel, ylabel, subplot
from scipy import fft, arange, signal
from obspy.segy.core import readSEGY
#
# Input Parameters Block
#
filename='test.sgy';  # input SEG-Y file to read
Lowcut = 8 # Low corner frequency of the band-pass (Hz)
Highcut = 60 # High corner frequency of the band-pass (Hz)
order = 5 # Butterworth filter order; higher number means a harsher cutoff
#
#
#
def plotSpectrum(y,Fs):
    """
    Plot the single-sided amplitude spectrum of a signal.

    :param y: 1-D array of samples
    :param Fs: sampling rate in samples per second
    """
    n = len(y) # length of the signal (bug fix: was len(Data), silently using the global instead of the argument)
    k = arange(n)
    T = n/Fs
    frq = k/T # two sides frequency range
    # NOTE(review): n/2 relies on Python 2 integer division; use n//2 on Python 3.
    frq = frq[range(n/2)] # one side frequency range

    Y = fft(y)/n # fft computing and normalization
    Y = Y[range(n/2)]

    plot(frq,abs(Y),'r')
    xlabel('Freq (Hz)')
    ylabel('|Y(freq)|')
# End function space and begin main section.
segy = readSEGY(filename)
Data = segy.traces[0].data # First trace with header stripped: a numpy array of samples.
Fs = 250.0; # sampling rate ( Samples per second.)
datalen = len(Data) # number of samples in the trace
count = arange(datalen)
rate = -1* count.astype(float) / 250.0 # sample index -> time in seconds (negated for top-down plotting)
# End number crunching and begin building plots.
subplot(2,2,1)
plot(Data,rate)
ylabel('Time')
xlabel('Amplitude')
subplot(6,1,4)
plotSpectrum(Data,Fs)
show()
# Begin bandpass: corner frequencies normalized to the Nyquist frequency.
low = Lowcut * 2 / Fs # 2 / Fs is the Nyquist freq / first number is the pass freq
high = Highcut * 2 / Fs
subplot(2,2,2)
b, a = signal.butter(order, [low,high], btype='band') # Build the filter
BP_Data = signal.lfilter(b, a, Data) # Filter Data
plot(BP_Data,rate)
subplot(6,1,6)
# Plot bandpassed trace and its spectrum
#plot(Data[::-1],rate)
ylabel('Time')
xlabel('Amplitude')
plotSpectrum(BP_Data,Fs)
w, h = signal.freqz(b, a, worN=2000)  # filter frequency response
subplot(6,1,5)
plot((Fs * 0.5 / pi) * w, abs(h))
show()
|
Tungul/pythonplayground | switch.py | Python | mit | 2,791 | 0.005016 | # Got this from http://code.activestate.com/recipes/410692/
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
class switch(object):
    """C-style switch/case emulation (ActiveState recipe 410692).

    Iterating a ``switch`` yields its ``match`` method exactly once; each
    ``case(...)`` call tests the switched value and supports fall-through.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # Bug fix: 'raise StopIteration' inside a generator is converted to
        # RuntimeError on Python 3.7+ (PEP 479); a plain 'return' ends the
        # generator identically on every Python version.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            # Fall-through from a previous case, or the default case().
            return True
        elif self.value in args: # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
# The following example is pretty much the exact use-case of a dictionary,
# but is included for its simplicity. Note that you can include statements
# in each suite.
# (All prints below use the single-argument print(...) form, which produces
# identical output on Python 2 and is valid syntax on Python 3, unlike the
# original bare print statements.)
v = 'ten'
for case in switch(v):
    if case('one'):
        print(1)
        break
    if case('two'):
        print(2)
        break
    if case('ten'):
        print(10)
        break
    if case('eleven'):
        print(11)
        break
    if case(): # default, could also just omit condition or 'if True'
        print("something else!")
        # No need to break here, it'll stop anyway

# break is used here to look as much like the real thing as possible, but
# elif is generally just as good and more concise.

# Empty suites are considered syntax errors, so intentional fall-throughs
# should contain 'pass'
c = 'z'
for case in switch(c):
    if case('a'): pass # only necessary if the rest of the suite is empty
    if case('b'): pass
    # ...
    if case('y'): pass
    if case('z'):
        print("c is lowercase!")
        break
    if case('A'): pass
    # ...
    if case('Z'):
        print("c is uppercase!")
        break
    if case(): # default
        print("I dunno what c was!")

# As suggested by Pierre Quentel, you can even expand upon the
# functionality of the classic 'case' statement by matching multiple
# cases in a single shot. This greatly benefits operations such as the
# uppercase/lowercase example above:
import string
c = 'A'
for case in switch(c):
    # NOTE(review): string.lowercase/string.uppercase exist on Python 2 only;
    # on Python 3 these are string.ascii_lowercase/string.ascii_uppercase.
    if case(*string.lowercase): # note the * for unpacking as arguments
        print("c is lowercase!")
        break
    if case(*string.uppercase):
        print("c is uppercase!")
        break
    if case('!', '?', '.'): # normal argument passing style also applies
        print("c is a sentence terminator!")
        break
    if case(): # default
        print("I dunno what c was!")

# Since Pierre's suggestion is backward-compatible with the original recipe,
# I have made the necessary modification to allow for the above usage.
from django.contrib import admin

# Register your models here.
from learning_logs.models import Topic, Entry

# Expose Topic and Entry for management through the Django admin site.
admin.site.register(Topic)
admin.site.register(Entry)
kostyll/kivy-okapi | okapi/screen_manager.py | Python | mit | 5,182 | 0.001351 | from __future__ import print_function, absolute_import, unicode_literals
# Kivy
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
class ScreenManager(BoxLayout):
    """
    Class that handles toggling between windows. So that's managing
    the loading screen, game screen, victory screen, high scores, etc.
    """
    # Subclasses may set this instead of passing ``game_class`` to __init__.
    GAME_CLASS = None

    def __init__(self, configuration, game_class=None):
        self._current_screen = None
        self.configuration = configuration

        # Allow this to be passed in to prevent _forcing_ a developer
        # to define a child window manager *just* to specify this
        # reference
        self.game_class = game_class or self.GAME_CLASS

        super(ScreenManager, self).__init__()

        # Load up and audit the ``welcome_screen``
        self.welcome_screen = self.get_welcome_screen()
        if self.welcome_screen:
            assert hasattr(self.welcome_screen, 'starter'), 'Must put a clickable attribute ``starter`` on welcome screen used to start the game.'
            self.welcome_screen.starter.bind(on_press=self.start_game)
            self.current_screen = self.welcome_screen
        else:
            # No welcome screen: jump straight into the game.
            self.start_game(None)

    def get_welcome_screen(self, *args, **kwargs):
        """Should return some sort of ``Widget`` to use as your game's welcome screen."""
        return None

    def get_game_class(self):
        """Helper to get the actual Game Class."""
        assert self.game_class is not None, 'Failed to set a Game class on ``ScreenManager``'
        return self.game_class

    def get_game(self, **kwargs):
        """Should return the actual Game object."""
        kwargs.setdefault('screen_manager', self)
        return self.get_game_class()(**kwargs)

    def render(self):
        """Clear all children and attach the current screen's widget
        (or its ``container``, if it defines one)."""
        self.clear_widgets()
        widget = self.current_screen
        if hasattr(self.current_screen, 'container'):
            widget = self.current_screen.container
        self.add_widget(widget)

    @property
    def current_screen(self):
        """
        Property used to wrap whichever screen is current to facilitate
        the swapping of screens and also automatically directing all user
        input to the correct screen.
        """
        return self._current_screen

    def unregister_with_clock(self):
        """
        Unschedules the outgoing screen's clock callback(s); accepts either:
            (cb, freq,)
        or
            (
                (cb, freq,),
                (cb, freq,),
                ...
            )
        """
        if getattr(self, '_current_screen', None) and getattr(self._current_screen, 'get_clock_tuple', None):
            clock_tuple = self._current_screen.get_clock_tuple()
            if clock_tuple:
                if isinstance(clock_tuple[0], tuple):
                    for entry in clock_tuple:
                        Clock.unschedule(entry[0])
                else:
                    Clock.unschedule(clock_tuple[0])

    def register_with_clock(self):
        """
        Schedules the incoming screen's clock callback(s); accepts either:
            (cb, freq,)
        or
            (
                (cb, freq,),
                (cb, freq,),
                ...
            )
        """
        if getattr(self._current_screen, 'get_clock_tuple', None):
            clock_tuple = self._current_screen.get_clock_tuple()
            if clock_tuple:
                if isinstance(clock_tuple[0], tuple):
                    for entry in clock_tuple:
                        Clock.schedule_interval(*entry)
                else:
                    Clock.schedule_interval(*clock_tuple)

    @current_screen.setter
    def current_screen(self, value):
        """
        Setter for the @current_screen prop that also swaps clock
        registrations and triggers a render.
        """
        self.unregister_with_clock()
        self._current_screen = value
        self.register_with_clock()
        self.render()

    def start_game(self, instance):
        """Handler for the ``click to continue`` click."""
        self.game = self.get_game(
            configuration=self.configuration
        )
        self.game.start()
        self.update_screen_from_game()

    def get_screen_from_game(self):
        """Map the game state to a screen widget; must be overridden."""
        raise NotImplementedError("Your `ScreenManager` class must implement this function.")

    def update_screen_from_game(self):
        """Refresh the displayed screen from the current game state."""
        self.current_screen = self.get_screen_from_game()

    def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Dispatch key presses to handler methods looked up by name:
        ``on_press_<key>`` on the current screen (and the game), or
        ``on_press_<mod1>_<mod2>_<key>`` when modifiers are held."""
        if not modifiers:
            command_name = 'on_press_{}'.format(keycode[1])
            command = getattr(self.current_screen, command_name, None)
            if command is not None:
                command()

        if modifiers:
            modifiers.sort()
            modifiers_string = "_".join(modifiers)
            command_name = 'on_press_{}_{}'.format(modifiers_string, keycode[1])
            command = getattr(self.current_screen, command_name, None)
            if command is not None:
                # Bug fix: the original evaluated the bare name ``command``
                # here without calling it, so modifier-key handlers never ran.
                command()

        if getattr(self, 'game', None):
            command = getattr(self.game, 'on_press_{}'.format(keycode[1]), None)
            if command:
                command()
|
karthik-sethuraman/ONFOpenTransport | RI/flask_server/tapi_server/controllers/tapi_photonic_media_controller.py | Python | apache-2.0 | 88,036 | 0 | import connexion
import six
from tapi_server.models.tapi_photonic_media_application_identifier import TapiPhotonicMediaApplicationIdentifier # noqa: E501
from tapi_server.models.tapi_photonic_media_central_frequency import TapiPhotonicMediaCentralFrequency # noqa: E501
from tapi_server.models.tapi_photonic_media_fec_properties_pac import TapiPhotonicMediaFecPropertiesPac # noqa: E501
from tapi_server.models.tapi_photonic_media_frequency_constraint import TapiPhotonicMediaFrequencyConstraint # noqa: E501
from tapi_server.models.tapi_photonic_media_laser_properties_pac import TapiPhotonicMediaLaserPropertiesPac # noqa: E501
from tapi_server.models.tapi_photonic_media_media_channel_assembly_spec import TapiPhotonicMediaMediaChannelAssemblySpec # noqa: E501
from tapi_server.models.tapi_photonic_media_media_channel_connection_end_point_spec import TapiPhotonicMediaMediaChannelConnectionEndPointSpec # noqa: E501
from tapi_server.models.tapi_photonic_media_media_channel_node_edge_point_spec import TapiPhotonicMediaMediaChannelNodeEdgePointSpec # noqa: E501
from tapi_server.models.tapi_photonic_media_media_channel_pool_capability_pac import TapiPhotonicMediaMediaChannelPoolCapabilityPac # noqa: E501
from tapi_server.models.tapi_photonic_media_media_channel_properties_pac import TapiPhotonicMediaMediaChannelPropertiesPa | c # noqa: E501
from tapi_server.models.tapi_photonic_media_media_channel_service_interface_point_spec import TapiPhotonicMediaMediaChannelServiceInterfacePointSpec # noqa: E501
from tapi_server.models.tapi_photonic_media_ots_connection_end_point_spec import TapiPhotonicMediaOtsConnec | tionEndPointSpec # noqa: E501
from tapi_server.models.tapi_photonic_media_otsi_assembly_connection_end_point_spec import TapiPhotonicMediaOtsiAssemblyConnectionEndPointSpec # noqa: E501
from tapi_server.models.tapi_photonic_media_otsi_capability_pac import TapiPhotonicMediaOtsiCapabilityPac # noqa: E501
from tapi_server.models.tapi_photonic_media_otsi_connection_end_point_spec import TapiPhotonicMediaOtsiConnectionEndPointSpec # noqa: E501
from tapi_server.models.tapi_photonic_media_otsi_connectivity_service_end_point_spec import TapiPhotonicMediaOtsiConnectivityServiceEndPointSpec # noqa: E501
from tapi_server.models.tapi_photonic_media_otsi_gserver_adaptation_pac import TapiPhotonicMediaOtsiGserverAdaptationPac # noqa: E501
from tapi_server.models.tapi_photonic_media_otsi_service_interface_point_spec import TapiPhotonicMediaOtsiServiceInterfacePointSpec # noqa: E501
from tapi_server.models.tapi_photonic_media_otsi_termination_config_pac import TapiPhotonicMediaOtsiTerminationConfigPac # noqa: E501
from tapi_server.models.tapi_photonic_media_otsi_termination_pac import TapiPhotonicMediaOtsiTerminationPac # noqa: E501
from tapi_server.models.tapi_photonic_media_power_properties_pac import TapiPhotonicMediaPowerPropertiesPac # noqa: E501
from tapi_server.models.tapi_photonic_media_spectrum_band import TapiPhotonicMediaSpectrumBand # noqa: E501
from tapi_server.models.tapi_photonic_media_total_power_threshold_pac import TapiPhotonicMediaTotalPowerThresholdPac # noqa: E501
from tapi_server import util
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_media_channel_service_interface_point_spec_delete(uuid, local_id):  # noqa: E501
    """Remove a tapi.photonic.media.MediaChannelServiceInterfacePointSpec.

    :param uuid: Id of connectivity-service
    :type uuid: str
    :param local_id: Id of end-point
    :type local_id: str

    :rtype: None
    """
    # connexion-generated stub -- server-side logic not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_media_channel_service_interface_point_spec_get(uuid, local_id):  # noqa: E501
    """Return a tapi.photonic.media.MediaChannelServiceInterfacePointSpec.

    :param uuid: Id of connectivity-service
    :type uuid: str
    :param local_id: Id of end-point
    :type local_id: str

    :rtype: TapiPhotonicMediaMediaChannelServiceInterfacePointSpec
    """
    # connexion-generated stub -- server-side logic not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_media_channel_service_interface_point_spec_mc_pool_available_spectrumupper_frequencylower_frequency_frequency_constraint_get(uuid, local_id, upper_frequency, lower_frequency):  # noqa: E501
    """Return the tapi.photonic.media.FrequencyConstraint of an available-spectrum slice.

    :param uuid: Id of connectivity-service
    :type uuid: str
    :param local_id: Id of end-point
    :type local_id: str
    :param upper_frequency: Id of available-spectrum
    :type upper_frequency: int
    :param lower_frequency: Id of available-spectrum
    :type lower_frequency: int

    :rtype: TapiPhotonicMediaFrequencyConstraint
    """
    # connexion-generated stub -- server-side logic not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_media_channel_service_interface_point_spec_mc_pool_available_spectrumupper_frequencylower_frequency_get(uuid, local_id, upper_frequency, lower_frequency):  # noqa: E501
    """Return the tapi.photonic.media.SpectrumBand for an available-spectrum slice.

    :param uuid: Id of connectivity-service
    :type uuid: str
    :param local_id: Id of end-point
    :type local_id: str
    :param upper_frequency: Id of available-spectrum
    :type upper_frequency: int
    :param lower_frequency: Id of available-spectrum
    :type lower_frequency: int

    :rtype: TapiPhotonicMediaSpectrumBand
    """
    # connexion-generated stub -- server-side logic not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_media_channel_service_interface_point_spec_mc_pool_get(uuid, local_id):  # noqa: E501
    """Return the tapi.photonic.media.MediaChannelPoolCapabilityPac of an end-point.

    :param uuid: Id of connectivity-service
    :type uuid: str
    :param local_id: Id of end-point
    :type local_id: str

    :rtype: TapiPhotonicMediaMediaChannelPoolCapabilityPac
    """
    # connexion-generated stub -- server-side logic not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_media_channel_service_interface_point_spec_mc_pool_occupied_spectrumupper_frequencylower_frequency_frequency_constraint_get(uuid, local_id, upper_frequency, lower_frequency):  # noqa: E501
    """Return the tapi.photonic.media.FrequencyConstraint of an occupied-spectrum slice.

    :param uuid: Id of connectivity-service
    :type uuid: str
    :param local_id: Id of end-point
    :type local_id: str
    :param upper_frequency: Id of occupied-spectrum
    :type upper_frequency: int
    :param lower_frequency: Id of occupied-spectrum
    :type lower_frequency: int

    :rtype: TapiPhotonicMediaFrequencyConstraint
    """
    # connexion-generated stub -- server-side logic not implemented yet.
    placeholder = 'do some magic!'
    return placeholder
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_media_channel_service_interface_point_spec_mc_pool_occupied_spectrumupper_frequencylower_frequency_get(uuid, local_id, upper_frequency, lower_frequency): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_media_channel_service_interface_point_spec_mc_pool_occupied_spectrumupper_frequencylower_frequency_get
returns tapi.photonic.media.SpectrumBand # noqa: E501
:param uuid: Id |
googlearchive/simian | src/simian/mac/admin/xsrf.py | Python | apache-2.0 | 2,004 | 0.009481 | #!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""XSRF generator/validator."""
import base64
import hmac
import os
import time
from google.appengine.api import users
from simian import settings
XSRF_DELIMITER = '|#|'
XSRF_VALID_TIME = 3600 # Seconds = 60 minutes
def XsrfTokenGenerate(action, user=None, timestamp=None):
  """Generate an XSRF token.

  Args:
    action: str, name of the action this token authorizes.
    user: str, email of the acting user; defaults to the signed-in user.
    timestamp: float, creation time in seconds since epoch; defaults to now.

  Returns:
    str, urlsafe-base64 encoding of hmac_digest + '|#|' + timestamp.
  """
  if not user:
    user = users.get_current_user().email()
  if not timestamp:
    timestamp = time.time()
  timestr = str(timestamp)
  try:
    secret = settings.XSRF_SECRET
  except AttributeError:
    # No configured secret: create a random one and cache it in settings.
    # NOTE: .encode('base64') is Python-2-only bytes-to-str encoding.
    secret = os.urandom(16).encode('base64')[:20]
    settings.XSRF_SECRET = secret
  secret = str(secret)  # hmac secrets cannot be unicode.
  # NOTE(review): hmac.new with no digestmod defaults to MD5 here; changing
  # it would invalidate outstanding tokens -- confirm before hardening.
  h = hmac.new(secret, XSRF_DELIMITER.join([user, action, timestr]))
  return base64.urlsafe_b64encode(
      ''.join([h.digest(), XSRF_DELIMITER, timestr]))
def XsrfTokenValidate(token, action, user=None, | timestamp=None, time_=time):
"""Validate an XSRF token."""
if not token:
return False
if not user:
user = users.get_current_user().email()
if not timestamp:
try:
_, timestr = base64.urlsafe_b64decode(str(token)).rsplit(
XSRF_DELIMITER, 1)
timestamp = float(timestr)
except (ValueError, TypeError):
return False
i | f timestamp + XSRF_VALID_TIME < time_.time():
return False
if token != XsrfTokenGenerate(action, user, timestamp):
return False
return True
|
marcusbuffett/command-line-chess | src/MoveNode.py | Python | mit | 1,936 | 0 | class MoveNode:
def __init__(self, move, children, parent):
self.move = move
self.children = children
self.parent = parent
self.pointAdvantage = None
self.depth = 1
def __str__(self):
stringRep = "Move : " + str(self.move) + \
" Point advantage : " + str(self.pointAdvantage) + \
" Checkmate : " + str(self.move.checkmate)
stringRep += "\n"
for child in self.children:
stringRep += " " * self.getDepth() * 4
stringRep += str(child)
return stringRep
def __gt__(self, other):
if self.move.checkmate and not other.move.checkmate:
return True
if not self.move.checkmate and other.move.checkmate:
return False
if self.move.checkmate and other.move.checkmate:
return False
return self.pointAdvantage > other.pointAdvantage
def __lt__(self, other):
if self.move.checkmate and not other.move.checkmate:
return False
if not self.move.checkmate and other.move.checkmate:
return True
if self.move.stalemate and other.move.stalemate:
return False
return self.pointAdvantage < other.pointAdvantage
def __eq__(self, other):
if self.move.checkmate and other.move.checkmate:
return True
return self.pointAdvantage == other.pointAdvantage
def getHighestNode(self):
highestNode = self
while True:
if highestNode.parent is not None:
highestNode = highestNode.parent
els | e:
| return highestNode
def getDepth(self):
depth = 1
highestNode = self
while True:
if highestNode.parent is not None:
highestNode = highestNode.parent
depth += 1
else:
return depth
|
mohsraspi/mhscs14 | jay/wowobsidian.py | Python | gpl-2.0 | 437 | 0.025172 | import minecraft as minecraft
import random
import time
x = 128
y = 2
z | = 128
mc = minecraft.Minecraft.create()
while y < 63:
j = mc.getBlock(x,y,z)
if j == 0:
mc.setBlock(x,y,z,8)
z = z - 1
if z <= -128:
z = 128
x = x - 1
if x<= -128:
x = 128
| y = y + 1
|
lintzc/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/pg_twophase/switch_ckpt_serial/cleanup_sql/test_cleanup.py | Python | apache-2.0 | 845 | 0.001183 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, e | ither express or implied.
See the License for the specific language governing permissions | and
limitations under the License.
"""
from mpp.models import SQLConcurrencyTestCase
'''
Cleanup sqls
'''
class TestCleanupClass(SQLConcurrencyTestCase):
'''
Cleanup sqls before the next test.
'''
|
JeromeErasmus/browserstack_automate | automate/server/user/views.py | Python | apache-2.0 | 2,112 | 0.009943 | # automate/server/user/views.py
#################
#### imports ####
#################
#from flask import render_template, Blueprint, url_for, \
# redirect, flash, request
#from flask_login import login_user, logout_user, login_required
#from automate.server import bcrypt, db
#from automate.server import db
#from automate.server.models import User
#from automate.server.user.forms import LoginForm, RegisterForm
################
#### config ####
################
#user_blueprint = Blueprint('user', __name__,)
################
#### routes ####
################
#@user_blueprint.route('/register', methods=['GET', 'POST'])
#def register():
# form = RegisterForm(request.form)
# if form.validate_on_submit():
# user = User(
# email=form.email.data,
# password=form.password.data
# )
# db.session.add(user)
# db.session.commit()
#
# login_user(user)
#
# flash('Thank you for registering.', 'success')
# return redirect(url_for("user.members"))
#
# return render_template('user/register.html', form=form)
#
#
#@user_blueprint.route('/login', methods=['GET', 'POST'])
#def login():
# | form = LoginForm(request.form)
# if form.validate_on_submit():
# user = User.query.filter_by(email=form.email.data).first()
# if user:
# #if user and bcrypt.check_password_hash(
# # user.password, request.form['password']):
# # login_user(user)
# flash('You are logged in. Welcome!', 'succes | s')
# return redirect(url_for('user.members'))
# else:
# flash('Invalid email and/or password.', 'danger')
# return render_template('user/login.html', form=form)
# return render_template('user/login.html', title='Please Login', form=form)
#
#
#@user_blueprint.route('/logout')
#@login_required
#def logout():
# logout_user()
# flash('You were logged out. Bye!', 'success')
# return redirect(url_for('main.home'))
#
#
#@user_blueprint.route('/members')
#@login_required
#def members():
# return render_template('user/members.html')
# |
inuitwallet/bippy | num/rand.py | Python | mit | 2,418 | 0.02895 | import hashlib
import num.elip as elip
import num.enc as enc
def clockbase():
"""
256 bit hex: 4 x 16 byte long from float using clock (process time) + time (UTC epoch time)
Note: not enough clock precision on Linuxes to be unique between two immediate calls
"""
from struct import pack
from time import time, clock
return pack('<dddd', clock(), time(), clock(), time()).encode('hex')
def clockrnd():
"""
512 bit int: random delay while hashing data,
return result of 192-1725 time-based hashes.
execution time on 2.8GHz Core2: 1.8-15.7ms
"""
loopcount = 64 + int(hashlib.sha256(clockbase()).hexdigest()[:3], 16)/8 # 64-575 loops, random
hash1 = hash2 = int(clockbase()+clockbase(), 16)
for i in xrange(loopcount):
hash1 ^= int(hashlib.sha512(clockbase() + hashlib.sha512(clockbase()).hexdigest()).hexdigest(), 16)
hash2 ^= int(hashlib.sha512((hex(hash1)) + ('%d' % hash1)).hexdigest(), 16)
return hash1 ^ hash2
def entropy(entropy):
"""
512 bit random number from mouse co-ords and timer
"""
hashes = clockrnd()
x = []
y = []
for coord in entropy:
hashes ^= clockrnd()
for char in str(coord[0]):
x.append(char)
for char in str(coord[1]):
y.append(char)
hashes ^= clockrnd()
mouse = enc.sxor(x,y)
return hashes ^ int(hashlib.sha512(str(mouse)*8).hexdigest(), 16)
def randomKey(entropy):
"""
256 bit number from equally strong urandom, user entropy, and timer parts
"""
if entropy.bit_length() < 250:
print('Insufficient entropy parameter to generate key')
return False
from random import SystemRandom
osrndi = SystemRandom()
entstr = enc.encode(entropy, 16) + enc.encode(osrndi.getrandbits(512), 256) + str(clockrnd())
osrnd = SystemRandom(entstr)
privkey = 0
while privkey < 1 or privkey > elip.N:
privkey = enc.decode(hashlib.sha256(enc.encode(osrnd.getrandbits(512), 256)).digest(), 256) ^ osrnd.getrandbits(256)
for lbit in xrange(clockrnd() % 64 + 64):
clockstr = hex(clockrnd()) + str(clockrnd()) + entstr
# Slice a moving 256 bit window out of SHA512
clock32 = hashlib.sha512(c | lockst | r).digest()[1+(lbit % 29): 33+(lbit % 29)]
randhash = hashlib.sha512(enc.encode(osrnd.getrandbits(512), 256)).digest()[0+(lbit % 31): 32+(lbit % 31)]
privkey ^= enc.decode(randhash, 256) ^ enc.decode(clock32, 256) ^ osrndi.getrandbits(256)
osrnd = SystemRandom(hashlib.sha512(clock32 + randhash + entstr).digest()) # reseed
return privkey
|
remcohaszing/pywakeonlan | docs/conf.py | Python | mit | 534 | 0 | """
Configuratio | n for the documentation generation.
"""
import pkg_resources
project = "wakeonlan"
_dist = pkg_resources.get_distribution(project)
version = _dist.version
release = _dist.version
copyright = "2012, Remco Haszing"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.6", None),
}
nitpicky = True
default_role = "any"
todo_include_todos = True
master_doc = "index"
html_theme = "sphinx | _rtd_theme"
|
rojassergio/Aprendiendo-a-programar-en-Python-con-mi-computador | Programas_Capitulo_02/Cap02_pagina_25_comp_interactiva.py | Python | mit | 746 | 0.014765 | '''
@author: Sergio Rojas
@contact: rr.sergio@gma | il.com
--------------------------
Contenido bajo
Atribución-NoComercial-CompartirIgual 3.0 Venezuela (CC BY-NC-SA 3.0 VE)
http://creativecommons.org/licenses/by-nc-sa/3.0/ve/
Creado en abril 19, 2016
'''
print(3+5)
print(2-6)
print(2*7)
print(6/2)
print(1/3)
print(1.0/3)
print(((2 + 7*(234 -15)+673)*775)/(5+890.0 -(234+1)*5.0))
print(( (2.0 + 7*(234 - 15) + 673)*775 )/( 5+890.0 - (234+1)*5.0 ))
print(( (2.0 + 7*(234 - 15) + 673)*775 ) /( 5+890.0 - (234+1)*5.0 ))
print(2.5**3)
print(2.5**(3.2 + 2.1))
pri | nt(6.78**30)
print(8.647504884825773*1e+24 - 8.647504884825773*10**24)
print(1e+2)
print(1e2)
print(1e-2)
print(2e4)
print(4**(1./2.))
print(4**0.5)
print(8**(1./3.))
print(8**0.3333)
|
ericmjl/bokeh | tests/unit/bokeh/plotting/test__lengends.py | Python | bsd-3-clause | 8,707 | 0.004249 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import itertools
# Bokeh imports
from bokeh.models import ColumnDataSource, GlyphRenderer, Legend, LegendItem
# Module under test
import bokeh.plotting._legends as bpl # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
def all_combinations(lst):
return itertools.chain.from_iterable(
itertools.combinations(lst, i + 1)
for i in range(2, len(lst)))
LEGEND_KWS = ['legend', 'legend_label', 'legend_field', 'legend_group']
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
@pytest.mark.parametrize('key', LEGEND_KWS)
def test_pop_legend_kwarg(key) -> None:
kws = {'foo': 10, key: 'bar'}
assert bpl.pop_legend_kwarg(kws) == {key: "bar"}
@pytest.mark.parametrize('keys', all_combinations(LEGEND_KWS))
def test_pop_legend_kwarg_error(keys) -> None:
kws = dict(zip(keys, range(len(keys))))
with pytest.raises(ValueError):
bpl.pop_legend_kwarg(kws)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def test__find_legend_item() -> None:
legend = Legend(items=[LegendItem(label=dict(value="foo")), LegendItem(label=dict(field="bar"))])
assert bpl._find_legend_item(dict(value= | "baz"), legend) is None
assert bpl._find_legend_item(dict(value="foo"), legend) | is legend.items[0]
assert bpl._find_legend_item(dict(field="bar"), legend) is legend.items[1]
class Test__handle_legend_deprecated(object):
@pytest.mark.parametrize('arg', [1, 2.7, None, False, [], {'junk': 10}, {'label': 'foo', 'junk': 10}, {'value': 'foo', 'junk': 10}])
def test_bad_arg(self, arg) -> None:
with pytest.raises(ValueError):
bpl._handle_legend_deprecated(arg, "legend", "renderer")
def test_value_string(self) -> None:
legend = Legend(items=[LegendItem(label=dict(value="foo"))])
renderer = GlyphRenderer(data_source=ColumnDataSource())
bpl._handle_legend_deprecated("foo", legend, renderer)
assert len(legend.items) == 1
assert all("value" in item.label for item in legend.items)
bpl._handle_legend_deprecated("bar", legend, renderer)
assert len(legend.items) == 2
assert all("value" in item.label for item in legend.items)
def test_value_dict(self) -> None:
legend = Legend(items=[LegendItem(label=dict(value="foo"))])
renderer = GlyphRenderer(data_source=ColumnDataSource())
bpl._handle_legend_deprecated(dict(value="foo"), legend, renderer)
assert len(legend.items) == 1
assert all("value" in item.label for item in legend.items)
bpl._handle_legend_deprecated(dict(value="bar"), legend, renderer)
assert len(legend.items) == 2
assert all("value" in item.label for item in legend.items)
def test_field_string(self) -> None:
legend = Legend(items=[LegendItem(label=dict(field="foo"))])
renderer = GlyphRenderer(data_source=ColumnDataSource(data=dict(foo=[], bar=[])))
bpl._handle_legend_deprecated("foo", legend, renderer)
assert len(legend.items) == 1
assert all("field" in item.label for item in legend.items)
bpl._handle_legend_deprecated("bar", legend, renderer)
assert len(legend.items) == 2
assert all("field" in item.label for item in legend.items)
def test_field_dict(self) -> None:
legend = Legend(items=[LegendItem(label=dict(field="foo"))])
renderer = GlyphRenderer(data_source=ColumnDataSource(data=dict(foo=[], bar=[])))
bpl._handle_legend_deprecated(dict(field="foo"), legend, renderer)
assert len(legend.items) == 1
assert all("field" in item.label for item in legend.items)
bpl._handle_legend_deprecated(dict(field="bar"), legend, renderer)
assert len(legend.items) == 2
assert all("field" in item.label for item in legend.items)
class Test__handle_legend_field(object):
@pytest.mark.parametrize('arg', [1, 2.7, None, False, [], {}])
def test_bad_arg(self, arg) -> None:
with pytest.raises(ValueError):
bpl._handle_legend_field(arg, "legend", "renderer")
def test_label_already_exists(self) -> None:
legend = Legend(items=[LegendItem(label=dict(field="foo"))])
renderer = GlyphRenderer()
bpl._handle_legend_field("foo", legend, renderer)
assert len(legend.items) == 1
assert legend.items[0].label == dict(field="foo")
assert legend.items[0].renderers == [renderer]
def test_label_not_already_exists(self) -> None:
legend = Legend(items=[LegendItem(label=dict(field="foo"))])
renderer = GlyphRenderer()
bpl._handle_legend_field("bar", legend, renderer)
assert len(legend.items) == 2
assert legend.items[0].label == dict(field="foo")
assert legend.items[0].renderers == []
assert legend.items[1].label == dict(field="bar")
assert legend.items[1].renderers == [renderer]
class Test__handle_legend_group(object):
@pytest.mark.parametrize('arg', [1, 2.7, None, False, [], {}])
def test_bad_arg(self, arg) -> None:
with pytest.raises(ValueError):
bpl._handle_legend_group(arg, "legend", "renderer")
def test_bad_source(self) -> None:
with pytest.raises(ValueError):
bpl._handle_legend_group("foo", "legend", GlyphRenderer())
with pytest.raises(ValueError):
bpl._handle_legend_group("foo", "legend", GlyphRenderer(data_source=ColumnDataSource(data=dict(bar=[]))))
def test_items(self) -> None:
source = ColumnDataSource(data=dict(foo=[10,10,20,30,20,30,40]))
renderer = GlyphRenderer(data_source=source)
legend = Legend(items=[])
bpl._handle_legend_group("foo", legend, renderer)
assert len(legend.items) == 4
assert legend.items[0].label == dict(value="10")
assert legend.items[0].renderers == [renderer]
assert legend.items[0].index == 0
assert legend.items[1].label == dict(value="20")
assert legend.items[1].renderers == [renderer]
assert legend.items[1].index == 2
assert legend.items[2].label == dict(value="30")
assert legend.items[2].renderers == [renderer]
assert legend.items[2].index == 3
assert legend.items[3].label == dict(value="40")
assert legend.items[3].renderers == [renderer]
assert legend.items[3].index == 6
class Test__handle_legend_label(object):
@pytest.mark.parametrize('arg', [1, 2.7, None, False, [], {}])
def test_bad_arg(self, arg) -> None:
with pytest.raises(ValueError):
bpl._handle_legend_label(arg, "legend", "renderer")
def test_label_already_exists(self) -> None:
legend = Legend(items=[LegendItem(label=dict(value="foo"))])
renderer = GlyphRenderer()
bpl._handle_legend_label("foo", legend, renderer)
assert len(legend.items) == 1
assert legend.items[0].label == dict(value="foo")
|
geosolutions-it/ckanext-geonode | ckanext/geonode/harvesters/mappers/base.py | Python | gpl-2.0 | 14,238 | 0.002107 | import json
import logging
from string import Template
from ckan.logic import NotFound, get_action
from ckan import model, plugins as p
from ckan.plugins.toolkit import _
from ckan.model import Session
from ckanext.harvest.harvesters import HarvesterBase
from ckanext.harvest.model import HarvestObject
from ckanext.geonode.harvesters import (
CONFIG_GROUP_MAPPING,
CONFIG_GROUP_MAPPING_FIELDNAME,
CONFIG_IMPORT_FIELDS,
GEONODE_JSON_TYPE,
GeoNodeType,
)
from ckanext.geonode.harvesters.mappers.dcatapit import parse_dcatapit_info
from ckanext.geonode.harvesters.mappers.dynamic import parse_dynamic
from ckanext.geonode.harvesters.utils import format_date
from ckanext.geonode.model.types import Layer, Map, Doc, GeoNodeResource
log = logging.getLogger(__name__)
def parse(harvest_object, config):
json_dict = json.loads(harvest_object.content)
res_type = json_dict[GEONODE_JSON_TYPE]
parsed_type = GeoNodeType.parse_by_json_resource_type(res_type)
if parsed_type == GeoNodeType.LAYER_TYPE:
return parse_layer(harvest_object, harvest_object.content, config)
elif parsed_type == GeoNodeType.MAP_TYPE:
return parse_map(harvest_object, harvest_object.content, config)
elif parsed_type == GeoNodeType.DOC_TYPE:
return parse_doc(harvest_object, harvest_object.content, config)
else:
log.error('Unknown GeoNode type %s' % res_type)
return None, None
def parse_layer(harvest_object, json_layer, config):
# log.debug(f'get_layer_package_dict --> {json_layer}')
layer = Layer(json_layer)
package_dict, extras = parse_common(harvest_object, layer, config)
for resource in [
{
'name': _('Main page about layer'),
'description': _('Layer detail page in GeoNode'),
'format': 'html',
'url': layer.get('detail_url'),
},
{
'name': _('API link'),
'description': _('API link to layer, can be retrieved as HTML or JSON'),
'format': 'html',
'url': layer.get('link'),
},
{
'name': _('Thumbnail'),
'description': _('Default thumbnail for the Layer in GeoNode'),
'format': 'png',
'url': layer.get('thumbnail_url'),
},
{
'name': _('URL for embedding'),
'description': _('URL for embedding the GeoNode resource in other pages'),
'format': 'html',
'url': layer.get('embed_url'),
},
]:
package_dict['resources'].append(resource)
# full_layer_name = "%s:%s" % (layer.workspace(), layer.name())
#
# # Add WMS resource
# resource = {}
# resource['format'] = 'wms'
# resource['url'] = self.source_config['geoserver_url'] + "/wms"
# resource['name'] = full_layer_name
# resource['description'] = p.toolkit._('WMS resource')
# resource['geoserver_base_url'] = self.source_config['geoserver_url']
# resource['store'] = layer.store()
# resource['workspace'] = layer.workspace()
# resource['layer'] = layer.name()
# resource['is_vector'] = layer.is_vector()
#
# package_dict['resources'].append(resource)
# # if it's vectorial, add a WFS resource as well. This may be used for chart preview
# if layer.is_vector() and self._get_config_value('import_wfs_as_wfs', False):
# wfs_resource = {}
# wfs_resource['format'] = 'wfs'
# wfs_resource['url'] = self.source_config['geoserver_url'] + "/wfs"
# wfs_resource['name'] = full_layer_name
# wfs_resource['description'] = p.toolkit._('WFS resource')
# wfs_resource['geoserver_base_url'] = self.source_config['geoserver_url']
# wfs_resource['store'] = layer.store()
# wfs_resource['workspace'] = layer.workspace()
# wfs_resource['layer'] = layer.name()
# wfs_resource['is_vector'] = layer.is_vector()
# package_dict['resources'].append(wfs_resource)
# # if it's vectorial, add a CSV resource as well. This may be used for chart preview
# if layer.is_vector() and self._get_config_value('import_wfs_as_csv', False):
# wfs_resource = {}
# wfs_resource['format'] = 'csv'
# wfs_resource['url'] = utils.get_wfs_getfeatures_url(self.source_config['geoserver_url'], full_layer_name)
#
# wfs_resource['name'] = full_layer_name
# wfs_resource['description'] = p.toolkit._('CSV resource')
# wfs_resource['geoserver_base_url'] = self.source_config['geoserver_url']
# wfs_resource['store'] = layer.store()
# wfs_resource['workspace'] = layer.workspace()
# wfs_resource['layer'] = layer.name()
# wfs_resource['is_vector'] = layer.is_vector()
# wfs_resource[RESOURCE_DOWNLOADER] = \
# WFSCSVDownloader(self.source_config['geoserver_url'], full_layer_name, layer.name() + ".csv")
#
# package_dict['resources'].append(wfs_resource)
extras['is_vector'] = layer.is_vector()
return package_dict, extras
def parse_map(harvest_object, json_map, config):
geomap = Map(json_map)
package_dict, extras = parse_common(harvest_object, geomap, config)
# Add main view
package_dict['resources'].append(
{
'name': 'Map view',
'description': p.toolkit._('Map client in GeoNode'),
'format': 'html',
'url': f'{harvest_object.source.url}/maps/{geomap.id()}/view',
})
# Add map details
package_dict['resources'].append(
{
'name': 'Map details',
'description': p.toolkit._('Map details in GeoNode'),
'format': 'html',
'url': f'{harvest_object.source.url}/maps/{geomap.id()}',
})
# Add WMC resource
package_dict['resources'].append(
{
'name': 'Map',
'description': p.toolkit._('Full Web Map Context'),
'format': 'wmc',
'url': f'{harvest_object.source.url}/maps/{geomap.id()}/wmc',
# 'map_data': geomap.map_data()
})
return package_dict, extras
def parse_doc(harvest_object, json_map, config):
doc = Doc(json_map)
package_dict, extras = parse_common(harvest_object, doc, config)
# # Add resource
# resource = {}
# resource['format'] = doc.extension()
# # Not sure about this: we're creating a resource in CKAN, so we'll have to create a URL for this.
# # "url" is mandatory, and not providing it will raise a Validation Error
# resource['url'] = '%s/documents/%s/download' % (harvest_object.source.url, doc.id())
# resource['source_url'] = '%s/documents/%s/download' % (harvest_object.source.url, doc.id())
#
# resource['name'] = doc.doc_file()
# resource['descriptio | n'] = doc.doc_type()
#
# # Prepare the data downloader
# resource[RESOURCE_DOWNLOADER] = \
# GeonodeDataDownloader(harvest_object.source.url, doc.id(), doc.doc_file())
#
# package_dict['resources'].append(resource)
return package_dict, extras
def parse_common(harvest_object, georesource: GeoNodeResource, config: dict) -> dict:
'''
Create a package dict for a generic GeoNode resource
:param harvest_object: HarvestObject domain ob | ject (with access to job and source objects)
:type harvest_object: HarvestObject
:param georesource: a resource (Layer or Map) from GeoNode
:type georesource: a GeoResource (Map or Layer)
:returns: A dataset dictionary (package_dict)
:rtype: dict
'''
tags = []
for tag in georesource.keywords():
tag = tag['name']
tag = tag[:50] if len(tag) > 50 else tag
tags.append({'name': tag})
# Infer groups
groups = handle_groups(harvest_object, georesource, config)
resources = []
pos = 0
for link in georesource.links():
pos = pos + 1
is_main = link.name() == georesource.alternate()
resource = {
'url': link.url(),
'name': link.name(),
'description': f'{link.name()}\n\n{link.extension()} {link.link_type()}',
'mimetype': link.mime(),
|
jayceyxc/hue | desktop/core/ext-py/cx_Oracle-5.2.1/test/uCursor.py | Python | apache-2.0 | 10,123 | 0.005334 | """Module for testing cursor objects."""
import cx_Oracle
class TestCursor(BaseTestCase):
def testExecuteNoArgs(self):
"""test executing a statement without any arguments"""
result = self.cursor.execute(u"begin null; end;")
self.failUnlessEqual(result, None)
def testExecuteNoStatementWithArgs(self):
"""test executing a None statement with bind variables"""
self.failUnlessRaises(cx_Oracle.ProgrammingError, self.cursor.execute,
None, x = 5)
def testExecuteEmptyKeywordArgs(self):
"""test executing a statement with args and empty keyword args"""
simpleVar = self.cursor.var(cx_Oracle.NUMBER)
args = [simpleVar]
kwArgs = {}
result = self.cursor.execute(u"begin :1 := 25; end;", args, **kwArgs)
self.failUnlessEqual(result, None)
self.failUnlessEqual(simpleVar.getvalue(), 25)
def testExecuteKeywordArgs(self):
"""test executing a statement with keyword arguments"""
simpleVar = self.cursor.var(cx_Oracle.NUMBER)
result = self.cursor.execute(u"begin :value := 5; end;",
value = simpleVar)
self.failUnlessEqual(result, None)
self.failUnlessEqual(simpleVar.getvalue(), 5)
def testExecuteDictionaryArg(self):
"""test executing a statement with a dictionary argument"""
simpleVar = self.cursor.var(cx_Oracle.NUMBER)
dictArg = { u"value" : simpleVar }
result = self.cursor.execute(u"begin :value := 10; end;", dictArg)
self.failUnlessEqual(result, None)
self.failUnlessEqual(simpleVar.getvalue(), 10)
def testExecuteMultipleMethod(self):
"""test executing a statement with both a dict arg and keyword args"""
simpleVar = self.cursor.var(cx_Oracle.NUMBER)
dictArg = { u"value" : simpleVar }
self.failUnlessRaises(cx_Oracle.InterfaceError, self.cursor.execute,
u"begin :value := 15; end;", dictArg, value = simpleVar)
def testExecuteAndModifyArraySize(self):
"""test executing a statement and then changing the array size"""
self.cursor.execute(u"select IntCol from TestNumbers")
self.cursor.arraysize = 20
self.failUnlessEqual(len(self.cursor.fetchall()), 10)
def testCallProc(self):
"""test executing a stored procedure"""
var = self.cursor.var(cx_Oracle.NUMBER)
results = self.cursor.callproc(u"proc_Test", (u"hi", 5, var))
self.failUnlessEqual(results, [u"hi", 10, 2.0])
def testCallProcNoArgs(self):
"""test executing a stored procedure without any arguments"""
results = self.cursor.callproc(u"proc_TestNoArgs")
self.failUnlessEqual(results, [])
def testCallFunc(self):
"""test executing a stored function"""
results = self.cursor.callfunc(u"func_Test", cx_Oracle.NUMBER,
(u"hi", 5))
self.failUnlessEqual(results, 7)
def testCallFuncNoArgs(self):
"""test executing a stored function without any arguments"""
results = self.cursor.callfunc(u"func_TestNoArgs", cx_Oracle.NUMBER)
self.failUnlessEqual(results, 712)
def testExecuteManyByName(self):
"""test executing a statement multiple times (named args)"""
self.cursor.execute(u"truncate table TestExecuteMany")
rows = [ { u"value" : n } for n in range(250) ]
self.cursor.arraysize = 100
statement = u"insert into TestExecuteMany (IntCol) values (:value)"
self.cursor.executemany(statement, rows)
self.connection.commit()
self.cursor.execute(u"select count(*) from TestExecuteMany")
count, = self.cursor.fetchone()
self.failUnlessEqual(count, len(rows))
def testExecuteManyByPosition(self):
"""test executing a statement multiple times (positional args)"""
self.cursor.execute(u"truncate table TestExecuteMany")
rows = [ [n] for n in range(230) ]
self.cursor.arraysize = 100
statement = u"insert into TestExecuteMany (IntCol) values (:1)"
self.cursor.executemany(statement, rows)
self.connection.commit()
self.cursor.execute(u"select count(*) from TestExecuteMany")
count, = self.cursor.fetchone()
self.failUnlessEqual(count, len(rows))
def testExecuteManyWithPrepare(self):
"""test executing a statement multiple times (with prepare)"""
self. | cursor.execute(u"truncate table Te | stExecuteMany")
rows = [ [n] for n in range(225) ]
self.cursor.arraysize = 100
statement = u"insert into TestExecuteMany (IntCol) values (:1)"
self.cursor.prepare(statement)
self.cursor.executemany(None, rows)
self.connection.commit()
self.cursor.execute(u"select count(*) from TestExecuteMany")
count, = self.cursor.fetchone()
self.failUnlessEqual(count, len(rows))
def testExecuteManyWithRebind(self):
"""test executing a statement multiple times (with rebind)"""
self.cursor.execute(u"truncate table TestExecuteMany")
rows = [ [n] for n in range(235) ]
self.cursor.arraysize = 100
statement = u"insert into TestExecuteMany (IntCol) values (:1)"
self.cursor.executemany(statement, rows[:50])
self.cursor.executemany(statement, rows[50:])
self.connection.commit()
self.cursor.execute(u"select count(*) from TestExecuteMany")
count, = self.cursor.fetchone()
self.failUnlessEqual(count, len(rows))
def testExecuteManyWithExecption(self):
"""test executing a statement multiple times (with exception)"""
self.cursor.execute(u"truncate table TestExecuteMany")
rows = [ { u"value" : n } for n in (1, 2, 3, 2, 5) ]
statement = u"insert into TestExecuteMany (IntCol) values (:value)"
self.failUnlessRaises(cx_Oracle.DatabaseError, self.cursor.executemany,
statement, rows)
self.failUnlessEqual(self.cursor.rowcount, 3)
def testPrepare(self):
"""test preparing a statement and executing it multiple times"""
self.failUnlessEqual(self.cursor.statement, None)
statement = u"begin :value := :value + 5; end;"
self.cursor.prepare(statement)
var = self.cursor.var(cx_Oracle.NUMBER)
self.failUnlessEqual(self.cursor.statement, statement)
var.setvalue(0, 2)
self.cursor.execute(None, value = var)
self.failUnlessEqual(var.getvalue(), 7)
self.cursor.execute(None, value = var)
self.failUnlessEqual(var.getvalue(), 12)
self.cursor.execute(u"begin :value2 := 3; end;", value2 = var)
self.failUnlessEqual(var.getvalue(), 3)
def testExceptionOnClose(self):
"confirm an exception is raised after closing a cursor"
self.cursor.close()
self.failUnlessRaises(cx_Oracle.InterfaceError, self.cursor.execute,
u"select 1 from dual")
def testIterators(self):
"""test iterators"""
self.cursor.execute(u"""
select IntCol
from TestNumbers
where IntCol between 1 and 3
order by IntCol""")
rows = []
for row in self.cursor:
rows.append(row[0])
self.failUnlessEqual(rows, [1, 2, 3])
def testIteratorsInterrupted(self):
"""test iterators (with intermediate execute)"""
self.cursor.execute(u"truncate table TestExecuteMany")
self.cursor.execute(u"""
select IntCol
from TestNumbers
where IntCol between 1 and 3
order by IntCol""")
testIter = iter(self.cursor)
value, = testIter.next()
self.cursor.execute(u"insert into TestExecuteMany (IntCol) values (1)")
self.failUnlessRaises(cx_Oracle.InterfaceError, testIter.next)
def testBindNames(self):
"""test that bindnames() works correctly."""
self.failUnlessRaises(cx_Oracle.ProgrammingError,
self.cursor.bindnames)
self.cursor.prepare(u"begin null; end;")
self.failUnlessEqual(self.cursor.bin |
FESOM/pyfesom | tools/scalar2geo.py | Python | mit | 12,219 | 0.009412 | import sys
import os
from netCDF4 import Dataset, MFDataset, num2date
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import pyfesom as pf
import joblib
from joblib import Parallel, delayed
import json
from collections import OrderedDict
import click
from mpl_toolkits.basemap import maskoceans
from scipy.interpolate import griddata
import scipy.spatial.qhull as qhull
from scipy.interpolate import LinearNDInterpolator, CloughTocher2DInterpolator
@click.command()
@click.argument('meshpath', type=click.Path(exists=True), required=True)
@click.argument('ipath', nargs=-1, type=click.Path(exists=True), required=True)
@click.argument('opath', nargs=1, required=True, default='./')
@click.argument('variable', nargs=1, required=True, default='temp')
@click.option('--depths', '-d', default='-1', type=click.STRING, show_default=True,
              help='Depths in meters.')
@click.option('--box', '-b',
              nargs=4,
              type=(click.IntRange(-180, 180),
                    click.IntRange(-180, 180),
                    click.IntRange(-90, 90),
                    click.IntRange(-90, 90)),
              default=(-180, 180, -80, 90), show_default=True,
              help='Map boundaries in -180 180 -90 90 format.')
@click.option('--res', '-r', nargs=2,
              type=(click.INT, click.INT),
              default=(360, 170), show_default=True,
              help='Number of points along each axis (for lon and lat).')
@click.option('--influence', '-i', default=80000, show_default=True,
              help='Radius of influence for interpolation, in meters.')
@click.option('--interp', type=click.Choice(['nn', 'idist', 'linear', 'cubic']),
              default='nn',
              help='Interpolation method. Options are nn - nearest neighbor (KDTree implementation, fast), idist - inverse distance (KDTree implementation, decent speed), linear (scipy implementation, slow) and cubic (scipy implementation, slowest and give strange results on corarse meshes).')
@click.option('--timestep', '-t', default=0, show_default=True,
              help='Timstep from netCDF variable, starts with 0.\
 If -1, all timesteps of the netCDF file will be used.')
@click.option('--abg', nargs=3, type=(click.FLOAT,
                                      click.FLOAT,
                                      click.FLOAT), default=(50, 15, -90),
              help='Alpha, beta and gamma Euler angles. If you plots look rotated, you use wrong abg values. Usually nessesary only during the first use of the mesh.')
@click.option('--ncore', '-n', default=1, help='Number of cores to use in parralel')
@click.option('-k', default=5, help='Number of neighbors to take in to account for idist interpolation.')
def convert(meshpath, ipath, opath, variable, depths, box,
            res, influence, timestep, abg, interp, ncore, k):
    '''
    Interpolate a scalar FESOM1.4 field from its unstructured mesh onto a
    regular lon/lat grid and write one netCDF file per input file.

    meshpath - Path to the folder with FESOM1.4 mesh files.

    ipath - Path to FESOM1.4 netCDF file or files (with wildcard).

    opath - path where the output will be stored.

    variable - The netCDF variable to be converted.
    '''
    print(ipath)
    # Load the unstructured FESOM mesh, rotated by the abg Euler angles.
    mesh = pf.load_mesh(meshpath, abg=abg, usepickle=False, usejoblib=True)
    sstep = timestep
    radius_of_influence = influence

    # Build the regular target lon/lat grid from the requested box/resolution.
    left, right, down, up = box
    lonNumber, latNumber = res
    lonreg = np.linspace(left, right, lonNumber)
    latreg = np.linspace(down, up, latNumber)
    lonreg2, latreg2 = np.meshgrid(lonreg, latreg)

    localdir = os.path.dirname(os.path.abspath(__file__))
    print('localdir=' + localdir)
    # CMOR tables shipped next to this script supply CF metadata for output.
    with open(localdir + '/CMIP6_Omon.json') as data_file:
        cmore_table = json.load(data_file, object_pairs_hook=OrderedDict)
    with open(localdir + '/CMIP6_SIday.json') as data_file:
        cmore_table_ice = json.load(data_file, object_pairs_hook=OrderedDict)

    # Map requested depths (meters) to the nearest model levels;
    # a leading -1 selects all model levels.
    depths = np.array(depths.split(' '), dtype='float32')
    if depths[0] == -1:
        dind = range(mesh.zlevs.shape[0])
        realdepth = mesh.zlevs
    else:
        dind = []
        realdepth = []
        for depth in depths:
            ddepth = abs(mesh.zlevs - depth).argmin()
            dind.append(ddepth)
            realdepth.append(mesh.zlevs[ddepth])
    print(dind)
    print(realdepth)

    # KD-tree distances/indices from every target grid point to mesh nodes.
    distances, inds = pf.create_indexes_and_distances(mesh, lonreg2, latreg2,
                                                      k=k, n_jobs=4)

    # Per-level index arrays of wet/dry mesh nodes, computed once up front.
    ind_depth_all = []
    ind_noempty_all = []
    ind_empty_all = []
    for i in range(len(mesh.zlevs)):
        ind_depth, ind_noempty, ind_empty = pf.ind_for_depth(mesh.zlevs[i], mesh)
        ind_depth_all.append(ind_depth)
        ind_noempty_all.append(ind_noempty)
        ind_empty_all.append(ind_empty)

    # Interpolate the bottom topography once; this also prepares the helpers
    # (KD-tree indices or Delaunay triangulation) reused for every field.
    if interp == 'nn':
        topo_interp = pf.fesom2regular(mesh.topo, mesh, lonreg2, latreg2, distances=distances,
                                       inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
        k = 1  # nearest neighbour needs exactly one neighbour per point
        distances, inds = pf.create_indexes_and_distances(mesh, lonreg2, latreg2,
                                                          k=k, n_jobs=4)
        points, qh = None, None
    elif interp == 'idist':
        topo_interp = pf.fesom2regular(mesh.topo, mesh, lonreg2, latreg2, distances=distances,
                                       inds=inds, radius_of_influence=radius_of_influence, n_jobs=1, how='idist')
        k = k
        distances, inds = pf.create_indexes_and_distances(mesh, lonreg2, latreg2,
                                                          k=k, n_jobs=4)
        points, qh = None, None
    elif interp == 'linear':
        points = np.vstack((mesh.x2, mesh.y2)).T
        qh = qhull.Delaunay(points)
        topo_interp = LinearNDInterpolator(qh, mesh.topo)((lonreg2, latreg2))
        distances, inds = None, None
    elif interp == 'cubic':
        points = np.vstack((mesh.x2, mesh.y2)).T
        qh = qhull.Delaunay(points)
        topo_interp = CloughTocher2DInterpolator(qh, mesh.topo)((lonreg2, latreg2))
        distances, inds = None, None

    # Land mask from basemap; grid points that are not ocean get masked.
    mdata = maskoceans(lonreg2, latreg2, topo_interp, resolution='h', inlands=False)
    topo = np.ma.masked_where(~mdata.mask, topo_interp)

    # Backend is switched to threading for linear and cubic interpolations
    # due to problems with memory mapping.
    # One have to test threading vs multiprocessing.
    if (interp == 'linear') or (interp == 'cubic'):
        backend = 'threading'
    else:
        backend = 'multiprocessing'

    # One worker per input file.
    Parallel(n_jobs=ncore, backend=backend, verbose=50)(delayed(scalar2geo)(ifile, opath, variable,
                                                        mesh, ind_noempty_all,
                                                        ind_empty_all, ind_depth_all, cmore_table, lonreg2, latreg2,
                                                        distances, inds, radius_of_influence, topo, points, interp, qh, timestep, dind, realdepth) for ifile in ipath)
def scalar2geo(ifile, opath, variable,
mesh, ind_noempty_all,
ind_empty_all,ind_depth_all, cmore_table,
lonreg2, latreg2, distances, inds, radius_of_influence,
topo, points, interp, qh, timestep, dind, realdepth):
print(ifile)
ext = variable
#ifile = ipath
ofile = os.path.join(opath, '{}_{}.nc'.format(os.path.basename(ifile)[:-3], ext))
fl = Dataset(ifile)
if fl.variables[variable].shape[1] == mesh.n2d:
dim3d = False
dind = [dind[0]]
realdepth = [realdepth[0]]
elif fl.variables[variable].shape[1] == mesh.n3d:
dim3d = True
else:
raise ValueError('Variable size {} is not equal to number of 2d ({}) or 3d ({}) nodes'.format(fl.variables[variable].shape[1], mesh.n2d, mesh.n3d))
fw = Dataset(ofile, mode='w',data_model='NETCDF4_CLASSIC', )
fw.createDimension('latitude', lonreg2.shape[0])
fw.createDimension('longitude', latreg2.shape[1])
f |
obi-two/Rebelion | data/scripts/templates/object/draft_schematic/community_crafting/component/shared_lightweight_turret_hardware.py | Python | mit | 485 | 0.045361 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the intangible template object for the lightweight turret
    hardware draft schematic.

    Auto-generated file: hand edits belong only between the BEGIN/END
    MODIFICATIONS markers below.
    """
    result = Intangible()

    result.template = "object/draft_schematic/community_crafting/component/shared_lightweight_turret_hardware.iff"
    result.attribute_template_id = -1  # -1: no attribute template attached
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
amaggi/bda | chapter_03/sample_via_cdf.py | Python | gpl-2.0 | 402 | 0.002488 | from scipy.integrate import cumtrapz
from scipy.int | erpolate import interp1d
from scipy.stats import uniform
def sample_via_cdf(x, p, nsamp):
    """Draw ``nsamp`` samples from the tabulated density ``p`` via inverse-CDF
    (inverse-transform) sampling.

    Parameters
    ----------
    x : array_like
        Monotonically increasing grid on which the density is tabulated.
    p : array_like
        Density values at ``x`` (any positive scale; normalised internally).
    nsamp : int
        Number of samples to draw.

    Returns
    -------
    numpy.ndarray
        ``nsamp`` draws distributed according to ``p``.
    """
    # scipy.integrate.cumtrapz was deprecated and removed in SciPy >= 1.14;
    # use the modern name and fall back for very old SciPy versions.
    try:
        from scipy.integrate import cumulative_trapezoid
    except ImportError:  # SciPy < 1.6
        from scipy.integrate import cumtrapz as cumulative_trapezoid

    # get normalized cumulative distribution
    cdf = cumulative_trapezoid(p, x, initial=0)
    cdf = cdf / cdf.max()
    # interpolator mapping CDF values back onto x, i.e. the inverse CDF
    interp = interp1d(cdf, x)
    # uniform samples over [0, 1) pushed through the inverse CDF
    cdf_samp = uniform.rvs(size=nsamp)
    return interp(cdf_samp)
|
junqueira/balance | finance/migrations/0004_remove_extract_provider.py | Python | mit | 355 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_l | iterals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Schema migration: drop the ``provider`` field from the ``Extract``
    model in the ``finance`` app (auto-generated by Django)."""

    # Must be applied on top of the previous migration in this app.
    dependencies = [
        ('finance', '0003_auto_20140929_0130'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='extract',
            name='provider',
        ),
    ]
|
Luindil/PeakThis | peakthis/model/PickModel.py | Python | gpl-3.0 | 2,797 | 0.002145 | # -*- coding: utf8 -*-
__author__ = 'Clemens Prescher'
class PickModel():
    """Base class for models whose parameters are initialised by the user
    picking a fixed number of (x, y) points.

    Subclasses must implement :meth:`update_current_parameter`,
    :meth:`_parse_params`, :meth:`eval` and :meth:`make_params`;
    ``_parse_params`` is expected to define ``_param_root_names``.
    """

    # Class-wide counter handing out a unique id (and therefore a unique
    # parameter-name prefix) to every instance ever created.
    num_pick_models = 0

    def __init__(self, number_picks):
        self.id = PickModel.num_pick_models
        PickModel.num_pick_models += 1
        self._prefix = "id" + str(self.id) + "_"

        self.current_pick = 0
        self.number_picks = number_picks
        # BUGFIX: the original ``[Point()] * number_picks`` aliased ONE shared
        # Point instance, so recording any pick silently overwrote them all.
        # A comprehension creates a distinct Point per pick.
        self.pick_points = [Point() for _ in range(number_picks)]

        self._parse_params()
        self.parameters = self.make_params()

    def update_current_parameter(self, x, y):
        """Update the model parameter controlled by the current pick."""
        raise NotImplementedError

    def _parse_params(self):
        """Populate parameter metadata (e.g. ``_param_root_names``)."""
        raise NotImplementedError

    def pick_parameter(self, x, y):
        """Record one picked point and advance the pick cursor.

        Returns True while further picks are expected; returns False and
        resets the cursor once the final pick has been consumed.
        """
        self.update_current_parameter(x, y)
        self.pick_points[self.current_pick].x = x
        self.pick_points[self.current_pick].y = y
        self.current_pick += 1
        if self.current_pick < self.number_picks:
            return True
        else:
            self.current_pick = 0
            return False

    def get_param(self, param_name):
        # NOTE: expects the fully prefixed name, unlike the accessors below
        # which prepend ``self._prefix`` themselves.
        return self.parameters[param_name]

    def set_parameter_value(self, param_name, value):
        self.parameters["{}{}".format(self._prefix, param_name)].value = value

    def get_parameter_value(self, param_name):
        return self.parameters["{}{}".format(self._prefix, param_name)].value

    def set_parameter_max_value(self, param_name, max_value):
        self.parameters["{}{}".format(self._prefix, param_name)].max = max_value

    def get_parameter_max_value(self, param_name):
        return self.parameters["{}{}".format(self._prefix, param_name)].max

    def set_parameter_min_value(self, param_name, min_value):
        self.parameters["{}{}".format(self._prefix, param_name)].min = min_value

    def get_parameter_min_value(self, param_name):
        return self.parameters["{}{}".format(self._prefix, param_name)].min

    def quick_eval(self, x):
        """Evaluate the model at *x* using its current parameter set."""
        return self.eval(self.parameters, x=x)

    def eval(self, *args, **kwargs):
        raise NotImplementedError

    def make_params(self, *args, **kwargs):
        """Create and return the parameter collection (a mapping from
        prefixed names to objects exposing .value/.min/.max/.vary)."""
        raise NotImplementedError

    def __deepcopy__(self, memo):
        # Copy by creating a fresh instance and transferring parameter state.
        # NOTE(review): assumes the concrete subclass is constructible
        # without arguments -- confirm for each subclass.
        cls = self.__class__
        result = cls()
        for parameter_name in self._param_root_names:
            result_param = result.get_param("{}{}".format(result._prefix, parameter_name))
            current_param = self.get_param("{}{}".format(self._prefix, parameter_name))
            result_param.value = current_param.value
            result_param.vary = current_param.vary
            result_param.min = current_param.min
            result_param.max = current_param.max
        return result


class Point():
    """A mutable 2-D point with ``x`` and ``y`` coordinates."""

    def __init__(self, x=0., y=0.):
        self.x = x
        self.y = y
iulian787/spack | var/spack/repos/builtin/packages/nicstat/package.py | Python | lgpl-2.1 | 962 | 0.00104 | # C | opyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Nicstat(MakefilePackage, SourceforgePackage):
    """
    Nicstat is a Solaris and Linux command-line that prints out network
    statistics for all network interface cards (NICs), including packets,
    kilobytes per second, average packet sizes and more.
    """

    homepage = "https://github.com/scotte/nicstat"
    sourceforge_mirror_path = "nicstat/nicstat-1.95.tar.gz"

    version('1.95', sha256='c4cc33f8838f4523f27c3d7584eedbe59f4c587f0821612f5ac2201adc18b367')

    def edit(self, spec, prefix):
        # Use the stock Linux makefile as the build recipe.
        copy('Makefile.Linux', 'makefile')
        # Drop the hard-coded 32-bit compile flag so the build targets the
        # host architecture.
        filter_file(r'CMODEL =\s+-m32', '', 'makefile')
        # Spack installs into its own prefix; 'sudo' in install rules would
        # break the build.
        filter_file('sudo', '', 'makefile', string=True)

    def install(self, spec, prefix):
        # Upstream has no install target; copy the whole build tree.
        install_tree(".", prefix)
|
jhjguxin/PyCDC | Karrigell-2.3.5/webapps/demo/wiki/search.py | Python | gpl-3.0 | 1,781 | 0.021898 | # search engine
import re
import posixpath
import BuanBuan
import wikiBase
db = wikiBase.db

# Build the search patterns from the submitted form fields.
# NOTE(review): QUERY and _words are globals injected by the Karrigell
# templating runtime -- confirm against the framework docs.
caseSensitive=QUERY.has_key("caseSensitive")
fullWord=QUERY.has_key("fullWord")
words=_words
if fullWord:
    # Surround with non-word characters so only whole words match.
    words=r"\W"+words+r"\W"
# A "sentence" is the span between two sentence/line delimiters that
# contains the search words.
sentence="[\n\r.?!].*"+words+".*[\n\r.?!]"
if caseSensitive:
    sentencePattern=re.compile(sentence)
    wordPattern=re.compile(words)
else:
    sentencePattern=re.compile(sentence,re.IGNORECASE)
    wordPattern=re.compile(words,re.IGNORECASE)
occ=0 # number of occurences
def replace(matchObj):
    """re.sub callback: wrap the matched text in HTML <b> tags."""
    return "<b>%s</b>" % matchObj.group(0)
def linkToPage(name):
    """Return an HTML anchor to the wiki page *name*, followed by an
    opening blockquote for the matching excerpt lines."""
    return '<a href="BuanShow.pih?pageName={0}">{0}</a>\n<br><blockquote>'.format(name)
print "<h2>Searching [%s]</h2>" %(_words)

# gets all pages in base
for page in db:
    content = page['content']
    # Sentinel newlines so the very first/last sentence can also match.
    content="\n"+content+"\n"
    flag=0 # true if at least one match on this page
    deb=0  # offset at which the next sentence search starts
    while 1:
        searchObj=sentencePattern.search(content,deb)
        if searchObj is None:
            if flag:
                print "\n</blockquote>\n"
            break
        else:
            if not flag:
                # First hit on this page: emit the page link once.
                print linkToPage(page['name'])
                flag=1
            sentence=content[searchObj.start():searchObj.end()]
            sentence=sentence.lstrip()
            # eliminates leading char "!"
            sentence=sentence[re.search("[^!]",sentence).start():]
            # Highlight every occurrence of the search words.
            sentence=wordPattern.sub(replace,sentence)
            print sentence+"<br>"
            # Restart just before the end so overlapping sentences are found.
            deb=searchObj.end()-len(words)+1
            occ+=1
            flag=1

if not occ:
    print "%s not found" %_words
print '<a href="index.pih">Back</a>'
antoinecarme/pyaf | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingAverage_Seasonal_DayOfMonth_MLP.py | Python | bsd-3-clause | 169 | 0.047337 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( [ | 'Quantization'] , ['MovingAverage'] , ['Seasonal_DayOfMonth'] | , ['MLP'] ); |
wylieswanson/agilepyfs | fs/remote.py | Python | bsd-3-clause | 26,904 | 0.008772 | """
fs.remote
=========
Utilities for interfacing with remote filesystems
This module provides reusable utility functions that can be used to construct
FS subclasses interfacing with a remote filesystem. These include:
* RemoteFileBuffer: a file-like object that locally buffers the contents of
a remote file, writing them back on flush() or close().
* ConnectionManagerFS: a WrapFS subclass that tracks the connection state
of a remote FS, and allows client code to wait for
a connection to be re-established.
* CacheFS: a WrapFS subclass that caches file and directory meta-data in
memory, to speed access to a remote FS.
"""
from __future__ import with_statement
import time
import stat as statinfo
from errno import EINVAL
import fs.utils
from fs.base import threading, FS
from fs.wrapfs import WrapFS, wrap_fs_methods
from fs.wrapfs.lazyfs import LazyFS
from fs.path import *
from fs.errors import *
from fs.local_functools import wraps
from fs.filelike import StringIO, SpooledTemporaryFile, FileWrapper
from fs import SEEK_SET, SEEK_CUR, SEEK_END
_SENTINAL = object()
from six import PY3, b
class RemoteFileBuffer(FileWrapper):
"""File-like object providing buffer for local file operations.
Instances of this class manage a local tempfile buffer corresponding
to the contents of a remote file. All reads and writes happen locally,
with the content being copied to the remote file only on flush() or
close(). Writes to the remote file are performed using the setcontents()
method on the owning FS object.
The intended use-case is for a remote filesystem (e.g. S3FS) to return
instances of this class from its open() method, and to provide the
file-uploading logic in its setcontents() method, as in the following
pseudo-code::
def open(self,path,mode="r"):
rf = self._get_remote_file(path)
return RemoteFileBuffer(self,path,mode,rf)
def setcontents(self,path,file):
self._put_remote_file(path,file)
The contents of the remote file are read into the buffer on-demand.
"""
max_size_in_memory = 1024 * 8
    def __init__(self, fs, path, mode, rfile=None, write_on_flush=True):
        """RemoteFileBuffer constructor.

        The owning filesystem, path and mode must be provided.  If the
        optional argument 'rfile' is provided, it must be a read()-able
        object or a string containing the initial file contents.
        """
        wrapped_file = SpooledTemporaryFile(max_size=self.max_size_in_memory)
        self.fs = fs                           # owning FS object
        self.path = path                       # path of the remote file
        self.write_on_flush = write_on_flush
        self._changed = False                  # buffer differs from remote copy?
        self._readlen = 0                      # how many bytes already loaded from rfile
        self._rfile = None                     # reference to remote file object
        self._eof = False                      # reached end of rfile?
        # Reuse the owning filesystem's lock class (when it has one) so the
        # buffer's locking discipline matches the FS it belongs to.
        if getattr(fs,"_lock",None) is not None:
            self._lock = fs._lock.__class__()
        else:
            self._lock = threading.RLock()

        if "r" in mode or "+" in mode or "a" in mode:
            if rfile is None:
                # File was just created: there is nothing remote to read, and
                # the (empty) contents must be written back on close.
                self._changed = True
                self._eof = True
            if not hasattr(rfile, "read"):
                # Plain string contents: wrap in a file-like object.
                rfile = StringIO(rfile)

            self._rfile = rfile
        else:
            # Write-only mode: the remote contents are irrelevant, so do not
            # keep (or leave open) the remote file object.
            self._eof = True
            self._rfile = None
            if rfile is not None and hasattr(rfile,"close"):
                rfile.close()

        super(RemoteFileBuffer,self).__init__(wrapped_file,mode)

        # FIXME: What if mode with position on eof?
        if "a" in mode:
            # Not good enough...
            self.seek(0, SEEK_END)

    def __del__(self):
        # Don't try to close a partially-constructed file: _lock is created
        # in __init__, so its absence means construction never completed.
        if "_lock" in self.__dict__:
            if not self.closed:
                try:
                    self.close()
                except FSError:
                    pass
    def _write(self,data,flushing=False):
        """Write *data* at the current position, first pulling into the
        buffer any remote bytes that the write is about to overwrite."""
        with self._lock:
            # Do we need to discard info from the buffer?
            toread = len(data) - (self._readlen - self.wrapped_file.tell())
            if toread > 0:
                if not self._eof:
                    # Load the remote bytes this write will overwrite so
                    # _readlen stays consistent with the buffer contents.
                    self._fillbuffer(toread)
                else:
                    # Already past remote EOF: just extend the bookkeeping.
                    self._readlen += toread

            self._changed = True
            self.wrapped_file.write(data)

    def _read_remote(self, length=None):
        """Read data from the remote file into the local buffer."""
        chunklen = 1024 * 256  # transfer in 256 KiB chunks
        bytes_read = 0
        while True:
            toread = chunklen
            if length is not None and length - bytes_read < chunklen:
                toread = length - bytes_read
            if not toread:
                break

            data = self._rfile.read(toread)
            datalen = len(data)
            if not datalen:
                self._eof = True
                break

            bytes_read += datalen
            self.wrapped_file.write(data)

            if datalen < toread:
                # We reached EOF,
                # no more reads needed
                self._eof = True
                break

        if self._eof and self._rfile is not None:
            # The whole remote file is buffered; release the remote handle.
            self._rfile.close()
        self._readlen += bytes_read
    def _fillbuffer(self, length=None):
        """Fill the local buffer, leaving file position unchanged.

        This method is used for on-demand loading of data from the remote file
        into the buffer. It reads 'length' bytes from rfile and writes them
        into the buffer, seeking back to the original file position.
        """
        curpos = self.wrapped_file.tell()
        if length == None:
            if not self._eof:
                # No length given: buffer the whole remainder of the remote
                # file, then restore the caller's position.
                self.wrapped_file.seek(0, SEEK_END)
                self._read_remote()
                self._eof = True
                self.wrapped_file.seek(curpos)

        elif not self._eof:
            if curpos + length > self._readlen:
                # The requested range extends past what has been downloaded:
                # fetch just the missing bytes.
                toload = length - (self._readlen - curpos)
                self.wrapped_file.seek(0, SEEK_END)
                self._read_remote(toload)
                self.wrapped_file.seek(curpos)

    def _read(self, length=None):
        if length is not None and length < 0:
            length = None  # negative length means "read to EOF"

        with self._lock:
            self._fillbuffer(length)
            data = self.wrapped_file.read(length if length != None else -1)
            if not data:
                data = None  # file-like protocol: None signals EOF
            return data
    def _seek(self,offset,whence=SEEK_SET):
        """Seek within the buffer, downloading remote bytes as needed so the
        target position is backed by real data."""
        with self._lock:
            if not self._eof:
                # Count absolute position of seeking
                if whence == SEEK_SET:
                    abspos = offset
                elif whence == SEEK_CUR:
                    abspos = offset + self.wrapped_file.tell()
                elif whence == SEEK_END:
                    # Relative to EOF: the full remote length is needed.
                    abspos = None
                else:
                    raise IOError(EINVAL, 'Invalid whence')

                if abspos != None:
                    toread = abspos - self._readlen
                    if toread > 0:
                        # Seeking forward past the downloaded region: pull in
                        # the missing bytes first.
                        self.wrapped_file.seek(self._readlen)
                        self._fillbuffer(toread)
                else:
                    # SEEK_END: download everything that remains.
                    self.wrapped_file.seek(self._readlen)
                    self._fillbuffer()

            self.wrapped_file.seek(offset, whence)
def _truncate(self,size):
with self._lock:
if not self._eof and self._readlen < size:
# Read the rest of file
self._fillbuff |
eandersson/amqpstorm | amqpstorm/tests/functional/management/test_basic.py | Python | mit | 2,514 | 0 | from amqpstorm.management import ManagementApi
from amqpstorm.message import Message
from amqpstorm.tests import HTTP_URL
from amqpstorm.tests import PASSWORD
from amqpstorm.tests import USERNAME
from amqpstorm.tests.functional.utility import TestFunctionalFramework
from amqpstorm.tests.functional.utility import setup
class ApiBasicFunctionalTests(TestFunctionalFramework):
    """Functional tests for the management API's basic publish/get calls.

    These require a live RabbitMQ broker with the management plugin,
    reachable at HTTP_URL with USERNAME/PASSWORD.
    """

    @setup(queue=True)
    def test_api_basic_publish(self):
        """Publishing to a declared queue reports the message as routed."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)
        try:
            self.assertEqual(api.basic.publish(self.message, self.queue_name),
                             {'routed': True})
        finally:
            api.queue.delete(self.queue_name)

    @setup(queue=True)
    def test_api_basic_get_message(self):
        """get(requeue=False) returns Message objects and consumes them."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)

        self.assertEqual(api.basic.publish(self.message, self.queue_name),
                         {'routed': True})

        result = api.basic.get(self.queue_name, requeue=False)
        self.assertIsInstance(result, list)
        self.assertIsInstance(result[0], Message)
        self.assertEqual(result[0].body, self.message)

        # Make sure the message wasn't re-queued.
        self.assertFalse(api.basic.get(self.queue_name, requeue=False))

    @setup(queue=True)
    def test_api_basic_get_message_requeue(self):
        """get(requeue=True) returns the message but leaves it on the queue."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)

        self.assertEqual(api.basic.publish(self.message, self.queue_name),
                         {'routed': True})

        result = api.basic.get(self.queue_name, requeue=True)
        self.assertIsInstance(result, list)
        self.assertIsInstance(result[0], Message)
        self.assertEqual(result[0].body, self.message)

        # Make sure the message was re-queued.
        self.assertTrue(api.basic.get(self.queue_name, requeue=False))

    @setup(queue=True)
    def test_api_basic_get_message_to_dict(self):
        """get(to_dict=True) returns raw payload dicts instead of Messages."""
        api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
        api.queue.declare(self.queue_name)

        self.assertEqual(api.basic.publish(self.message, self.queue_name),
                         {'routed': True})

        result = api.basic.get(self.queue_name, requeue=False, to_dict=True)
        self.assertIsInstance(result, list)
        self.assertIsInstance(result[0], dict)
        self.assertEqual(result[0]['payload'], self.message)
|
Q-Leap-Networks/pyslurm | examples/listdb_reservations.py | Python | gpl-2.0 | 866 | 0.005774 | #!/usr/bin/env python
import | time
import pyslurm
def reservation_display(reservation):
    """Print one tab-indented "key=value" line per entry of *reservation*.

    A falsy argument (None or an empty mapping) prints nothing.
    """
    if not reservation:
        return
    for key, value in reservation.items():
        print("\t{0}={1}".format(key, value))
if __name__ == "__main__":
    try:
        # Query the Slurm accounting DB for reservations from the
        # last 30 days.
        end = time.time()
        start = end - (30*24*60*60)
        print("start={}, end={}".format(start, end))
        reservations = pyslurm.slurmdb_reservations()
        reservations.set_reservation_condition(start, end)
        reservations_dict = reservations.get()
        if len(reservations_dict):
            for key, value in reservations_dict.items():
                # Print each reservation wrapped in literal braces.
                print("{} Reservation: {}".format('{', key))
                reservation_display(value)
                print("}")
        else:
            print("No reservation found")
    except ValueError as e:
        print("Error:{}".format(e.args[0]))
|
schae234/cob | docs/conf.py | Python | mit | 5,863 | 0.000682 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# cob documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 7 18:09:10 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
#from recommonmark.parser import CommonMarkParser
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.programoutput',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'COB'
copyright = '2019, Joseph Jeffers, Rob Schaefer'
author = 'Joseph Jeffers, Rob Schaefer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import cob
version = cob.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else th | ey produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages | . See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'globaltoc.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cobdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cob.tex', 'cob Documentation',
'Joseph Jeffers, Rob Schaefer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cob', 'cob Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cob', 'cob Documentation',
author, 'cob', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
hassaanm/stock-trading | src/pybrain/rl/environments/ode/instances/acrobot.py | Python | apache-2.0 | 1,031 | 0.008729 | __author__ = 'Frank Sehnke, sehnke@in.tum.de'
from pybrain.rl.environments.ode import ODEEnvironment, sensors, actuators
import imp
from scipy import array
class AcrobotEnvironment(ODEEnvironment):
    """ODE-simulated acrobot environment for pybrain reinforcement learning,
    loaded from the bundled acrobot.xode model file."""

    def __init__(self, renderer=True, realtime=True, ip="127.0.0.1", port="21590", buf='16384'):
        ODEEnvironment.__init__(self, renderer, realtime, ip, port, buf)
        # Load the XODE model from the installed pybrain package directory.
        self.loadXODE(imp.find_module('pybrain')[1] + "/rl/environments/ode/models/acrobot.xode")

        # standard sensors and actuators
        self.addSensor(sensors.JointSensor())
        self.addSensor(sensors.JointVelocitySensor())
        self.addActuator(actuators.JointActuator())

        # set act- and obsLength from the configured actuators/sensors
        self.actLen = self.indim
        self.obsLen = len(self.getSensors())

        self.stepsPerAction = 1

if __name__ == '__main__' :
    # Smoke test: run the simulation, resetting every 1000 steps.
    w = AcrobotEnvironment()
    while True:
        w.step()
        if w.stepCounter == 1000: w.reset()
|
Harmon758/discord.py | discord/user.py | Python | mit | 13,435 | 0.002159 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import Any, Dict, Optional, TYPE_CHECKING
import discord.abc
from .flags import PublicUserFlags
from .utils import snowflake_time, _bytes_to_base64_data, MISSING
from .enums import DefaultAvatar
from .colour import Colour
from .asset import Asset
__all__ = (
'User',
'ClientUser',
)
class _UserTag:
    """Marker mixin identifying user-like objects (used by equality checks);
    carries only the shared ``id`` annotation."""
    __slots__ = ()
    id: int
class BaseUser(_UserTag):
__slots__ = ('name', 'id', 'discriminator', '_avatar', '_banner', '_accent_colour', 'bot', 'system', '_public_flags', '_state')
if TYPE_CHECKING:
name: str
id: int
discriminator: str
bot: bool
system: bool
    def __init__(self, *, state, data):
        # *state* is the library's connection state; *data* is the raw user
        # payload from the gateway/REST API, parsed by _update().
        self._state = state
        self._update(data)
def __repr__(self):
return (
f"<BaseUser id={self.id} name={self.name!r} discriminator={self.discriminator!r}"
f" bot={self.bot} system={self.system}>"
)
def __str__(self):
return f'{self.name}#{self.discriminator}'
    def __eq__(self, other):
        # Any object carrying the _UserTag marker with the same snowflake id
        # is considered the same user (other user-like classes share the tag).
        return isinstance(other, _UserTag) and other.id == self.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash on the snowflake with its low 22 bits dropped.
        # NOTE(review): presumably because the low bits are per-shard
        # counters with little entropy -- confirm against the snowflake spec.
        return self.id >> 22
def _update(self, data):
self.name = data['username']
self.id = int(data['id'])
self.discriminator = data['discriminator']
self._avatar = data['avatar']
self._banner = data.get('banner', None)
self._accent_colour = data.get('accent_color', None)
self._public_flags = data.get('public_flags', 0)
self.bot = data.get('bot', False)
self.system = data.get('system', False)
@classmethod
def _copy(cls, user):
self = cls.__new__(cls) # bypass __init__
self.name = user.name
self.id = user.id
self.discriminator = user.discriminator
self._avatar = user._avatar
self._banner = user._banner
self._accent_colour = user._accent_colour
self.bot = user.bot
self._state = user._state
self._public_flags = user._public_flags
return self
def _to_minimal_user_json(self):
return {
'username': self.name,
'id': self.id,
'avatar': self._avatar,
'discriminator': self.discriminator,
'bot': self.bot,
}
    @property
    def public_flags(self):
        """:class:`PublicUserFlags`: The publicly available flags the user has."""
        return PublicUserFlags._from_value(self._public_flags)

    @property
    def avatar(self):
        """:class:`Asset`: Returns an :class:`Asset` for the avatar the user has.

        If the user does not have a traditional avatar, an asset for
        the default avatar is returned instead.
        """
        if self._avatar is None:
            # No custom avatar: fall back to a default, selected by
            # discriminator modulo the number of default avatars.
            return Asset._from_default_avatar(self._state, int(self.discriminator) % len(DefaultAvatar))
        else:
            return Asset._from_avatar(self._state, self.id, self._avatar)

    @property
    def default_avatar(self):
        """:class:`Asset`: Returns the default avatar for a given user. This is calculated by the user's discriminator."""
        return Asset._from_default_avatar(self._state, int(self.discriminator) % len(DefaultAvatar))

    @property
    def banner(self) -> Optional[Asset]:
        """Optional[:class:`Asset`]: Returns the user's banner asset, if available.

        .. versionadded:: 2.0

        .. note::
            This information is only available via :meth:`Client.fetch_user`.
        """
        if self._banner is None:
            return None
        return Asset._from_user_banner(self._state, self.id, self._banner)

    @property
    def accent_colour(self) -> Optional[Colour]:
        """Optional[:class:`Colour`]: Returns the user's accent colour, if applicable.

        There is an alias for this named :attr:`accent_color`.

        .. versionadded:: 2.0

        .. note::
            This information is only available via :meth:`Client.fetch_user`.
        """
        if self._accent_colour is None:
            return None
        return Colour(self._accent_colour)

    @property
    def accent_color(self) -> Optional[Colour]:
        """Optional[:class:`Colour`]: Returns the user's accent color, if applicable.

        There is an alias for this named :attr:`accent_colour`.

        .. versionadded:: 2.0

        .. note::
            This information is only available via :meth:`Client.fetch_user`.
        """
        return self.accent_colour
@property
def colour(self):
""":class:`Colour`: A property that returns a colour denoting the rendered colour
for the user. This always returns :meth:`Colour.default`.
There is an alias for this named :attr:`color`.
"""
return Colour.default()
@property
def color(self):
""":class:`Colour`: A property that returns a color denoting the rendered color
for the user. This always returns :meth:`Colour.default`.
There is an alias for this named :attr:`colour`.
"""
return self.colour
@property
def mention(self):
""":class:`str`: Returns a string that allows you to mention the given user."""
return f'<@{self.id}>'
@property
def created_at(self):
""":class:`datetime.datetime`: Returns the user's creation time in UTC.
This is when the user's Discord account was created.
"""
return snowflake_time(self.id)
@property
def display_name(self):
""":cla | ss:`str`: Returns the user's display name.
For regular users this is just their username, but
if they have a guild specific nickname then that
is returned instead.
"""
return self.name
def mentioned_in(sel | f, message):
"""Checks if the user is mentioned in the specified message.
Parameters
-----------
message: :class:`Message`
The message to check if you're mentioned in.
Returns
-------
:class:`bool`
Indicates if the user is mentioned in the message.
"""
if message.mention_everyone:
return True
return any(user.id == self.id for user in message.mentions)
class ClientUser(BaseUser):
"""Represents your Discord user.
.. container:: operations
.. describe:: x == y
Checks if two users are equal.
.. describe:: x != y
Checks if two users are not equal.
.. describe:: hash(x)
Return the user's hash.
.. describe:: str(x)
Returns the user's name with discriminator.
Attributes
-----------
name: :class:`str`
The user's username.
id: :class:`int`
The user's unique ID.
discriminator: :class:`str`
The user's discriminator. This is given when the username has conflicts.
bot: :class:`bool`
Specifies if the user is a bot account.
system: :class:`bool`
Specifies if t |
ViralTexts/vt-passim | scripts/place-cascade.py | Python | apache-2.0 | 6,658 | 0.004656 | from __future__ import print_function
import argparse, re
import numpy as np
from pyspark import SparkSession
from pyspark.sql import Row
from pyspark.sql.functions import col, datediff, lit, sum as gsum
from pyspark.ml.feature import CountVectorizer, VectorAssembler
def pairFeatures(sseries, dseries, sday, dday):
    """Feature strings for one candidate source->destination reprint edge.

    Emits indicator features for the source series, destination series,
    the ordered series pair, and a log-scale bucket of the unsigned day lag.
    """
    gap_days = abs(dday - sday)  # sign dropped; direction is modelled elsewhere
    bucket = '-inf' if gap_days <= 0 else str(int(np.log(gap_days)))
    return ['src:' + sseries,
            'dst:' + dseries,
            'pair:' + sseries + ':' + dseries,
            'lag:' + bucket]
def normalizeText(s):
    """Lower-case *s*, collapse whitespace runs, and strip punctuation.

    Used so that witness texts are compared on word content only.
    """
    # FIX: use raw strings for the regex patterns; "\w" and "\s" in plain
    # strings are invalid escape sequences (DeprecationWarning on Python 3.6+,
    # SyntaxWarning on 3.12+).
    collapsed = re.sub(r"\s+", " ", s.strip().lower())
    return re.sub(r"[^\w\s]", "", collapsed)
def clusterFeatures(c, gap):
    """Generate candidate-edge feature rows for one cluster.

    ``c`` is a ``(cluster_id, witnesses)`` pair; witnesses carry at least
    ``series``, ``date`` (string), ``day`` (int day offset) and ``text``.
    For each witness ``d`` this emits a "root" edge (src=0) whose label marks
    whether a fresh cascade may start there (a gap of at least ``gap`` days
    since the previous distinct day), plus one edge from every other witness
    within ``gap`` days, labelled 1 only when the destination is strictly
    later.  NOTE: Python 2 code (uses the ``long`` builtin and pyspark Row).
    """
    ## Sorting by string date needs to be consistent with numerical date
    wits = sorted(c[1], key=lambda w: w.date + ' ' + w.series)
    n = len(wits)
    res = []
    # Seed so the very first witness is always allowed to be a root.
    curday = wits[0].day - gap
    prevday = curday
    for d in range(n):
        dst = wits[d]
        dstClean = normalizeText(dst.text)
        # Track the current and previous *distinct* day values; witnesses
        # sharing a day reuse the same prevday/curday pair.
        if dst.day > curday:
            prevday = curday
            curday = dst.day
        allowRoot = 1 if ( curday - prevday >= gap ) else 0
        res.append(Row(cluster=long(c[0]), src=0, dst=d+1, label=allowRoot,
                       longer=0.0, shorter=0.0,
                       raw=["root:" + dst.series]))
        for s in range(n):
            src = wits[s]
            if (s != d) and (abs(dst.day - src.day) < gap):
                srcClean = normalizeText(src.text)
                # Relative length change of the destination vs. the source.
                growth = (len(dstClean) - len(srcClean))/float(len(srcClean))
                res.append(Row(cluster=long(c[0]), src=s+1, dst=d+1,
                               label=(1 if dst.day > src.day else 0),
                               longer=growth if growth > 0 else 0.0,
                               shorter=abs(growth) if growth < 0 else 0.0,
                               raw=pairFeatures(src.series, dst.series, src.day, dst.day)))
    return res
def padUpLeft(m):
    """Embed square matrix *m* into an (n+1)x(n+1) float array, shifted one
    row down and one column right, so the new first row and first column
    are all zeros."""
    size = m.shape[0]
    padded = np.zeros((size + 1, size + 1))
    padded[1:, 1:] = m
    return padded
def laplaceGradient(L):
    """Gradient helper for the Laplacian-based edge model.

    Computes the transpose of the inverse of the root-deleted minor
    ``L[1:, 1:]``, pads it back to full size with a zero first row/column,
    and subtracts its diagonal (broadcast across each row).
    """
    minor_inv_t = np.transpose(np.linalg.inv(L[1:, 1:]))
    # Inline padding: shift down-right, zeros in first row and column.
    size = minor_inv_t.shape[0]
    padded = np.zeros((size + 1, size + 1))
    padded[1:, 1:] = minor_inv_t
    return padded - padded.diagonal()
## Should figure out how to reuse this in clusterGradients
def clusterPosteriors(c, w):
    """Score each candidate edge of one cluster under the learned weights.

    ``c`` is a ``(cluster_id, rows)`` pair; each row carries a sparse
    ``features`` vector and a ``label``.  ``w`` is the dense weight vector
    indexed by feature id.  Builds a Laplacian-style matrix over labelled
    edges (appears to follow a spanning-arborescence / matrix-tree style
    model -- confirm against the training objective) and returns the rows
    with a ``post`` field added.
    """
    n = max(map(lambda r: r.dst, c[1])) + 1
    L = np.zeros((n, n))
    for r in c[1]:
        # Edge weight: negated exponential of the dot product of the active
        # feature weights with their values.
        score = -np.exp(w[np.array(r.features.indices)].dot(r.features.values))
        L[r.src, r.dst] = score if r.label == 1 else 0
    # Set the diagonal so that every column sums to zero (graph Laplacian).
    L += np.diag(-L.sum(axis=0))
    Lgrad = laplaceGradient(L)
    posts = []
    for r in c[1]:
        mom = r.src
        kid = r.dst
        post = L[mom, kid] * Lgrad[mom, kid]
        posts.append(Row(post=float(post), **(r.asDict())))
    return posts
def clusterGradients(c, w):
    """Per-feature gradient contributions for one cluster.

    Builds two Laplacian-style matrices: ``numL`` over the observed
    (label == 1) edges and ``denL`` over all candidate edges, then combines
    their ``laplaceGradient`` terms per edge and distributes the result
    over that edge's sparse features.  Returns ``(feature_id, partial)``
    pairs to be summed across clusters by the driver.
    NOTE: Python 2 code (uses the ``long`` builtin).
    """
    n = max(map(lambda r: r.dst, c[1])) + 1
    numL = np.zeros((n, n))
    denL = np.zeros((n, n))
    for r in c[1]:
        # Edge weight from the active sparse features.
        score = -np.exp(w[np.array(r.features.indices)].dot(r.features.values))
        numL[r.src, r.dst] = score if r.label == 1 else 0
        denL[r.src, r.dst] = score
    # Diagonals make every column sum to zero (graph Laplacians).
    numL += np.diag(-numL.sum(axis=0))
    denL += np.diag(-denL.sum(axis=0))
    numLgrad = laplaceGradient(numL)
    denLgrad = laplaceGradient(denL)
    fgrad = []
    for r in c[1]:
        mom = r.src
        kid = r.dst
        grad = -numL[mom, kid] * numLgrad[mom, kid] + denL[mom, kid] * denLgrad[mom, kid]
        # Spread the edge gradient across its active features.
        fgrad += [(long(f), float(grad * v)) for f, v in zip(r.features.indices, r.features.values)]
    return fgrad
def featurizeData(raw, gap, vocabFile, featFile):
    """Build and persist the per-edge feature table.

    De-duplicates witnesses per (cluster, series, date), converts dates to
    integer day offsets from 1970-01-01 (dropping unparseable dates),
    generates candidate-edge rows per cluster via ``clusterFeatures``, and
    interns the string features into sparse vectors with ``CountVectorizer``
    (a feature must occur in at least 4 rows).  Writes the feature table to
    ``featFile`` (parquet) and the learned vocabulary to ``vocabFile``.
    """
    feats = raw.dropDuplicates(['cluster', 'series', 'date'])\
               .withColumn('day', datediff(col('date'), lit('1970-01-01')))\
               .na.drop(subset=['day'])\
               .rdd.groupBy(lambda r: r.cluster)\
               .flatMap(lambda c: clusterFeatures(c, gap))\
               .toDF()
    feats.cache()
    cv = CountVectorizer(inputCol='raw', outputCol='features', minDF=4.0)
    interner = cv.fit(feats) # alternate possibility: grab features only from label==1 edges
    full = interner.transform(feats)
    # combiner = VectorAssembler(inputCols=realCols + ['categorial'], outputCol='features')
    # # I don't think a Pipeline will work here since we need to get the interner.vocabulary
    # full = combiner.transform(interner.transform(feats)).drop('categorial')
    full.write.parquet(featFile)
    # Vocabulary index i corresponds to feature id i in the sparse vectors.
    np.savetxt(vocabFile, np.array(interner.vocabulary), fmt='%s')
    feats.unpersist()
if __name__ == "__main__":
    argparser = argparse.ArgumentParser(description='Cascade features')
    argparser.add_argument('-f', '--input', help='Input data')
    argparser.add_argument('-g', '--gap', type=int, default=730)       # max lag (days) between linked witnesses
    argparser.add_argument('-i', '--iterations', type=int, default=20)
    argparser.add_argument('-r', '--rate', type=float, default=1.0)    # base learning rate
    argparser.add_argument('-v', '--variance', type=float, default=1.0)  # Gaussian (L2) prior variance
    argparser.add_argument('-p', '--posteriors', metavar='params')
    argparser.add_argument('outdir', help='Output directory')
    args = argparser.parse_args()

    spark = SparkSession.builder.appName('Cascade Features').getOrCreate()

    vocabFile = args.outdir + "/vocab.gz"
    featFile = args.outdir + "/feats.parquet"
    # Reuse cached features/vocabulary from a previous run when present;
    # otherwise featurize the raw input first.
    try:
        full = spark.read.load(featFile)
        vocab = np.loadtxt(vocabFile, 'string')
    except Exception:
        # FIX: this was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; catch ordinary errors only.
        featurizeData(spark.read.load(args.input), args.gap, vocabFile, featFile)
        full = spark.read.load(featFile)
        vocab = np.loadtxt(vocabFile, 'string')

    if args.posteriors:
        # Scoring mode: load learned weights and write per-edge posteriors.
        w = np.loadtxt(args.posteriors)
        full.rdd.groupBy(lambda r: r.cluster).flatMap(lambda c: clusterPosteriors(c, w)).toDF()\
            .write.save(args.posteriors + ".parquet")
        exit(0)

    # Training mode: batch gradient descent with an L2 prior, one weight
    # per interned feature; a snapshot of w is written every iteration.
    fcount = len(vocab)
    w = np.zeros(fcount)
    fdata = full.select('cluster', 'src', 'dst', 'label', 'features')\
                .rdd.groupBy(lambda r: r.cluster)
    fdata.cache()
    rate = args.rate / fdata.count() # scale with training size
    for i in range(args.iterations):
        grad = fdata.flatMap(lambda c: clusterGradients(c, w)).toDF(['feat', 'grad'])\
                    .groupBy('feat').agg(gsum('grad').alias('grad'))\
                    .collect()
        update = np.zeros(fcount)
        for g in grad:
            update[g.feat] = g.grad
        if args.variance > 0:
            update += w / args.variance
        w -= rate * update
        np.savetxt("%s/iter%03d.gz" % (args.outdir, i), w)

    spark.stop()
|
aronsky/home-assistant | homeassistant/components/tasmota/device_automation.py | Python | apache-2.0 | 1,937 | 0.001549 | """Provides device automations for Tasmota."""
from hatasmota.const import AUTOMATION_TYPE_TRIGGER
from hatasmota.models import DiscoveryHashType
from hatasmota.trigger import TasmotaTrigger
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import Event, HomeAssistant
from homeassistant.helpers.device_registry import EVENT_DEVICE_REGISTRY_UPDATED
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import device_trigger
from .const import DATA_REMOVE_DISCOVER_COMPONENT, DATA_UNSUB
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
async def async_remove_automations(hass: HomeAssistant, device_id: str) -> None:
    """Remove automations for a Tasmota device."""
    # Device triggers are currently the only Tasmota device automations.
    await device_trigger.async_remove_triggers(hass, device_id)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
    """Set up Tasmota device automation dynamically through discovery."""

    async def async_device_removed(event: Event) -> None:
        """Handle the removal of a device."""
        # Only act on registry "remove" events, not "create"/"update".
        if event.data["action"] != "remove":
            return
        await async_remove_automations(hass, event.data["device_id"])

    async def async_discover(
        tasmota_automation: TasmotaTrigger, discovery_hash: DiscoveryHashType
    ) -> None:
        """Discover and add a Tasmota device automation."""
        # Triggers are the only automation type set up here.
        if tasmota_automation.automation_type == AUTOMATION_TYPE_TRIGGER:
            await device_trigger.async_setup_trigger(
                hass, tasmota_automation, config_entry, discovery_hash
            )

    # Store the dispatcher unsubscribe callback so the component can tear
    # down discovery when the config entry is unloaded.
    hass.data[
        DATA_REMOVE_DISCOVER_COMPONENT.format("device_automation")
    ] = async_dispatcher_connect(
        hass,
        TASMOTA_DISCOVERY_ENTITY_NEW.format("device_automation"),
        async_discover,
    )
    # Clean up automations when their device is removed from the registry.
    hass.data[DATA_UNSUB].append(
        hass.bus.async_listen(EVENT_DEVICE_REGISTRY_UPDATED, async_device_removed)
    )
|
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/mpl_toolkits/mplot3d/axes3d.py | Python | mit | 93,454 | 0.001316 | #!/usr/bin/python
# axes3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts fixed by Reinier Heeres <reinier@heeres.eu>
# Minor additions by Ben Axelrod <baxelrod@coroware.com>
# Significant updates and revisions by Ben Root <ben.v.root@gmail.com>
"""
Module containing Axes3D, an object which can plot 3D objects on a
2D matplotlib figure.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from matplotlib.externals import six
from matplotlib.externals.six.moves import map, xrange, zip, reduce
import warnings
from operator import itemgetter
import matplotlib.axes as maxes
from matplotlib.axes import Axes, rcParams
from matplotlib import cbook
import matplotlib.transforms as mtransforms
from matplotlib.transforms import Bbox
import matplotlib.collections as mcoll
from matplotlib import docstring
import matplotlib.scale as mscale
from matplotlib.tri.triangulation import Triangulation
import numpy as np
from matplotlib.colors import Normalize, colorConverter, LightSource
from . import art3d
from . import proj3d
from . import axis3d
def unit_bbox():
    """Return a fresh bounding box spanning (0, 0) to (1, 1)."""
    return Bbox(np.array([[0, 0], [1, 1]]))
class Axes3D(Axes):
    """
    3D axes object.
    """
    # Projection name; presumably used to register this class so it can be
    # selected when creating axes -- confirm against the projection registry.
    name = '3d'

    # Grouper shared across all Axes3D instances so z-limits can be linked
    # between axes (mirrors the x/y sharing machinery of the 2D Axes).
    _shared_z_axes = cbook.Grouper()
    def __init__(self, fig, rect=None, *args, **kwargs):
        '''
        Build an :class:`Axes3D` instance in
        :class:`~matplotlib.figure.Figure` *fig* with
        *rect=[left, bottom, width, height]* in
        :class:`~matplotlib.figure.Figure` coordinates

        Optional keyword arguments:

          ================   =========================================
          Keyword            Description
          ================   =========================================
          *azim*             Azimuthal viewing angle (default -60)
          *elev*             Elevation viewing angle (default 30)
          *zscale*           [%(scale)s]
          *sharez*           Other axes to share z-limits with
          ================   =========================================

        .. versionadded :: 1.2.1
            *sharez*

        ''' % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}

        if rect is None:
            rect = [0.0, 0.0, 1.0, 1.0]
        self._cids = []

        # Consume 3D-specific kwargs before the 2D Axes.__init__ sees them.
        self.initial_azim = kwargs.pop('azim', -60)
        self.initial_elev = kwargs.pop('elev', 30)
        zscale = kwargs.pop('zscale', None)
        sharez = kwargs.pop('sharez', None)

        self.xy_viewLim = unit_bbox()
        self.zz_viewLim = unit_bbox()
        self.xy_dataLim = unit_bbox()
        self.zz_dataLim = unit_bbox()
        # inhibit autoscale_view until the axes are defined
        # they can't be defined until Axes.__init__ has been called
        self.view_init(self.initial_elev, self.initial_azim)
        self._ready = 0

        self._sharez = sharez
        if sharez is not None:
            self._shared_z_axes.join(self, sharez)
            self._adjustable = 'datalim'

        Axes.__init__(self, fig, rect,
                      frameon=True,
                      *args, **kwargs)
        # Disable drawing of axes by base class
        Axes.set_axis_off(self)
        # Enable drawing of axes by Axes3D class
        self.set_axis_on()
        self.M = None

        # func used to format z -- fall back on major formatters
        self.fmt_zdata = None

        if zscale is not None :
            self.set_zscale(zscale)

        # Re-apply limits when z units are finalized.
        if self.zaxis is not None :
            self._zcid = self.zaxis.callbacks.connect('units finalize',
                                                      self.relim)
        else :
            self._zcid = None
        self._ready = 1

        self.mouse_init()
        self.set_top_view()

        self.axesPatch.set_linewidth(0)
        # Calculate the pseudo-data width and height
        pseudo_bbox = self.transLimits.inverted().transform([(0, 0), (1, 1)])
        self._pseudo_w, self._pseudo_h = pseudo_bbox[1] - pseudo_bbox[0]

        self.figure.add_axes(self)
def set_axis_off(self):
self._axis3don = False
self.stale = True
def set_axis_on(self):
self._axis3don = True
self.stale = True
def have_units(self):
"""
Return *True* if units are set on the *x*, *y*, or *z* axes
"""
return (self.xaxis.have_units() or self.yaxis.have_units() or
self.zaxis.have_units())
    def convert_zunits(self, z):
        """
        For artists in an axes, if the zaxis has units support,
        convert *z* using zaxis unit type; *z* is returned unchanged
        when no unit conversion applies.

        .. versionadded :: 1.2.1
        """
        return self.zaxis.convert_units(z)
def _process_unit_info(self, xdata=None, ydata=None, zdata=None,
kwargs=None):
"""
Look for unit *kwargs* and update the axis instances as necessary
"""
Axes._process_unit_info(self, xdata=xdata, ydata=ydata, kwargs=kwargs)
if self.xaxis is None or self.yaxis is None or self.zaxis is None:
return
if zdata is not None:
# we only need to update if there is nothing set yet.
if not self.zaxis.have_units():
self.zaxis.update_units(xdata)
# process kwargs 2nd since these will override default units
if kwargs is not None:
zunits = kwargs.pop('zunits', self.zaxis.units)
if zunits != self.zaxis.units:
self.zaxis.set_units(zunits)
# If the units being set imply a different converter,
# we need to update.
if zdata is not None:
self.zaxis.update_units(zdata)
    def set_top_view(self):
        """Position the 2D viewing pane that the 3D scene is drawn into."""
        # this happens to be the right view for the viewing coordinates
        # moved up and to the left slightly to fit labels and axes
        xdwl = (0.95/self.dist)
        xdw = (0.9/self.dist)
        ydwl = (0.95/self.dist)
        ydw = (0.9/self.dist)

        # This is purposely using the 2D Axes's set_xlim and set_ylim,
        # because we are trying to place our viewing pane.
        Axes.set_xlim(self, -xdwl, xdw, auto=None)
        Axes.set_ylim(self, -ydwl, ydw, auto=None)
    def _init_axis(self):
        '''Init 3D axes; overrides creation of regular X/Y axes'''
        self.w_xaxis = axis3d.XAxis('x', self.xy_viewLim.intervalx,
                            self.xy_dataLim.intervalx, self)
        # Alias: 2D machinery expects .xaxis/.yaxis; mplot3d code
        # historically uses the w_* names.
        self.xaxis = self.w_xaxis
        self.w_yaxis = axis3d.YAxis('y', self.xy_viewLim.intervaly,
                            self.xy_dataLim.intervaly, self)
        self.yaxis = self.w_yaxis
        self.w_zaxis = axis3d.ZAxis('z', self.zz_viewLim.intervalx,
                            self.zz_dataLim.intervalx, self)
        self.zaxis = self.w_zaxis

        for ax in self.xaxis, self.yaxis, self.zaxis:
            ax.init3d()
    def get_children(self):
        # Include the z axis artist in addition to everything the 2D base
        # class reports.
        return [self.zaxis, ] + Axes.get_children(self)
    def _get_axis_list(self):
        # Extend the base (x, y) axis tuple with the z axis.
        return super(Axes3D, self)._get_axis_list() + (self.zaxis, )
def unit_cube(self, vals=None):
minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
xs, ys, zs = ([minx, maxx, maxx, minx, minx, maxx, maxx, minx],
[miny, miny, maxy, maxy, miny, miny, maxy, maxy],
[minz, minz, minz, minz, maxz, maxz, maxz, maxz])
return list(zip(xs, ys, zs))
def tunit_cube(self, vals=None, M=None):
if M is None:
M = self.M
xyzs = self.unit_cube(vals)
tcube = proj3d.proj_points(xyzs, M)
return tcube
def tunit_edges(self, vals=None, M=None):
tc = self.tunit_cube(vals, M)
edges = [(tc[0], tc[1]),
(tc[1], tc[2]),
(tc[2], tc[3]),
(tc[3], tc[0]),
(tc[0], tc[4]),
(tc[1], tc[5]),
(tc[2], tc[6]),
(tc[3], tc[7]),
(tc[4], tc[5]),
(tc[5], tc[6]),
(tc[6], tc[7]),
|
sattila83/Robocar | input_parser.py | Python | mit | 369 | 0.0271 | #!/usr/bin/env python
from position import Position
class InputParser:
    """Reads comma-separated coordinate pairs from a text file."""

    @staticmethod
    def parse(filePath):
        """Parse *filePath* and return a list of Position objects.

        Each line is expected to hold ``x, y``; lines that do not split
        into exactly two comma-separated fields are silently skipped.
        """
        positions = []
        with open(filePath, 'r') as handle:
            for raw_line in handle.read().splitlines():
                fields = [field.strip() for field in raw_line.split(',')]
                if len(fields) == 2:
                    positions.append(Position(float(fields[0]), float(fields[1])))
        return positions
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.