repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
isohybrid/dotfile
|
vim/bundle/git:--github.com-klen-python-mode/pylibs/pylint/checkers/misc.py
|
Python
|
bsd-2-clause
| 2,594
| 0.003084
|
# pylint: disable=W0511
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Copyright (c) 2000-2010 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:contact@logilab.fr
Check source code is ascii only or has an encoding declaration (PEP 263)
"""
import re, sys
from pylint.interfaces import IRawChecker
from pylint.checkers import BaseChecker
# Message table for this checker: W0511 is emitted once per line that
# contains a configured warning-note tag (FIXME/XXX/TODO by default); the
# '%s' placeholder receives the note text taken from the source line.
MSGS = {
    'W0511': ('%s',
              'Used when a warning note as FIXME or XXX is detected.'),
}
class EncodingChecker(BaseChecker):
    """checks for:
    * warning notes in the code like FIXME, XXX
    * PEP 263: source code with non ascii character but no encoding declaration
    """

    __implements__ = IRawChecker

    # configuration section name
    name = 'miscellaneous'
    msgs = MSGS
    options = (('notes',
                {'type': 'csv', 'metavar': '<comma separated values>',
                 'default': ('FIXME', 'XXX', 'TODO'),
                 'help': 'List of note tags to take in consideration, \
separated by a comma.'
                 }),
               )

    def __init__(self, linter=None):
        BaseChecker.__init__(self, linter)

    def process_module(self, node):
        """Inspect the module's raw source stream and report every line
        containing one of the configured note tags (FIXME-like notes).
        """
        stream = node.file_stream
        stream.seek(0)
        # Compile each configured tag once, before scanning the stream.
        notes = [re.compile(note) for note in self.config.notes]
        for linenum, line in enumerate(stream, start=1):
            for note in notes:
                match = note.search(line)
                if match:
                    # Report the text from the tag to the end of the line.
                    # rstrip('\n') (rather than the old [:-1]) keeps the last
                    # character when the final line has no trailing newline.
                    self.add_message('W0511',
                                     args=line[match.start():].rstrip('\n'),
                                     line=linenum)
                    break
def register(linter):
    """Auto-registration hook invoked by pylint's plugin loader."""
    checker = EncodingChecker(linter)
    linter.register_checker(checker)
|
ellmetha/django-machina
|
machina/test/factories/polls.py
|
Python
|
bsd-3-clause
| 1,023
| 0
|
import factory
import factory.django
from faker import Faker
from machina.core.db.models import get_model
from machina.test.factories.auth import UserFactory
from machina.test.factories.conversation import TopicFactory
faker = Faker()

# Poll model classes resolved through machina's swappable-model registry.
TopicPoll = get_model('forum_polls', 'TopicPoll')
TopicPollOption = get_model('forum_polls', 'TopicPollOption')
TopicPollVote = get_model('forum_polls', 'TopicPollVote')
class TopicPollFactory(factory.django.DjangoModelFactory):
    # Poll attached to a freshly built topic.
    topic = factory.SubFactory(TopicFactory)
    # NOTE(review): evaluated once at import time, so every instance shares
    # the same question text; factory.Faker would vary it — confirm intent.
    question = faker.text(max_nb_chars=200)

    class Meta:
        # Django model this factory instantiates.
        model = TopicPoll
class TopicPollOptionFactory(factory.django.DjangoModelFactory):
    # Option belonging to a freshly built poll.
    poll = factory.SubFactory(TopicPollFactory)
    # NOTE(review): fixed fake text shared by all instances (import-time eval).
    text = faker.text(max_nb_chars=100)

    class Meta:
        # Django model this factory instantiates.
        model = TopicPollOption
class TopicPollVoteFactory(factory.django.DjangoModelFactory):
    # Vote cast by a fresh user on a freshly built poll option.
    poll_option = factory.SubFactory(TopicPollOptionFactory)
    voter = factory.SubFactory(UserFactory)

    class Meta:
        # Django model this factory instantiates.
        model = TopicPollVote
|
jhmatthews/panlens
|
constants.py
|
Python
|
gpl-2.0
| 1,157
| 0.057044
|
import math as mth
import numpy as np
#----------------------
# J Matthews, 21/02
# This is a file containing useful constants for python coding
#
# Units in CGS unless stated
#
#----------------------
#H=6.62606957E-27
#C=29979245800.0
#BOLTZMANN=1.3806488E-16
VERY_BIG = 1e50
H = 6.6262e-27                  # Planck's constant, erg s
HC = 1.98587e-16                # h * c
HEV = 4.13620e-15               # Planck's constant in eV
HRYD = 3.04005e-16              # NSH 1204 Planck's constant in Rydberg
C = 2.997925e10                 # speed of light, cm/s
G = 6.670e-8                    # gravitational constant
BOLTZMANN = 1.38062e-16
WIEN = 5.879e10                 # NSH 1208 Wien Disp Const in frequency units
H_OVER_K = 4.799437e-11
STEFAN_BOLTZMANN = 5.6696e-5
THOMPSON = 0.66524e-24          # Thomson cross-section
PI = 3.1415927
MELEC = 9.10956e-28             # electron mass, g
E = 4.8035e-10                  # Electric charge in esu
MPROT = 1.672661e-24            # proton mass, g
MSOL = 1.989e33                 # solar mass, g
PC = 3.08e18                    # parsec, cm
YR = 3.1556925e7                # year, s
PI_E2_OVER_MC = 0.02655103      # Classical cross-section
PI_E2_OVER_M = 7.96e8
ALPHA = 7.297351e-3             # Fine structure constant
BOHR = 0.529175e-8              # Bohr radius
CR = 3.288051e15                # Rydberg frequency for H != Ryd freq for infinite mass
ANGSTROM = 1.e-8                # Definition of an Angstrom in units of this code, e.g. cm
EV2ERGS = 1.602192e-12
RADIAN = 57.29578
RYD2ERGS = 2.1798741e-11
PARSEC = 3.086E18
|
KevinVDVelden/7DRL_2015
|
Game/MapGen.py
|
Python
|
gpl-2.0
| 4,996
| 0.055244
|
import math
import random
import GameData
from Util.TileTypes import *
from Util import Line, StarCallback
def initializeRandom( x, y ):
    """Pseudo-random 0/1 map seed mixing radial distance, angle and noise."""
    dist = math.sqrt(x * x + y * y)
    angle = math.atan2(x, y) / math.pi * 5
    noise = (random.random() * 7) - 3.5
    # Wrap the mixed value into [0, 10) and threshold at the midpoint.
    return 1 if (dist + angle + noise) % 10 > 5 else 0
def circle(x0, y0, radius, endRadius, cb):
    """Rasterize an annulus from radius to endRadius around (x0, y0).

    Sweeps one quadrant in angle steps and mirrors each sample into all
    eight octants, invoking cb(x, y) for every cell.
    """
    step = 1.0 / endRadius
    angle = math.pi / 2
    while angle >= 0:
        c = math.cos(angle)
        s = math.sin(angle)
        r = radius
        while r < endRadius:
            cx = int(c * r)
            sx = int(s * r)
            # Eight-way symmetry around the center point.
            cb( cx + x0,  sx + y0)
            cb( sx + x0,  cx + y0)
            cb(-cx + x0,  sx + y0)
            cb(-sx + x0,  cx + y0)
            cb( cx + x0, -sx + y0)
            cb( sx + x0, -cx + y0)
            cb(-cx + x0, -sx + y0)
            cb(-sx + x0, -cx + y0)
            r += 0.5
        angle -= step
def buildFixedWalls( self, I, _buffer, val ):
    """Clear the central room area and outline it with walls of type ``val``."""
    cx = int(self.width / 2)
    cy = int(self.height / 2)
    half_w = GameData.MapGen_CenterRoom_Size[0]
    half_h = GameData.MapGen_CenterRoom_Size[1]
    # Clear the room interior plus a one-tile fringe.
    for x in range(cx - half_w - 1, cx + half_w + 1):
        for y in range(cy - half_h - 1, cy + half_h + 1):
            _buffer[I(x, y)] = 0
    # Top and bottom wall rows.
    for x in range(cx - half_w - 1, cx + half_w + 1):
        _buffer[I(x, cy - half_h - 1)] = val
        _buffer[I(x, cy + half_h)] = val
    # Left and right wall columns.
    for y in range(cy - half_h - 1, cy + half_h + 1):
        _buffer[I(cx - half_w - 1, y)] = val
        _buffer[I(cx + half_w, y)] = val
def preIterInit( self, I, _buffer ):
    """Seed the map: solid outer border, a cleared band inside it, and the
    fixed center room (wall value 1)."""
    w = self.width
    h = self.height
    # Solid outer wall on all four edges.
    for x in range(w):
        _buffer[I(x, 0)] = 1
        _buffer[I(x, h - 1)] = 1
    for y in range(h):
        _buffer[I(0, y)] = 1
        _buffer[I(w - 1, y)] = 1
    # Guaranteed-empty band just inside the outer wall.
    for x in range(1, w - 1):
        _buffer[I(x, 1)] = 0
        _buffer[I(x, h - 2)] = 0
    for y in range(1, h - 1):
        _buffer[I(1, y)] = 0
        _buffer[I(w - 2, y)] = 0
    buildFixedWalls(self, I, _buffer, 1)
def postInit( self, I, _buffer ):
    """Finalize the generated map buffer.

    Converts raw generator values (0/1) to tile constants, re-stamps the
    border and center room as fixed walls, then lays down concentric fixed
    wall rings (with radial spokes between sections) and clears everything
    outside the outermost ring to air.  ``I(x, y)`` maps coordinates to a
    buffer index.
    """
    centerX = int( self.width / 2 )
    centerY = int( self.height / 2 )
    # Translate generator output into tile constants.
    for x in range( self.width ):
        for y in range( self.height ):
            i = I( x, y )
            val = _buffer[ i ]
            if val == 0:
                _buffer[ i ] = TILE_AIR #NOOP, but for clarity
            elif val == 1:
                _buffer[ i ] = TILE_WALL
            else:
                raise Exception( "Incorrect tile type in postInit!" )
    # Outer border becomes indestructible fixed wall.
    for x in range( self.width ):
        _buffer[ I( x, 0 ) ] = TILE_FIXED_WALL
        _buffer[ I( x, self.height - 1 ) ] = TILE_FIXED_WALL
    for y in range( self.height ):
        _buffer[ I( 0, y ) ] = TILE_FIXED_WALL
        _buffer[ I( self.width - 1, y ) ] = TILE_FIXED_WALL
    buildFixedWalls( self, I, _buffer, TILE_FIXED_WALL )
    # Ring layout state: each layer adds MapGen_BaseSurface per section, so
    # radii grow roughly with the square root of accumulated area.
    curSurface = ( GameData.MapGen_CenterRoom_Size[0] * 2 ) * ( GameData.MapGen_CenterRoom_Size[1] * 2 )
    curRadius = -1
    def setFixedWall( x, y ):
        # Callback handed to circle()/Line(): stamp a fixed wall cell.
        _buffer[ I( int( x ), int( y ) ) ] = TILE_FIXED_WALL
    circleNum = 0
    while curRadius < GameData.MapGen_MaxCircleRadius:
        sectionCount = max( circleNum * GameData.MapGen_CircleSectionsPerLayer, 1 )
        nextSurface = curSurface + ( GameData.MapGen_BaseSurface * sectionCount )
        nextRadius = int( math.sqrt( nextSurface / math.pi ) )
        # Two-cell-thick ring wall at the new radius.
        circle( centerX, centerY, nextRadius, nextRadius + 2, setFixedWall )
        #Seperate sections in circle
        if sectionCount > 1:
            for i in range( sectionCount ):
                angle = i * math.pi * 2 / sectionCount
                s = math.sin( angle )
                c = math.cos( angle )
                # Radial spoke from the previous ring out to the new one.
                Line( int( s * ( curRadius + 1 ) ) + centerX, int( c * ( curRadius + 1 ) ) + centerY, int( s * nextRadius ) + centerX, int( c * nextRadius ) + centerY, StarCallback( setFixedWall ) )
        curRadius = nextRadius
        curSurface = int( curRadius ** 2 * math.pi )
        circleNum += 1
        print( curRadius )
    # Clear all cells beyond the outermost ring (plus one tile) to air.
    curRadius += 1
    curRadiusSquared = curRadius ** 2
    for x in range( self.width ):
        for y in range( self.height ):
            if ( ( x - centerX ) ** 2 + ( y - centerY ) ** 2 ) > curRadiusSquared:
                _buffer[ I( x, y ) ] = TILE_AIR #NOOP, but for clarity
|
bjuvensjo/scripts
|
vang/nexus3/upload.py
|
Python
|
apache-2.0
| 1,766
| 0.003964
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from os import environ
from sys import argv

from requests import put
def read_file(file_path):  # pragma: no cover
    """Return the raw bytes of *file_path* (read in binary mode)."""
    with open(file_path, 'rb') as f:
        return f.read()
def upload(file_path, repository, repository_path, url, username, password):
    """PUT a local file into a Nexus3 repository and return the status code.

    Raises OSError when the server answers with anything other than 201.
    """
    target = f'{url}/repository/{repository}/{repository_path}'
    response = put(
        target,
        data=read_file(file_path),
        headers={'Content-Type': 'application/octet-stream'},
        auth=(username, password),
    )
    if response.status_code != 201:
        raise OSError(f'{response.status_code}, {response.content}')
    return response.status_code
def parse_args(args):
    """Parse the upload script's command line.

    Positional: file_path, repository, repository_path.
    Optional -l/-u/-p default to the NEXUS3_* environment variables.
    """
    parser = ArgumentParser(description='Get assets')
    positionals = (
        ('file_path', 'File to upload, e.g. ./myartifact-1.0.0.jar'),
        ('repository', 'Nexus3 repository, e.g. maven-releases'),
        ('repository_path',
         'Path within Nexus3 repository, e.g com/myorg/myartifact/1.0.0/myartifact-1.0.0.jar'),
    )
    for name, help_text in positionals:
        parser.add_argument(name, help=help_text)
    optionals = (
        ('-l', '--url', 'NEXUS3_REST_URL', 'Nexus3 url, e.g. http://nexus_host:8080'),
        ('-u', '--username', 'NEXUS3_USERNAME', 'Nexus3 username'),
        ('-p', '--password', 'NEXUS3_PASSWORD', 'Nexus3 password'),
    )
    for short_flag, long_flag, env_key, help_text in optionals:
        parser.add_argument(short_flag, long_flag,
                            default=environ.get(env_key, None), help=help_text)
    return parser.parse_args(args)
def main(file_path, repository, repository_path, url, username, password):
    """CLI entry point: perform the upload and echo the HTTP status code."""
    status = upload(file_path, repository, repository_path, url, username, password)
    print(status)
# Entry point: the parsed namespace's attributes map 1:1 onto main()'s
# parameters, so the namespace dict is splatted directly.
if __name__ == '__main__':  # pragma: no cover
    main(**parse_args(argv[1:]).__dict__)
|
CaesarTjalbo/musictagger
|
mp3names/model_classes.py
|
Python
|
gpl-3.0
| 3,029
| 0.028722
|
# -*- coding: utf-8 -*-
import sys
import os
import logging
import random
import PyQt4
from PyQt4.QtCore import *
#from PyQt4.QtCore import QAbstractTableModel
import constants
class Model(QAbstractTableModel):
    """Qt table model exposing ``self.album.rows`` (a list of dict-like rows).

    ``keys`` lists the column keys in display order; subclasses are expected
    to provide ``keys``, ``modelType`` and an ``album`` attribute.
    NOTE(review): uses ``unicode`` and old-style ``SIGNAL`` emission, so this
    code targets Python 2 / PyQt4 — confirm before porting.
    """
    keys = list()      # column keys, in display order
    modelType = None   # one of constants.ModelType.*

    def __init__(self, parent=None):
        """Create the model and its logger."""
        self.log = logging.getLogger('Model')
        super(QAbstractTableModel, self).__init__(parent)

    def rowCount(self, parent=None):
        """Number of rows in the loaded album, or 0 when no album is set."""
        if hasattr(self, 'album') and self.album:
            if hasattr(self.album, 'rows'):
                return len(self.album.rows)
        return 0

    def columnCount(self, parent=None):
        """One column per entry in ``keys``."""
        return len(self.keys)

    def data(self, index, role=None):
        """Cell value for display/tooltip/edit roles, else an empty QVariant."""
        if index.isValid():
            # The original bounds test used 'or' (always true) and referenced
            # the nonexistent self.rows; check the real row list and fail
            # soft when no album is loaded.
            rows = self.album.rows if (hasattr(self, 'album') and self.album) else []
            if 0 <= index.row() < len(rows):
                if role in (Qt.DisplayRole, Qt.ToolTipRole, Qt.EditRole):
                    return rows[index.row()][self.keys[index.column()]]
        return QVariant()

    def setData(self, index, value, role):
        """Store an edited cell value and announce the change."""
        if index.isValid() and role == Qt.EditRole:
            key = self.keys[index.column()]
            value = unicode(value.toString())
            self.album.rows[index.row()][key] = value
            self.emit(SIGNAL('dataChanged'), index, index)
        return True

    def headerData(self, section, orientation, role):
        """Column header text: the key name, for horizontal display headers."""
        if 0 <= section < len(self.keys):
            if orientation == Qt.Horizontal and role == Qt.DisplayRole:
                return self.keys[section]
        return QVariant()

    def flags(self, index):
        """Final models are editable; all others keep the default flags."""
        base = super(QAbstractTableModel, self).flags(index)
        if self.modelType == constants.ModelType.ModelTypeFinal:
            return base | Qt.ItemIsEditable
        return base

    def getModelType(self):
        """Return this model's type constant."""
        return self.modelType
|
sschaetz/n5a
|
test/test_generate.py
|
Python
|
mit
| 361
| 0.00831
|
from n5a import make_type
from n5a.generate import generate

from .test_definitions import get_pos3d_definition
def test_generate_string():
    """Generated source must contain the Pos3D struct definition."""
    generated = generate(get_pos3d_definition())
    assert 'struct Pos3D' in generated
def test_generate_file():
    """Write the generated Pos3D header out for the C++ compilation test."""
    s = generate(get_pos3d_definition())
    with open('test/test_cpp/generated/pos3d.hpp', 'w') as f:
        f.write(s)
|
DragonQuiz/MCEdit-Unified
|
stock-filters/Forester.py
|
Python
|
isc
| 51,634
| 0.000562
|
# Version 5
'''This takes a base MineCraft level and adds or edits trees.
Place it in the folder where the save files are (usually .../.minecraft/saves)
Requires mcInterface.py in the same folder.'''
# Here are the variables you can edit.
# This is the name of the map to edit.
# Make a backup if you are experimenting!
LOADNAME = "LevelSave"
# How many trees do you want to add?
TREECOUNT = 12
# Where do you want the new trees?
# X, and Z are the map coordinates
X = 66
Z = -315
# How large an area do you want the trees to be in?
# for example, RADIUS = 10 will make place trees randomly in
# a circular area 20 blocks wide.
RADIUS = 80
# NOTE: tree density will be higher in the center than at the edges.
# Which shapes would you like the trees to be?
# these first three are best suited for small heights, from 5 - 10
# "normal" is the normal minecraft shape, it only gets taller and shorter
# "bamboo" a trunk with foliage, it only gets taller and shorter
# "palm" a trunk with a fan at the top, only gets taller and shorter
# "stickly" selects randomly from "normal", "bamboo" and "palm"
# these last five are best suited for very large trees, heights greater than 8
# "round" procedural spherical shaped tree, can scale up to immense size
# "cone" procedural, like a pine tree, also can scale up to immense size
# "procedural" selects randomly from "round" and "conical"
# "rainforest" many slender trees, most at the lower range of the height,
# with a few at the upper end.
# "mangrove" makes mangrove trees (see PLANTON below).
SHAPE = "procedural"
# What height should the trees be?
# Specifies the average height of the tree
# Examples:
# 5 is normal minecraft tree
# 3 is minecraft tree with foliage flush with the ground
# 10 is very tall trees, they will be hard to chop down
# NOTE: for round and conical, this affects the foliage size as well.
# CENTERHEIGHT is the height of the trees at the center of the area
# ie, when radius = 0
CENTERHEIGHT = 55
# EDGEHEIGHT is the height at the trees at the edge of the area.
# ie, when radius = RADIUS
EDGEHEIGHT = 25
# What should the variation in HEIGHT be?
# actual value +- variation
# default is 1
# Example:
# HEIGHT = 8 and HEIGHTVARIATION = 3 will result in
# trunk heights from 5 to 11
# value is clipped to a max of HEIGHT
# for a good rainforest, set this value not more than 1/2 of HEIGHT
HEIGHTVARIATION = 12
# Do you want branches, trunk, and roots?
# True makes all of that
# False does not create the trunk and branches, or the roots (even if they are
# enabled further down)
WOOD = True
# Trunk thickness multiplyer
# from zero (super thin trunk) to whatever huge number you can think of.
# Only works if SHAPE is not a "stickly" subtype
# Example:
# 1.0 is the default, it makes decently normal sized trunks
# 0.3 makes very thin trunks
# 4.0 makes a thick trunk (good for HOLLOWTRUNK).
# 10.5 will make a huge thick trunk. Not even kidding. Makes spacious
# hollow trunks though!
TRUNKTHICKNESS = 1.0
# Trunk height, as a fraction of the tree
# Only works on "round" shaped trees
# Sets the height of the crown, where the trunk ends and splits
# Examples:
# 0.7 the default value, a bit more than half of the height
# 0.3 good for a fan-like tree
# 1.0 the trunk will extend to the top of the tree, and there will be no crown
# 2.0 the trunk will extend out the top of the foliage, making the tree appear
# like a cluster of green grapes impaled on a spike.
TRUNKHEIGHT = 0.7
# Do you want the trunk and tree broken off at the top?
# removes about half of the top of the trunk, and any foliage
# and branches that would attach above it.
# Only works if SHAPE is not a "stickly" subtype
# This results in trees that are shorter than the height settings
# True does that stuff
# False makes a normal tree (default)
BROKENTRUNK = False
# Note, this works well with HOLLOWTRUNK (below) turned on as well.
# Do you want the trunk to be hollow (or filled) inside?
# Only works with larger sized trunks.
# Only works if SHAPE is not a "stickly" subtype
# True makes the trunk hollow (or filled with other stuff)
# False makes a solid trunk (default)
HOLLOWTRUNK = False
# Note, this works well with BROKENTRUNK set to true (above)
# Further note, you may want to use a large value for TRUNKTHICKNESS
# How many branches should there be?
# General multiplyer for the number of branches
# However, it will not make more branches than foliage clusters
# so to garuntee a branch to every foliage cluster, set it very high, like 10000
# this also affects the number of roots, if they are enabled.
# Examples:
# 1.0 is normal
# 0.5 will make half as many branches
# 2.0 will make twice as mnay branches
# 10000 will make a branch to every foliage cluster (I'm pretty sure)
BRANCHDENSITY = 1.0
# do you want roots from the bottom of the tree?
# Only works if SHAPE is "round" or "cone" or "procedural"
# "yes" roots will penetrate anything, and may enter underground caves.
# "tostone" roots will be stopped by stone (default see STOPSROOTS below).
# There may be some penetration.
# "hanging" will hang downward in air. Good for "floating" type maps
# (I really miss "floating" terrain as a default option)
# "no" roots will not be generated
ROOTS = "tostone"
# Do you want root buttresses?
# These make the trunk not-round at the base, seen in tropical or old trees.
# This option generally makes the trunk larger.
# Only works if SHAPE is "round" or "cone" or "procedural"
# Options:
# True makes root butresses
# False leaves them out
ROOTBUTTRESSES = True
# Do you want leaves on the trees?
# True there will be leaves
# False there will be no leaves
FOLIAGE = True
# How thick should the foliage be
# General multiplyer for the number of foliage clusters
# Examples:
# 1.0 is normal
# 0.3 will make very sparse spotty trees, half as many foliage clusters
# 2.0 will make dense foliage, better for the "rainforests" SHAPE
FOLIAGEDENSITY = 1.0
# Limit the tree height to the top of the map?
# True the trees will not grow any higher than the top of the map
# False the trees may be cut off by the top of the map
MAPHEIGHTLIMIT = True
# add lights in the middle of foliage clusters
# for those huge trees that get so dark underneath
# or for enchanted forests that should glow and stuff
# Only works if SHAPE is "round" or "cone" or "procedural"
# 0 makes just normal trees
# 1 adds one light inside the foliage clusters for a bit of light
# 2 adds two lights around the base of each cluster, for more light
# 4 adds lights all around the base of each cluster for lots of light
LIGHTTREE = 0
# Do you want to only place trees near existing trees?
# True will only plant new trees near existing trees.
# False will not check for existing trees before planting.
# NOTE: the taller the tree, the larger the forest needs to be to qualify
# OTHER NOTE: this feature has not been extensively tested.
# IF YOU HAVE PROBLEMS: SET TO False
ONLYINFORESTS = False
#####################
# Advanced options! #
#####################
# What kind of material should the "wood" be made of?
# defaults to 17
WOODMAT = 17
# What data value should the wood blocks have?
# Some blocks, like wood, leaves, and cloth change
# apperance with different data values
# defaults to 0
WOODDATA = 0
# What kind of material should the "leaves" be made of?
# defaults to 18
LEAFMAT = 18
# What data value should the leaf blocks have?
# Some blocks, like wood, leaves, and cloth change
# apperance with different data values
# defaults to 0
LEAFDATA = 0
# What kind of material should the "lights" be made of?
# defaults to 89 (glowstone)
LIGHTMAT = 89
# What data value should the light blocks have?
# defaults to 0
LIGHTDATA = 0
# What kind of material would you like the "hollow" trunk filled with?
# defaults to 0 (air)
TRUNKFILLMAT = 0
# What data value would you like the "hollow" trunk filled with?
# defaults to 0
TRUNKFILLDATA = 0
# What kind of blocks should the trees be planted on?
# Use the Minecraft index.
# Examples
# 2 is grass (the default)
# 3 is dirt
# 1 is stone (an odd choice)
# 12 is sand (for beach or desert)
# 9 is water (if you want an aquatic forest)
# this is a list, and comma
|
chriscoyfish/coala-bears
|
bears/java/InferBear.py
|
Python
|
agpl-3.0
| 896
| 0
|
import re
from coalib.bearlib.abstractions.Lint import Lint
from coalib.bears.LocalBear import LocalBear
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
class InferBear(LocalBear, Lint):
    """Check Java files with the ``infer`` static analyzer."""
    executable = 'infer'
    arguments = '-npb -- javac (unknown)'
    # infer output shape: "<file>:<line>: <error|warning>: <message>"
    output_regex = re.compile(
        r'(.+):'
        r'(?P<line>.+): '
        r'(?P<severity>error|warning): '
        r'(?P<message>.*)')
    severity_map = {
        "error": RESULT_SEVERITY.MAJOR,
        "warning": RESULT_SEVERITY.NORMAL}
    LANGUAGES = {"Java"}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    ASCIINEMA_URL = 'https://asciinema.org/a/1g2k0la7xo5az9t8f1v5zy66q'
    CAN_DETECT = {'Security'}

    def run(self, filename, file):
        '''
        Checks the code with ``infer``.
        '''
        return self.lint(filename)
|
looker/sentry
|
src/sentry/debug/panels/redis.py
|
Python
|
bsd-3-clause
| 2,952
| 0.000678
|
from __future__ import absolute_import, unicode_literals

from django.template import Context, Template
from django.utils.translation import ugettext_lazy as _

from time import time

from .base import CallRecordingPanel
from ..utils.function_wrapper import FunctionWrapper
from ..utils.patch_context import PatchContext
# Debug-panel markup: one table row per recorded Redis call.
TEMPLATE = Template(
    """
{% load i18n %}
<h4>{% trans "Requests" %}</h4>
<table>
<thead>
<tr>
<th>{% trans "Duration" %}</th>
<th>{% trans "Command" %}</th>
<th>{% trans "Args" %}</th>
</tr>
</thead>
<tbody>
{% for call in calls %}
<tr>
<td>{{ call.duration }} ms</td>
<td>{{ call.command }}</td>
<td>{{ call.args }} {{ call.kwargs }}</td>
</tr>
{% endfor %}
</tbody>
</table>
"""
)
class RedisPipelineWrapper(FunctionWrapper):
    """Records timing and the queued command stack for pipeline.execute."""

    def __call__(self, func, pipeline, *args, **kwargs):
        __traceback_hide__ = True  # NOQA
        # Snapshot the stack up front: execute() consumes it before the
        # finally block runs.
        queued = pipeline.command_stack[:]
        started = time()
        try:
            return func(pipeline, *args, **kwargs)
        finally:
            self.record({
                'name': 'pipeline',
                'args': repr(queued),
                'kwargs': repr({}),
                'start': started,
                'end': time(),
            })
class RedisWrapper(FunctionWrapper):
    """Records timing for individual Redis commands (execute_command)."""

    def __call__(self, func, *args, **kwargs):
        __traceback_hide__ = True  # NOQA
        started = time()
        try:
            return func(*args, **kwargs)
        finally:
            # args[0] is the client instance, args[1] the command name.
            self.record({
                'name': args[1],
                'args': repr(args[2:]),
                'kwargs': repr(kwargs),
                'start': started,
                'end': time(),
            })
class RedisPanel(CallRecordingPanel):
    """Debug-toolbar panel listing recorded Redis calls and total duration."""
    title = nav_title = _("Redis")

    @classmethod
    def get_context(cls, collector):
        # Patch both the single-command and pipeline execution paths.
        return [
            PatchContext('redis.client.StrictRedis.execute_command', RedisWrapper(collector)),
            PatchContext('redis.client.BasePipeline.execute', RedisPipelineWrapper(collector)),
        ]

    @property
    def content(self):
        return TEMPLATE.render(Context(self.get_stats()))

    def process_response(self, request, response):
        # Convert each recorded call's start/end pair to whole milliseconds.
        durations = [int((call['end'] - call['start']) * 1000) for call in self.calls]
        calls = [
            {
                'duration': ms,
                'command': call['name'],
                'args': call['args'],
                'kwargs': call['kwargs'],
            }
            for ms, call in zip(durations, self.calls)
        ]
        self.record_stats({
            'calls': calls,
            'total_time': sum(durations),
        })
|
GoogleCloudPlatform/terraform-python-testing-helper
|
test/test_args.py
|
Python
|
apache-2.0
| 4,448
| 0.002248
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Test the function for mapping Terraform arguments."
import pytest
import tftest
# (kwargs, expected) pairs: each kwargs dict passed to tftest.parse_args
# must yield exactly the listed Terraform CLI flags.
ARGS_TESTS = (
    ({'auto_approve': True}, ['-auto-approve']),
    ({'auto_approve': False}, []),
    ({'backend': True}, []),
    ({'backend': None}, []),
    ({'backend': False}, ['-backend=false']),
    ({'color': True}, []),
    ({'color': False}, ['-no-color']),
    ({'color': False, 'input': False}, ['-no-color', '-input=false']),
    ({'force_copy': True}, ['-force-copy']),
    ({'force_copy': None}, []),
    ({'force_copy': False}, []),
    ({'input': True}, []),
    ({'input': False}, ['-input=false']),
    ({'json_format': True}, ['-json']),
    ({'json_format': False}, []),
    ({'lock': True}, []),
    ({'lock': False}, ['-lock=false']),
    ({'plugin_dir': ''}, []),
    ({'plugin_dir': 'abc'}, ['-plugin-dir', 'abc']),
    ({'refresh': True}, []),
    ({'refresh': None}, []),
    ({'refresh': False}, ['-refresh=false']),
    ({'upgrade': True}, ['-upgrade']),
    ({'upgrade': False}, []),
    ({'tf_var_file': None}, []),
    ({'tf_var_file': 'foo.tfvar'}, ['-var-file=foo.tfvar']),
)
@pytest.mark.parametrize("kwargs, expected", ARGS_TESTS)
def test_args(kwargs, expected):
    """No arguments yields no flags; each kwargs case maps to its flags."""
    actual = tftest.parse_args(**kwargs)
    assert tftest.parse_args() == []
    assert actual == expected
# (kwargs, expected) pairs for terragrunt-specific flags.
TERRAGRUNT_ARGS_TESTCASES = [
    ({"tg_config": "Obama"}, ['--terragrunt-config', 'Obama']),
    ({"tg_tfpath": "Barrack"}, ['--terragrunt-tfpath', 'Barrack']),
    ({"tg_no_auto_init": True}, ['--terragrunt-no-auto-init']),
    ({"tg_no_auto_init": False}, []),
    ({"tg_no_auto_retry": True}, ['--terragrunt-no-auto-retry']),
    ({"tg_no_auto_retry": False}, []),
    ({"tg_non_interactive": True}, ['--terragrunt-non-interactive']),
    ({"tg_non_interactive": False}, []),
    ({"tg_working_dir": "George"}, ['--terragrunt-working-dir', 'George']),
    ({"tg_download_dir": "Bush"}, ['--terragrunt-download-dir', 'Bush']),
    ({"tg_source": "Clinton"}, ['--terragrunt-source', 'Clinton']),
    ({"tg_source_update": True}, ['--terragrunt-source-update']),
    ({"tg_source_update": False}, []),
    ({"tg_iam_role": "Bill"}, ['--terragrunt-iam-role', 'Bill']),
    ({"tg_ignore_dependency_errors": True}, ['--terragrunt-ignore-dependency-errors']),
    ({"tg_ignore_dependency_errors": False}, []),
    ({"tg_ignore_dependency_order": True}, ['--terragrunt-ignore-dependency-order']),
    ({"tg_ignore_dependency_order": False}, []),
    # NOTE(review): any truthy value (not just True) enables this flag,
    # per the string used here — confirm against tftest.parse_args.
    ({"tg_ignore_external_dependencies": "dont care what is here"},
     ['--terragrunt-ignore-external-dependencies']),
    ({"tg_include_external_dependencies": True}, ['--terragrunt-include-external-dependencies']),
    ({"tg_include_external_dependencies": False}, []),
    # NOTE(review): emitted as a single 'flag value' string, unlike the other
    # flag/value pairs — confirm this matches tftest's actual output.
    ({"tg_parallelism": 20}, ['--terragrunt-parallelism 20']),
    ({"tg_exclude_dir": "Ronald"}, ['--terragrunt-exclude-dir', 'Ronald']),
    ({"tg_include_dir": "Reagan"}, ['--terragrunt-include-dir', 'Reagan']),
    ({"tg_check": True}, ['--terragrunt-check']),
    ({"tg_check": False}, []),
    ({"tg_hclfmt_file": "Biden"}, ['--terragrunt-hclfmt-file', 'Biden']),
    ({"tg_override_attr": {"Iron": "Man", "Captain": "America"}},
     ['--terragrunt-override-attr=Iron=Man', '--terragrunt-override-attr=Captain=America']),
    ({"tg_debug": True}, ['--terragrunt-debug']),
    ({"tg_debug": False}, []),
]
@pytest.mark.parametrize("kwargs, expected", TERRAGRUNT_ARGS_TESTCASES)
def test_terragrunt_args(kwargs, expected):
    """Each terragrunt kwarg maps to its expected CLI flag list."""
    actual = tftest.parse_args(**kwargs)
    assert actual == expected
def test_var_args():
    """init_vars become -backend-config entries; tf_vars become -var pairs."""
    init_args = tftest.parse_args(init_vars={'a': 1, 'b': '["2"]'})
    expected_init = sorted(["-backend-config=a=1", '-backend-config=b=["2"]'])
    assert sorted(init_args) == expected_init
    var_args = tftest.parse_args(tf_vars={'a': 1, 'b': '["2"]'})
    expected_vars = sorted(['-var', 'b=["2"]', '-var', 'a=1'])
    assert sorted(var_args) == expected_vars
def test_targets():
    """Each targets entry becomes a -target=<name> flag, in sorted order."""
    actual = tftest.parse_args(targets=['one', 'two'])
    expected = sorted(['-target=one', '-target=two'])
    assert actual == expected
|
Williams224/davinci-scripts
|
ksteta3pi/Consideredbkg/MC_12_11134011_MagUp.py
|
Python
|
mit
| 4,905
| 0.026911
|
#-- GAUDI jobOptions generated on Mon Jul 20 10:20:49 2015
#-- Contains event types :
#-- 11134011 - 42 files - 900254 events - 251.92 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-125836
#-- StepId : 125836
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08 - Implicit merging.
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-127969
#-- StepId : 127969
#-- StepName : Reco14c for MC - 2012
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p11
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/DST-multipleTCK-2012.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r218
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000001_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000002_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000003_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000004_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000005_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000006_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000007_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000008_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000009_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000010_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000011_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000012_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000013_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000014_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000015_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000016_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000017_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000018_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000019_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000020_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000021_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000022_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000023_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000024_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/0
|
0046297/0000/00046297_00000025_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000026_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000027_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000029_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000030_2.AllStreams.dst',
'LFN:/lhcb/MC
|
/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000031_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000032_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000033_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000034_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000035_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000036_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000037_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000038_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000039_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000040_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000041_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000042_2.AllStreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00046297/0000/00046297_00000043_2.AllStreams.dst'
], clear=True)
|
xkmato/casepro
|
casepro/contacts/migrations/0021_contact_is_stopped_pt1.py
|
Python
|
bsd-3-clause
| 745
| 0.002685
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Phase 1 of adding Contact.is_stopped (opt-out-of-messages flag)."""

    dependencies = [
        ('contacts', '0020_unset_suspend_from_dynamic'),
    ]

    operations = [
        # Introduce the column as nullable first so existing rows need no value.
        migrations.AddField(
            model_name='contact',
            name='is_stopped',
            field=models.NullBooleanField(
                help_text='Whether this contact opted out of receiving messages'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='is_stopped',
            field=models.NullBooleanField(
                default=False,
                help_text='Whether this contact opted out of receiving messages'),
        ),
    ]
|
matrix-org/synapse
|
synapse/replication/slave/storage/directory.py
|
Python
|
apache-2.0
| 767
| 0
|
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LIC
|
ENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIE
|
S OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.storage.databases.main.directory import DirectoryWorkerStore
from ._base import BaseSlavedStore
class DirectoryStore(DirectoryWorkerStore, BaseSlavedStore):
    # Room-directory store for synapse worker processes: all read behaviour
    # comes from DirectoryWorkerStore, combined with the slaved-store plumbing.
    pass
|
DarkFenX/Pyfa
|
service/port/multibuy.py
|
Python
|
gpl-3.0
| 3,507
| 0.001426
|
# =============================================================================
# Copyright (C) 2014 Ryan Holmes
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
from service.const import PortMultiBuyOptions
from service.price import Price as sPrc
# Export options offered for the multibuy format:
# (option id, UI label, tooltip, enabled-by-default).
MULTIBUY_OPTIONS = (
    (PortMultiBuyOptions.LOADED_CHARGES, 'Loaded Charges', 'Export charges loaded into modules', True),
    (PortMultiBuyOptions.IMPLANTS, 'Implants && Boosters', 'Export implants and boosters', False),
    (PortMultiBuyOptions.CARGO, 'Cargo', 'Export cargo contents', True),
    (PortMultiBuyOptions.OPTIMIZE_PRICES, 'Optimize Prices', 'Replace items by cheaper alternatives', False),
)
def exportMultiBuy(fit, options, callback):
    """Build a multibuy paste (ship name plus item lines) for *fit*.

    *options* maps PortMultiBuyOptions flags to booleans. When OPTIMIZE_PRICES
    is set, cheaper replacements are looked up asynchronously and the result is
    delivered via *callback*; otherwise the text goes to *callback* when given,
    or is returned directly.
    """
    amounts = {}
    # Fitted modules; mutated modules are skipped (they cannot be bought),
    # and loaded charges are included only when requested.
    for mod in fit.modules:
        if not mod.item or mod.isMutated:
            continue
        _addItem(amounts, mod.item)
        if mod.charge and options[PortMultiBuyOptions.LOADED_CHARGES]:
            _addItem(amounts, mod.charge, mod.numCharges)
    for drone in fit.drones:
        _addItem(amounts, drone.item, drone.amount)
    for fighter in fit.fighters:
        _addItem(amounts, fighter.item, fighter.amount)
    if options[PortMultiBuyOptions.CARGO]:
        for cargo in fit.cargo:
            _addItem(amounts, cargo.item, cargo.amount)
    if options[PortMultiBuyOptions.IMPLANTS]:
        for implant in fit.implants:
            _addItem(amounts, implant.item)
        for booster in fit.boosters:
            _addItem(amounts, booster.item)
    if options[PortMultiBuyOptions.OPTIMIZE_PRICES]:
        # Price lookup is asynchronous: rebuild the amounts with replacements
        # and deliver the formatted text once the service calls back.
        def onCheaperFound(replacements):
            cheaper = {}
            for item, amount in amounts.items():
                _addItem(cheaper, replacements.get(item, item), amount)
            callback(_prepareString(fit.ship.item, cheaper))

        priceSvc = sPrc.getInstance()
        priceSvc.findCheaperReplacements(amounts, onCheaperFound)
    else:
        text = _prepareString(fit.ship.item, amounts)
        if callback:
            callback(text)
        else:
            return text
def _addItem(container, item, quantity=1):
if item not in container:
container[item] = 0
con
|
tainer[item] += quantity
def _prepareString(shipItem, itemAmounts):
exportLines = []
exportLines.append(shipItem.name)
for item in sorted(itemAmounts, key=lambda i: (i.group.category.name, i.group.name, i.name)):
count = itemAmounts[item]
if count == 1:
exportLines.append(item.name)
else:
exportLines.append('{} x{}'.format(item.name, count))
return "\n".join(exportLines)
|
Wakeupbuddy/pexpect
|
pexpect/replwrap.py
|
Python
|
isc
| 4,604
| 0.002389
|
"""Generic wrapper for read-eval-print-loops, a.k.a. interactive shells
"""
import os.path
import signal
import sys
import re
import pexpect
# Python 2/3 compatibility shim: u() normalises literals to text and
# basestring is aliased so isinstance checks work on both versions.
PY3 = (sys.version_info[0] >= 3)
if PY3:
    def u(s): return s
    basestring = str
else:
    def u(s): return s.decode('utf-8')
# Unique prompt markers installed into the child REPL; chosen to be very
# unlikely to appear in normal program output.
PEXPECT_PROMPT = u('[PEXPECT_PROMPT>')
PEXPECT_CONTINUATION_PROMPT = u('[PEXPECT_PROMPT+')
class REPLWrapper(object):
    """Wrapper for a REPL.

    :param cmd_or_spawn: This can either be an instance of :class:`pexpect.spawn`
        in which a REPL has already been started, or a str command to start a new
        REPL process.
    :param str orig_prompt: The prompt to expect at first.
    :param str prompt_change: A command to change the prompt to something more
        unique. If this is ``None``, the prompt will not be changed. This will
        be formatted with the new and continuation prompts as positional
        parameters, so you can use ``{}`` style formatting to insert them into
        the command.
    :param str new_prompt: The more unique prompt to expect after the change.
    :param str extra_init_cmd: Commands to do extra initialisation, such as
        disabling pagers.
    """
    def __init__(self, cmd_or_spawn, orig_prompt, prompt_change,
                 new_prompt=PEXPECT_PROMPT,
                 continuation_prompt=PEXPECT_CONTINUATION_PROMPT,
                 extra_init_cmd=None):
        if isinstance(cmd_or_spawn, basestring):
            self.child = pexpect.spawnu(cmd_or_spawn, echo=False)
        else:
            self.child = cmd_or_spawn
        if self.child.echo:
            # Existing spawn instance has echo enabled, disable it
            # to prevent our input from being repeated to output.
            self.child.setecho(False)
            self.child.waitnoecho()
        if prompt_change is None:
            self.prompt = orig_prompt
        else:
            self.set_prompt(orig_prompt,
                        prompt_change.format(new_prompt, continuation_prompt))
            self.prompt = new_prompt
        self.continuation_prompt = continuation_prompt
        # Consume output up to the (possibly new) prompt so the child is idle.
        self._expect_prompt()
        if extra_init_cmd is not None:
            self.run_command(extra_init_cmd)
    def set_prompt(self, orig_prompt, prompt_change):
        """Wait for *orig_prompt*, then send the prompt-changing command."""
        self.child.expect(orig_prompt)
        self.child.sendline(prompt_change)
    def _expect_prompt(self, timeout=-1):
        # Returns 0 when the main prompt matched, 1 for the continuation prompt.
        return self.child.expect_exact([self.prompt, self.continuation_prompt],
                                       timeout=timeout)
    def run_command(self, command, timeout=-1):
        """Send a command to the REPL, wait for and return output.
        :param str command: The command to send. Trailing newlines are not needed.
          This should be a complete block of input that will trigger execution;
          if a continuation prompt is found after sending input, :exc:`ValueError`
          will be raised.
        :param int timeout: How long to wait for the next prompt. -1 means the
          default from the :class:`pexpect.spawn` object (default 30 seconds).
          None means to wait indefinitely.
        """
        # Split up multiline commands and feed them in bit-by-bit
        cmdlines = command.splitlines()
        # splitlines ignores trailing newlines - add it back in manually
        if command.endswith('\n'):
            cmdlines.append('')
        if not cmdlines:
            raise ValueError("No command was given")
        self.child.sendline(cmdlines[0])
        for line in cmdlines[1:]:
            self._expect_prompt(timeout=1)
            self.child.sendline(line)
        # Command was fully submitted, now wait for the next prompt
        if self._expect_prompt(timeout=timeout) == 1:
            # We got the continuation prompt - command was incomplete
            self.child.kill(signal.SIGINT)
            self._expect_prompt(timeout=1)
            raise ValueError("Continuation prompt found - input was incomplete:\n"
                             + command)
        return self.child.before
def python(command="python"):
    """Start a Python shell and return a :class:`REPLWrapper` object."""
    # sys.ps1/sys.ps2 are reassigned so the interactive prompts become the
    # unique PEXPECT markers defined above.
    return REPLWrapper(command, u(">>> "), u("import sys; sys.ps1={0!r}; sys.ps2={1!r}"))
def bash(command="bash"):
    """Start a bash shell and return a :class:`REPLWrapper` object."""
    # A dedicated rcfile keeps the user's own bashrc from interfering;
    # PS1/PS2 are set to the unique markers and PROMPT_COMMAND is cleared.
    bashrc = os.path.join(os.path.dirname(__file__), 'bashrc.sh')
    child = pexpect.spawnu(command, ['--rcfile', bashrc], echo=False)
    return REPLWrapper(child, u'\$', u("PS1='{0}' PS2='{1}' PROMPT_COMMAND=''"),
                       extra_init_cmd="export PAGER=cat")
|
bbsan2k/nzbToMedia
|
libs/beetsplug/badfiles.py
|
Python
|
gpl-3.0
| 4,564
| 0
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, François-Xavier Thomas.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyri
|
ght notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Use command-line tools to check for audio file corruption.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.util import displayable_path, confit
from beets import ui
from subprocess import check_ou
|
tput, CalledProcessError, list2cmdline, STDOUT
import shlex
import os
import errno
import sys
class BadFiles(BeetsPlugin):
    """Check library audio files for corruption using external CLI tools
    (mp3val, flac, or user-configured commands)."""

    def run_command(self, cmd):
        """Run *cmd* (an argv list); return (status, error count, output lines).

        A missing executable aborts the whole program with exit code 1.
        """
        self._log.debug(u"running command: {}",
                        displayable_path(list2cmdline(cmd)))
        try:
            output = check_output(cmd, stderr=STDOUT)
            errors = 0
            status = 0
        except CalledProcessError as e:
            # Non-zero exit: keep the tool's output, count it as one error.
            output = e.output
            errors = 1
            status = e.returncode
        except OSError as e:
            if e.errno == errno.ENOENT:
                ui.print_(u"command not found: {}".format(cmd[0]))
                sys.exit(1)
            else:
                raise
        output = output.decode(sys.getfilesystemencoding())
        return status, errors, [line for line in output.split("\n") if line]

    def check_mp3val(self, path):
        status, errors, output = self.run_command(["mp3val", path])
        if status == 0:
            # mp3val exits 0 even on problems; count its WARNING lines instead.
            output = [line for line in output if line.startswith("WARNING:")]
            errors = len(output)
        return status, errors, output

    def check_flac(self, path):
        # flac -wst: warnings-as-errors, silent, test-only (no decode output).
        return self.run_command(["flac", "-wst", path])

    def check_custom(self, command):
        # Build a checker that appends the file path to a user-configured command.
        def checker(path):
            cmd = shlex.split(command)
            cmd.append(path)
            return self.run_command(cmd)
        return checker

    def get_checker(self, ext):
        """Return the checker callable for extension *ext*, or None if none."""
        ext = ext.lower()
        try:
            command = self.config['commands'].get(dict).get(ext)
        except confit.NotFoundError:
            command = None
        if command:
            return self.check_custom(command)
        elif ext == "mp3":
            return self.check_mp3val
        elif ext == "flac":
            return self.check_flac

    def check_bad(self, lib, opts, args):
        """Run the appropriate checker on every library item matching *args*."""
        for item in lib.items(ui.decargs(args)):
            # First, check whether the path exists. If not, the user
            # should probably run `beet update` to cleanup your library.
            dpath = displayable_path(item.path)
            self._log.debug(u"checking path: {}", dpath)
            if not os.path.exists(item.path):
                ui.print_(u"{}: file does not exist".format(
                    ui.colorize('text_error', dpath)))

            # Run the checker against the file if one is found
            ext = os.path.splitext(item.path)[1][1:]
            checker = self.get_checker(ext)
            if not checker:
                continue
            path = item.path
            # NOTE(review): `unicode` implies Python 2 — paths are decoded
            # to text before being handed to the checker.
            if not isinstance(path, unicode):
                path = item.path.decode(sys.getfilesystemencoding())
            status, errors, output = checker(path)
            if status > 0:
                # BUGFIX: message previously read "checker exited withs status".
                ui.print_(u"{}: checker exited with status {}"
                          .format(ui.colorize('text_error', dpath), status))
                for line in output:
                    ui.print_(" {}".format(displayable_path(line)))
            elif errors > 0:
                ui.print_(u"{}: checker found {} errors or warnings"
                          .format(ui.colorize('text_warning', dpath), errors))
                for line in output:
                    ui.print_(u" {}".format(displayable_path(line)))
            else:
                ui.print_(u"{}: ok".format(ui.colorize('text_success', dpath)))

    def commands(self):
        bad_command = Subcommand('bad',
                                 help=u'check for corrupt or missing files')
        bad_command.func = self.check_bad
        return [bad_command]
|
GoogleCloudPlatform/declarative-resource-client-library
|
python/services/monitoring/service.py
|
Python
|
apache-2.0
| 7,348
| 0.001225
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.monitoring import service_pb2
from google3.cloud.graphite.mmv2.services.google.monitoring import service_pb2_grpc
from typing import List
class Service(object):
def __init__(
self,
name: str = None,
display_name: str = None,
custom: dict = None,
telemetry: dict = None,
user_labels: dict = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.display_name = display_name
self.custom = custom
self.telemetry = telemetry
self.user_labels = user_labels
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = service_pb2_grpc.MonitoringServiceServiceStub(channel.Channel())
request = service_pb2.ApplyMonitoringServiceRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self
|
.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if ServiceCustom.to_proto(self.custom):
request.resource.custom.CopyFrom(ServiceCustom.to_proto(self.cust
|
om))
else:
request.resource.ClearField("custom")
if ServiceTelemetry.to_proto(self.telemetry):
request.resource.telemetry.CopyFrom(
ServiceTelemetry.to_proto(self.telemetry)
)
else:
request.resource.ClearField("telemetry")
if Primitive.to_proto(self.user_labels):
request.resource.user_labels = Primitive.to_proto(self.user_labels)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyMonitoringService(request)
self.name = Primitive.from_proto(response.name)
self.display_name = Primitive.from_proto(response.display_name)
self.custom = ServiceCustom.from_proto(response.custom)
self.telemetry = ServiceTelemetry.from_proto(response.telemetry)
self.user_labels = Primitive.from_proto(response.user_labels)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = service_pb2_grpc.MonitoringServiceServiceStub(channel.Channel())
request = service_pb2.DeleteMonitoringServiceRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if ServiceCustom.to_proto(self.custom):
request.resource.custom.CopyFrom(ServiceCustom.to_proto(self.custom))
else:
request.resource.ClearField("custom")
if ServiceTelemetry.to_proto(self.telemetry):
request.resource.telemetry.CopyFrom(
ServiceTelemetry.to_proto(self.telemetry)
)
else:
request.resource.ClearField("telemetry")
if Primitive.to_proto(self.user_labels):
request.resource.user_labels = Primitive.to_proto(self.user_labels)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteMonitoringService(request)
@classmethod
def list(self, project, service_account_file=""):
stub = service_pb2_grpc.MonitoringServiceServiceStub(channel.Channel())
request = service_pb2.ListMonitoringServiceRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListMonitoringService(request).items
def to_proto(self):
resource = service_pb2.MonitoringService()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
resource.display_name = Primitive.to_proto(self.display_name)
if ServiceCustom.to_proto(self.custom):
resource.custom.CopyFrom(ServiceCustom.to_proto(self.custom))
else:
resource.ClearField("custom")
if ServiceTelemetry.to_proto(self.telemetry):
resource.telemetry.CopyFrom(ServiceTelemetry.to_proto(self.telemetry))
else:
resource.ClearField("telemetry")
if Primitive.to_proto(self.user_labels):
resource.user_labels = Primitive.to_proto(self.user_labels)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class ServiceCustom(object):
    """Converts between the (empty) ServiceCustom value and its proto message."""

    @classmethod
    def to_proto(self, resource):
        if not resource:
            return None
        return service_pb2.MonitoringServiceCustom()

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return None
        return ServiceCustom()
class ServiceCustomArray(object):
    """Element-wise proto conversion helpers for lists of ServiceCustom."""

    @classmethod
    def to_proto(self, resources):
        # Falsy input (None or empty list) is passed through unchanged.
        if not resources:
            return resources
        return [ServiceCustom.to_proto(item) for item in resources]

    @classmethod
    def from_proto(self, resources):
        return [ServiceCustom.from_proto(item) for item in resources]
class ServiceTelemetry(object):
    """Holds the telemetry resource name of a monitoring service."""

    def __init__(self, resource_name: str = None):
        self.resource_name = resource_name

    @classmethod
    def to_proto(self, resource):
        if not resource:
            return None
        message = service_pb2.MonitoringServiceTelemetry()
        # Only set the field when it converts to a non-empty value.
        name_value = Primitive.to_proto(resource.resource_name)
        if name_value:
            message.resource_name = name_value
        return message

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return None
        return ServiceTelemetry(
            resource_name=Primitive.from_proto(resource.resource_name),
        )
class ServiceTelemetryArray(object):
    """Element-wise proto conversion helpers for lists of ServiceTelemetry."""

    @classmethod
    def to_proto(self, resources):
        # Falsy input (None or empty list) is passed through unchanged.
        if not resources:
            return resources
        return [ServiceTelemetry.to_proto(item) for item in resources]

    @classmethod
    def from_proto(self, resources):
        return [ServiceTelemetry.from_proto(item) for item in resources]
class Primitive(object):
    """Scalar field converters; proto scalars cannot be None, so falsy
    values map to the empty string on the way in."""

    @classmethod
    def to_proto(self, s):
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        return s
|
renner/spacewalk
|
backend/satellite_tools/reposync.py
|
Python
|
gpl-2.0
| 62,750
| 0.002343
|
#
# Copyright (c) 2008--2018 Red Hat, Inc.
# Copyright (c) 2010--2011 SUSE Linux Products GmbH
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import re
import shutil
import sys
from datetime import datetime
from xml.dom import minidom
import gzip
import ConfigParser
import gettext
import errno
from rhn.connections import idn_puny_to_unicode
from spacewalk.server import rhnPackage, rhnSQL, rhnChannel
from spacewalk.common.usix import raise_with_tb
from spacewalk.common import fileutils, rhnLog, rhnCache, rhnMail
from spacewalk.common.rhnLib import isSUSE
from spacewalk.common.checksum import getFileChecksum
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.server.importlib import importLib, mpmSource, packageImport, errataCache
from spacewalk.server.importlib.packageImport import ChannelPackageSubscription
from spacewalk.server.importlib.backendOracle import SQLBackend
from spacewalk.server.importlib.errataImport import ErrataImport
from spacewalk.satellite_tools.download import ThreadedDownloader, ProgressBarLogger, TextLogger
from spacewalk.satellite_tools.repo_plugins import CACHE_DIR
from spacewalk.server import taskomatic, rhnPackageUpload
from spacewalk.satellite_tools.satCerts import verify_certificate_dates
from syncLib import log, log2, log2disk, dumpEMAIL_LOG, log2background
translation = gettext.translation('spacewalk-backend-server', fallback=True)
_ = translation.ugettext
default_log_location = '/var/log/rhn/'
relative_comps_dir = 'rhn/comps'
relative_modules_dir = 'rhn/modules'
checksum_cache_filename = 'reposync/checksum_cache'
default_import_batch_size = 10
errata_typemap = {
'security': 'Security Advisory',
'recommended': 'Bug Fix Advisory',
'bugfix': 'Bug Fix Advisory',
'optional': 'Product Enhancement Advisory',
'feature': 'Product Enhancement Advisory',
'enhancement': 'Product Enhancement Advisory'
}
def send_mail(sync_type="Repo"):
    """ Send email summary """
    # body is the accumulated e-mail log for this run (empty => nothing to send).
    body = dumpEMAIL_LOG()
    if body:
        print(_("+++ sending log as an email +++"))
        host_label = idn_puny_to_unicode(os.uname()[1])
        headers = {
            'Subject': _("%s sync. report from %s") % (sync_type, host_label),
        }
        # A configured sender address overrides the root@<host> default.
        sndr = "root@%s" % host_label
        if CFG.default_mail_from:
            sndr = CFG.default_mail_from
        rhnMail.send(headers, body, sender=sndr)
    else:
        print(_("+++ email requested, but there is nothing to send +++"))
class KSDirParser:
    """Extracts file/directory entries from an HTML directory listing.

    Keeps only plain relative links: absolute paths, query strings, parent
    references, scheme-prefixed URLs, RPMs, and blacklisted names are dropped.
    """
    file_blacklist = ["release-notes/"]

    def __init__(self, dir_html, additional_blacklist=None):
        self.dir_content = []
        # Normalise the extra blacklist to a list (None, scalar, or list).
        if additional_blacklist is None:
            additional_blacklist = []
        elif not isinstance(additional_blacklist, type([])):
            additional_blacklist = [additional_blacklist]
        blocked = self.file_blacklist + additional_blacklist
        for href in (m.group(1) for m in re.finditer(r'(?i)<a href="(.+?)"', dir_html)):
            unwanted = (href.startswith('/') or '?' in href or '..' in href
                        or re.match(r'[a-zA-Z]+:', href) or href.endswith('.rpm'))
            if unwanted:
                continue
            entry_type = 'DIR' if href.endswith('/') else 'FILE'
            if href not in blocked:
                self.dir_content.append({'name': href, 'type': entry_type})

    def get_content(self):
        return self.dir_content
class TreeInfoError(Exception):
    """Raised when a kickstart tree's .treeinfo file cannot be parsed."""
    pass
class TreeInfoParser(object):
    """Reads a kickstart tree's .treeinfo file (INI format) and exposes the
    image paths, distribution family/version, package dir and addon repos."""

    def __init__(self, filename):
        self.parser = ConfigParser.RawConfigParser()
        # do not lowercase
        self.parser.optionxform = str
        fp = open(filename)
        try:
            try:
                self.parser.readfp(fp)
            except ConfigParser.ParsingError:
                raise TreeInfoError("Could not parse treeinfo file!")
        finally:
            if fp is not None:
                fp.close()

    def get_images(self):
        """Return every file path from the [images-*] and [stage2] sections."""
        files = []
        for section_name in self.parser.sections():
            if section_name.startswith('images-') or section_name == 'stage2':
                for item in self.parser.items(section_name):
                    files.append(item[1])
        return files

    def get_family(self):
        """Return the 'family' value from [general], or None when absent."""
        for section_name in self.parser.sections():
            if section_name == 'general':
                for item in self.parser.items(section_name):
                    if item[0] == 'family':
                        return item[1]

    def get_major_version(self):
        """Return the major part of [general] 'version' (e.g. '7' for '7.4')."""
        for section_name in self.parser.sections():
            if section_name == 'general':
                for item in self.parser.items(section_name):
                    if item[0] == 'version':
                        return item[1].split('.')[0]

    def get_package_dir(self):
        """Return the 'packagedir' value from [general], or None when absent."""
        for section_name in self.parser.sections():
            if section_name == 'general':
                for item in self.parser.items(section_name):
                    if item[0] == 'packagedir':
                        return item[1]

    def get_addons(self):
        """Return repository paths of addon sections, matched by section name
        prefix 'addon-' or by an explicit type = addon option."""
        addons_dirs = []
        for section_name in self.parser.sections():
            # check by name
            if section_name.startswith('addon-'):
                for item in self.parser.items(section_name):
                    if item[0] == 'repository':
                        addons_dirs.append(item[1])
            # check by type
            else:
                repository = None
                repo_type = None
                for item in self.parser.items(section_name):
                    if item[0] == 'repository':
                        repository = item[1]
                    elif item[0] == 'type':
                        repo_type = item[1]
                if repo_type == 'addon' and repository is not None:
                    addons_dirs.append(repository)
        return addons_dirs
def set_filter_opt(option, opt_str, value, parser):
    """optparse callback: append a ('+'|'-', patterns) filter tuple,
    '+' for --include/-i options and '-' for exclude options."""
    # pylint: disable=W0613
    f_type = '+' if opt_str in ('--include', '-i') else '-'
    patterns = [v.strip() for v in value.split(',') if v.strip()]
    parser.values.filters.append((f_type, patterns))
def getChannelRepo():
    """Return {channel_label: [source_url, ...]} for all channel content sources."""
    initCFG('server.satellite')
    rhnSQL.initDB()
    sql = """
       select s.source_url, c.label
                       from rhnContentSource s,
                            rhnChannelContentSource cs,
                            rhnChannel c
                       where s.id = cs.source_id and cs.channel_id=c.id
       """
    query = rhnSQL.prepare(sql)
    query.execute()
    items = {}
    while True:
        row = query.fetchone_dict()
        if not row:
            break
        # Group every source URL under its channel label.
        items.setdefault(row['label'], []).append(row['source_url'])
    return items
def getParentsChilds(b_only_custom=False):
initCFG('server.satellite')
rhnSQL.initDB()
sql = """
select c1.label, c2.label parent_channel, c1.id
|
from rhnChannel c1 left outer join rhnChannel c2 on c1.parent_channel = c2.id
order by c2.label desc, c1.label asc
"""
h = rhnSQL.prepare(sql)
h.execute()
d_parents = {}
while 1:
row = h.fetchone_dict()
if not row:
break
if not b_only_custom or rhnChannel.isCustomChan
|
nel(row['id']):
parent_channel = row['parent_channel']
if not parent_channel:
d_parents[row['label']] = []
else:
# If the parent is not a custom channel treat the child like
# it's a pare
|
MrWhoami/WhoamiBangumi
|
Youku.py
|
Python
|
mit
| 1,140
| 0.00088
|
# -*- coding:utf_8 -*-
import urllib2
from bs4 import BeautifulSoup
from Bangumi import Bangumi
class Youku(Bangumi):
    """Scraper for Youku's weekly bangumi (anime) schedule page."""

    link = "http://comic.youku.com"
    name = u'优酷'

    def getBangumi(self):
        """Youku processing function"""
        # Fetch the schedule page HTML.
        response = urllib2.urlopen(urllib2.Request(self.link))
        # TODO: Change the parser to lxml for better performance
        soup = BeautifulSoup(response.read(), "html.parser")
        # One tab per weekday; day 0 uses the special id suffix 7.
        for weekday in range(7):
            suffix = 7 if weekday == 0 else weekday
            tab = soup.find(id="tab_100895_{}".format(suffix))
            for info in tab.find_all("div", class_="v-meta va"):
                update = info.find("span", class_="v-status").string
                title = info.find(class_="v-meta-title")
                show_name = title.find("a").string
                show_link = title.find('a')['href']
                self.add(weekday, show_name, update, show_link)
|
philpot/tocayo
|
tocayoproj/tocayoapp/migrations/0008_auto_20151212_1607.py
|
Python
|
apache-2.0
| 455
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-12
|
16:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alters Gender.description to a max_length=15 CharField."""

    dependencies = [
        ('tocayoapp', '0007_gender_description'),
    ]

    operations = [
        migrations.AlterField(
            model_name='gender',
            name='description',
            field=models.CharField(max_length=15),
        ),
    ]
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/ARB/texture_swizzle.py
|
Python
|
lgpl-3.0
| 781
| 0.025608
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL
|
._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_texture_swizzle'
def _f( function ):
    # Wrap *function* as a GL entry point for this extension, with the
    # standard per-call error checker attached.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_texture_swizzle',error_checker=_errors._error_checker)
GL_TEXTURE_SWIZZLE_A=_C('GL_TEXTURE_SWIZZLE_A',0x8E45)
GL_TEXTURE_SWIZZLE_B=_C('GL_TEXTURE_SWIZZLE_B',0x8E44)
GL_TEXTURE_SWIZZLE_G=_C('GL_TEXTURE_SWIZZLE_G',0x8E43)
GL_TEXTUR
|
E_SWIZZLE_R=_C('GL_TEXTURE_SWIZZLE_R',0x8E42)
GL_TEXTURE_SWIZZLE_RGBA=_C('GL_TEXTURE_SWIZZLE_RGBA',0x8E46)
|
lifemapper/core
|
LmWebServer/flask_app/scenario.py
|
Python
|
gpl-3.0
| 3,342
| 0.00389
|
"""This module provides REST services for Scenario"""
import werkzeug.exceptions as WEXC
from LmCommon.common.lmconstants import HTTPStatus
from LmWebServer.common.lmconstants import HTTPMethod
from LmWebServer.services.api.v2.base import LmService
from LmWebServer.services.common.access_control import check_user_permission
from LmWebServer.services.cp_tools.lm_format import lm_formatter
# .............................................................................
class ScenarioService(LmService):
    """REST service exposing Lifemapper Scenario objects.

    Thin wrappers around ``self.scribe`` queries; responses are shaped by
    the ``lm_formatter`` decorator.  (A commented-out CherryPy-era GET
    dispatcher was removed; count/get/list are separate entry points.)
    """

    # ................................
    @lm_formatter
    def count_scenarios(
            self, user_id, after_time=None, before_time=None, alt_pred_code=None, date_code=None,
            gcm_code=None, epsg_code=None):
        """Count scenarios matching the given filter criteria.

        Returns:
            dict: ``{'count': <number of matching scenarios>}``
        """
        # NOTE(review): docstring previously claimed this returned a list.
        scen_count = self.scribe.count_scenarios(
            user_id=user_id, before_time=before_time, after_time=after_time,
            epsg=epsg_code, gcm_code=gcm_code, alt_pred_code=alt_pred_code,
            date_code=date_code)
        return {'count': scen_count}

    # ................................
    @lm_formatter
    def get_scenario(self, user_id, scenario_id):
        """Return one scenario with its layers filled in.

        Raises:
            werkzeug NotFound: no scenario with ``scenario_id``.
            werkzeug Forbidden: ``user_id`` lacks GET permission.
        """
        scn = self.scribe.get_scenario(int(scenario_id), fill_layers=True)
        if scn is None:
            raise WEXC.NotFound('Scenario {} not found'.format(scenario_id))
        if check_user_permission(user_id, scn, HTTPMethod.GET):
            return scn
        raise WEXC.Forbidden('User {} does not have permission to get scenario {}'.format(
            user_id, scenario_id))

    # ................................
    @lm_formatter
    def list_scenarios(
            self, user_id, after_time=None, before_time=None, alt_pred_code=None, date_code=None,
            gcm_code=None, epsg_code=None, limit=100, offset=0):
        """Return a list of scenario atoms matching the specified criteria"""
        scn_atoms = self.scribe.list_scenarios(
            offset, limit, user_id=user_id, before_time=before_time, after_time=after_time,
            epsg=epsg_code, gcm_code=gcm_code, alt_pred_code=alt_pred_code, date_code=date_code)
        return scn_atoms
|
dadosgovbr/ckanext-dadosabertos
|
ckanext/dadosgovbr/controllers/scheming.py
|
Python
|
agpl-3.0
| 12,430
| 0.001853
|
# coding=utf-8
import logging
from urllib import urlencode
import datetime
import mimetypes
import cgi
from ckan.common import config
from paste.deploy.converters import asbool
import paste.fileapp
import ckan.logic as logic
import ckan.lib.base as base
import ckan.lib.i18n as i18n
import ckan.lib.maintain as maintain
import ckan.lib.navl.dictization_functions as dict_fns
import ckan.lib.helpers as h
import ckan.model as model
import ckan.lib.datapreview as datapreview
import ckan.lib.plugins
import ckan.lib.uploader as uploader
import ckan.plugins as p
import ckan.lib.render
from ckan.common import OrderedDict, _, json, request, c, response
#from home import CACHE_PARAMETERS
from ckan.controllers.package import PackageController
log = logging.getLogger(__name__)

# Convenience aliases mirroring ckan.controllers.package's module-level names.
# NOTE(review): ValidationError and flatten_to_string_key were split
# mid-token by extraction garbling and have been rejoined here.
render = base.render
abort = base.abort

NotFound = logic.NotFound
NotAuthorized = logic.NotAuthorized
ValidationError = logic.ValidationError
check_access = logic.check_access
get_action = logic.get_action
tuplize_dict = logic.tuplize_dict
clean_dict = logic.clean_dict
parse_params = logic.parse_params
flatten_to_string_key = logic.flatten_to_string_key

lookup_package_plugin = ckan.lib.plugins.lookup_package_plugin
def _encode_params(params):
    """UTF-8 encode string values of (key, value) pairs; stringify the rest."""
    encoded = []
    for key, value in params:
        if isinstance(value, basestring):
            encoded.append((key, value.encode('utf-8')))
        else:
            encoded.append((key, str(value)))
    return encoded
def url_with_params(url, params):
    """Append *params* (encoded as a query string) to *url*."""
    query = urlencode(_encode_params(params))
    return url + u'?' + query
def search_url(params, package_type=None):
    """Build the search URL for *package_type* with *params* as its query."""
    # Custom dataset types have their own '<type>_search' route; the default
    # (or explicit 'dataset') goes through the core package controller.
    if package_type and package_type != 'dataset':
        url = h.url_for('{0}_search'.format(package_type))
    else:
        url = h.url_for(controller='package', action='search')
    return url_with_params(url, params)
class SchemingPagesController(PackageController):
def search(self):
from ckan.lib.search import SearchError, SearchQueryError
# Get package type name
package_type = self._guess_package_type()[:-1]
c.package_type = package_type
# Get page content from Wordpress
# =========================================
import ckanext.dadosgovbr.helpers.wordpress as wp
wp_page_slug = 'scheming_'+package_type+'s'
c.wp_page = type('Nothing', (object,), {})
c.wp_page.content = type('Nothing', (object,), {})
c.wp_page.content.rendered = "Conteudo da pagina nao encontrado..."
try:
c.wp_page = wp.page(wp_page_slug)
except:
pass
# DEBUG
# from pprint import pprint
# pprint(c.concursos)
# Package type facets (filters)
# =========================================
package_type_facets = u'organization groups tags res_format license_id'
if(package_type == 'inventario'):
package_type_facets = u'organization situacao_base informacoes_sigilosas_base informacoes_publicas_base atualizacoes_base dados_abertos_base'
if(package_type == 'concurso'):
package_type_facets = u'organization datasets_used'
if(package_type == 'aplicativo'):
package_type_facets = u'organization groups tags res_format license_id'
try:
context = {'model': model, 'user': c.user,
'auth_user_obj': c.userobj}
check_access('site_read', context)
except NotAuthorized:
abort(403, _('Not authorized to see this page'))
# unicode format (decoded from utf8)
q = c.q = request.params.get('q', u'')
c.query_error = False
page = h.get_page_number(request.params)
limit = int(config.get('ckan.datasets_per_page', 20))
# most search operations should reset the page counter:
params_nopage = [(k, v) for k, v in request.params.items()
if k != 'page']
def drill_down_url(alternative_url=None, **by):
return h.add_url_param(alternative_url=alternative_url,
controller='package', action='search',
new_params=by)
c.drill_down_url = drill_down_url
def remove_field(key, value=None, replace=None):
return h.remove_url_param(key, value=value, replace=replace,
controller='package', action='search')
c.remove_field = remove_field
sort_by = request.params.get('sort', None)
params_nosort = [(k, v) for k, v in params_nopage if k != 'sort']
def _sort_by(fields):
"""
Sort by the given list of fields.
Each entry in the list is a 2-tuple: (fieldname, sort_order)
eg - [('metadata_modified', 'desc'), ('name', 'asc')]
If fields is empty, then the default ordering is used.
"""
params = params_nosort[:]
if fields:
sort_string = ', '.join('%s %s' % f for f in fields)
params.append(('sort', sort_string))
return search_url(params, package_type)
c.sort_by = _sort_by
if not sort_by:
c.sort_by_fields = []
else:
c.sort_by_fields = [field.split()[0]
for field in sort_by.split(',')]
def pager_url(q=None, page=None):
params = list(params_nopage)
params.append(('page', page))
return search_url(params, package_type)
c.search_url_params = urlencode(_encode_params(params_nopage))
try:
c.fields = []
# c.fields_grouped will contain a dict of params containing
# a list of values eg {'tags':['tag1', 'tag2']}
c.fields_grouped = {}
search_extras = {}
fq = ''
for (param, value) in request.params.items():
if param not in ['q', 'page', 'sort'] \
and len(value) and not param.startswith('_'):
if not param.startswith('ext_'):
c.fields.append((param, value))
fq += ' %s:"%s"' % (param, value)
if param not in c.fields_grouped:
c.fields_grouped[param] = [value]
else:
c.fields_grouped[param].append(value)
else:
search_extras[param] = value
context = {'model': model, 'session': model.Session,
'user': c.user, 'for_view': True,
'auth_user_obj': c.userobj}
if package_type and package_type != 'dataset':
# Only show datasets of this particular type
fq += ' +dataset_type:{type}'.format(type=package_type)
else:
# Unless changed via config options, don't show non standard
# dataset types on the default search page
if not asbool(
config.get('ckan.search.show_all_types', 'False')):
fq += ' +dataset_type:dataset'
facets = OrderedDict()
default_facet_titles = {
# Default package
'organization': _('Organizations'),
'groups': _('Groups'),
'tags': _('Tags'),
'res_format': _('Formats'),
'license_id': _('Licenses'),
# Inventário package
'situacao_base': _(u'Situação da base'),
'informacoes_sigilosas_base': _(u'Base possui informações sigilosas?'),
'vocab_sim': _(u'Sim'),
'vocab_nao': _(u'Não'),
'informacoes_publicas_base': _(u'Base possui informações públicas?'),
'informacoes_publicas_base_publico': _(u'Público'),
'atualizacoes_base': _(u'Período de atualização dos dados'),
'dados_abertos_base': _(u'Exporta para dados abertos?'),
# Concurso package
'datasets_used': _(u'Dados utilizados'),
'tags': _(u'Tags'),
'date': _(u'Data de iní
|
deadRaccoons/TestAirlines
|
tabo/cherrypy/cherrypy/lib/static.py
|
Python
|
gpl-2.0
| 14,778
| 0
|
import os
import re
import stat
import mimetypes
try:
from io import UnsupportedOperation
except ImportError:
UnsupportedOperation = object()
import cherrypy
from cherrypy._cpcompat import ntob, unquote
from cherrypy.lib import cptools, httputil, file_generator_limited
mimetypes.init()
mimetypes.types_map['.dwg'] = 'image/x-dwg'
mimetypes.types_map['.ico'] = 'image/x-icon'
mimetypes.types_map['.bz2'] = 'application/x-bzip2'
mimetypes.types_map['.gz'] = 'application/x-gzip'
def serve_file(path, content_type=None, disposition=None, name=None,
               debug=False):
    """Set status, headers, and body in order to serve the given path.
    The Content-Type header will be set to the content_type arg, if provided.
    If not provided, the Content-Type will be guessed by the file extension
    of the 'path' argument.
    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, it will be set
    to the basename of path. If disposition is None, no Content-Disposition
    header will be written.

    Raises ValueError for a relative path and cherrypy.NotFound for a
    missing path or a directory.  Returns the ranged/streamed body via
    _serve_fileobj.
    """
    response = cherrypy.serving.response
    # If path is relative, users should fix it by making path absolute.
    # That is, CherryPy should not guess where the application root is.
    # It certainly should *not* use cwd (since CP may be invoked from a
    # variety of paths). If using tools.staticdir, you can make your relative
    # paths become absolute by supplying a value for "tools.staticdir.root".
    if not os.path.isabs(path):
        msg = "'%s' is not an absolute path." % path
        if debug:
            cherrypy.log(msg, 'TOOLS.STATICFILE')
        raise ValueError(msg)
    try:
        st = os.stat(path)
    except OSError:
        # Missing file maps to a 404, not a 500.
        if debug:
            cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()
    # Check if path is a directory.
    if stat.S_ISDIR(st.st_mode):
        # Let the caller deal with it as they like.
        if debug:
            cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()
    # Set the Last-Modified response header, so that
    # modified-since validation code can work.
    response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
    # May short-circuit the request with a 304 Not Modified.
    cptools.validate_since()
    if content_type is None:
        # Set content-type based on filename extension
        ext = ""
        i = path.rfind('.')
        if i != -1:
            ext = path[i:].lower()
        content_type = mimetypes.types_map.get(ext, None)
    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
    cd = None
    if disposition is not None:
        if name is None:
            name = os.path.basename(path)
        cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    content_length = st.st_size
    fileobj = open(path, 'rb')
    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
                  debug=False):
    """Set status, headers, and body in order to serve the given file object.
    The Content-Type header will be set to the content_type arg, if provided.
    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, 'filename' will
    not be set. If disposition is None, no Content-Disposition header will
    be written.
    CAUTION: If the request contains a 'Range' header, one or more seek()s will
    be performed on the file object. This may cause undesired behavior if
    the file object is not seekable. It could also produce undesired results
    if the caller set the read position of the file object prior to calling
    serve_fileobj(), expecting that the data would be served starting from that
    position.
    """
    response = cherrypy.serving.response
    try:
        st = os.fstat(fileobj.fileno())
    except AttributeError:
        # Not a real OS-level file (no fileno): length unknown, stream to EOF.
        if debug:
            cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
        content_length = None
    except UnsupportedOperation:
        # File-like object whose fileno() is unsupported (e.g. io.BytesIO).
        content_length = None
    else:
        # Set the Last-Modified response header, so that
        # modified-since validation code can work.
        response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
        cptools.validate_since()
        content_length = st.st_size
    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
    cd = None
    if disposition is not None:
        if name is None:
            # Unlike serve_file(), there is no basename fallback here:
            # the filename parameter is simply omitted.
            cd = disposition
        else:
            cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, con
|
tent_type, content_length, debug=False):
"""Internal. Set response.body to the given file object, perhaps ranged."""
response = cherrypy.serving.response
# HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
request = cherrypy.serving.request
if request.protocol >= (1, 1):
response.headers["Accept-Ranges"] = "bytes"
r = httputil.get_ranges(request.headers.get('Range'), content_length)
if r == []:
respo
|
nse.headers['Content-Range'] = "bytes */%s" % content_length
message = ("Invalid Range (first-byte-pos greater than "
"Content-Length)")
if debug:
cherrypy.log(message, 'TOOLS.STATIC')
raise cherrypy.HTTPError(416, message)
if r:
if len(r) == 1:
# Return a single-part response.
start, stop = r[0]
if stop > content_length:
stop = content_length
r_len = stop - start
if debug:
cherrypy.log(
'Single part; start: %r, stop: %r' % (start, stop),
'TOOLS.STATIC')
response.status = "206 Partial Content"
response.headers['Content-Range'] = (
"bytes %s-%s/%s" % (start, stop - 1, content_length))
response.headers['Content-Length'] = r_len
fileobj.seek(start)
response.body = file_generator_limited(fileobj, r_len)
else:
# Return a multipart/byteranges response.
response.status = "206 Partial Content"
try:
# Python 3
from email.generator import _make_boundary as make_boundary
except ImportError:
# Python 2
from mimetools import choose_boundary as make_boundary
boundary = make_boundary()
ct = "multipart/byteranges; boundary=%s" % boundary
response.headers['Content-Type'] = ct
if "Content-Length" in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers["Content-Length"]
def file_ranges():
# Apache compatibility:
yield ntob("\r\n")
for start, stop in r:
if debug:
cherrypy.log(
'Multipart; start: %r, stop: %r' % (
start, stop),
'TOOLS.STATIC')
yield ntob("--" + boundary, 'ascii')
|
uclouvain/OSIS-Louvain
|
program_management/tests/ddd/service/read/test_get_program_tree_version_from_node_service.py
|
Python
|
agpl-3.0
| 2,219
| 0.003607
|
# ############################################################################
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
# #########################################################################
|
###
from unittest import mock
from django.test import SimpleTestCase
from program_management.ddd import command
from program_management.ddd.domain.service.identity_search import ProgramTreeVersionIdentitySearch
from program_management.ddd.repositories.program_tree_version import ProgramTreeVersionRepository
from program_management.ddd.service.read import get_program_tree_version_from_node_service
class TestGetProgramTreeVersionFromNodeService(SimpleTestCase):
    """Check that the read service delegates to the identity search and the repository."""

    @mock.patch.object(ProgramTreeVersionIdentitySearch, 'get_from_node_identity')
    @mock.patch.object(ProgramTreeVersionRepository, 'get')
    def test_domain_service_is_called(self, mock_repository_get, mock_identity_search):
        # mock.patch.object injects mocks bottom-up: the innermost decorator
        # (repository 'get') arrives first.  The original parameter names were
        # swapped relative to this order, which made failures misleading.
        cmd = command.GetProgramTreeVersionFromNodeCommand(code="LDROI1200", year=2018)
        get_program_tree_version_from_node_service.get_program_tree_version_from_node(cmd)
        self.assertTrue(mock_identity_search.called)
        self.assertTrue(mock_repository_get.called)
|
Donkyhotay/MoonPy
|
twisted/protocols/sip.py
|
Python
|
gpl-3.0
| 41,973
| 0.003502
|
# -*- test-case-name: twisted.test.test_sip -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Session Initialization Protocol.
Documented in RFC 2543.
[Superceded by 3261]
This module contains a deprecated implementation of HTTP Digest authentication.
See L{twisted.cred.credentials} and L{twisted.cred._digest} for its new home.
"""
# system imports
import socket, time, sys, random, warnings
from zope.interface import implements, Interface
# twisted imports
from twisted.python import log, util
from twisted.python.deprecate import deprecated
from twisted.python.versions import Version
from twisted.python.hashlib import md5
from twisted.internet import protocol, defer, reactor
from twisted import cred
import twisted.cred.error
from twisted.cred.credentials import UsernameHashedPassword, UsernamePassword
# sibling imports
from twisted.protocols import basic
PORT = 5060
# SIP headers have short forms
shortHeaders = {"call-id": "i",
"contact": "m",
"content-encoding": "e",
"content-length": "l",
"content-type": "c",
"from": "f",
"subject": "s",
"to": "t",
"via": "v",
}
longHeaders = {}
for k, v in shortHeaders.items():
longHeaders[v] = k
del k, v
statusCodes = {
100: "Trying",
180: "Ringing",
181: "Call Is Being Forwarded",
182: "Queued",
183: "Session Progress",
200: "OK",
300: "Multiple Choices",
301: "Moved Permanently",
302: "Moved Temporarily",
303: "See Other",
305: "Use Proxy",
380: "Alternative Service",
400: "Bad Request",
401: "Unauthorized",
402: "Payment Required",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
406: "Not Acceptable",
407: "Proxy Authentication Required",
408: "Request Timeout",
409: "Conflict", # Not in RFC3261
410: "Gone",
411: "Length Required", # Not in RFC3261
413: "Request Entity Too Large",
414: "Request-URI Too Large",
415: "Unsupported Media Type",
416: "Unsupported URI Scheme",
420: "Bad Extension",
421: "Extension Required",
423: "Interval Too Brief",
480: "Temporarily Unavailable",
481: "Call/Transaction Does Not Exist",
482: "Loop Detected",
483: "Too Many Hops",
484: "Address Incomplete",
485: "Ambiguous",
486: "Busy Here",
487: "Request Terminated",
488: "Not Acceptable Here",
491: "Request Pending",
493: "Undecipherable",
500: "Internal Server Error",
501: "Not Implemented",
502: "Bad Gateway", # no donut
503: "Service Unavailable",
504: "Server Time-out",
505: "SIP Version not supported",
513: "Message Too Large",
600: "Busy Everywhere",
603: "Decline",
604: "Does not exist anywhere",
606: "Not Acceptable",
}
specialCases = {
'cseq': 'CSeq',
'call-id': 'Call-ID',
'www-authenticate': 'WWW-Authenticate',
}
def dashCapitalize(s):
    """Capitalize each '-'-separated word of *s*, keeping the dashes."""
    capitalized_words = [word.capitalize() for word in s.split('-')]
    return '-'.join(capitalized_words)
def unq(s):
    """Strip one level of surrounding double quotes from *s*, if present.

    Unquoted strings are returned unchanged.  The empty string previously
    raised IndexError (``s[0]`` on ``""``); it is now returned as-is.
    """
    if s and s[0] == s[-1] == '"':
        return s[1:-1]
    return s
def DigestCalcHA1(
    pszAlg,
    pszUserName,
    pszRealm,
    pszPassword,
    pszNonce,
    pszCNonce,
    ):
    """Compute the HA1 value of HTTP Digest auth (RFC 2617) as a hex string.

    HA1 = MD5(user ":" realm ":" password); for algorithm "md5-sess" the
    digest is re-mixed with the nonce and cnonce.  Python 2 only: relies on
    byte strings and ``str.encode('hex')``.  Deprecated since Twisted 9.0 in
    favour of L{twisted.cred._digest}.
    """
    m = md5()
    m.update(pszUserName)
    m.update(":")
    m.update(pszRealm)
    m.update(":")
    m.update(pszPassword)
    HA1 = m.digest()
    if pszAlg == "md5-sess":
        # Session variant: HA1 = MD5(MD5(user:realm:pass) ":" nonce ":" cnonce)
        m = md5()
        m.update(HA1)
        m.update(":")
        m.update(pszNonce)
        m.update(":")
        m.update(pszCNonce)
        HA1 = m.digest()
    return HA1.encode('hex')
# Rebind through the deprecation decorator so callers get a DeprecationWarning.
DigestCalcHA1 = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcHA1)
def DigestCalcResponse(
    HA1,
    pszNonce,
    pszNonceCount,
    pszCNonce,
    pszQop,
    pszMethod,
    pszDigestUri,
    pszHEntity,
    ):
    """Compute the Digest 'response' value (RFC 2617) as a hex string.

    HA2 = MD5(method ":" uri [":" H(entity) when qop == "auth-int"]); the
    response then mixes HA1, the nonce and (when nc/cnonce are supplied)
    the qop fields.  Python 2 only (``.encode('hex')``).  Deprecated since
    Twisted 9.0 in favour of L{twisted.cred._digest}.
    """
    m = md5()
    m.update(pszMethod)
    m.update(":")
    m.update(pszDigestUri)
    if pszQop == "auth-int":
        m.update(":")
        m.update(pszHEntity)
    HA2 = m.digest().encode('hex')
    m = md5()
    m.update(HA1)
    m.update(":")
    m.update(pszNonce)
    m.update(":")
    # RFC 2617 keys this branch on qop being present; the code uses the
    # presence of nc/cnonce as a proxy (the original "# pszQop:" note).
    if pszNonceCount and pszCNonce: # pszQop:
        m.update(pszNonceCount)
        m.update(":")
        m.update(pszCNonce)
        m.update(":")
        m.update(pszQop)
        m.update(":")
    m.update(HA2)
    hash = m.digest().encode('hex')
    return hash
# Rebind through the deprecation decorator so callers get a DeprecationWarning.
DigestCalcResponse = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcResponse)
_absent = object()
class Via(object):
"""
A L{Via} is a SIP Via header, representing a segment of the path taken by
the request.
See RFC 3261, sections 8.1.1.7, 18.2.2, and 20.42.
@ivar transport: Network protocol used for this leg. (Probably either "TCP"
or "UDP".)
@type transport: C{str}
@ivar branch: Unique identifier for this request.
@type branch: C{str}
@ivar host: Hostname or IP for this leg.
@type host: C{str}
@ivar port: Port used for this leg.
@type port C{int}, or None.
@ivar rportRequested: Whether to request RFC 3581 client processing or not.
@type rportRequested: C{bool}
@ivar rportValue: Servers wishing to honor requests for RFC 3581 processing
should set this parameter to the source port the request was received
from.
@type rportValue: C{int}, or None.
@ivar ttl: Time-to-live for requests on multicast paths.
@type ttl: C{int}, or None.
@ivar maddr: The destination multicast address, if any.
@type maddr: C{str}, or None.
@ivar hidden: Obsolete in SIP 2.0.
@type h
|
idden: C{bool}
@ivar otherParams: Any other parameters in the header.
@type otherParams: C{dict}
"""
de
|
f __init__(self, host, port=PORT, transport="UDP", ttl=None,
hidden=False, received=None, rport=_absent, branch=None,
maddr=None, **kw):
"""
Set parameters of this Via header. All arguments correspond to
attributes of the same name.
To maintain compatibility with old SIP
code, the 'rport' argument is used to determine the values of
C{rportRequested} and C{rportValue}. If None, C{rportRequested} is set
to True. (The deprecated method for doing this is to pass True.) If an
integer, C{rportValue} is set to the given value.
Any arguments not explicitly named here are collected into the
C{otherParams} dict.
"""
self.transport = transport
self.host = host
self.port = port
self.ttl = ttl
self.hidden = hidden
self.received = received
if rport is True:
warnings.warn(
"rport=True is deprecated since Twisted 9.0.",
DeprecationWarning,
stacklevel=2)
self.rportValue = None
self.rportRequested = True
elif rport is None:
self.rportValue = None
self.rportRequested = True
elif rport is _absent:
self.rportValue = None
self.rportRequested = False
else:
self.rportValue = rport
self.rportRequested = False
self.branch = branch
self.maddr = maddr
self.otherParams = kw
def _getrport(self):
"""
Returns the rport value expected by the old SIP code.
"""
if self.rportRequested == True:
return True
elif self.rportValue is not None:
return self.rportValue
else:
return None
def _setrport(self, newRPort):
"""
L{Base._fixupNAT} sets C{rport} directly, so this method sets
C{rportValue} based on that.
@param newRPort: The new rport value.
@type newRPort: C{int}
"""
self.rportValue = newRPort
self.rportRequested = False
rport = property(_getrport, _setrport)
def toString(self):
"""
Serialize this header for use in a request or response.
"""
s = "SIP/2.
|
crateio/crate.web
|
crate/web/history/models.py
|
Python
|
bsd-2-clause
| 2,810
| 0
|
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from model_utils import Choices
from model_utils.models import TimeStampedModel
from crate.web.packages.models import Package, Release, ReleaseFile
class Event(TimeStampedModel):
    """History entry: one row per package/release/file lifecycle action."""
    ACTIONS = Choices(
        ("package_create", _("Package Created")),
        ("package_delete", _("Package Deleted")),
        ("release_create", _("Release Created")),
        ("release_delete", _("Release Deleted")),
        ("file_add", _("File Added")),
        ("file_remove", _("File Removed")),
    )

    # Denormalized identifiers (not foreign keys), so history survives
    # deletion of the package/release rows themselves.
    package = models.SlugField(max_length=150)
    version = models.CharField(max_length=512, blank=True)
    action = models.CharField(max_length=25, choices=ACTIONS)
    # Optional action-specific payload (e.g. file metadata for file events).
    data = JSONField(null=True, blank=True)
@receiver(post_save, sender=Package)
def history_package_create(instance, created, **kwargs):
    """Record a ``package_create`` event the first time a Package is saved."""
    if not created:
        return
    Event.objects.create(
        package=instance.name,
        action=Event.ACTIONS.package_create,
    )
@receiver(post_delete, sender=Package)
def history_package_delete(instance, **kwargs):
    """Record a ``package_delete`` event whenever a Package row is removed."""
    Event.objects.create(package=instance.name,
                         action=Event.ACTIONS.package_delete)
@receiver(post_save, sender=Release)
def history_release_update(instance, created, **kwargs):
    """Mirror Release lifecycle into Events.

    Creation logs ``release_create``; toggling ``hidden`` logs
    ``release_delete`` (hidden) or ``release_create`` (unhidden again).
    """
    pkg_name = instance.package.name
    if created:
        Event.objects.create(package=pkg_name,
                             version=instance.version,
                             action=Event.ACTIONS.release_create)
    if instance.has_changed("hidden"):
        if instance.hidden:
            flip_action = Event.ACTIONS.release_delete
        else:
            flip_action = Event.ACTIONS.release_create
        Event.objects.create(package=pkg_name,
                             version=instance.version,
                             action=flip_action)
@receiver(post_save, sender=ReleaseFile)
def history_releasefile_update(instance, created, **kwargs):
    """Record a ``file_remove`` event when a ReleaseFile becomes hidden.

    File metadata is attached best-effort: if reading it raises ValueError
    the bare event row (already created) is kept without ``data``.
    """
    # NOTE(review): two statements in this function were split mid-token by
    # extraction garbling (has_changed / instance.filename); rejoined here.
    e = None
    if instance.has_changed("hidden"):
        if instance.hidden:
            e = Event.objects.create(
                package=instance.release.package.name,
                version=instance.release.version,
                action=Event.ACTIONS.file_remove
            )
    if e is not None:
        try:
            e.data = {
                "filename": instance.filename,
                "digest": instance.digest,
                "uri": instance.get_absolute_url(),
            }
        except ValueError:
            # Best-effort metadata only; keep the event without data.
            pass
        else:
            e.save()
|
Bobspadger/python-amazon-mws
|
tests/test_param_methods.py
|
Python
|
unlicense
| 4,345
| 0.000921
|
"""
Testing for enumerate_param, enumerate_params, and enumerate_keyed_param
"""
import unittest
import mws
# pylint: disable=invalid-name
class TestParamsRaiseExceptions(unittest.TestCase):
    """
    Simple test that asserts a ValueError is raised by an improper entry to
    `utils.enumerate_keyed_param`.
    """
    def test_keyed_param_fails_without_dict(self):
        """
        Should raise ValueError for values not being a dict.
        """
        bad_values = ["this is not a dict like it should be!"]
        with self.assertRaises(ValueError):
            mws.utils.enumerate_keyed_param("something", bad_values)
def test_single_param_default():
    """
    Test each method type for their default empty dicts.
    """
    empty_results = [
        mws.utils.enumerate_param("something", []),    # single
        mws.utils.enumerate_params(),                  # multi, no args
        mws.utils.enumerate_params("antler"),          # multi, non-dict arg
        mws.utils.enumerate_keyed_param("acorn", []),  # keyed
    ]
    for result in empty_results:
        assert result == {}
def test_single_param_not_dotted_list_values():
    """
    A param string with no dot at the end and a list of ints.
    List should be ingested in order.
    """
    values = (123, 765, 3512, 756437, 3125)
    result = mws.utils.enumerate_param("SomethingOrOther", values)
    # Keys are 1-based and follow the input order.
    expected = {}
    for index, value in enumerate(values, start=1):
        expected["SomethingOrOther.%d" % index] = value
    assert result == expected
def test_single_param_dotted_single_value():
    """
    A param string with a dot at the end and a single string value.
    Values that are not list, tuple, or set should coerce to a list and
    provide a single output.
    """
    result = mws.utils.enumerate_param("FooBar.", "eleven")
    assert result == {"FooBar.1": "eleven"}
def test_multi_params():
    """
    A series of params sent as a list of dicts to enumerate_params.
    Each param should generate a unique set of keys and values.
    Final result should be a flat dict.
    """
    param1 = "Summat."
    values1 = ("colorful", "cheery", "turkey")
    param2 = "FooBaz.what"
    values2 = "singular"
    param3 = "hot_dog"
    values3 = ["something", "or", "other"]
    # We could test with values as a set, but we cannot be 100% of the order
    # of the output, and I don't feel it necessary to flesh this out enough
    # to account for it.
    result = mws.utils.enumerate_params({
        param1: values1,
        param2: values2,
        param3: values3,
    })
    # NOTE(review): the "Summat.1" key below was rejoined after being split
    # by extraction garbling.
    assert result == {
        "Summat.1": "colorful",
        "Summat.2": "cheery",
        "Summat.3": "turkey",
        "FooBaz.what.1": "singular",
        "hot_dog.1": "something",
        "hot_dog.2": "or",
        "hot_dog.3": "other",
    }
def test_keyed_params():
    """
    Asserting the result through enumerate_keyed_param is as expected.
    """
    # Example:
    # param = "InboundShipmentPlanRequestItems.member"
    # values = [
    #     {'SellerSKU': 'Football2415',
    #      'Quantity': 3},
    #     {'SellerSKU': 'TeeballBall3251',
    #      'Quantity': 5},
    #     ...
    # ]
    # Returns:
    # {
    #     'InboundShipmentPlanRequestItems.member.1.SellerSKU': 'Football2415',
    #     'InboundShipmentPlanRequestItems.member.1.Quantity': 3,
    #     'InboundShipmentPlanRequestItems.member.2.SellerSKU': 'TeeballBall3251',
    #     'InboundShipmentPlanRequestItems.member.2.Quantity': 5,
    #     ...
    # }
    # NOTE(review): part of the example comment above had lost its '#'
    # markers to extraction garbling (a syntax error); restored here.
    param = "AthingToKeyUp.member"
    item1 = {
        "thing": "stuff",
        "foo": "baz",
    }
    item2 = {
        "thing": 123,
        "foo": 908,
        "bar": "hello",
    }
    item3 = {
        "stuff": "foobarbazmatazz",
        "stuff2": "foobarbazmatazz5",
    }
    result = mws.utils.enumerate_keyed_param(param, [item1, item2, item3])
    assert result == {
        "AthingToKeyUp.member.1.thing": "stuff",
        "AthingToKeyUp.member.1.foo": "baz",
        "AthingToKeyUp.member.2.thing": 123,
        "AthingToKeyUp.member.2.foo": 908,
        "AthingToKeyUp.member.2.bar": "hello",
        "AthingToKeyUp.member.3.stuff": "foobarbazmatazz",
        "AthingToKeyUp.member.3.stuff2": "foobarbazmatazz5",
    }
|
fruitnuke/catan
|
tests.py
|
Python
|
gpl-3.0
| 714
| 0.001401
|
from main import Board
import collections
import unittest
class ClassicBoardTests(unittest.TestCase):
    """Sanity checks for the non-randomized (classic) Catan board layout."""

    def test_tile_iterator(self):
        # NOTE(review): two statements in this method were split mid-token
        # by extraction garbling; rejoined here, assertions unchanged.
        options = {
            'randomize_production': False,
            'randomize_ports': False}
        board = Board(options)
        # Number chits must come out in the board's canonical order.
        self.assertEqual([t.value for t in board.tiles if t.value], board._numbers)
        # Classic terrain mix: 4 forest/pasture/hills, 3 mountains/clay, 1 desert.
        hexes = collections.Counter([t.terrain for t in board.tiles])
        self.assertEqual(hexes['F'], 4)
        self.assertEqual(hexes['P'], 4)
        self.assertEqual(hexes['H'], 4)
        self.assertEqual(hexes['M'], 3)
        self.assertEqual(hexes['C'], 3)
        self.assertEqual(hexes['D'], 1)


if __name__ == '__main__':
    unittest.main()
|
RobertIan/ethoStim
|
individualtesting/trial.py
|
Python
|
mit
| 7,935
| 0.008696
|
#! /usr/python
'''
///////////////////////////////////////////////////////////
// Permission is hereby granted, free of charge,
// to any person obtaining a copy of
// this software and associated documentation files
// (the "Software"), to deal in the Software without
// restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and
// to permit persons to whom the Software is furnished
// to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
'''
__author__ = 'RobertIan'
__version__ = '0.2.5'
import argparse
import pygame
import picamera
import time
import datetime
import netifaces
import RPi.GPIO as GPIO
import os.path
import sys
import select
import os
class Trial:
def __init__(self, stim, starttime, feedornot):
## initialize display
pygame.display.init()
pygame.mouse.set_visible(False)
self.screen = pygame.display.set_mode((0,0),pygame.FULLSCREEN)
## assign stimulus
self.stimulus = stim
## timing
self.start = float(starttime)
self.tLength = 4*60 #four minute trial
self.feedDelay = 30 #thirty second delay
## GPIO setup
GPIO.setmode(GPIO.BCM)
self.feeder = 17 ##????
self.notfeeder = 5 ##????
self.feederin = 26 ##????
self.notfeederin = 25 ##????
if feedornot == 'feed':
self.feederin = self.feederin
self.feederout = self.feeder
elif feedornot == 'notfeed':
self.feederin = self.notfeederin
self.feederout = self.notfeeder
else:
## currently a print, should be changed to send a message to
#the client
print 'feeder not assigned'
self.safeQuit()
GPIO.setup(self.feederin, GPIO.IN)
GPIO.add_event_detect(self.feederin, GPIO.RISING)
GPIO.setup(self.feederout, GPIO.OUT)
GPIO.output(self.feederout, True)
def checkPiIP(self):
## query IP address from system
self.ip = netifaces.ifaddresses('eth0')[2][0]['addr']
def whatStimulus(self):
## locate stimulus in src folder
self.stim, extension = os.path.splitext(self.stimulus)
if extension == '.png' or extension == '.PNG' or extension == '.jpg' \
or extension == '.JPG':
## still image
try:
self.image = pygame.image.load('/home/pi/ethoStim/individualtesting/src/10.png')
except IOError:
## currently a print, should be changed to send a message to
#the client
print 'are you sure this file exists? check the src folder \
ony jpg/JPG, png/PNG formats'
self.safeQuit()
def cameraInit(self):
## adjust camera settings here
self.camera = picamera.PiCamera()
self.camera.resolution = (1920, 1080)
self.camera.framerate = 30
self.camera.autofocus = False
self.camera.awb_mode = 'fluorescent'
def videoFileName(self, species, tround, sl, sex, fishid, day, session,
conditionside):
## adjust video naming convention here
self.vidout = ('data/'+str(self.ip)+'/'+(str(species)+'_'+str(tround)
+'_'+str(sl)+'_'+str(sex) +'_'+str(fishid)+'_'+str(day)+'_'+
str(session)+'_' +str(self.stim)+'_'+str(conditionside)))
def startRecording(self):
self.camera.start_recording(self.vidout+ '.h264') #output video
def stopRecording(self):
self.camera.stop_recording()
def cameraQuit(self):
self.camera.close()
def safeQuit(self):
GPIO.output(self.feeder, True) #reset feeder ????
GPIO.output(self.notfeeder, True) #reset notfeeder ????
GPIO.cleanup() #reset all GPIOs
pygame.quit()
exit()
def mainLoop(self, camera):
## hang until assigned start time
while time.time()<self.start:
print time.time()-self.start
pass
## start timer
self.startT = time.time()
fed = False # feed delay control variable
## start recording
if camera == 'record':
selft.startRecording()
elif camera == 'notrecord':
pass
## display stimulus/start main loop
while ((time.time() - self.startT) < self.tLength):
pygame.display.flip()
self.screen.blit(self.image, (250,100)) # location of stimulus
## control feeder delay
try:
if (time.time() - self.startT) > self.feedDelay:
if fed:
pass
elif GPIO.event_detected(self.feederin):
time.sleep(1.0)
GPIO.output(self.feederout,True)
fed = True
else:
GPIO.output(self.feederout, False)
except KeyboardInterrupt:
self.safeQuit()
if __name__ == '__main__':
## load in command line argumenents
ap = argparse.ArgumentParser()
ap.add_argument("-f","--fish", help="ID of fish in tank")
ap.add_argument("-ts", "--trainedStim",help="numerosity stimulus the individual is being trained to, e.g. 12")
ap.add_argument("-ps", "--presentedStim", help="stimulus being presented with this raspberry pi")
ap.add_argument("-d","--day", help="experiment day, e.g. 1-7")
ap.add_argument("-s","--session", help="trial session, e.g. 1-4")
ap.add_argument("-fs","--fedSide", help="side(self.ip feed on/conditioned side")
ap.add_argument("-x","--sex", help="fish sex")
ap.add_argument("-p","--proportion", help="training ratio")
ap.add_argument("-sp", "--species", help="species name")
ap.add_argument("-sl","--fishstandardlength", help="standard length of the")
ap.add_argument("-r","--round"
|
, help="training round")
ap.add_argument("-fd", "--feed", help="feed with this stimulus",action="store_true")
ap.add_argument("-c", "--camera",help="do you want to record using this pi?",action="store_true")
ap.add_argument("-m:", "--startTime", help="time since epoch that you want to start your trial")
a
|
rgs = vars(ap.parse_args())
## parse trial details and pass it to the Trial class
if args.["feed"]:
T = Trial(args["presentedStim"], args["startTime"], 'feed')
else:
T = Trial(args["presentedStim"], args["startTime"], 'notfeed'))
T.checkPiIP()
T.whatStimulus()
T.videoFileName(args["species"], args["round"], args["fishstandardlength"],
args["sex"], args["fish"], args["day"], args["session"], args["fedSide"])
## initialize camera IF attached to Pi
if args["camera"]:
T.cameraInit()
else:
pass
## start camera recording IF attached to Pi and begin mainloop of Trial
if args["camera"]:
T.mainLoop('record')
else:
T.mainLoop('notrecord')
## stop camera recording IF attached to Pi
if args["camera"]:
T.stopRecording()
else:
pass
## cleanup camera IF attached to Pi
if args["camera"]:
T.cameraQuit()
## cleanup remaining processes and exit
T.safeQuit()
|
biomodels/MODEL1310160000
|
MODEL1310160000/model.py
|
Python
|
cc0-1.0
| 427
| 0.009368
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1310160000.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
|
except ImportError:
return False
|
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractImpatientmtlreader533234643WordpressCom.py
|
Python
|
bsd-3-clause
| 594
| 0.031987
|
def extractImpatientmtlreader53
|
3234643WordpressCom(item):
'''
Parser for 'impatientmtlreader533234643.wordpress.com'
'''
vol, chp, frag, postfix = extrac
|
tVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
dangillet/cocos
|
samples/demo_multiple_scenes.py
|
Python
|
bsd-3-clause
| 3,011
| 0.001993
|
#
# cocos2d
# http://python.cocos2d.org
#
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.sprite import Sprite
class TestL
|
ayer(cocos.layer.Layer):
def __init__(self):
super(TestLayer, self).__init__()
x, y = director.get_window_size()
sprite1 = Sprite('grossini.png')
sprite2 = Sprite('grossinis_sister1.png')
sprite3 = Sprite('grossinis_sister2.png')
|
sprite1.position = (x // 2, y // 2)
sprite2.position = (x // 4, y // 2)
sprite3.position = (3 * x / 4.0, y // 2)
self.add(sprite2)
self.add(sprite1)
self.add(sprite3)
sprite1.do(RotateBy(360, 1) * 16)
sprite2.do(RotateBy(-360, 1) * 16)
sprite3.do(RotateBy(-360, 1) * 16)
if __name__ == "__main__":
director.init(resizable=True)
main_scene = cocos.scene.Scene()
main_scene.transform_anchor = (320, 240)
child1_scene = cocos.scene.Scene()
child2_scene = cocos.scene.Scene()
child3_scene = cocos.scene.Scene()
child4_scene = cocos.scene.Scene()
sprites = TestLayer()
sprites.transform_anchor = 320, 240
child1_scene.add(ColorLayer(0, 0, 255, 255))
child1_scene.add(sprites)
child1_scene.scale = 1.5
child1_scene.position = (-160, -120)
child1_scene.transform_anchor = (320, 240)
child2_scene.add(ColorLayer(0, 255, 0, 255))
child2_scene.add(sprites)
child2_scene.scale = 1.5
child2_scene.position = (160, 120)
child2_scene.transform_anchor = (320, 240)
child3_scene.add(ColorLayer(255, 0, 0, 255))
child3_scene.add(sprites)
child3_scene.scale = 1.5
child3_scene.position = (-160, 120)
child3_scene.transform_anchor = (320, 240)
child4_scene.add(ColorLayer(255, 255, 255, 255))
child4_scene.add(sprites)
child4_scene.scale = 1.5
child4_scene.position = (160, -120)
child4_scene.transform_anchor = (320, 240)
main_scene.add(child1_scene)
main_scene.add(child2_scene)
main_scene.add(child3_scene)
main_scene.add(child4_scene)
rot = RotateBy(-360, 2)
rot2 = RotateBy(360, 4)
sleep = Delay(2)
sleep2 = Delay(2)
sc1 = ScaleTo(0.5, 0.5) + Delay(1.5)
sc2 = Delay(0.5) + ScaleTo(0.5, 0.5) + Delay(1.0)
sc3 = Delay(1.0) + ScaleTo(0.5, 0.5) + Delay(0.5)
sc4 = Delay(1.5) + ScaleTo(0.5, 0.5)
child1_scene.do(sc4 + sleep + rot + sleep + rot + rot)
child2_scene.do(sc3 + sleep + rot + sleep + rot + Reverse(rot))
child3_scene.do(sc2 + sleep + rot + sleep + rot + Reverse(rot))
child4_scene.do(sc1 + sleep + rot + sleep + rot + rot)
main_scene.do(sleep + Reverse(rot) * 2 + rot * 2 + sleep)
sprites.do(Delay(4) + rot2 * 3)
director.run(main_scene)
|
materials-commons/materialscommons.org
|
backend/scripts/admin/check_for_top_dir.py
|
Python
|
mit
| 1,695
| 0.00295
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
def run(rql):
try:
return rql.run()
except r.RqlRuntimeError:
return None
def main(port, include_deleted):
conn = r.connect('localhost', port, db='materialscommons')
cursor = r.table('project2datadir') \
.eq_join('datadir_id', r.table('datadirs')) \
.merge({
'right': {
'name2': r.row['right']['name']
}
}).zip() \
.eq_join('project_id', r.table('projects')).zip() \
.run(conn)
for doc in cursor:
project_name = doc['name']
dir_name = doc['name2']
owner = doc['owner']
if (owner == 'delete@materialscommons.org') and not include_deleted:
continue
if len(dir_name.split('/')) == 1:
if not project_name == dir_name:
print("Project '{}'({})".format(project_name, doc['project_id']))
print(" -> dir '{}'({})".format(dir_name, doc['datadir_id']))
print(" project owner = {}".for
|
mat(owner))
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int", help="rethinkdb port", default=30815)
parser.add_option("-I", "--include-deleted", dest="incd", action="store_true", help="include deleted files", default=False)
|
(options, args) = parser.parse_args()
include_deleted = options.incd
port = options.port
print("Using database port = {}".format(port))
if include_deleted:
print("Including deleted files in search")
else:
print("Excluding deleted files from search")
main(port, include_deleted)
|
petrutlucian94/nova
|
nova/tests/unit/api/openstack/compute/contrib/test_multiple_create.py
|
Python
|
apache-2.0
| 22,783
| 0.000088
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_config import cfg
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import block_device_mapping as \
block_device_mapping_v21
from nova.api.openstack.compute.plugins.v3 import multiple_create as \
multiple_create_v21
from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
from nova.api.openstack.compute import servers as servers_v20
from nova.api.openstack import extensions as extensions_v20
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova import exception
from nova.network import manager
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class MultiCreateExtensionTestV21(test.TestCase):
validation_error = exception.ValidationError
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(MultiCreateExtensionTestV21, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
|
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensio
|
nInfo()
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-multiple-create',
'osapi_v3')
self.no_mult_create_controller = servers_v21.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"security_groups": inst['security_groups'],
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params, update_cells=True,
columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.req = fakes.HTTPRequest.blank('')
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
if no_image:
server.pop('imageRef', None)
server.update(params)
body = dict(server=server)
if override_controller:
server = override_controller.create(self.req,
body=body).obj['server']
else:
server = self.controller.create(self.req,
body=body).obj['server']
def _check_multiple_create_extension_disabled(self, **kwargs):
# NOTE: on v2.1 API, "create a server" API doesn't add the following
# attributes into kwargs when non-loading multiple_create extension.
# However, v2.0 API adds them as values "1" instead. So we need to
# define checking methods for each API here.
self.assertNotIn('min_count', kwargs)
self.assertNotIn('max_count', kwargs)
def test_create_instance_with_multiple_create_disabled(self):
min_count = 2
max_count = 3
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self._check_multiple_create_extension_disabled(**kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(
params,
override_controller=self.no_mult_create_controller)
def test_multiple_create_with_string_type_min_and_max(self):
min_count = '2'
max_count = '3'
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsInstance(kwargs['min_count'], int)
self.assertIsInstance(kwargs['max_count'], int)
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_multiple_create_enabled(self):
min_count = 2
max_count = 3
params = {
multiple_create_v21.
|
cgchemlab/chemlab
|
tools/convert_gromacs2espp.py
|
Python
|
gpl-3.0
| 4,036
| 0.005699
|
#!/usr/bin/env
|
python
# Copyright (C) 2012,2013,2015(H),2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
#
|
ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import math
import re
def convertTable(gro_in_file, esp_out_file, sigma=1.0, epsilon=1.0, c6=1.0, c12=1.0):
"""Convert GROMACS tabulated file into ESPResSo++ tabulated file (new file
is created). First column of input file can be either distance or angle.
For non-bonded files, c6 and c12 can be provided. Default value for sigma, epsilon,
c6 and c12 is 1.0. Electrostatics are not taken into account (f and fd columns).
Keyword arguments:
gro_in_file -- the GROMACS tabulated file name (bonded, nonbonded, angle
or dihedral).
esp_out_file -- filename of the ESPResSo++ tabulated file to be written.
sigma -- optional, depending on whether you want to convert units or not.
epsilon -- optional, depending on whether you want to convert units or not.
c6 -- optional
c12 -- optional
"""
# determine file type
bonded, angle, dihedral = False, False, False
re_bond = re.compile('.*_b[0-9]+.*')
re_angle = re.compile('.*_a[0-9]+.*')
re_dihedral = re.compile('.*_d[0-9]+.*')
if re.match(re_bond, gro_in_file):
bonded = True
elif re.match(re_angle, gro_in_file):
angle = True
bonded = True
elif re.match(re_dihedral, gro_in_file):
dihedral = True
bonded = True
fin = open(gro_in_file, 'r')
fout = open(esp_out_file, 'w')
if bonded: # bonded has 3 columns
for line in fin:
if line[0] == "#": # skip comment lines
continue
columns = line.split()
r = float(columns[0])
f = float(columns[1]) # energy
fd= float(columns[2]) # force
# convert units
if angle or dihedral: # degrees to radians
r = math.radians(r)
fd=fd*180/math.pi
else:
r = r / sigma
e = f / epsilon
f = fd*sigma / epsilon
if (not angle and not dihedral and r != 0) or \
(angle and r <= math.pi and r > 0) or \
(dihedral and r >= -math.pi and r <= math.pi):
fout.write("%15.8g %15.8g %15.8g\n" % (r, e, f))
else: # non-bonded has 7 columns
for line in fin:
if line.startswith('#'): # skip comment lines
continue
columns = line.split()
r = float(columns[0])
g = float(columns[3]) # dispersion
gd= float(columns[4])
h = float(columns[5]) # repulsion
hd= float(columns[6])
e = c6*g + c12*h
f = c6*gd+ c12*hd
# convert units
r = r / sigma
e = e / epsilon
f = f*sigma / epsilon
if r != 0: # skip 0
fout.write("%15.8g %15.8g %15.8g\n" % (r, e, f))
fin.close()
fout.close()
def _args():
parser = argparse.ArgumentParser()
parser.add_argument('in_file')
parser.add_argument('out_file')
return parser
def main():
args = _args().parse_args()
convertTable(args.in_file, args.out_file)
if __name__ == '__main__':
main()
|
ShivamSarodia/ShivC
|
rules.py
|
Python
|
gpl-2.0
| 15,247
| 0.00164
|
"""
The symbols and rules for the CFG of C. I generated these myself by hand, so
they're probably not perfectly correct.
"""
from rules_obj import *
from lexer import *
import tokens
### Symbols ###
# Most symbols are either self-explanatory, or best understood by examining the
# rules below to see how they're used.
S = Symbol("S")
main_setup = Symbol("main_setup") #TODO: is this neccesary?
# `statments` is a buch of `statement`s
statements = Symbol("statements")
# `statement` is a single C statement, semicolon included
statement = Symbol("statement")
# a generic expression
E = Symbol("E")
declare_separator = Symbol("declare_separator")
declare_type = Symbol("declare_type")
declare_expression = Symbol("declare_expression");
arr_start = Symbol("arr_start")
arr_end = Symbol("arr_end")
arr_list = Symbol("arr_list")
if_start = Symbol("if_start");
if_statement = Symbol("if_statement");
else_statement = Symbol("else_statement");
while_start = Symbol("while_start")
while_statement = Symbol("while_statement")
for_start = Symbol("for_start")
for1 = Symbol("for1")
for2 = Symbol("for2")
for3 = Symbol("for3")
for_expr = Symbol("for_expr")
arg_start = Symbol("arg_start")
func_dec = Symbol("func_dec")
func_def = Symbol("func_def")
func_call_start = Symbol("func_call_start")
### Rules ###
# After adding a rule, make sure to add it to the rules list at the bottom!
# something that stands alone as a program, plus a function definition or
# declaration, can also stand alone as a program.
main_func_dec_cont = Rule(S, [S, func_dec])
main_func_def_cont = Rule(S, [S, func_def])
main_func_dec = Rule(S, [func_dec])
main_func_def = Rule(S, [func_def])
# make a `statements` symbol by extending another `statements` symbol
statements_cont = Rule(statements, [statements,
statement])
# make a single `statement` symbol into a `statements` symbol
statements_end = Rule(statements, [statement])
# return statement
return_form = Rule(statement, [tokens.return_command,
E,
tokens.semicolon])
# a print statement
# The print statement is not valid C. I added it for ease of use, however, as
# I do not forsee this compiler being able to inclue stdio.h anytime soon.
print_form = Rule(statement, [tokens.print_command,
E,
tokens.semicolon])
# a declaration of the form int;
useless_declaration = Rule(statement, [Token("type"), tokens.semicolon])
# a declaration of the form `int a;` or `int a, b = 0;`
real_declaration = Rule(statement, [declare_expression, tokens.semicolon])
# the type part of a declaration, along with a
|
ny pointers on the first variable
declare_type_base = Rule(declare_type, [Token("type")])
declare_type_cont = Rule(declare_type, [declare_type, tokens.aster])
# used to separate declarations. all these are declare_separators:
# ,
# ,*
# , **
#
declare_separator_base = Rule(declare_separator, [tokens.comma])
declare_separator_cont = Rule(declare_separator, [declare_separator, tokens.aster])
# the base of a declaration, like `int hello` or `int* hello`.
base_declare
|
= Rule(declare_expression, [declare_type, Token("name")])
# a non-array declaration with an assignment, like `int hello = 4` or `int* hello = &p`.
assign_declare = Rule(declare_expression, [declare_expression, tokens.equal, E], 49)
# an array declaration with assignment, like `int hi[4] = {1, 2, 3, 4}`.
# Note--I imagine a better parser would catch things like `int hi = {1, 3}`.
# Mine, however, catches these errors at the code generation stage.
arr_assign_declare = Rule(declare_expression, [declare_expression, tokens.equal, arr_list], 49)
# Converts things like `int a, b` into a fresh declare_expression to chain declarations
cont_declare = Rule(declare_expression, [declare_expression, declare_separator, Token("name")])
# Defines `int a[5]` as a valid declare expression
array_num_declare = Rule(declare_expression, [declare_expression,
tokens.open_sq_bracket,
E,
tokens.close_sq_bracket])
# Defines `int a[]` as a valid declare expression
array_nonum_declare = Rule(declare_expression, [declare_expression,
tokens.open_sq_bracket,
tokens.close_sq_bracket])
E_num = Rule(E, [Token("integer")])
E_parens = Rule(E, [tokens.open_paren,
E,
tokens.close_paren])
# Badly named--E_add can be binary addition or subtraction
E_add = Rule(E, [E,
Token("addop"),
E], 85)
E_mult = Rule(E, [E,
tokens.aster,
E], 90)
E_div = Rule(E, [E,
tokens.slash,
E], 90)
E_mod = Rule(E, [E,
tokens.percent,
E], 90)
E_boolean_and = Rule(E, [E,
tokens.logic_and,
E], 65)
E_boolean_or = Rule(E, [E,
tokens.logic_or,
E], 60)
E_eq_compare = Rule(E, [E,
Token("eq_compare"),
E], 70)
E_compare = Rule(E, [E,
Token("compare"),
E], 75)
# Again, badly named. E_neg can be either unary addition or subtraction
E_neg = Rule(E, [Token("addop"),
E], 95)
# Note this covers all of `a = 5`, `a *= 5`, `a /= 5`, etc.
# We give this rule a priority of 49, which is less than 50 (the priority) of
# the assignment symbols. This makes it right associative.
E_equal = Rule(E, [E,
Token("assignment"),
E], 49)
E_boolean_not = Rule(E, [tokens.logic_not, E], 95)
# Covers both a++ and a--
E_inc_after = Rule(E, [E,
Token("crement")], 100)
# Covers both ++a and --a
E_inc_before = Rule(E, [Token("crement"),
E], 95)
E_point = Rule(E, [tokens.aster, E], 95)
E_deref = Rule(E, [tokens.amper, E], 95)
# Calling a function like `f()`
E_func_noarg = Rule(E, [E, tokens.open_paren, tokens.close_paren])
# The start of a function call and first argument, like `f(1`
E_func_call_start = Rule(func_call_start, [E, tokens.open_paren, E], 0)
# Chaining more arguments onto the function call
E_func_call_cont = Rule(func_call_start, [func_call_start, tokens.comma, E], 0)
# Completing the function call
E_func_call_end = Rule(E, [func_call_start, tokens.close_paren])
# Array referencing, like `a[4]`
E_array = Rule(E, [E, tokens.open_sq_bracket, E, tokens.close_sq_bracket], 100)
E_var = Rule(E, [Token("name")])
E_form = Rule(statement, [E, tokens.semicolon])
# We have to separate out the start so (E) doesn't reduce to E in `if(E)`
if_start_form = Rule(if_start, [tokens.if_keyword,
tokens.open_paren])
# an if statement like `if(E) {}`
if_form_brackets = Rule(if_statement, [if_start,
E,
tokens.close_paren,
tokens.open_bracket,
tokens.close_bracket])
# a one line if statement like `if(E) a = 5;`
# it's OK to use "statements" here because statement -> statements immediately,
# so then this rule will apply right away
if_form_oneline = Rule(if_statement, [if_start,
E,
tokens.close_paren,
statements])
# the most common if form, like `if(E) {a = 5;}`
if_form_main = Rule(if_statement, [if_start,
E,
tokens.close_paren,
tokens.open_bracket,
statements,
tokens.close_bracket])
# Same things, but for else
else_form_brackets = Rule(else_statement, [tokens.else_keyword,
tokens.open_bracket,
|
beeverycreative/beeconnect
|
WaitForConnection.py
|
Python
|
gpl-2.0
| 8,838
| 0.009391
|
#!/usr/bin/env python3
"""
* Copyright (c) 2015 BEEVC - Electronic Systems This file is part of BEESOFT
* software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either
* version 3 of the License, or (at your option) any later version. BEESOFT is
* distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU General Public License for more details. You
* should have received a copy of the GNU General Public License along with
* BEESOFT. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Marcos Gomes"
__license__ = "MIT"
import FileFinder
import pygame
import Loaders.WaitForConnectionLoader
from beedriver import connection
import time
import FileFinder
class WaitScreen():
"""
@var connected: status of USB connection to the BTF
"""
connected = False
screen = None
exit = False
lblTop = None
lblBottom = None
bgImage = None
loader = None
nextPullTime = None
"""
BEEConnect vars
"""
beeCon = None
beeCmd = None
mode = None
displayWidth = 480
displayHeight = 320
"""*************************************************************************
Init Method
intis all compoments
*************************************************************************"""
def __init__(self, screen, dispWidth = 480, dispHeight = 320, shutdownCallback=None):
"""
.
"""
self.displayWidth = dispWidth
self.displayHeight = dispHeight
self.connected = False
print("Printer Connection: {0}".format(self.connected))
self.exit = False
self.screen = screen
self.currentScreen = 'WaitConnection'
self.loader = Loaders.WaitForConnectionLoader.WaitForConnectionLoader(self.displayWidth, self.displayHeight)
lblText = self.loader.GetLblsText()
lblX = self.loader.GetLblsXPos()
lblY = self.loader.GetLblsYPos()
lblFont = self.loader.GetLblsFont()
lblFontColor = self.loader.GetLblsFontColor()
for i in range(0,len(lblText)):
lbl = lblFont[i].render(lblText[i],1,lblFontColor[i])
self.screen.blit(lbl,(lblX[i],lblY[i]))
self.bgImage = pygame.image.load(self.loader.GetImagePath())
imgX = self.loader.GetImageX()
imgY = self.loader.GetImageY()
# Draw Image
self.screen.blit(self.bgImage,(imgX,imgY))
# update screen
pygame.display.update()
self.nextPullTime = time.time() + 0.5
tries = 10
while (not self.connected) and (not self.exit) and (tries > 0):
# Handle events
self.handle_events()
t = time.time()
if t > self.nextPullTime:
self.beeCon = connection.Conn(shutdownCallback)
# Connect to first Printer
self.beeCon.connectToFirstPrinter()
printerDict = self.beeCon.connectedPrinter
if(self.beeCon.isConnected() == True):
self.beeCmd = self.beeCon.getCommandIntf()
self.mode = self.beeCmd.getPrinterMode()
fwVersion = self.beeCmd.getFirmwareVersion()
#resp = self.beeCmd.startPrinter()
if('Firmware' in self.mode):
if '10.4.7' not in fwVersion and not self.beeCmd.isPrinting():
self.beeCmd.goToBootloader()
self.beeCon.close()
self.beeCon = None
else:
self.connected = self.beeCon.connected
elif('Bootloader' in self.mode):
printerVID = printerDict['VendorID']
printerPID = printerDict['ProductID']
fwName = ''
fwString = ''
if printerVID == '65535' and printerPID == '334':
#Old Bootloader Printer
fwString = 'BEEVC-BEETHEFIRST0-10.4.8'
fwName = '/Firmware/BEEVC-BEETHEFIRST0-Firmware-10.4.8.BIN'
elif printerVID == '10697':
#New Bootloader Printers
if printerPID == '1':
#BEETHEFIRST
fwString = 'BEEVC-BEETHEFIRST-10.4.8'
fwName = '/Firmware/BEEVC-BEETHEFIRST-Firmware-10.4.8.BIN'
elif printerPID == '2':
#BEETHEFIRST+
fwString = 'BEEVC-BEETHEFIRST_PLUS-10.4.8'
fwName = '/Firmware/BEEVC-BEETHEFIRST_PLUS-Firmware-10.4.8.BIN'
elif printerPID == '3':
#BEEME
fwString = 'BEEVC-BEEME-10.4.8'
fwName = '/Firmware/BEEVC-BEEME-Firmware-10.4.8.BIN'
elif printerPID == '4':
#BEEINSCHOOL
fwString = 'BEEVC-BEEINSCHOOL-10.4.8'
fwName = '/Firmware/BEEVC-BEEINSCHOOL-Firmware-10.4.8.BIN'
elif printerPID == '5':
#BEETHEFIRST_PLUS_A
fwString = 'BEEVC-BEETHEFIRST_PLUS_A-10.4.8'
fwName = '/Firmware/BEEVC-BEETHEFIRST_PLUS_A-Firmware-10.4.8.BIN'
if '10.4.8' not in fwVersion:
print('Falshing new Firmare')
ff = FileFinder.FileFinder()
fwPath = ff.GetAbsPath(fwName)
self.beeCmd.flashFirmware(fwPath,fwString)
while self.beeCmd.getTransferCompletionState() is not None:
time.sleep(0.5)
self.beeCon.close()
self.beeCon = None
else:
print("Changing to firmware")
self.beeCmd.goToFirmware()
#self.beeCon.close()
#time.sleep(1)
self.mode = self.beeCmd.getPrinterMode()
if 'Firmware' not in self.mode:
self.beeCon = None
else:
|
self.connected = self.beeCon.connected
#return True
else:
# USB Bu
|
ffer need cleaning
print('Printer not responding... cleaning buffer\n')
self.beeCmd.cleanBuffer()
self.beeCon.close()
self.beeCon = None
# return None
self.nextPullTime = time.time() + 0.5
#print("Wait for connection")
tries -= 1
if(tries <= 0):
print('Printer not found')
return False
else:
status = self.beeCmd.getStatus()
if status is not None:
if 'Shutdown' in status:
self.beeCmd.clearShutdownFlag()
return
"""*************************************************************************
handle_events
waits for a USB connection to be established
*************************************************************************"""
def handle_events(sel
|
ewan-klein/nltk_twitter
|
twokenise.py
|
Python
|
apache-2.0
| 6,480
| 0.013622
|
""" tokenizer for tweets! might be appropriate for other social media dialects too.
general philosophy is to throw as little out as possible.
development philosophy: every time you change a rule, do a diff of this
program's output on ~100k tweets. if you iterate through many possible rules
and only accept the ones that seeem to result in good diffs, it's a sort of
statistical learning with in-the-loop human evaluation :)
"""
__author__="brendan o'connor (anyall.org)"
import re,sys
import emoticons
mycompile = lambda pat: re.compile(pat, re.UNICODE)
def regex_or(*items):
    """Join regex alternatives with '|' and wrap them in a capturing group."""
    return '(%s)' % '|'.join(items)
def pos_lookahead(r):
    """Wrap *r* in a positive lookahead assertion."""
    return '(?=%s)' % r

def neg_lookahead(r):
    """Wrap *r* in a negative lookahead assertion."""
    return '(?!%s)' % r

def optional(r):
    """Make the grouped pattern *r* optional."""
    return '(%s)?' % r
PunctChars = r'''['“".?!,:;]'''
Punct = '%s+' % PunctChars
Entity = '&(amp|lt|gt|quot);'
# one-liner URL recognition:
#Url = r'''https?://\S+'''
# more complex version:
UrlStart1 = regex_or('https?://', r'www\.')
CommonTLDs = regex_or('com','co\\.uk','org','net','info','ca')
UrlStart2 = r'[a-z0-9\.-]+?' + r'\.' + CommonTLDs + pos_lookahead(r'[/ \W\b]')
UrlBody = r'[^ \t\r\n<>]*?' # * not + for case of: "go to bla.com." -- don't want period
UrlExtraCrapBeforeEnd = '%s+?' % regex_or(PunctChars, Entity)
UrlEnd = regex_or( r'\.\.+', r'[<>]', r'\s', '$')
Url = (r'\b' +
regex_or(UrlStart1, UrlStart2) +
UrlBody +
pos_lookahead( optional(UrlExtraCrapBeforeEnd) + UrlEnd))
Url_RE = re.compile("(%s)" % Url, re.U|re.I)
Timelike = r'\d+:\d+'
NumNum = r'\d+\.\d+'
NumberWithCommas = r'(\d+,)+?\d{3}' + pos_lookahead(regex_or('[^,]','$'))
Abbrevs1 = ['am','pm','us','usa','ie','eg']
def regexify_abbrev(a):
    """Build a case-insensitive dotted-abbrev regex: 'am' -> '[aA]\\.[mM]\\.'."""
    pieces = []
    for ch in a:
        pieces.append('[%s%s]\\.' % (ch, ch.upper()))
    return ''.join(pieces)
Abbrevs = [regexify_abbrev(a) for a in Abbrevs1]
# Dotted abbreviations such as "U.S.A." -- must be followed by a non-dot boundary.
BoundaryNotDot = regex_or(r'\s', '[“"?!,:;]', Entity)
aa1 = r'''([A-Za-z]\.){2,}''' + pos_lookahead(BoundaryNotDot)
aa2 = r'''([A-Za-z]\.){1,}[A-Za-z]''' + pos_lookahead(BoundaryNotDot)
ArbitraryAbbrev = regex_or(aa1,aa2)
# sanity check: the em-dash-like separator really is a distinct character
assert '-' != '―'
Separators = regex_or('--+', '―')
Decorations = r' [ ♫ ]+ '.replace(' ','')
EmbeddedApostrophe = r"\S+'\S+"
# Spans matching any of these patterns survive tokenization as single tokens.
ProtectThese = [
    emoticons.Emoticon,
    Url,
    Entity,
    Timelike,
    NumNum,
    NumberWithCommas,
    Punct,
    ArbitraryAbbrev,
    Separators,
    Decorations,
    EmbeddedApostrophe,
    ]
Protect_RE = mycompile(regex_or(*ProtectThese))
class Tokenization(list):
    """A list of token strings plus the source text and per-token offsets."""

    def __init__(self):
        self.alignments = []  # character offset of each token within self.text
        self.text = ""        # the untokenized source string

    def subset(self, tok_inds):
        """Return a new Tokenization restricted to the given token indices."""
        picked = Tokenization()
        for i in tok_inds:
            picked.append(self[i])
            picked.alignments.append(self.alignments[i])
        picked.text = self.text
        return picked
def assert_consistent(t):
    """Check that every token matches t.text at its recorded alignment offset."""
    assert len(t) == len(t.alignments)
    for tok, off in zip(t, t.alignments):
        assert t.text[off:off + len(tok)] == tok
def align(toks, orig):
    """Compute the character offset of each token of *toks* inside *orig*.

    Scans left to right; raises AlignmentFailed when a token cannot be
    located at or after the current scan position.
    """
    alignments = [None] * len(toks)
    cursor = 0
    for k, tok in enumerate(toks):
        width = len(tok)
        while True:
            if orig[cursor:cursor + width] == tok:
                alignments[k] = cursor
                cursor += width
                break
            cursor += 1
            if cursor >= len(orig):
                raise AlignmentFailed((orig, toks, alignments))
            #if orig[s_i] != ' ': raise AlignmentFailed("nonspace advance: %s" % ((s_i,orig),))
    # belt-and-braces: every slot must have been filled
    if any(a is None for a in alignments):
        raise AlignmentFailed((orig, toks, alignments))
    return alignments
class AlignmentFailed(Exception):
    """Raised by align() when tokens cannot be matched back to the source text."""
    pass
def unicodify(s, encoding='utf8', *args):
    """Coerce *s* to text.

    *encoding* and extra args are ignored; they survive from the Python 2
    version (the commented-out ``decode`` calls) for caller compatibility.
    """
    return str(s)
def tokenize(tweet):
    """Tokenize *tweet*; return a Tokenization aligned to the squeezed text."""
    #text = unicodify(tweet)
    # collapse whitespace first so alignments refer to the normalized text
    text = squeeze_whitespace(tweet)
    t = Tokenization()
    t += simple_tokenize(text)
    t.text = text
    t.alignments = align(t, text)
    return t
def simple_tokenize(text):
    """Split *text* into tokens, keeping Protect_RE matches (URLs, emoticons,
    numbers, ...) intact as single tokens."""
    s = text
    s = edge_punct_munge(s)
    # strict alternating ordering through the string. first and last are goods.
    # good bad good bad good bad good
    goods = []  # index spans between protected matches: whitespace-tokenized
    bads = []   # index spans of protected matches: kept verbatim
    i = 0
    if Protect_RE.search(s):
        for m in Protect_RE.finditer(s):
            goods.append( (i, m.start()) )
            bads.append(m.span())
            i = m.end()
        # tail span after the last protected match (possibly empty)
        goods.append( (m.end(), len(s)) )
    else:
        goods = [ (0, len(s)) ]
    assert len(bads)+1 == len(goods)
    # switch from index pairs to the substrings themselves
    goods = [s[i:j] for i,j in goods]
    bads = [s[i:j] for i,j in bads]
    #print goods
    #print bads
    goods = [unprotected_tokenize(x) for x in goods]
    # re-interleave: goods[0], bads[0], goods[1], bads[1], ..., goods[-1]
    res = []
    for i in range(len(bads)):
        res += goods[i]
        res.append(bads[i])
    res += goods[-1]
    res = post_process(res)
    return res
# token ending in "'s": split the possessive marker off as its own token
AposS = re.compile(r"(\S+)('s)$", re.UNICODE)

def post_process(pre_toks):
    """Split a trailing possessive "'s" off of tokens; others pass through."""
    # hacky: further splitting of certain tokens
    out = []
    for token in pre_toks:
        match = AposS.search(token)
        out.extend(match.groups() if match else (token,))
    return out
WS_RE = re.compile(r'\s+', re.UNICODE)

def squeeze_whitespace(s):
    """Collapse each whitespace run in *s* to one space and trim the ends."""
    return WS_RE.sub(' ', s).strip()
# Quote/bracket characters that cling to a word's edge
# (fun: copy and paste outta http://en.wikipedia.org/wiki/Smart_quotes).
EdgePunct = r"""[ ' " “ ” ‘ ’ < > « » { } ( \) [ \] ]""".replace(' ','')
#NotEdgePunct = r"""[^'"([\)\]]""" # alignment failures?
NotEdgePunct = r"""[a-zA-Z0-9]"""
EdgePunctLeft = r"""(\s|^)(%s+)(%s)""" % (EdgePunct, NotEdgePunct)
EdgePunctRight = r"""(%s)(%s+)(\s|$)""" % (NotEdgePunct, EdgePunct)
EdgePunctLeft_RE = re.compile(EdgePunctLeft, re.UNICODE)
EdgePunctRight_RE = re.compile(EdgePunctRight, re.UNICODE)

def edge_punct_munge(s):
    """Insert a space between words and the edge punctuation stuck to them."""
    with_left = EdgePunctLeft_RE.sub(r"\1\2 \3", s)
    return EdgePunctRight_RE.sub(r"\1 \2\3", with_left)
def unprotected_tokenize(s):
    """Whitespace-split a span that contains no protected (URL/emoticon/...) text."""
    return s.split()
if __name__=='__main__':
    # demo driver: tokenize a local dump of tweets, one tweet per line
    for line in open('tweets.txt'):
        print(" ".join(tokenize(line[:-1])))
    #for line in sys.stdin:
    #print u" ".join(tokenize(line[:-1])).encode('utf-8')
    #print "CUR\t" + " ".join(tokenize(line[:-1]))
    #print "WS\t" + " ".join(line[:-1].split())
    #print ansi.color(line.strip(),'red')
    #print ansi.color(" ".join(tokenize(line.strip())),'blue','bold')
|
JustusSchwan/MasterThesis
|
trash/utility_positional.py
|
Python
|
mit
| 4,965
| 0.000604
|
import numpy as np
import cv2
import math
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rot_vec_to_euler(r):
    """Convert an OpenCV rotation vector *r* to (x, y, z) Euler angles.

    The matrix is pre-multiplied by a 180-degree rotation about the x axis
    so that a forward-facing pose maps to [0, 0, 0].
    """
    # Rotate around x axis by 180 degrees to have [0, 0, 0] when facing forward
    R = np.dot(np.array([[1, 0, 0],
                         [0, -1, 0],
                         [0, 0, -1]]),
               np.array(cv2.Rodrigues(r)[0]))
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    # sy ~ 0 means cos(y) ~ 0: near gimbal lock, use the stable fallback
    singular = sy < 1e-6
    if not singular:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0
    return np.array([x, y, z])
# Calculates Rotation Matrix given euler angles.
def euler_to_rot_vec(theta):
    """Convert (x, y, z) Euler angles back to an OpenCV rotation vector.

    Inverse of rot_vec_to_euler: composes Rz*Ry*Rx, re-applies the
    180-degree x-axis flip, then converts with cv2.Rodrigues.
    """
    # elementary rotation about the x axis
    r_x = np.array([[1, 0, 0],
                    [0, math.cos(theta[0]), -math.sin(theta[0])],
                    [0, math.sin(theta[0]), math.cos(theta[0])]
                    ])
    # elementary rotation about the y axis
    r_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
                    [0, 1, 0],
                    [-math.sin(theta[1]), 0, math.cos(theta[1])]
                    ])
    # elementary rotation about the z axis
    r_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
                    [math.sin(theta[2]), math.cos(theta[2]), 0],
                    [0, 0, 1]
                    ])
    return np.array(cv2.Rodrigues(np.dot(np.array([[1, 0, 0],
                                                   [0, -1, 0],
                                                   [0, 0, -1]]),
                                         np.dot(r_z, np.dot(r_y, r_x))))[0])
class poseExtractor:
    """Estimates head pose (translation vector + Euler rotation) from facial
    landmarks by solving PnP against a fixed 3D face model."""

    def __init__(self):
        # Landmark indices used for the 2D-3D correspondence
        # (presumably dlib's 68-point scheme: nose, eyes, eyebrows — TODO confirm).
        self.image_points = np.array([30, 29, 28, 27, 33, 32, 34, 31, 35,
                                      36, 45, 39, 42,
                                      21, 22, 20, 23, 19, 24, 18, 25
                                      ], dtype=np.intp)
        # 3D model points; only right-hand-side entries are filled in here,
        # the zeroed left-hand entries are mirrored from them below.
        self.model_points = np.array([
            (0.0, 0.0, 0.0),  # Nose tip
            (0.0, 0.40412, -0.35702),  # Nose 1
            (0.0, 0.87034, -0.65485),  # Nose 2
            (0, 1.33462, -0.92843),  # Nose 3
            (0, -0.63441, -0.65887),  # Under Nose #0
            (0, 0, 0),  # Under Nose #1, L
            (0.25466, -0.59679, -0.80215),  # Under Nose #1, R
            (0, 0, 0),  # Under Nose #2, L
            (0.49277, -0.56169, -0.96709),  # Under Nose #2, R
            (0, 0, 0),  # Left eye outer corner
            (1.60745, 1.21855, -1.9585),  # Right eye outer corner
            (0, 0, 0),  # Left eye inner corner
            (0.53823, 1.15389, -1.37273),  # Right eye inner corner
            (0, 0, 0),  # Eyebrow #0, L
            (0.34309, 1.67208, -0.96486),  # Eyebrow #0, R
            (0, 0, 0),  # Eyebrow #1, L
            (0.65806, 1.85405, -1.04975),  # Eyebrow #1, R
            (0, 0, 0),  # Eyebrow #2, L
            (0.96421, 1.95277, -1.23015),  # Eyebrow #2, R
            (0, 0, 0),  # Eyebrow #3, L
            (1.32075, 1.95305, -1.48482)  # Eyebrow #3, R
        ])
        # Mirror each zeroed left point (odd index from 5) from the right
        # point that follows it: negate x, copy y and z.
        for i in range(5, self.model_points.shape[0], 2):
            self.model_points[i, 0] = -self.model_points[i + 1, 0]
            self.model_points[i, 1:3] = self.model_points[i + 1, 1:3]
        self.camera_matrix = None  # Hack so camera matrix can be used for printing later
        self.dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
        self.rvec = None  # cached rotation vector, reused as the extrinsic guess
        self.tvec = None  # cached translation vector, reused as the extrinsic guess

    def get_head_rotation(self, landmarks, img_size):
        """Solve PnP for the head pose; caches rvec/tvec and returns the
        solvePnP success flag.

        Assumes img_size is (height, width) — TODO confirm with caller.
        """
        # Camera internals: approximate pinhole, focal length = image width,
        # principal point at the image center.
        focal_length = img_size[1]
        center = (img_size[1] / 2, img_size[0] / 2)
        self.camera_matrix = np.array(
            [[focal_length, 0, center[0]],
             [0, focal_length, center[1]],
             [0, 0, 1]], dtype="double"
        )
        if self.rvec is None:
            # first call: solve from scratch
            (success, self.rvec, self.tvec) = cv2.solvePnP(
                self.model_points, landmarks[self.image_points[:, np.newaxis], :],
                self.camera_matrix,
                self.dist_coeffs, flags=cv2.SOLVEPNP_EPNP)
        else:
            # later calls: refine starting from the previous estimate
            (success, self.rvec, self.tvec) = cv2.solvePnP(
                self.model_points, landmarks[self.image_points[:, np.newaxis], :],
                self.camera_matrix, self.dist_coeffs, flags=cv2.SOLVEPNP_EPNP,
                rvec=self.rvec, tvec=self.tvec, useExtrinsicGuess=True)
        return success

    def get_positional_features(self, landmarks, img_size):
        """Return (tvec, euler_angles) for the face, or None when PnP fails."""
        rotation_success = self.get_head_rotation(landmarks, img_size)
        if not rotation_success:
            return None
        return self.tvec, rot_vec_to_euler(self.rvec)
def get_position_by_average(landmarks, img_size):
    """Return [x, y, size]: the landmark centroid and a spread measure,
    each normalized by img_size[0]."""
    centroid = np.mean(landmarks, axis=0)
    # twice the mean distance from the centroid, as a rough face "size"
    spread = 2 * np.mean(np.linalg.norm(landmarks - centroid, axis=1, ord=2))
    scale = img_size[0]
    return np.append(centroid / scale, spread / scale)
|
cripplet/practice
|
hackerrank/quora/skeleton.py
|
Python
|
mit
| 393
| 0.045802
|
import fileinput
def str_to_int(s):
    """Parse a whitespace-separated string of integers into a list of ints."""
    return [int(tok) for tok in s.split()]
# args = [ 'line 1', 'line 2', ... ]
def proc_input(args):
    # placeholder: parsing of the raw input lines is not implemented yet
    pass

def solve(args, verbose=False):
    # skeleton: parse the input; no solution logic yet (implicitly returns None)
    r = proc_input(args)

def test():
    # sanity check for the parsing helper
    assert(str_to_int('1 2 3') == [ 1, 2, 3 ])

if __name__ == '__main__':
    # "test" as the last CLI argument runs the checks; otherwise solve input
    from sys import argv
    if argv.pop() == 'test':
        test()
    else:
        solve(list(fileinput.input()), verbose=True)
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractAtarutranslationBlogspotCom.py
|
Python
|
bsd-3-clause
| 570
| 0.033333
|
def extractAtarutranslationBlogspotCom(item):
    '''
    Parser for 'atarutranslation.blogspot.com'
    '''
    # NOTE(review): extractVolChapterFragmentPostfix and
    # buildReleaseMessageWithType are supplied by the surrounding framework.
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # skip items with no chapter/volume info, and preview posts
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    # (feed tag, release name, translation type)
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    # no recognized tag -> not a release from this source
    return False
|
mkusz/invoke
|
tests/_support/deeper_ns_list.py
|
Python
|
bsd-2-clause
| 212
| 0.023585
|
from invoke import task, Collection
@task
def toplevel(ctx):
    # no-op task exposed at the root of the namespace
    pass

@task
def subtask(ctx):
    # no-op task reused inside the nested sub-collections
    pass

# Namespace layout: 'toplevel' at the root, 'subtask' under 'a' and 'a.nother'.
ns = Collection(
    toplevel,
    Collection('a', subtask,
        Collection('nother', subtask)
    )
)
|
potray/TFM-Web
|
tfm/migrations/0005_auto_20151124_1311.py
|
Python
|
gpl-2.0
| 467
| 0.002141
|
# -
|
*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter Patient.photo: ImageField with blank=True and default=None,
    without keeping the default in the database state."""

    dependencies = [
        ('tfm', '0004_auto_20151124_1307'),
    ]

    operations = [
        migrations.AlterField(
            model_name='patient',
            name='photo',
            field=models.ImageField(default=None, upload_to=b'photos', blank=True),
            preserve_default=False,
        ),
    ]
| |
uml-robotics/manus_arm
|
arm/src/arm/msg/_cartesian_moves.py
|
Python
|
bsd-2-clause
| 9,235
| 0.015268
|
"""autogenerated by genpy from arm/cartesian_moves.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import arm.msg
import genpy
import std_msgs.msg
class cartesian_moves(genpy.Message):
_md5sum = "56c11a250225b8cc4f58b0e6670caaa1"
_type = "arm/cartesian_moves"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# Cartesian movement sequence message
Header header
time end
cartesian_move[] moves
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: arm/cartesian_move
# Cartesian movement message
Header header
float32[7] positions
int8[7] speeds
"""
__slots__ = ['header','end','moves']
_slot_types = ['std_msgs/Header','time','arm/cartesian_move[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future messag
|
e
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,end,moves
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(cartesian_moves, self).__init__(*args, **
|
kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.end is None:
self.end = genpy.Time()
if self.moves is None:
self.moves = []
else:
self.header = std_msgs.msg.Header()
self.end = genpy.Time()
self.moves = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_2I.pack(_x.end.secs, _x.end.nsecs))
length = len(self.moves)
buff.write(_struct_I.pack(length))
for val1 in self.moves:
_v1 = val1.header
buff.write(_struct_I.pack(_v1.seq))
_v2 = _v1.stamp
_x = _v2
buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
_x = _v1.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_7f.pack(*val1.positions))
buff.write(_struct_7b.pack(*val1.speeds))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.end is None:
self.end = genpy.Time()
if self.moves is None:
self.moves = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.end.secs, _x.end.nsecs,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.moves = []
for i in range(0, length):
val1 = arm.msg.cartesian_move()
_v3 = val1.header
start = end
end += 4
(_v3.seq,) = _struct_I.unpack(str[start:end])
_v4 = _v3.stamp
_x = _v4
start = end
end += 8
(_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v3.frame_id = str[start:end].decode('utf-8')
else:
_v3.frame_id = str[start:end]
start = end
end += 28
val1.positions = _struct_7f.unpack(str[start:end])
start = end
end += 7
val1.speeds = _struct_7b.unpack(str[start:end])
self.moves.append(val1)
self.end.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_2I.pack(_x.end.secs, _x.end.nsecs))
length = len(self.moves)
buff.write(_struct_I.pack(length))
for val1 in self.moves:
_v5 = val1.header
buff.write(_struct_I.pack(_v5.seq))
_v6 = _v5.stamp
_x = _v6
buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
_x = _v5.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(val1.positions.tostring())
buff.write(val1.speeds.tostring())
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.end is None:
self.end = genpy.Time()
if self.moves is None:
self.moves = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.end.secs, _x.end.nsecs,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.moves = []
for i in range(0, length):
val1 = arm.msg.cartesian_move()
_v7 = val1.header
start = end
|
retrodpc/Bulbaspot-Cogs
|
sentry/sentry.py
|
Python
|
apache-2.0
| 11,849
| 0.005233
|
# Ivysalt's sentry module. It keeps track of people who join and leave a chat.
# LICENSE: This single module is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
# @category Tools
# @copyright Copyright (c) 2018 dpc
# @version 1.1
# @author dpc
import asyncio
import json
import os
from discord.ext import commands
from cogs.utils import checks
from cogs.utils.dataIO import fileIO
ban_message = "``Omae wa mou shindeiru.``"
joinleave_path = 'data/sentry/joinleave.json'
bans_path = 'data/sentry/bans.json'
def is_int(s):
    """Return True when *s* parses as an integer, False otherwise.

    int() succeeding already guarantees an integral value; the old extra
    ``float(s) % 1 == 0`` check raised an uncaught OverflowError for very
    large integer strings (float() cannot represent them).
    """
    try:
        int(s)
        return True
    except ValueError:
        return False
def check_folders():
    """Create the data/sentry folder if it does not exist yet."""
    folders = ["data/sentry"]
    for folder in folders:
        if not os.path.exists(folder):
            print("Creating " + folder + " folder...")
            os.makedirs(folder)
def check_files():
    """Create empty joinleave.json / bans.json data files when missing."""
    default = {}
    if not os.path.isfile(joinleave_path):
        print("Creating joinleave.json")
        fileIO(joinleave_path, "save", default)
    if not os.path.isfile(bans_path):
        print("Creating bans.json")
        fileIO(bans_path, "save", default)
# validating data
# Ensure the data folder and files exist, then load them into module-level
# state shared by the Sentry cog below.
check_folders()
check_files()
with open(joinleave_path) as joinleave_file:
    joinleave_data = json.load(joinleave_file)
with open(bans_path) as sentry_file:
    sentry_bans = json.load(sentry_file)
def save(path, data):
    """Serialize *data* as indented JSON at *path*, overwriting the file."""
    with open(path, "w") as handle:
        json.dump(data, handle, indent=4)
class Sentry:
"""Adds various sentry commands.
This module was written specifically for a few servers."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def preban(self, ctx, user_id: str):
"""Users added with this command will be banned on sight.
Only admins may use this command."""
# adding user id to the ban list
if is_int(user_id):
if (ctx.message.server.id in sentry_bans):
if (user_id in sentry_bans[ctx.message.server.id]):
yield from self.bot.say("That user is already pre-banned from this server.")
else:
sentry_bans[ctx.message.server.id].append(user_id)
save(bans_path, sentry_bans)
yield from self.bot.say("User has been pre-banned from this server.")
else:
sentry_bans[ctx.message.server.id] = [user_id]
save(bans_path, sentry_bans)
yield from self.bot.say("User has been pre-banned from this server.")
else:
yield from self.bot.say("Improper command usage.")
# checking if user's already in the server, and banning them if they are
for member in ctx.message.server.members:
if (member.id in sentry_bans[member.server.id]):
#yield from self.bot.send_message(member, ban_message)
yield from (asyncio.sleep(2))
yield from self.bot.ban(member, 7)
print("Banning user {0}#{2} with id {3} from {1}...".format(member.name, member.server.name, member.discriminator, member.id))
@commands.command(pass_context=True, no_pm=True, description=
"Note: users that have been already banned will not be unbanned.")
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def unpreban(self, ctx, user_id: str):
"""Users removed with this command will not be banned on sight.
Only admins may use this command."""
if (ctx.message.server.id in sentry_bans):
if (user_id in sentry_bans[ctx.message.server.id]):
sentry_bans[ctx.message.server.id].remove(user_id)
save(bans_path, sentry_bans)
yield from self.bot.say("User removed from pre-ban list on this server.")
else:
yield from self.bot.say("User is not pre-banned on this server.")
else:
yield from self.bot.say("User is not pre-banned on this server.")
@commands.command(pass_context=True, no_pm=True, description=
"Note: users that have been already banned will not be unbanned.")
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def listpreban(self, ctx):
"""Lists the user ids pre-banned on this server.
Only admins may use this command."""
if (ctx.message.server.id in sentry_bans):
if len(sentry_bans[ctx.message.server.id]) > 0:
user_id_list = "```\n=== Prebans in server {} ===\n".format(ctx.message.server.name)
for user_id in sentry_bans[ctx.message.server.id]:
user_id_list += user_id
user_id_list += "\n"
user_id_list += "```"
yield from self.bot.send_message(ctx.message.author, user_id_list)
else:
yield from self.bot.say("No pre-bans on this server.")
else:
yield from self.bot.say("No pre-bans on this server.")
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def unban(self, ctx, *, uid: str = None):
"""Removes a ban from the server.
Only admins may use this command."""
user = yield from self.bot.get_user_info(uid)
yield from self.bot.unban(ctx.message.server, user)
yield from self.bot.say('User {} unbanned.'.format(user.name))
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def setannounce(self, ctx, channel: str = "current"):
"""Sets the channel to announce server's arrivals and parts.\n\nOnly admins may use this command."""
# parses the input as a channel id
if (len(ctx.message.channel_mentions) == 1):
channel_id = ctx.message.channel_mentions[0].id
elif is_int(channel):
channel_id = channel
elif channel == "current":
channel_id = ctx.message.channel
else:
yield from self.bot.say("Sorry, I don't know what channel that is.")
return
#checks if channel is in server
channel_object = ctx.message.server.get_channel(channel_id)
if channel_object is None:
yield from self.bot.say("Sorry, I can't tell what channel that is.")
return
# assigns the announce channel
if (ctx.message.server.id in joinleave_data):
joinleave_data[ctx.message.server.id]["announce_channel"] = channel_id
save(joinleave_path, joinleave_data)
yield from self.bot.say("Saved announce channel {}.".format(channel_object.mention))
else:
joinleave_data[ctx.message.server.id] = {"announce_channel": channel_id, "autoassign_role": "", "join_announce": False, "leave_announce": True}
save(joinleave_path, joinleave_data)
yield from self.bot.say("Saved announce channel {}.".format(channel_object.mention))
@commands.command(pass_context=True, no_pm=True)
@checks.admin_or_permissions(ban_members=True)
@asyncio.coroutine
def delannounce(self, ctx):
"""Removes the bot announcements in this server.\n\nOnly admins may use this command."""
# assigns the announce channel
if (ctx.message.server.id in joinleave_data):
joinleave_data[ctx.message.server.id]["announce_channel"] = ""
yield from self.bot.say("Removed announce channel for this server.")
else:
joinleave_data[ctx.message.server.id] = {"announce_channel": "", "autoassign_role": "", "join_announce": False, "leave_announce": True}
yield from self.bot.say("There was no announce channel for this server.")
@commands.command(pass_context=True, no_pm=True)
@ch
|
DDMAL/Gamera
|
gamera/fudge.py
|
Python
|
gpl-2.0
| 2,585
| 0.001161
|
# -*- mode: python; indent-tabs-mode: nil; tab-width: 3 -*-
# vim: set tabstop=3 shiftwidth=3 expandtab:
|
#
# Copyright (C) 2001-2005 Ichiro Fujinaga, Michael Droettboom,
# and Karl MacMillan
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published b
|
y the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# TODO: These are fixed values. We need an intelligent way to vary them.
# This whole approach to fuzziness is syntactically convenient, but maybe
# not very efficient.
FUDGE_AMOUNT = 3
FUDGE_AMOUNT_2 = 6
from gamera.core import Rect, Point, Dim
# This is a factory function that looks like a constructor
def Fudge(o, amount=FUDGE_AMOUNT):
    """Return a tolerance-widened ("fudged") version of *o*.

    Rects grow by *amount* on every side; ints/floats become fudge-number
    proxies whose comparisons tolerate +/- *amount*. Any other type falls
    through and returns None.
    """
    # For rectangles, just return a new rectangle that is slightly larger
    if isinstance(o, Rect):
        return Rect(Point(int(o.ul_x - amount), int(o.ul_y - amount)), Dim(int(o.ncols + amount * 2), int(o.nrows + amount * 2)))
    # For integers, return one of our "fudge number proxies"
    elif isinstance(o, int):
        return FudgeInt(o, amount)
    elif isinstance(o, float):
        return FudgeFloat(o, amount)
F = Fudge
class FudgeNumber(object):
    """Mixin giving a number fuzzy comparisons via self.below / self.above.

    Subclasses must set ``below`` and ``above`` (the tolerance interval).
    Equality means "inside the interval"; inequality means "outside it".
    """
    def __lt__(self, other):
        return self.below < other
    def __le__(self, other):
        return self.below <= other
    def __eq__(self, other):
        return self.below <= other and self.above >= other
    def __ne__(self, other):
        # BUG FIX: was "and", which can never be true (a value cannot be both
        # below and above the interval), so __ne__ always returned False.
        return other < self.below or other > self.above
    def __gt__(self, other):
        return self.above > other
    def __ge__(self, other):
        return self.above >= other

class FudgeInt(FudgeNumber, int):
    """int whose comparisons tolerate +/- *amount*."""
    def __new__(cls, value, amount=FUDGE_AMOUNT):
        # BUG FIX: without this override, type.__call__ forwarded *amount* to
        # int.__new__, which treats a second argument as a conversion base and
        # raises TypeError on Python 3. Only the value may reach int.__new__.
        return int.__new__(cls, value)
    def __init__(self, value, amount=FUDGE_AMOUNT):
        self.below = int(value - amount)
        self.above = int(value + amount)

class FudgeFloat(FudgeNumber, float):
    """float whose comparisons tolerate +/- *amount*."""
    def __new__(cls, value, amount=FUDGE_AMOUNT):
        # same __new__ fix as FudgeInt (and the original wrongly called
        # int.__init__ here instead of anything float-related)
        return float.__new__(cls, value)
    def __init__(self, value, amount=FUDGE_AMOUNT):
        self.below = float(value - amount)
        self.above = float(value + amount)
|
c-w/GettyArt
|
getty_art/util.py
|
Python
|
mit
| 488
| 0
|
"""Module for helper functions."""
import os
import tempfile
|
def tmpfile(suffix='', prefix='tmp', directory=None):
    """Create a fresh temporary file and return its path.

    The descriptor returned by tempfile.mkstemp is closed immediately; only
    the (already-created) file path is handed back.
    """
    fd, path = tempfile.mkstemp(suffix, prefix, directory)
    os.close(fd)
    return path
def expandpath(path):
    """Expand environment variables and a leading '~' in *path*."""
    return os.path.expanduser(os.path.expandvars(path))
|
ravselj/subliminal
|
subliminal/tests/test_subliminal.py
|
Python
|
mit
| 8,711
| 0.002296
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import shutil
from unittest import TestCase, TestSuite, TestLoader, TextTestRunner
from babelfish import Language
from subliminal import list_subtitles, download_subtitles, save_subtitles, download_best_subtitles, scan_video
from subliminal.tests.common import MOVIES, EPISODES
TEST_DIR = 'test_data'
class ApiTestCase(TestCase):
    """Integration tests for the subliminal top-level API.

    These tests hit live subtitle providers over the network, so they
    exercise real listing/downloading behaviour rather than mocks.
    Each test runs inside a throw-away TEST_DIR working directory.
    """
    def setUp(self):
        # Fresh scratch directory for anything the test downloads/saves.
        os.mkdir(TEST_DIR)
    def tearDown(self):
        # Remove the scratch directory and everything written into it.
        shutil.rmtree(TEST_DIR)
    def test_list_subtitles_movie_0(self):
        """Listing returns at least one English subtitle for a known movie."""
        videos = [MOVIES[0]]
        languages = {Language('eng')}
        subtitles = list_subtitles(videos, languages)
        # One entry per video, each with a non-empty candidate list.
        self.assertEqual(len(subtitles), len(videos))
        self.assertGreater(len(subtitles[videos[0]]), 0)
    def test_list_subtitles_movie_0_por_br(self):
        """Listing also works for a country-specific language (pt-BR)."""
        videos = [MOVIES[0]]
        languages = {Language('por', 'BR')}
        subtitles = list_subtitles(videos, languages)
        self.assertEqual(len(subtitles), len(videos))
        self.assertGreater(len(subtitles[videos[0]]), 0)
    def test_list_subtitles_episodes(self):
        """Listing handles several episodes and several languages at once."""
        videos = [EPISODES[0], EPISODES[1]]
        languages = {Language('eng'), Language('fra')}
        subtitles = list_subtitles(videos, languages)
        self.assertEqual(len(subtitles), len(videos))
        self.assertGreater(len(subtitles[videos[0]]), 0)
    def test_download_subtitles(self):
        """Downloading fills in subtitle content for most candidates."""
        videos = [EPISODES[0]]
        for video in videos:
            # Point the video into the scratch dir so nothing real is touched.
            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
        languages = {Language('eng')}
        subtitles = list_subtitles(videos, languages)
        download_subtitles(subtitles[videos[0]][:5])
        # Tolerate one provider failure out of the five attempted downloads.
        self.assertGreaterEqual(len([s for s in subtitles[videos[0]] if s.content is not None]), 4)
    def test_download_best_subtitles(self):
        """download_best_subtitles yields one subtitle per requested language."""
        videos = [EPISODES[0], EPISODES[1]]
        for video in videos:
            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
        languages = {Language('eng'), Language('fra')}
        subtitles = download_best_subtitles(videos, languages)
        for video in videos:
            self.assertIn(video, subtitles)
            self.assertEqual(len(subtitles[video]), 2)
    def test_save_subtitles(self):
        """Saving writes one .<lang>.srt file per language next to the video."""
        videos = [EPISODES[0], EPISODES[1]]
        for video in videos:
            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
        languages = {Language('eng'), Language('fra')}
        subtitles = list_subtitles(videos, languages)
        # make a list of subtitles to download (one per language per video)
        subtitles_to_download = []
        for video, video_subtitles in subtitles.items():
            video_subtitle_languages = set()
            for video_subtitle in video_subtitles:
                if video_subtitle.language in video_subtitle_languages:
                    continue
                subtitles_to_download.append(video_subtitle)
                video_subtitle_languages.add(video_subtitle.language)
                if video_subtitle_languages == languages:
                    break
        # 2 videos x 2 languages = 4 subtitles to fetch.
        self.assertEqual(len(subtitles_to_download), 4)
        # download
        download_subtitles(subtitles_to_download)
        save_subtitles(subtitles)
        for video in videos:
            self.assertTrue(os.path.exists(os.path.splitext(video.name)[0] + '.en.srt'))
            self.assertTrue(os.path.exists(os.path.splitext(video.name)[0] + '.fr.srt'))
    def test_save_subtitles_single(self):
        """With single=True only one unsuffixed .srt file is written per video."""
        videos = [EPISODES[0], EPISODES[1]]
        for video in videos:
            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
        languages = {Language('eng'), Language('fra')}
        subtitles = download_best_subtitles(videos, languages)
        save_subtitles(subtitles, single=True)
        for video in videos:
            self.assertIn(video, subtitles)
            self.assertEqual(len(subtitles[video]), 2)
            self.assertTrue(os.path.exists(os.path.splitext(video.name)[0] + '.srt'))
    def test_download_best_subtitles_min_score(self):
        """An unreachably high min_score filters out every candidate."""
        videos = [MOVIES[0]]
        for video in videos:
            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
        languages = {Language('eng'), Language('fra')}
        subtitles = download_best_subtitles(videos, languages, min_score=1000)
        self.assertEqual(len(subtitles), 0)
    def test_download_best_subtitles_hearing_impaired(self):
        """hearing_impaired=True prefers hearing-impaired subtitles."""
        videos = [MOVIES[0]]
        for video in videos:
            video.name = os.path.join(TEST_DIR, os.path.split(video.name)[1])
        languages = {Language('eng')}
        subtitles = download_best_subtitles(videos, languages, hearing_impaired=True)
        self.assertTrue(subtitles[videos[0]][0].hearing_impaired)
class VideoTestCase(TestCase):
    def setUp(self):
        """Create a scratch dir containing an empty stand-in file per video."""
        os.mkdir(TEST_DIR)
        for video in MOVIES + EPISODES:
            # Only the file name matters to scan_video; content can be empty.
            open(os.path.join(TEST_DIR, os.path.split(video.name)[1]), 'w').close()
    def tearDown(self):
        """Drop the scratch directory and all stand-in files."""
        shutil.rmtree(TEST_DIR)
    def test_scan_video_movie(self):
        """scan_video parses movie metadata from the file name alone."""
        video = MOVIES[0]
        scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1]))
        self.assertEqual(scanned_video.name, os.path.join(TEST_DIR, os.path.split(video.name)[1]))
        self.assertEqual(scanned_video.title.lower(), video.title.lower())
        self.assertEqual(scanned_video.year, video.year)
        self.assertEqual(scanned_video.video_codec, video.video_codec)
        self.assertEqual(scanned_video.format, video.format)
        self.assertEqual(scanned_video.resolution, video.resolution)
        self.assertEqual(scanned_video.release_group, video.release_group)
        # Nothing beyond the name is available: empty file, no side-car subs.
        self.assertEqual(scanned_video.subtitle_languages, set())
        self.assertEqual(scanned_video.hashes, {})
        self.assertIsNone(scanned_video.audio_codec)
        self.assertIsNone(scanned_video.imdb_id)
        self.assertEqual(scanned_video.size, 0)
    def test_scan_video_episode(self):
        """scan_video parses episode metadata from the file name alone."""
        video = EPISODES[0]
        scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1]))
        self.assertEqual(scanned_video.name, os.path.join(TEST_DIR, os.path.split(video.name)[1]))
        self.assertEqual(scanned_video.series, video.series)
        self.assertEqual(scanned_video.season, video.season)
        self.assertEqual(scanned_video.episode, video.episode)
        self.assertEqual(scanned_video.video_codec, video.video_codec)
        self.assertEqual(scanned_video.format, video.format)
        self.assertEqual(scanned_video.resolution, video.resolution)
        self.assertEqual(scanned_video.release_group, video.release_group)
        # Nothing beyond the name is available: empty file, no side-car subs.
        self.assertEqual(scanned_video.subtitle_languages, set())
        self.assertEqual(scanned_video.hashes, {})
        self.assertIsNone(scanned_video.title)
        self.assertIsNone(scanned_video.tvdb_id)
        self.assertIsNone(scanned_video.imdb_id)
        self.assertIsNone(scanned_video.audio_codec)
        self.assertEqual(scanned_video.size, 0)
    def test_scan_video_subtitle_language_und(self):
        """A side-car .srt without a language code is detected as undetermined."""
        video = EPISODES[0]
        open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.srt', 'w').close()
        scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1]))
        self.assertEqual(scanned_video.subtitle_languages, {Language('und')})
def test_scan_video_subtitles_language_eng(self):
video = EPISODES[0]
open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name
|
)[1])[0]) + '.en.srt', 'w').close()
scanned_video = scan_video(os.path.join(TEST_DIR, os.path.split(video.name)[1]))
self.assertEqual(scanned_video.subtitle_languages, {Language('eng')})
def test_scan_video_subtitles_languages(self):
video = EPISODES[0]
open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(video.name)[1])[0]) + '.en.srt', 'w').close()
open(os.path.join(TEST_DIR, os.path.splitext(os.path.split(vide
|
o.name)[1])[0]) + '.fr.srt', 'w').close()
open(os.path.join(TEST_DIR,
|
Marocco2/EpicRace
|
update.py
|
Python
|
lgpl-3.0
| 2,289
| 0.004806
|
from BOX.box_lib import requests
import os
import configparser
import traceback
import functools
import threading
configfile = os.path.join(os.path.dirname(__file__), 'EpicRace.ini')
config = configparser.ConfigParser()
config.read(configfile)
def async(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
t = threading.Thread(target=func, args=args, kwargs=kwargs)
t.daemon = True
t.start()
ret
|
urn t
return wrapper
def log(log):
    """Write the given value to the updater's log file.

    The file is opened in 'w' mode and therefore truncated on every call,
    so it only ever holds the most recent message. The parameter keeps its
    historical name (shadowing this function inside the body) so keyword
    callers are not broken.
    """
    message = 'update: ' + str(log)
    with open("apps\\python\\EpicRace\\log.txt", 'w') as handle:
        # The with-block closes the file; the old explicit close() inside
        # the context manager was redundant.
        handle.write(message)
#@async
def update():
with open("apps\\python\\EpicRace\\sha.txt", 'r') as g:
sha = g.read()
g.close()
try:
branch = config['SETTINGS']['branch']
check_link = "https://api.github.com/repos/Marocco2/EpicRace/commits/" + branch
headers =
|
{'Accept': 'application/vnd.github.VERSION.sha'}
r = requests.get(check_link, headers=headers)
if r.text != sha: # Check if server version and client version is the same
with open("apps\\python\\EpicRace\\sha.txt", 'w') as j:
j.write(r.text)
j.close()
download_link_epicrace = "https://raw.githubusercontent.com/Marocco2/EpicRace/" + branch + "/EpicRace.py"
download_link_update = "https://raw.githubusercontent.com/Marocco2/EpicRace/" + branch + "/update.py"
download_link_ini = "https://raw.githubusercontent.com/Marocco2/EpicRace/" + branch + "/EpicRace.ini"
get_file(download_link_epicrace, "apps\\python\\EpicRace\\EpicRace.py")
get_file(download_link_ini, "apps\\python\\EpicRace\\EpicRace.ini")
get_file(download_link_update, "apps\\python\\EpicRace\\update.py")
update_status = 0 # ok
log(update_status)
return update_status
else:
# "No new update"
update_status = 2
log(update_status)
return update_status
except:
log(traceback.format_exc())
update_status = 3
return update_status
#@async
def get_file(link, filed):
    """Download *link* with requests and write its text body to *filed*.

    Any existing file at *filed* is overwritten. NOTE(review): no HTTP
    status check is performed, so an error page would be written verbatim
    (preserved from the original behaviour).
    """
    response = requests.get(link)
    with open(filed, 'w') as out:
        # The with-block closes the file; an explicit close() is redundant.
        out.write(response.text)
|
asherwunk/objconfig
|
tests/writer/test_abstractwriter.py
|
Python
|
mit
| 728
| 0.008242
|
"""
Test objconfig.writer.AbstractWriter
|
"""
import pytest
from objconfig.exception import RuntimeException
from objconfig.writer import WriterInterface
from objconfig.writer import AbstractWriter
from objconfig import Config
import os
def test_methods_abstractwriter():
writer = AbstractWriter()
conf = Config({})
assert isinstance(writer, WriterInterface), "AbstractWriter not instance of WriterInterface"
with pytest.raises(RuntimeException):
writer.toFile(os.path.join(os.path.dirname(o
|
s.path.realpath(__file__)), "test"), conf)
with pytest.raises(RuntimeException):
writer.toString(conf)
os.remove(os.path.join(os.path.dirname(os.path.realpath(__file__)), "test"))
|
10clouds/edx-platform
|
common/djangoapps/enrollment/tests/test_api.py
|
Python
|
agpl-3.0
| 10,138
| 0.002071
|
"""
Tests for student enrollment.
"""
from mock import patch, Mock
import ddt
from django.core.cache import cache
from nose.tools import raises
import unittest
from django.test import TestCase
from django.test.utils import override_settings
from django.conf import settings
from course_modes.models import CourseMode
from enrollment import api
from enrollment.errors import EnrollmentApiLoadError, EnrollmentNotFoundError, CourseModeNotFoundError
from enrollment.tests import fake_data_api
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
@ddt.ddt
@override_settings(ENROLLMENT_DATA_API="enrollment.tests.fake_data_api")
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class EnrollmentTest(CacheIsolationTestCase):
"""
Test student enrollment, especially with different course modes.
"""
USERNAME = "Bob"
COURSE_ID = "some/great/course"
ENABLED_CACHES = ['default']
    def setUp(self):
        """Reset the fake enrollment data API before each test."""
        super(EnrollmentTest, self).setUp()
        fake_data_api.reset()
    @ddt.data(
        # Default (no course modes in the database)
        # Expect automatically being enrolled as "honor".
        ([], 'honor'),
        # Audit / Verified / Honor
        # We should always go to the "choose your course" page.
        # We should also be enrolled as "honor" by default.
        (['honor', 'verified', 'audit'], 'honor'),
        # Check for professional ed happy path.
        (['professional'], 'professional'),
        (['no-id-professional'], 'no-id-professional')
    )
    @ddt.unpack
    def test_enroll(self, course_modes, mode):
        """Enrolling in a given mode is reflected by both add and get APIs."""
        # Add a fake course enrollment information to the fake data API
        fake_data_api.add_course(self.COURSE_ID, course_modes=course_modes)
        # Enroll in the course and verify the URL we get sent to
        result = api.add_enrollment(self.USERNAME, self.COURSE_ID, mode=mode)
        self.assertIsNotNone(result)
        self.assertEquals(result['student'], self.USERNAME)
        self.assertEquals(result['course']['course_id'], self.COURSE_ID)
        self.assertEquals(result['mode'], mode)
        # get_enrollment must round-trip exactly what add_enrollment returned.
        get_result = api.get_enrollment(self.USERNAME, self.COURSE_ID)
        self.assertEquals(result, get_result)
@ddt.data(
([CourseMode.DEFAULT_MODE_SLUG, 'verified', 'credit'], CourseMode.DEFAULT_MODE_SLUG),
(['audit', 'verified', 'credit'], 'audit'),
(['honor', 'verified', 'credit'], 'honor'),
)
@ddt.unpack
def test_enroll_no_mode_success(self, course_modes, expected_mode):
# Add a fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=course_modes)
with patch('enrollment.api.Course
|
Mode.modes_for_course') as mock_modes_for_course:
mock_course_modes = [Mock(slug=mode) for mode in course_modes]
mock_modes_for_course.return_value = mock_course_modes
# Enroll in the course and verify the URL we get sent to
result = api.add_enrollment(self.USERNAME, self.COURSE_ID)
|
self.assertIsNotNone(result)
self.assertEquals(result['student'], self.USERNAME)
self.assertEquals(result['course']['course_id'], self.COURSE_ID)
self.assertEquals(result['mode'], expected_mode)
    @ddt.data(
        ['professional'],
        ['verified'],
        ['verified', 'professional'],
    )
    @raises(CourseModeNotFoundError)
    def test_enroll_no_mode_error(self, course_modes):
        """Omitting the mode fails when the course offers no default mode."""
        # Add a fake course enrollment information to the fake data API
        fake_data_api.add_course(self.COURSE_ID, course_modes=course_modes)
        # Enroll in the course and verify that we raise CourseModeNotFoundError
        api.add_enrollment(self.USERNAME, self.COURSE_ID)
    @raises(CourseModeNotFoundError)
    def test_prof_ed_enroll(self):
        """Requesting 'verified' in a professional-only course must fail."""
        # Add a fake course enrollment information to the fake data API
        fake_data_api.add_course(self.COURSE_ID, course_modes=['professional'])
        # Enroll in the course and verify the URL we get sent to
        api.add_enrollment(self.USERNAME, self.COURSE_ID, mode='verified')
    @ddt.data(
        # Default (no course modes in the database)
        # Expect that users are automatically enrolled as "honor".
        ([], 'honor'),
        # Audit / Verified / Honor
        # We should always go to the "choose your course" page.
        # We should also be enrolled as "honor" by default.
        (['honor', 'verified', 'audit'], 'honor'),
        # Check for professional ed happy path.
        (['professional'], 'professional'),
        (['no-id-professional'], 'no-id-professional')
    )
    @ddt.unpack
    def test_unenroll(self, course_modes, mode):
        """Deactivating an active enrollment flips is_active but keeps the mode."""
        # Add a fake course enrollment information to the fake data API
        fake_data_api.add_course(self.COURSE_ID, course_modes=course_modes)
        # Enroll in the course and verify the URL we get sent to
        result = api.add_enrollment(self.USERNAME, self.COURSE_ID, mode=mode)
        self.assertIsNotNone(result)
        self.assertEquals(result['student'], self.USERNAME)
        self.assertEquals(result['course']['course_id'], self.COURSE_ID)
        self.assertEquals(result['mode'], mode)
        self.assertTrue(result['is_active'])
        # "Unenroll" = update with is_active=False; the record itself is kept.
        result = api.update_enrollment(self.USERNAME, self.COURSE_ID, mode=mode, is_active=False)
        self.assertIsNotNone(result)
        self.assertEquals(result['student'], self.USERNAME)
        self.assertEquals(result['course']['course_id'], self.COURSE_ID)
        self.assertEquals(result['mode'], mode)
        self.assertFalse(result['is_active'])
    @raises(EnrollmentNotFoundError)
    def test_unenroll_not_enrolled_in_course(self):
        """Deactivating an enrollment that was never created raises."""
        # Add a fake course enrollment information to the fake data API
        fake_data_api.add_course(self.COURSE_ID, course_modes=['honor'])
        api.update_enrollment(self.USERNAME, self.COURSE_ID, mode='honor', is_active=False)
    @ddt.data(
        # Simple test of honor and verified.
        ([
            {'course_id': 'the/first/course', 'course_modes': [], 'mode': 'honor'},
            {'course_id': 'the/second/course', 'course_modes': ['honor', 'verified'], 'mode': 'verified'}
        ]),
        # No enrollments
        ([]),
        # One Enrollment
        ([
            {'course_id': 'the/third/course', 'course_modes': ['honor', 'verified', 'audit'], 'mode': 'audit'}
        ]),
    )
    def test_get_all_enrollments(self, enrollments):
        """get_enrollments returns one record per course the user enrolled in."""
        for enrollment in enrollments:
            fake_data_api.add_course(enrollment['course_id'], course_modes=enrollment['course_modes'])
            api.add_enrollment(self.USERNAME, enrollment['course_id'], enrollment['mode'])
        result = api.get_enrollments(self.USERNAME)
        self.assertEqual(len(enrollments), len(result))
        # Order is not guaranteed; only membership is checked.
        for result_enrollment in result:
            self.assertIn(
                result_enrollment['course']['course_id'],
                [enrollment['course_id'] for enrollment in enrollments]
            )
    def test_update_enrollment(self):
        """update_enrollment switches the mode of an existing enrollment."""
        # Add fake course enrollment information to the fake data API
        fake_data_api.add_course(self.COURSE_ID, course_modes=['honor', 'verified', 'audit'])
        # Enroll in the course and verify the URL we get sent to
        result = api.add_enrollment(self.USERNAME, self.COURSE_ID, mode='audit')
        get_result = api.get_enrollment(self.USERNAME, self.COURSE_ID)
        self.assertEquals(result, get_result)
        result = api.update_enrollment(self.USERNAME, self.COURSE_ID, mode='honor')
        self.assertEquals('honor', result['mode'])
        result = api.update_enrollment(self.USERNAME, self.COURSE_ID, mode='verified')
        self.assertEquals('verified', result['mode'])
def test_update_enrollment_attributes(self):
# Add fake course enrollment information to the fake data API
fake_data_api.add_course(self.COURSE_ID, course_modes=['honor', 'verified', 'audit', 'credit'])
# Enroll in the course and verify the URL we get sent to
result = api.add_enrollment(self.USERNAME, s
|
beiko-lab/gengis
|
bin/Lib/site-packages/numpy/f2py/tests/test_array_from_pyobj.py
|
Python
|
gpl-3.0
| 21,255
| 0.017549
|
import unittest
import os
import sys
import copy
import nose
from numpy.testing import *
from numpy import array, alltrue,
|
ndarray, asarray, can_cast,zeros, dtype
from numpy.core.multiarray import typeinfo
import util
wrap = None
def setup():
"""
Build the required testing extension module
"""
global wrap
# Check compiler availability first
if not util.has_c_compiler():
raise nose.SkipTest("No C compiler available")
if wrap is None:
config_code = """
config.add_extension('test_arr
|
ay_from_pyobj_ext',
sources=['wrapmodule.c', 'fortranobject.c'],
define_macros=[])
"""
d = os.path.dirname(__file__)
src = [os.path.join(d, 'src', 'array_from_pyobj', 'wrapmodule.c'),
os.path.join(d, '..', 'src', 'fortranobject.c'),
os.path.join(d, '..', 'src', 'fortranobject.h')]
wrap = util.build_module_distutils(src, config_code,
'test_array_from_pyobj_ext')
def flags_info(arr):
    """Return the list of flag names set on *arr*.

    The flags word is the 7th entry of the wrap module's attribute dump.
    """
    return flags2names(wrap.array_attrs(arr)[6])
def flags2names(flags):
    """Map a numeric flags word to the list of known flag names it contains."""
    known = ['CONTIGUOUS', 'FORTRAN', 'OWNDATA', 'ENSURECOPY',
             'ENSUREARRAY', 'ALIGNED', 'NOTSWAPPED', 'WRITEABLE',
             'UPDATEIFCOPY', 'BEHAVED', 'BEHAVED_RO',
             'CARRAY', 'FARRAY']
    # A name is reported when its bit (looked up on the wrap module,
    # defaulting to 0 for names the build does not define) is set in |flags|.
    return [name for name in known if abs(flags) & getattr(wrap, name, 0)]
class Intent(object):
    """Accumulator for f2py intent flags.

    Attribute access builds a *new* Intent with that intent name appended,
    e.g. ``intent.in_.out`` -> ``intent(in,out)``. The combined flag bits
    are looked up on the ``wrap`` extension module at construction time.
    """

    def __init__(self, intent_list=None):
        # Use a None sentinel instead of a mutable default list, and copy
        # the caller's list so later mutations cannot leak in.
        if intent_list is None:
            intent_list = []
        self.intent_list = list(intent_list)
        flags = 0
        for i in intent_list:
            if i == 'optional':
                flags |= wrap.F2PY_OPTIONAL
            else:
                flags |= getattr(wrap, 'F2PY_INTENT_' + i.upper())
        self.flags = flags

    def __getattr__(self, name):
        # 'in' is a Python keyword, so callers write `.in_`; normalise it.
        name = name.lower()
        if name == 'in_':
            name = 'in'
        return self.__class__(self.intent_list + [name])

    def __str__(self):
        return 'intent(%s)' % (','.join(self.intent_list))

    def __repr__(self):
        return 'Intent(%r)' % (self.intent_list)

    def is_intent(self, *names):
        """True when every given name is present in this intent."""
        for name in names:
            if name not in self.intent_list:
                return False
        return True

    def is_intent_exact(self, *names):
        """True when the given names are exactly this intent's names."""
        return len(self.intent_list) == len(names) and self.is_intent(*names)
intent = Intent()
class Type(object):
    """Cached descriptor of a NumPy scalar type used by the wrap module.

    Instances are interned per upper-cased type name in ``_type_cache``,
    so ``Type('int')`` and ``Type('INT')`` are the same object. The
    ``_cast_dict`` table lists, for each type name, the type names that
    are considered safely castable to it.
    """
    _type_names = ['BOOL','BYTE','UBYTE','SHORT','USHORT','INT','UINT',
                   'LONG','ULONG','LONGLONG','ULONGLONG',
                   'FLOAT','DOUBLE','LONGDOUBLE','CFLOAT','CDOUBLE',
                   'CLONGDOUBLE']
    # Interning cache: upper-cased name -> Type instance.
    _type_cache = {}

    # Safe-cast table: _cast_dict[NAME] lists names castable to NAME.
    _cast_dict = {'BOOL':['BOOL']}
    _cast_dict['BYTE'] = _cast_dict['BOOL'] + ['BYTE']
    _cast_dict['UBYTE'] = _cast_dict['BOOL'] + ['UBYTE']
    # NOTE(review): the two assignments below immediately overwrite the
    # entries above, dropping 'BOOL' from the BYTE/UBYTE lists — confirm
    # whether that is intentional.
    _cast_dict['BYTE'] = ['BYTE']
    _cast_dict['UBYTE'] = ['UBYTE']
    _cast_dict['SHORT'] = _cast_dict['BYTE'] + ['UBYTE','SHORT']
    _cast_dict['USHORT'] = _cast_dict['UBYTE'] + ['BYTE','USHORT']
    _cast_dict['INT'] = _cast_dict['SHORT'] + ['USHORT','INT']
    _cast_dict['UINT'] = _cast_dict['USHORT'] + ['SHORT','UINT']
    _cast_dict['LONG'] = _cast_dict['INT'] + ['LONG']
    _cast_dict['ULONG'] = _cast_dict['UINT'] + ['ULONG']
    _cast_dict['LONGLONG'] = _cast_dict['LONG'] + ['LONGLONG']
    _cast_dict['ULONGLONG'] = _cast_dict['ULONG'] + ['ULONGLONG']
    _cast_dict['FLOAT'] = _cast_dict['SHORT'] + ['USHORT','FLOAT']
    _cast_dict['DOUBLE'] = _cast_dict['INT'] + ['UINT','FLOAT','DOUBLE']
    _cast_dict['LONGDOUBLE'] = _cast_dict['LONG'] + ['ULONG','FLOAT','DOUBLE','LONGDOUBLE']
    _cast_dict['CFLOAT'] = _cast_dict['FLOAT'] + ['CFLOAT']
    _cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT','CDOUBLE']
    _cast_dict['CLONGDOUBLE'] = _cast_dict['LONGDOUBLE'] + ['CFLOAT','CDOUBLE','CLONGDOUBLE']

    def __new__(cls,name):
        # Accept either a dtype instance or a type name; resolve dtypes to
        # their canonical upper-case name via the typeinfo registry.
        if isinstance(name,dtype):
            dtype0 = name
            name = None
            for n,i in typeinfo.items():
                if isinstance(i,tuple) and dtype0.type is i[-1]:
                    name = n
                    break
        # Return the interned instance when one exists for this name.
        obj = cls._type_cache.get(name.upper(),None)
        if obj is not None:
            return obj
        obj = object.__new__(cls)
        obj._init(name)
        cls._type_cache[name.upper()] = obj
        return obj

    def _init(self,name):
        """Populate NAME, type_num, dtype, elsize and dtypechar from typeinfo."""
        self.NAME = name.upper()
        self.type_num = getattr(wrap,'NPY_'+self.NAME)
        assert_equal(self.type_num,typeinfo[self.NAME][1])
        self.dtype = typeinfo[self.NAME][-1]
        # NOTE(review): '/' here is Python-2 integer division (bits -> bytes);
        # under Python 3 this would produce a float.
        self.elsize = typeinfo[self.NAME][2] / 8
        self.dtypechar = typeinfo[self.NAME][0]

    def cast_types(self):
        """Types castable to this one according to _cast_dict."""
        return map(self.__class__,self._cast_dict[self.NAME])

    def all_types(self):
        """One Type instance per known type name."""
        return map(self.__class__,self._type_names)

    def smaller_types(self):
        """Types whose bit width is strictly smaller than this type's."""
        bits = typeinfo[self.NAME][3]
        types = []
        for name in self._type_names:
            if typeinfo[name][3]<bits:
                types.append(Type(name))
        return types

    def equal_types(self):
        """Other types with exactly the same bit width (self excluded)."""
        bits = typeinfo[self.NAME][3]
        types = []
        for name in self._type_names:
            if name==self.NAME: continue
            if typeinfo[name][3]==bits:
                types.append(Type(name))
        return types

    def larger_types(self):
        """Types whose bit width is strictly larger than this type's."""
        bits = typeinfo[self.NAME][3]
        types = []
        for name in self._type_names:
            if typeinfo[name][3]>bits:
                types.append(Type(name))
        return types
class Array(object):
def __init__(self,typ,dims,intent,obj):
self.type = typ
self.dims = dims
self.intent = intent
self.obj_copy = copy.deepcopy(obj)
self.obj = obj
# arr.dtypechar may be different from typ.dtypechar
self.arr = wrap.call(typ.type_num,dims,intent.flags,obj)
assert_(isinstance(self.arr, ndarray),`type(self.arr)`)
self.arr_attr = wrap.array_attrs(self.arr)
if len(dims)>1:
if self.intent.is_intent('c'):
assert_(intent.flags & wrap.F2PY_INTENT_C)
assert_(not self.arr.flags['FORTRAN'],`self.arr.flags,getattr(obj,'flags',None)`)
assert_(self.arr.flags['CONTIGUOUS'])
assert_(not self.arr_attr[6] & wrap.FORTRAN)
else:
assert_(not intent.flags & wrap.F2PY_INTENT_C)
assert_(self.arr.flags['FORTRAN'])
assert_(not self.arr.flags['CONTIGUOUS'])
assert_(self.arr_attr[6] & wrap.FORTRAN)
if obj is None:
self.pyarr = None
self.pyarr_attr = None
return
if intent.is_intent('cache'):
assert_(isinstance(obj,ndarray),`type(obj)`)
self.pyarr = array(obj).reshape(*dims).copy()
else:
self.pyarr = array(array(obj,
dtype = typ.dtypechar).reshape(*dims),
order=self.intent.is_intent('c') and 'C' or 'F')
assert_(self.pyarr.dtype == typ, \
`self.pyarr.dtype,typ`)
assert_(self.pyarr.flags['OWNDATA'], (obj, intent))
self.pyarr_attr = wrap.array_attrs(self.pyarr)
if len(dims)>1:
if self.intent.is_intent('c'):
assert_(not self.pyarr.flags['FORTRAN'])
assert_(self.pyarr.flags['CONTIGUOUS'])
assert_(not self.pyarr_attr[6] & wrap.FORTRAN)
else:
assert_(self.pyarr.flags['FORTRAN'])
assert_(not self.pyarr.flags['CONTIGUOUS'])
assert_(self.pyarr_attr[6] & wrap.FORTRAN)
assert_(self.arr_attr[1]==self.pyarr_attr[1]) # nd
assert_(self.arr_attr[2]==self.pyarr_attr[2]) # dimensions
if self.arr_attr[1]<=1:
|
voxie-viewer/voxie
|
ext/RawDataTestScript.py
|
Python
|
mit
| 1,854
| 0.001618
|
#!/usr/bin/python3
#
# Copyright (c) 2014-2022 The Voxie Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import numpy as np
import voxie
import dbus
# Parse the standard voxie command-line arguments and connect to the
# running voxie instance (communication goes over D-Bus).
args = voxie.parser.parse_args()
context = voxie.VoxieContext(args)
instance = context.createInstance()
# This script only supports being launched as a voxie 'RunTool' action.
if args.voxie_action != 'RunTool':
    raise Exception('Invalid operation: ' + args.voxie_action)
# Resolve the object the tool was invoked on and cast it down to a
# raw-tomography 2D data accessor.
inputObjectPath = args.voxie_script_target_object
inputObject = context.makeObject(context.bus, context.busName, inputObjectPath, [
    'de.uni_stuttgart.Voxie.Object']).CastTo('de.uni_stuttgart.Voxie.DataObject')
inputData = inputObject.Data.CastTo('de.uni_stuttgart.Voxie.TomographyRawData2DAccessor')
# print('Number of images: %d' % (inputData.NumberOfImages,)) # Not implemented
print('Current version: %s' % (inputData.CurrentVersionString,))
zakandrewking/cobrapy
|
cobra/io/mat.py
|
Python
|
lgpl-2.1
| 11,174
| 0.000179
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
from collections import OrderedDict
from uuid import uuid4
from warnings import warn
from numpy import object as np_object
from numpy import array, inf, isinf
from six import string_types
from cobra.core import Metabolite, Model, Reaction
from cobra.util import create_stoichiometric_matrix
from cobra.util.solver import set_objective
try:
import scipy.sparse as scipy_sparse
import scipy.io as scipy_io
except ImportError:
scipy_sparse = None
scipy_io = None
# precompiled regular expressions
_bracket_re = re.compile("r\[[a-z]\]$")
_underscore_re = re.compile(r"_[a-z]$")
def _get_id_compartment(id):
    """Extract the compartment letter from a metabolite/reaction id string.

    Tries the bracket form first, then the underscore form; returns None
    when neither pattern matches exactly once.
    """
    for pattern in (_bracket_re, _underscore_re):
        matches = pattern.findall(id)
        if len(matches) == 1:
            # The compartment letter sits right after the leading '[' or '_'.
            return matches[0][1]
    return None
def _cell(x):
"""translate an array x into a MATLAB cell array"""
x_no_none = [i if i is not None else "" for i in x]
return array(x_no_none, dtype=np_object)
def load_matlab_model(infile_path, variable_name=None, inf=inf):
    """Load a cobra model stored as a .mat file.

    Parameters
    ----------
    infile_path: str
        path to the file to read
    variable_name: str, optional
        The variable name of the model in the .mat file. When omitted, the
        first MATLAB variable which looks like a COBRA model is used.
    inf: value
        The value to use for infinite bounds. Some solvers do not handle
        infinite values, so for those set this to a high numeric value.

    Returns
    -------
    cobra.core.Model.Model:
        The resulting cobra model

    Raises
    ------
    ImportError when scipy is unavailable, IOError when no variable in the
    file parses as a COBRA model.
    """
    if not scipy_io:
        raise ImportError('load_matlab_model requires scipy')
    mat_data = scipy_io.loadmat(infile_path)
    candidates = []
    if variable_name is None:
        # Skip MATLAB's meta variables; with a single remaining candidate
        # there is nothing to guess.
        meta_vars = {"__globals__", "__header__", "__version__"}
        candidates = sorted(name for name in mat_data if name not in meta_vars)
        if len(candidates) == 1:
            variable_name = candidates[0]
    if variable_name is not None:
        return from_mat_struct(mat_data[variable_name], model_id=variable_name,
                               inf=inf)
    # Several candidates: take the first one that parses as a COBRA struct.
    for candidate in candidates:
        try:
            return from_mat_struct(mat_data[candidate], model_id=candidate,
                                   inf=inf)
        except ValueError:
            pass
    # If code here is executed, then no model was found.
    raise IOError("no COBRA model found")
def save_matlab_model(model, file_name, varname=None):
    """Save the cobra model as a .mat file.

    This .mat file can be used directly in the MATLAB version of COBRA.

    Parameters
    ----------
    model : cobra.core.Model.Model object
        The model to save
    file_name : str or file-like object
        The file to save to
    varname : string
        The name of the variable within the MATLAB workspace; defaults to
        the model id, or "exported_model" when the id is empty.
    """
    if not scipy_io:
        raise ImportError('load_matlab_model requires scipy')
    if varname is None:
        model_id = model.id
        if model_id is not None and len(model_id) > 0:
            varname = str(model_id)
        else:
            varname = "exported_model"
    mat = create_mat_dict(model)
    scipy_io.savemat(file_name, {varname: mat},
                     appendmat=True, oned_as="column")
def create_mat_metabolite_id(model):
    """Yield a MATLAB-style id for every metabolite in *model*.

    Ids that do not already encode a compartment get the model's
    (lower-cased) compartment name appended in brackets.
    """
    for met in model.metabolites:
        has_compartment_suffix = _get_id_compartment(met.id)
        if met.compartment and not has_compartment_suffix:
            compartment_name = model.compartments[met.compartment].lower()
            yield '{}[{}]'.format(met.id, compartment_name)
        else:
            yield met.id
def create_mat_dict(model):
"""create a dict mapping model attributes to arrays"""
rxns = model.reactions
mets = model.metabolites
mat = OrderedDict()
mat["mets"] = _cell([met_id for met_id in create_mat_metabolite_id(model)])
mat["metNames"] = _cell(mets.list_attr("name"))
mat["metFormulas"] = _cell([str(m.formula) for m in mets])
try:
mat["metCharge"] = array(mets.list_attr("charge")) * 1.
except TypeError:
# can't have any None entries for charge, or this will fail
pass
mat["genes"] = _cell(model.genes.list_attr("id"))
# make a matrix for rxnGeneMat
# reactions are rows, genes are columns
rxn_gene = scipy_sparse.dok_matrix((len(model.reactions),
len(model.genes)))
i
|
f min(rxn_gene.shape) > 0:
for i, reaction in enumerate(model.reactions):
for gene in reaction.genes:
rxn_gene[i, model.genes.index(gene)] = 1
mat["rxnGeneMat"] = rxn_gene
mat["grRules"] = _cell(rxns.list_attr("gene_reaction_rule"))
mat["rxns"] = _cell(rxns.list_attr("id"))
mat["rxnNames"] = _cell(rxns.list_attr
|
("name"))
mat["subSystems"] = _cell(rxns.list_attr("subsystem"))
mat["csense"] = "".join((
met._constraint_sense for met in model.metabolites))
stoich_mat = create_stoichiometric_matrix(model)
mat["S"] = stoich_mat if stoich_mat is not None else [[]]
# multiply by 1 to convert to float, working around scipy bug
# https://github.com/scipy/scipy/issues/4537
mat["lb"] = array(rxns.list_attr("lower_bound")) * 1.
mat["ub"] = array(rxns.list_attr("upper_bound")) * 1.
mat["b"] = array(mets.list_attr("_bound")) * 1.
mat["c"] = array(rxns.list_attr("objective_coefficient")) * 1.
mat["rev"] = array(rxns.list_attr("reversibility")) * 1
mat["description"] = str(model.id)
return mat
def from_mat_struct(mat_struct, model_id=None, inf=inf):
"""create a model from the COBRA toolbox struct
The struct will be a dict read in by scipy.io.loadmat
"""
m = mat_struct
if m.dtype.names is None:
raise ValueError("not a valid mat struct")
if not {"rxns", "mets", "S", "lb", "ub"} <= set(m.dtype.names):
raise ValueError("not a valid mat struct")
if "c" in m.dtype.names:
c_vec = m["c"][0, 0]
else:
c_vec = None
warn("objective vector 'c' not found")
model = Model()
if model_id is not None:
model.id = model_id
elif "description" in m.dtype.names:
description = m["description"][0, 0][0]
if not isinstance(description, string_types) and len(description) > 1:
model.id = description[0]
warn("Several IDs detected, only using the first.")
else:
model.id = description
else:
model.id = "imported_model"
for i, name in enumerate(m["mets"][0, 0]):
new_metabolite = Metabolite()
new_metabolite.id = str(name[0][0])
if all(var in m.dtype.names for var in
['metComps', 'comps', 'compNames']):
comp_index = m["metComps"][0, 0][i][0] - 1
new_metabolite.compartment = m['comps'][0, 0][comp_index][0][0]
if new_metabolite.compartment not in model.compartments:
comp_name = m['compNames'][0, 0][comp_index][0][0]
model.compartments[new_metabolite.compartment] = comp_name
else:
new_metabolite.compartment = _get_id_compartment(new_metabolite.id)
if new_metabolite.compartment not in model.compartments:
model.compartments[
new_metabolite.compartment] = new_metabolite.compartment
try:
new_metabolite.name = str(m["metNames"][0, 0][i][0][0])
except (IndexError, ValueError):
pass
try:
new_metabolite.formula = str(m["metFormulas"][0][0][i][0][0])
except (IndexError, ValueError):
pass
try:
new_metabolite.charge = float(m["metCharge"][0, 0][i][0])
int_charge = int(new_metabolite.charge)
if new_metabolite.charge == int_charge:
new_metabolite.charge = int_charge
except (IndexError, ValueError):
pass
model.add_metabol
|
ros/catkin
|
setup.py
|
Python
|
bsd-3-clause
| 416
| 0
|
from catkin_pkg.python_setup import generate_distutils_setup
from setuptools import setup
d = generate_distutils_setup(
packages=['catkin
|
'],
package_dir={'': 'python'},
scripts=[
'bin/catkin_find',
'b
|
in/catkin_init_workspace',
'bin/catkin_make',
'bin/catkin_make_isolated',
'bin/catkin_test_results',
'bin/catkin_topological_order',
],
)
setup(**d)
|
m00dawg/holland
|
plugins/holland.backup.mysqldump/holland/backup/mysqldump/plugin.py
|
Python
|
bsd-3-clause
| 21,830
| 0.002199
|
"""Command Line Interface"""
import os
import re
import codecs
import logging
from holland.core.exceptions import BackupError
from holland.lib.compression import open_stream, lookup_compression
from holland.lib.mysql import MySQLSchema, connect, MySQLError
from holland.lib.mysql import include_glob, exclude_glob, \
include_glob_qualified, \
exclude_glob_qualified
from holland.lib.mysql import DatabaseIterator, MetadataTableIterator, \
SimpleTableIterator
from holland.backup.mysqldump.base import start
from holland.backup.mysqldump.util import INIConfig, update_config
from holland.backup.mysqldump.util.ini import OptionLine, CommentLine
from holland.lib.mysql.option import load_options, \
write_options, \
build_mysql_config
from holland.backup.mysqldump.command import MySQLDump, MySQLDumpError, \
MyOptionError
from holland.backup.mysqldump.mock import MockEnvironment
LOG = logging.getLogger(__name__)
# We validate our config against the following spec
CONFIGSPEC = """
[mysqldump]
extra-defaults = boolean(default=no)
mysql-binpath = force_list(default=list())
lock-method = option('flush-lock', 'lock-tables', 'single-transaction', 'auto-detect', 'none', default='auto-detect')
databases = force_list(default=list('*'))
exclude-databases = force_list(default=list())
tables = force_list(default=list("*"))
exclude-tables = force_list(default=list())
engines = force_list(default=list("*"))
exclude-engines = force_list(default=list())
exclude-invalid-views = boolean(default=no)
flush-logs = boolean(default=no)
flush-privileges = boolean(default=yes)
dump-routines = boolean(default=yes)
dump-events = boolean(default=yes)
stop-slave = boolean(default=no)
max-allowed-packet = string(default=128M)
bin-log-position
|
= boolean(default=no)
file-per-database = boolean(default=yes)
additional-options = force_list(default=list())
estimate-method = string(default='plugin')
[compression]
method = option('none', 'gzip', 'gzip-rsyncable', 'pigz', 'bzip2', 'pbzip2', 'lzma', 'lzop', 'gpg', default='gzip')
options = string(default="")
inline = boolean(default=yes)
level = integer(min=0, max=9, default=1)
[mysql:client]
defaults-extra-file = force_list(default=list('~/.my.cnf'))
user
|
= string(default=None)
password = string(default=None)
socket = string(default=None)
host = string(default=None)
port = integer(min=0, default=None)
""".splitlines()
class MySQLDumpPlugin(object):
"""MySQLDump Backup Plugin interface for Holland"""
CONFIGSPEC = CONFIGSPEC
def __init__(self, name, config, target_directory, dry_run=False):
self.name = name
self.config = config
self.target_directory = target_directory
self.dry_run = dry_run
self.config.validate_config(self.CONFIGSPEC) # -> ValidationError
# Setup a discovery shell to find schema items
# This will iterate over items during the estimate
# or backup phase, which will call schema.refresh()
self.schema = MySQLSchema()
config = self.config['mysqldump']
self.schema.add_database_filter(include_glob(*config['databases']))
self.schema.add_database_filter(
exclude_glob(*config['exclude-databases'])
)
self.schema.add_table_filter(include_glob_qualified(*config['tables']))
self.schema.add_table_filter(exclude_glob_qualified(*config['exclude-tables']))
self.schema.add_engine_filter(include_glob(*config['engines']))
self.schema.add_engine_filter(exclude_glob(*config['exclude-engines']))
self.mysql_config = build_mysql_config(self.config['mysql:client'])
self.client = connect(self.mysql_config['client'])
def estimate_backup_size(self):
"""Estimate the size of the backup this plugin will generate"""
LOG.info("Estimating size of mysqldump backup")
estimate_method = self.config['mysqldump']['estimate-method']
if estimate_method.startswith('const:'):
try:
return parse_size(estimate_method[6:])
except ValueError, exc:
raise BackupError(str(exc))
if estimate_method != 'plugin':
raise BackupError("Invalid estimate-method '%s'" % estimate_method)
try:
db_iter = DatabaseIterator(self.client)
tbl_iter = MetadataTableIterator(self.client)
try:
self.client.connect()
self.schema.refresh(db_iter=db_iter, tbl_iter=tbl_iter)
except MySQLError, exc:
LOG.error("Failed to estimate backup size")
LOG.error("[%d] %s", *exc.args)
raise BackupError("MySQL Error [%d] %s" % exc.args)
return sum([db.size for db in self.schema.databases])
finally:
self.client.disconnect()
def _fast_refresh_schema(self):
# determine if we can skip expensive table metadata lookups entirely
# and just worry about finding database names
# However, with lock-method=auto-detect we must look at table engines
# to determine what lock method to use
config = self.config['mysqldump']
fast_iterate = config['lock-method'] != 'auto-detect' and \
not config['exclude-invalid-views']
try:
db_iter = DatabaseIterator(self.client)
tbl_iter = SimpleTableIterator(self.client, record_engines=True)
try:
self.client.connect()
self.schema.refresh(db_iter=db_iter,
tbl_iter=tbl_iter,
fast_iterate=fast_iterate)
except MySQLError, exc:
LOG.debug("MySQLdb error [%d] %s", exc_info=True, *exc.args)
raise BackupError("MySQL Error [%d] %s" % exc.args)
finally:
self.client.disconnect()
def backup(self):
"""Run a MySQL backup"""
if self.schema.timestamp is None:
self._fast_refresh_schema()
mock_env = None
if self.dry_run:
mock_env = MockEnvironment()
mock_env.replace_environment()
LOG.info("Running in dry-run mode.")
try:
if self.config['mysqldump']['stop-slave']:
self.client = connect(self.mysql_config['client'])
if self.client.show_status('Slave_running', session=None) != 'ON':
raise BackupError("stop-slave enabled, but replication is "
"either not configured or the slave is not "
"running.")
self.config.setdefault('mysql:replication', {})
_stop_slave(self.client, self.config['mysql:replication'])
self._backup()
finally:
if self.config['mysqldump']['stop-slave'] and \
'mysql:replication' in self.config:
_start_slave(self.client, self.config['mysql:replication'])
if mock_env:
mock_env.restore_environment()
def _backup(self):
"""Real backup method. May raise BackupError exceptions"""
config = self.config['mysqldump']
# setup defaults_file with ignore-table exclusions
defaults_file = os.path.join(self.target_directory, 'my.cnf')
write_options(self.mysql_config, defaults_file)
if config['exclude-invalid-views']:
LOG.info("* Finding and excluding invalid views...")
definitions_path = os.path.join(self.target_directory,
'invalid_views.sql')
exclude_invalid_views(self.schema, self.client, definitions_path)
add_exclusions(self.schema, defaults_file)
# find the path to the mysqldu
|
adityavagarwal/DonkeyKong
|
board.py
|
Python
|
mpl-2.0
| 19,483
| 0.00272
|
__author__ = 'Aditya Vikram Agarwal'
import pygame
from random import randint
import random
import player
import princess
import donkey
import block
import fireball
import coin
import ladder
class Board:
def __init__(self, screen, testmode):
self.MODE = testmode
self.blocks = []
self.ladders = []
self.coins = []
self.fireballs = []
self.castleblocks = []
self.levellimits = {}
self.ladderlimits = {}
self.donkey = None
self.princess = None
self.donkey_group = []
self.princess_group = []
# start defining Constamts here
self.PLAYER_SPEED = 10
self.PLAYER_CLIMB_SPEED = 5
self.FULL_LADDER_HEIGHT = 95
self.LADDER_WIDTH = 30
self.HALF_LADDER_HEIGHT = 35
self.PLAYER_HEIGHT = 20
self.PLAYER_WIDTH = 20
self.COIN_WIDTH = 20
self.COIN_HEIGHT = 20
self.COIN_LEVELS = [470, 390, 310, 230, 150, 70]
self.FIREBALL_HEIGHT = 25
self.FIREBALL_WIDTH = 25
self.FIREBALL_SPEED = 5
self.JUMP_LIMIT = 30
self.PLAYER_SPAWN_LEVEL = 480
self.DONKEY_SPEED = 3
self.PLAYER_DROP_LEVEL = None
# End defining constants
self.block_group = pygame.sprite.RenderPlain(*self.blocks)
self.ladder_group = pygame.sprite.RenderPlain(*self.ladders)
self.coin_group = pygame.sprite.RenderPlain(*self.coins)
self.fireball_group = pygame.sprite.RenderPlain(*self.fireballs)
self.castle_block_group = pygame.sprite.RenderPlain(*self.blocks)
self.initlogs(screen)
self.initladders(screen)
self.initcoins(screen)
self.initdonkey(screen)
self.initprincess(screen)
self.initcastle(screen)
self.plr = [player.Player("Images/player2.png", "Images/player.png", "Images/player3.png", "Images/player4.png",
(0, self.PLAYER_SPAWN_LEVEL), self.PLAYER_WIDTH, self.PLAYER_HEIGHT, 0, 2)]
self.plr_group = pygame.sprite.RenderPlain(*self.plr)
if(self.MODE == 1):
self.plr_group.draw(screen)
self.playerparentdict = {}
self.fireballparentdict = {}
self.playerparentdict[500] = self.PLAYER_SPAWN_LEVEL
for i in range(499, 0, -1): # Player's regular positions in each level
if i in [480, 400, 320, 240, 160, 80]:
self.playerparentdict[i] = i
else:
self.playerparentdict[i] = self.playerparentdict[i + 1]
self.fireballparentdict[500] = self.PLAYER_SPAWN_LEVEL
for i in range(499, 0, -1): # Fireballs' regular positions in each level
if i in [480, 400, 320, 240, 160, 80]:
self.fireballparentdict[i] = i
else:
self.fireballparentdict[i] = self.fireballparentdict[i + 1]
def initlogs(self, screen): # Initialize all blocks
self.levellimits = {400: 1, 320: 2, 240: 1, 160: 2, 80: 1, 30: 3}
self.blocks = [block.Block("Images/log.png", "Images/log.png", (0, 0), 1200, 20),
block.Block("Images/log.png", "Images/log.png", (0, 100), 700, 20),
block.Block("Images/log.png", "Images/log.png", (200, 180), 1000, 20),
block.Block("Images/log.png", "Images/log.png", (0, 260), 1000, 20),
block.Block("Images/log.png", "Images/log.png", (200, 340), 1000, 20),
block.Block("Images/log.png", "Images/log.png", (0, 420), 1000, 20),
block.Block("Images/log.png", "Images/log.png", (0, 500), 1200, 20),
]
self.block_group = pygame.sprite.RenderPlain(*self.blocks)
if(self.MODE == 1): #1 implies game mode , 0 implies test mode
self.block_group.draw(screen)
def initdonkey(self, screen): # Initialize donkey
self.donkey = donkey.Donkey("Images/Donkey2.png", "Images/Donkey.png", (20, 50), 40, 50, 0)
self.donkey_group = pygame.sprite.RenderPlain(self.donkey)
if(self.MODE == 1):
self.donkey_group.draw(screen)
def initprincess(self, screen): # Initialize princess
self.princess = princess.Princess("Images/princess2.png", "Images/princess2.png", (120, 20), 20, 30, 0)
self.princess_group = pygame.sprite.RenderPlain(self.princess)
if(self.MODE == 1):
self.princess_group.draw(screen)
def initladders(self, screen): # Initialize all ladders
self.ladders = [ladder.Ladder("Images/ladder.png", "Images/ladder.png", (800, 419), self.LADDER_WIDTH,
self.FULL_LADDER_HEIGHT),
ladder.Ladder("Images/ladder.png", "Images/ladder.png", (300, 339), self.LADDER_WIDTH,
self.FULL_LADDER_HEIGHT),
ladder.Ladder("Images/ladder.png", "Images/ladder.png", (500, 259), self.LADDER_WIDTH,
self.FULL_LADDER_HEIGHT),
ladder.Ladder("Images/ladder.png", "Images/ladder.png", (900, 179), self.LADDER_WIDTH,
self.FULL_LADDER_HEIGHT),
ladder.Ladder("Images/ladder.png", "Images/ladder.png", (600, 99), self.LADDER_WIDTH,
self.FULL_LADDER_HEIGHT),
ladder.Ladder("Images/ladder_broken.png", "Images/ladder_broken.png", (650, 335),
self.LADDER_WIDTH, self.HALF_LADDER_HEIGHT),
ladder.Ladder("Images/ladder_broken_down.png", "Images/ladder_broken_down.png", (650, 400),
self.LADDER_WIDTH, self.HALF_LADDER_HEIGHT),
ladder.Ladder("Images/ladder_broken.png", "Images/ladder_broken.png", (850, 255),
self.LADDER_WIDTH, self.HALF_LADDER_HEIGHT),
ladder.Ladder("Images/ladder_broken_down.png", "Images/ladder_broken_down.png", (850, 320),
self.LADDER_WIDTH, self.HALF_LADDER_HEIGHT),
ladder.Ladder("Images/ladder_broken.png", "Images/ladder_broken.png", (300, 95),
self.LADDER_WIDTH, self.HALF_LADDER_HEIGHT),
ladder.Ladder("Images/ladder_broken_down.png", "Images/ladder_broken_down.png", (300, 160),
self.LADDER_WIDTH, self.H
|
ALF_LADDER_HEIGHT),
ladder.Ladder("Images/castleladder.png", "Images/castleladder.png", (220, 45),
self.LADDER_WIDTH, ((self.FULL_LADDER_HEIGHT - 5) * 2) / 3)
]
for
|
l in self.ladders:
x, y = l.getPosition()
w, h = l.getSize()
if h == self.FULL_LADDER_HEIGHT:
self.ladderlimits[l.getPosition()] = y + 1 + 60
else:
if h == ((self.FULL_LADDER_HEIGHT - 5) * 2) / 3:
self.ladderlimits[l.getPosition()] = y + 5 + 30
elif y % 10 == 0:
self.ladderlimits[l.getPosition()] = y
else:
self.ladderlimits[l.getPosition()] = y + 5 + 60
self.ladder_group = pygame.sprite.RenderPlain(*self.ladders)
if(self.MODE == 1):
self.ladder_group.draw(screen)
def initcoins(self, screen): # Initialize all coins
self.coins = []
x = 0
for i in range(0, 20):
y = self.COIN_LEVELS[randint(0, 5)]
if y == 470:
x = random.randrange(0, 1170, 30)
elif y in [390, 230]:
x = random.randrange(0, 1000, 30)
elif y in [310, 150]:
x = random.randrange(200, 1170, 30)
elif y == 70:
x = random.randrange(350, 700, 30)
self.coins += [coin.Coin("Images/coin.png", "Images/coin.png", (x, y), self.COIN_WIDTH, self.COIN_HEIGHT)]
self.coin_group = pygame.sprite.RenderPlain(*self.coins)
|
carthagecollege/django-djforms
|
djforms/music/theatre/summer_camp/views.py
|
Python
|
unlicense
| 3,884
| 0.002317
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from djforms.music.theatre.summer_camp import BCC, TO_LIST, REG_FEE
from djforms.processors.models import Contact, Order
from djforms.processors.forms import TrustCommerceForm
from djforms.music.theatre.summer_camp.forms import RegistrationForm
from djtools.utils.mail import send_mail
def registration(request):
status = None
msg = None
if request.POST:
form_reg = RegistrationForm(request.POST)
if form_reg.is_valid():
contact = form_reg.save()
# credit card payment
if contact.payment_method == 'Credit Card':
order = Order(
total=REG_FEE,auth='sale',status='In Process',
operator='DJMusicTheatreCamp'
)
form_proc = TrustCommerceForm(order, contact, request.POST)
if form_proc.is_valid():
r = form_proc.processor_response
order.status = r.msg['status']
order.transid = r.msg['transid']
order.cc_name = form_proc.name
order.cc_4_digits = form_proc.card[-4:]
order.save()
contact.order.add(order)
order.reg = contact
sent = send_mail(
request, TO_LIST,
'Music Theatre summer camp registration',
contact.email,
'music/theatre/summer_camp/registration_email.html',
order, BCC
)
order.send_mail = sent
order.save()
return HttpResponseRedirect(
reverse('music_theatre_summer_camp_success')
)
else:
r = form_proc.processor_response
if r:
order.status = r.status
else:
order.status = 'Form Invalid'
order.cc_name = form_proc.name
if form_proc.card:
order.cc_4_digits = form_proc.card[-4:]
order.save()
contact.order.add(order)
status = order.status
order.reg = contact
else:
order = Order(
total=REG_FEE,auth='COD',status='Pay later',
operator='DJMusicTheatreCamp'
)
order.save()
contact.order.add(order)
order.re
|
g = contact
sent = send_mail(
request,
|
TO_LIST,
'Music Theatre summer camp registration',
contact.email,
'music/theatre/summer_camp/registration_email.html',
order, BCC
)
order.send_mail = sent
order.save()
return HttpResponseRedirect(
reverse('music_theatre_summer_camp_success')
)
else:
if request.POST.get('payment_method') == 'Credit Card':
form_proc = TrustCommerceForm(None, request.POST)
form_proc.is_valid()
else:
form_proc = TrustCommerceForm()
else:
form_reg = RegistrationForm()
form_proc = TrustCommerceForm()
return render(
request,
'music/theatre/summer_camp/registration_form.html',
{
'form_reg': form_reg,'form_proc':form_proc,
'status':status,'msg':msg,
}
)
|
srijanmishra/django-facebook
|
django_facebook/decorators.py
|
Python
|
mit
| 2,793
| 0.003222
|
import facebook
from functools import update_wrapper, wraps
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.utils.decorators import available_attrs
from django.utils.http import urlquote
from django.conf import settings
def canvas_only(function=None):
"""
Decorator ensures that
|
a page is only accessed from within a facebook application.
"""
def _dec(view_func):
def _view(request, *args, **kwargs):
# Make sure we're receiving a signed_request from facebook
if not request.POST.get('signed_request'):
return HttpRes
|
ponseBadRequest('<h1>400 Bad Request</h1><p>Missing <em>signed_request</em>.</p>')
# Parse the request and ensure it's valid
signed_request = request.POST["signed_request"]
data = facebook.parse_signed_request(signed_request, settings.FACEBOOK_SECRET_KEY)
if data is False:
return HttpResponseBadRequest('<h1>400 Bad Request</h1><p>Malformed <em>signed_request</em>.</p>')
# If the user has not authorised redirect them
if not data.get('user_id'):
scope = getattr(settings, 'FACEBOOK_PERMS', None)
auth_url = facebook.auth_url(settings.FACEBOOK_APP_ID, settings.FACEBOOK_CANVAS_PAGE, scope)
markup = '<script type="text/javascript">top.location.href="%s"</script>' % auth_url
return HttpResponse(markup)
# Success so return the view
return view_func(request, *args, **kwargs)
return _view
return _dec(function)
def facebook_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
def _passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
if not login_url:
from django.conf import settings
login_url = settings.LOGIN_URL
def decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
if test_func(request):
return view_func(request, *args, **kwargs)
path = urlquote(request.get_full_path())
tup = login_url, redirect_field_name, path
return HttpResponseRedirect('%s?%s=%s' % tup)
return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view)
return decorator
actual_decorator = _passes_test(
lambda r: r.facebook,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
|
nott/next.filmfest.by
|
modeladminutils/wagtail_hooks.py
|
Python
|
unlicense
| 891
| 0
|
from django.conf.urls import include, url
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core import urlresolvers
from django.utils.html import format_html
from wagtail.wagtailcore import hooks
from modeladminutils import admin_urls
@hooks.register('register_admin_urls')
def register_admin_urls():
return [
url(r'^adminmodel/', include(admin_urls,
namespace='modeladminutils',
app_name='modeladminutils')),
]
@hooks.register('insert_editor_js')
def editor_js():
return format_html(
|
"""
<script src="{0}"></script>
<script>window.chooserUrls.adminmodelChooser = '{1}';</script>
""",
static('modeladminutils/js/adminmodel-chooser.js'),
urlresolvers.reverse('modeladminutils:choose_adm
|
inmodel')
)
|
guescio/toolbox
|
searchPhaseTools.py
|
Python
|
gpl-2.0
| 2,268
| 0.019841
|
#!/usr/bin/env python
#******************************************
#collection of handy tools when dealing with fits and search pahse
#******************************************
#import stuff
import sys, os, math, ROOT
#******************************************
def simpleFit(fileName, histDir, histName, hmin=1100., hmax=13000., nPar=3, draw=False):
ROOT.TH1.SetDefaultSumw2()
ROOT.TH1.StatOverflows()
file = ROOT.TFile(fileName)
if not fi
|
le:
raise SystemExit('\n***ERROR*** couldn\'t find file: %s'%fileName)
if histDir != '':
hist = file.GetDirectory(histDir).Get(histName)
else:
hist = file.Get(histName)
if not hist:
raise SystemExit('\n**
|
*ERROR*** couldn\'t find hist: %s'%histName)
hist.Scale(1.,'width')
hist.GetXaxis().SetTitle('m [GeV]');
hist.GetYaxis().SetTitle('entries/GeV');#NOTE it's scaled
hist.SetMarkerColor(1);
hist.SetLineColor(1);
if draw is True:
c1 = ROOT.TCanvas('c1', 'c1', 100, 50, 800, 600)
c1.SetLogy(1)
c1.SetLogx(1)
hist.Draw();
if nPar == 5:
func = ROOT.TF1('mjjpar5function','[0] * pow(1-(x/13e3), [1]) * pow((x/13e3), [2]+[3]*log(x/13e3)+[4]*pow(log(x/13e3), 2))', hmin, hmax); #5 par
elif nPar == 4:
func = ROOT.TF1('mjj4parfunction','[0] * pow(1-(x/13e3), [1]) * pow((x/13e3), [2]+[3]*log(x/13e3))', hmin, hmax) #4 par
else:
func = ROOT.TF1('mjj4parfunction','[0] * pow(1-(x/13e3), [1]) * pow((x/13e3), [2])', hmin, hmax) #3 par
func.SetLineColor(2);
#dummy fit parameter values
func.SetParameter(0,0.000001)
func.SetParameter(1,0.94)
func.SetParameter(2,8.7)
if nPar == 4:
func.SetParameter(3,0.46)
if nPar == 5:
func.SetParameter(4,0.)
#fit twice
hist.Fit(func,'NMR')
hist.Fit(func,'NMR')
if draw is True:
func.Draw('same')
c1.Update()
c1.WaitPrimitive()
pars=[]
pars.append(func.GetParameter(0))
pars.append(func.GetParameter(1))
pars.append(func.GetParameter(2))
if nPar == 4:
pars.append(func.GetParameter(3))
if nPar == 5:
pars.append(func.GetParameter(4))
return pars
#******************************************
|
wattlebird/Bangumi_Spider
|
setup_bgmapi.py
|
Python
|
bsd-2-clause
| 207
| 0.057971
|
from setuptools import setu
|
p, find_packages
setup(
name = 'project',
version = '1.0',
packages = find_packages(),
entry_points = {'scrapy': ['settings = bgmapi.settin
|
gs']},
)
|
Torkvamedo/smx
|
Homework/lesson 6/second.py
|
Python
|
unlicense
| 414
| 0.01173
|
data = [set(op
|
en(i).read().split()) for i in ('C:\\Users\\Aliwka\\Desktop\\ДЗ-курсы\\Homework6\\first.txt', 'C:\\Users\\Aliwka\\Desktop\\ДЗ-курсы\\Homework6\\second.txt')]
diff = data[0].difference(data[1])
if diff:
print(diff, 'слова кото
|
рые есть в первом файле, но нет во втором')
print(data[1],data[0],'слова из обоих файлов')
|
valdergallo/raidmanager
|
manager/apps.py
|
Python
|
mit
| 107
| 0
|
# enco
|
ding: utf-8
from django.apps import AppConfig
class ManagerConfig(AppConfig):
name = 'man
|
ager'
|
TheRealBanana/bLyrics
|
src/dialogs/logic/active_filtering_search.py
|
Python
|
gpl-2.0
| 4,055
| 0.008878
|
# I'm pretty sure this idea is going to end up being a lot of code so its going in a separate file
#
# The idea is simple, don't create a new search tab but instead narrow down the library view as we type.
# Each letter typed should narrow down the library view instantly and without causing any interruption to the user.
#
# The way I'm planning on doing this is kind of convoluted but seems to be the best way to have the effect I want.
# The dataset that contains all songs will be static and instead we will make a dictionary tracking the "state" of
# each entry in the dataset, whether its valid or invalid. Changes to the song name, artist name, or lyrics fields will
# create a new object that we will store in a stack of similar objects. These objects each contain the filter they are
# applying and the resulting dataset that filter creates (created empty). They also have a function that tells the
# object to recompute its filtered dataset based on the new dataset entry provided by the function call.
#
# The idea is that we would have a chain of successive filtering stages that each were being called/recomputed
# less and less as we go. E.g. the very first entry in the chain is going through every single entry in the original
# dataset and forwarding anything that matches to the next object in the filtering chain, which applies its filter to
# that entry and forwards to the next if it matches, until we reach the end of the chain.
#
# Theoretically adding another letter won't slow us down or cause us to have to recalculate everything before again,
# even if we haven't finished the previous filtering calculations. It should be seamless to the user as they type
# that the library view is being narrowed down.
#
# The very first letter will cause the library view to go completely blank as it starts from 0 and adds entries that
# match. Then as the user continues to type many of those entries will be removed from the library view.
#
# Sounds simple lets do it.
from PyQt4 import QtCore
from collections import namedtuple
class FilterOperationObject(object):
def __init__(self, filterstr, field):
self.filterstr = filterstr
self.field = field
self.filtereddataset = {}
def setupConnections(self, otherentry):
# Single-linked-list-style connection here. This will be linked to the previous FilterOperationObject
pass
def filterEntry(self, entry):
#Here we apply our filter to the entry and if its good we add it to filtereddataset
pass
SearchResultItem = namedtuple("SearchResultItem", ["song", "artist", "lyrics"])
class ActiveFilteringFunctions(object):
def __init__(self, searchDialog, lyricsdataset):
self.lyricsdataset = lyricsdataset # list of namedtuples identical to SearchResultItem (song, artist, lyrics)
self.searchDialog = searchDialog
self.filteropchain = []
self.setupConnection()
print("Active filtering ready. Dataset size is %s" % len(lyricsdataset))
def setupConnection(self):
QtCore.QObject.connect(self.searchDialog.songNameInput, QtCore.SIGNAL("textChanged(QString)"), lambda qstr: self.entryChanged(qstr, "song"))
QtCore.QObject.connect(self.searchDialog.artistNameInput, QtCore.SIGNAL("textCh
|
anged(QString)"), lambda qstr: self.entryChanged(qstr, "artist"))
QtCore.QObject.connect(self.searchDialog.lyricsSearchStringInput, QtCore.SIGNAL("textChanged(QString)"), lambda qstr: self.entryChanged(qstr, "lyrics"))
def entryChanged(self, newstr, field):
print "EC: %s %s" % (newstr, field)
newfilterop = FilterOperationObject(newstr,field)
self.filteropchain.append(newfilterop)
self.upda
|
teFilters()
def updateFilters(self):
#All *Changed functions call this after adding thier new FilterOperationObject to filteropchain
pass
#Called when the very first filter object is created. We need to do things different with it since it iterates over
#the main library.
def chainInit(self):
pass
|
wearpants/osf.io
|
tests/test_addons.py
|
Python
|
apache-2.0
| 40,457
| 0.002027
|
# -*- coding: utf-8 -*-
import time
import mock
import datetime
import unittest
from nose.tools import * # noqa
import httplib as http
import jwe
import jwt
import furl
import itsdangerous
from modularodm import storage, Q
from framework.auth import cas
from framework.auth import signing
from framework.auth.core import Auth
from framework.exceptions import HTTPError
from framework.sessions.model import Session
from framework.mongo import set_up_storage
from tests import factories
from website import settings
from website.files import models
from website.files.models.base import PROVIDER_MAP, StoredFileNode, TrashedFileNode
from website.project.model import MetaSchema, ensure_schemas
from website.util import api_url_for, rubeus
from website.project import new_private_link
from website.project.views.node import _view_project as serialize_node
from website.addons.base import AddonConfig, AddonNodeSettingsBase, views
from tests.base import OsfTestCase, get_default_metaschema
from tests.factories import AuthUserFactory, ProjectFactory
from website.addons.github.exceptions import ApiError
from website.addons.github.tests.factories import GitHubAccountFactory
class TestAddonConfig(unittest.TestCase):
def setUp(self):
self.addon_config = AddonConfig(
short_name='test', full_name='test', owners=['node'],
added_to={'node': False}, categories=[],
settings_model=AddonNodeSettingsBase,
)
def test_static_url_relative(self):
url = self.addon_config._static_url('foo')
assert_equal(
url,
'/static/addons/test/foo'
)
def test_deleted_defaults_to_false(self):
class MyAddonSettings(AddonNodeSettingsBase):
pass
config = MyAddonSettings()
assert_is(config.deleted, False)
def test_static_url_absolute(self):
url = self.addon_config._static_url('/foo')
assert_equal(
url,
'/foo'
)
class SetEnvironMiddleware(object):
def __init__(self, app, **kwargs):
self.app = app
self.kwargs = kwargs
def __call__(self, environ, start_response):
environ.update(self.kwargs)
return self.app(environ, start_response)
class TestAddonAuth(OsfTestCase):
def setUp(self):
super(TestAddonAuth, self).setUp()
self.user = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.node = ProjectFactory(creator=self.user)
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
self.configure_addon()
self.JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('ut
|
f-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
def configure_addon(self):
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.oauth_settings = GitHubAccountFactory(
|
display_name='john')
self.oauth_settings.save()
self.user.external_accounts.append(self.oauth_settings)
self.user.save()
self.node.add_addon('github', self.auth_obj)
self.node_addon = self.node.get_addon('github')
self.node_addon.user = 'john'
self.node_addon.repo = 'youre-my-best-friend'
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.oauth_settings
self.node_addon.save()
def build_url(self, **kwargs):
options = {'payload': jwe.encrypt(jwt.encode({'data': dict(dict(
action='download',
nid=self.node._id,
provider=self.node_addon.config.short_name,
), **kwargs),
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=settings.WATERBUTLER_JWT_EXPIRATION),
}, settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM), self.JWE_KEY)}
return api_url_for('get_auth', **options)
def test_auth_download(self):
url = self.build_url()
res = self.app.get(url, auth=self.user.auth)
data = jwt.decode(jwe.decrypt(res.json['payload'].encode('utf-8'), self.JWE_KEY), settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM)['data']
assert_equal(data['auth'], views.make_auth(self.user))
assert_equal(data['credentials'], self.node_addon.serialize_waterbutler_credentials())
assert_equal(data['settings'], self.node_addon.serialize_waterbutler_settings())
expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True))
observed_url = furl.furl(data['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
def test_auth_missing_args(self):
url = self.build_url(cookie=None)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_bad_cookie(self):
url = self.build_url(cookie=self.cookie)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 200)
data = jwt.decode(jwe.decrypt(res.json['payload'].encode('utf-8'), self.JWE_KEY), settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM)['data']
assert_equal(data['auth'], views.make_auth(self.user))
assert_equal(data['credentials'], self.node_addon.serialize_waterbutler_credentials())
assert_equal(data['settings'], self.node_addon.serialize_waterbutler_settings())
expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True))
observed_url = furl.furl(data['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
def test_auth_cookie(self):
url = self.build_url(cookie=self.cookie[::-1])
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_missing_addon(self):
url = self.build_url(provider='queenhub')
res = self.app.get(url, expect_errors=True, auth=self.user.auth)
assert_equal(res.status_code, 400)
@mock.patch('website.addons.base.views.cas.get_client')
def test_auth_bad_bearer_token(self, mock_cas_client):
mock_cas_client.return_value = mock.Mock(profile=mock.Mock(return_value=cas.CasResponse(authenticated=False)))
url = self.build_url()
res = self.app.get(url, headers={'Authorization': 'Bearer invalid_access_token'}, expect_errors=True)
assert_equal(res.status_code, 403)
class TestAddonLogs(OsfTestCase):
def setUp(self):
super(TestAddonLogs, self).setUp()
self.user = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.node = ProjectFactory(creator=self.user)
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
self.configure_addon()
def configure_addon(self):
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.oauth_settings = GitHubAccountFactory(display_name='john')
self.oauth_settings.save()
self.user.external_accounts.append(self.oauth_settings)
self.user.save()
self.node.add_addon('github', self.auth_obj)
self.node_addon = self.node.get_addon('github')
self.node_addon.user = 'john'
self.node_addon.repo = 'youre-my-best-friend'
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.oauth_settings
self.node_addon.save()
def build_payload(self, metadata, **kwargs):
options = dict(
auth={'id': self.user._id},
action='create',
provider=self.node_addon.config.short_name,
metadata=metadata,
time=time.time() + 1000,
)
options.update(kwargs)
options = {
key: value
for key, value i
|
gizela/gizela
|
gizela/util/gama_data_fun.py
|
Python
|
gpl-3.0
| 1,739
| 0.00575
|
# gizela
#
# Copyright (C) 2010 Michal Seidl, Tomas Kubin
# Author: Tomas Kubin <tomas.kubin@fsv.cvut.cz>
# URL: <http://slon.fsv.cvut.cz/gizela>
#
# $Id$
"""
module with functions for
gama-data-obs.py and gama-data-adj.py scripts
"""
import sys
def read_configuration_file(configFile, localSystem2D, localSystem3D):
"""
reads configuration file
returns: configuration dictionary
localSystem
"""
configDict =
|
[]
localSystem = None
|
if configFile is not None:
from gizela.util.parse_config_file import parse_config_file
try:
configDict = parse_config_file(configFile)
except Exception, e:
print >>sys.stderr, \
"Parsing of configuration file '%s' failed." % configFile
print >>sys.stderr, e
sys.exit(1)
if localSystem2D:
if "localSystem2D" not in configDict:
print >>sys.stderr, \
"No localSystem2D section in config file %s" % configFile
sys.exit(1)
else:
from gizela.util.CoordSystemLocal2D import CoordSystemLocal2D
localSystem = CoordSystemLocal2D()
localSystem.parse_config_dict(configDict)
if localSystem3D:
if "localSystem3D" not in configDict:
print >>sys.stderr, \
"No localSystem3D section in config file %s" % configFile
sys.exit(1)
else:
from gizela.util.CoordSystemLocal3D import CoordSystemLocal3D
localSystem = CoordSystemLocal3D()
localSystem.parse_config_dict(configDict)
return configDict, localSystem
|
severr/severr-python
|
trakerr_client/models/app_event.py
|
Python
|
apache-2.0
| 18,881
| 0.000371
|
# coding: utf-8
"""
Trakerr API
Get your application events and errors to Trakerr via the *Trakerr API*.
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class AppEvent(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self, api_key=None, classification=None, event_type=None, event_message=None, event_time=None, event_stacktrace=None, event_user=None, event_session=None, context_app_version=None, context_env_name=None, context_env_version=None, context_env_hostname=None, context_app_browser=None, context_app_browser_version=None, context_app_os=None, context_app_os_version=None, context_data_center=None, context_data_center_region=None, custom_properties=None, custom_segments=None):
        """
        AppEvent - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps each python attribute name to its declared swagger type
        # (used by the generated (de)serialization machinery).
        self.swagger_types = {
            'api_key': 'str',
            'classification': 'str',
            'event_type': 'str',
            'event_message': 'str',
            'event_time': 'int',
            'event_stacktrace': 'Stacktrace',
            'event_user': 'str',
            'event_session': 'str',
            'context_app_version': 'str',
            'context_env_name': 'str',
            'context_env_version': 'str',
            'context_env_hostname': 'str',
            'context_app_browser': 'str',
            'context_app_browser_version': 'str',
            'context_app_os': 'str',
            'context_app_os_version': 'str',
            'context_data_center': 'str',
            'context_data_center_region': 'str',
            'custom_properties': 'CustomData',
            'custom_segments': 'CustomData'
        }

        # Maps each python attribute name to its JSON key on the wire.
        self.attribute_map = {
            'api_key': 'apiKey',
            'classification': 'classification',
            'event_type': 'eventType',
            'event_message': 'eventMessage',
            'event_time': 'eventTime',
            'event_stacktrace': 'eventStacktrace',
            'event_user': 'eventUser',
            'event_session': 'eventSession',
            'context_app_version': 'contextAppVersion',
            'context_env_name': 'contextEnvName',
            'context_env_version': 'contextEnvVersion',
            'context_env_hostname': 'contextEnvHostname',
            'context_app_browser': 'contextAppBrowser',
            'context_app_browser_version': 'contextAppBrowserVersion',
            'context_app_os': 'contextAppOS',
            'context_app_os_version': 'contextAppOSVersion',
            'context_data_center': 'contextDataCenter',
            'context_data_center_region': 'contextDataCenterRegion',
            'custom_properties': 'customProperties',
            'custom_segments': 'customSegments'
        }

        # Backing fields read/written through the generated properties below.
        self._api_key = api_key
        self._classification = classification
        self._event_type = event_type
        self._event_message = event_message
        self._event_time = event_time
        self._event_stacktrace = event_stacktrace
        self._event_user = event_user
        self._event_session = event_session
        self._context_app_version = context_app_version
        self._context_env_name = context_env_name
        self._context_env_version = context_env_version
        self._context_env_hostname = context_env_hostname
        self._context_app_browser = context_app_browser
        self._context_app_browser_version = context_app_browser_version
        self._context_app_os = context_app_os
        self._context_app_os_version = context_app_os_version
        self._context_data_center = context_data_center
        self._context_data_center_region = context_data_center_region
        self._custom_properties = custom_properties
        self._custom_segments = custom_segments
    # Swagger-generated accessor pair for the 'apiKey' JSON field.
    @property
    def api_key(self):
        """
        Gets the api_key of this AppEvent.
        API key generated for the application

        :return: The api_key of this AppEvent.
        :rtype: str
        """
        return self._api_key

    @api_key.setter
    def api_key(self, api_key):
        """
        Sets the api_key of this AppEvent.
        API key generated for the application

        :param api_key: The api_key of this AppEvent.
        :type: str
        """
        self._api_key = api_key
    # Swagger-generated accessor pair for the 'classification' JSON field.
    @property
    def classification(self):
        """
        Gets the classification of this AppEvent.
        one of 'debug','info','warning','error' or a custom string

        :return: The classification of this AppEvent.
        :rtype: str
        """
        return self._classification

    @classification.setter
    def classification(self, classification):
        """
        Sets the classification of this AppEvent.
        one of 'debug','info','warning','error' or a custom string

        :param classification: The classification of this AppEvent.
        :type: str
        """
        self._classification = classification
@property
def event_type(self):
|
"""
Gets the event_type of this AppEvent.
type or event or error (eg. NullPointerException)
:return: The event_type of this AppEvent.
:rtype: str
|
"""
return self._event_type
@event_type.setter
def event_type(self, event_type):
"""
Sets the event_type of this AppEvent.
type or event or error (eg. NullPointerException)
:param event_type: The event_type of this AppEvent.
:type: str
"""
self._event_type = event_type
    # Swagger-generated accessor pair for the 'eventMessage' JSON field.
    @property
    def event_message(self):
        """
        Gets the event_message of this AppEvent.
        message containing details of the event or error

        :return: The event_message of this AppEvent.
        :rtype: str
        """
        return self._event_message

    @event_message.setter
    def event_message(self, event_message):
        """
        Sets the event_message of this AppEvent.
        message containing details of the event or error

        :param event_message: The event_message of this AppEvent.
        :type: str
        """
        self._event_message = event_message
    # Swagger-generated accessor pair for the 'eventTime' JSON field.
    @property
    def event_time(self):
        """
        Gets the event_time of this AppEvent.
        (optional) event time in ms since epoch

        :return: The event_time of this AppEvent.
        :rtype: int
        """
        return self._event_time

    @event_time.setter
    def event_time(self, event_time):
        """
        Sets the event_time of this AppEvent.
        (optional) event time in ms since epoch

        :param event_time: The event_time of this AppEvent.
        :type: int
        """
        self._event_time = event_time
    # Swagger-generated getter for the 'eventStacktrace' JSON field
    # (value is a project-defined Stacktrace model).
    @property
    def event_stacktrace(self):
        """
        Gets the event_stacktrace of this AppEvent.

        :return: The event_stacktrace of this AppEvent.
        :rtype: Stacktrace
        """
        return self._event_stacktrace
@event_stacktrace.setter
def event_stacktrace(self, event_stacktrace):
"""
Sets the event_stacktrace of this AppEvent.
:param event_stacktrace: The event_stacktrace of this AppEvent.
:type: Stacktrace
"""
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/Python/bin/2.78/scripts/addons_contrib/data_overrides/util.py
|
Python
|
gpl-3.0
| 3,986
| 0.006021
|
### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy, time, sys, hashlib
from bpy.types import UILayout
from math import *
def ifloor(x):
    """Integer floor used for grid indexing.

    NOTE(review): for exact negative integers this returns one less than
    math.floor would (ifloor(-2.0) == -3); behavior preserved deliberately.
    """
    return int(x) - 1 if x < 0.0 else int(x)
def iceil(x):
    """Integer ceiling used for grid indexing.

    NOTE(review): for exact non-negative integers this returns one more than
    math.ceil would (iceil(2.0) == 3); behavior preserved deliberately.
    """
    return int(x) if x < 0.0 else int(x) + 1
# based on http://code.activestate.com/recipes/578114-round-number-to-specified-number-of-significant-di/
def round_sigfigs(num, sig_figs):
    """Round num to sig_figs significant digits; zero is returned unchanged.

    based on http://code.activestate.com/recipes/578114-round-number-to-specified-number-of-significant-di/
    """
    if not num:
        return 0  # log10 is undefined at zero
    magnitude = int(floor(log10(abs(num))))
    return round(num, sig_figs - 1 - magnitude)
def data_uuid(id_data, path=""):
    """Return a (hex digest, 32-bit int) pair identifying an ID datablock.

    The MD5 hash covers the datablock name, the library filepath when the
    datablock is linked, and an optional property path, NUL-separated.
    """
    parts = [id_data.name.encode(encoding="utf-8")]
    if id_data.library:
        parts.append(id_data.library.filepath.encode(encoding="utf-8"))
    if path:
        parts.append(path.encode(encoding="utf-8"))
    digest = hashlib.md5(b'\0'.join(parts))
    return digest.hexdigest(), int.from_bytes(digest.digest(), byteorder='big') % 0xFFFFFFFF
# Identifiers of all bpy.data collections whose elements are ID datablocks.
_id_collections = [ c.identifier for c in bpy.types.BlendData.bl_rna.properties if isinstance(c, bpy.types.CollectionProperty) and isinstance(c.fixed_type, bpy.types.ID) ]
def _id_data_blocks(blend_data):
    """Yield every ID datablock stored in any ID collection of blend_data."""
    for coll_name in _id_collections:
        for id_data in getattr(blend_data, coll_name):
            yield id_data
def find_id_data(blend_data, name, library):
    """Find a datablock by name.

    library is the filepath of the library for linked data, or falsy to
    search only local (non-linked) datablocks. Returns None when no match.
    """
    if library:
        def matches(id_data):
            return (id_data.library
                    and id_data.library.filepath == library
                    and id_data.name == name)
    else:
        def matches(id_data):
            return not id_data.library and id_data.name == name

    for id_data in _id_data_blocks(blend_data):
        if matches(id_data):
            return id_data
    return None
def id_data_from_enum(identifier):
    """Map an enum identifier (stringified pointer) back to its datablock."""
    for id_data in _id_data_blocks(bpy.data):
        if identifier == str(id_data.as_pointer()):
            return id_data
    return None
def id_data_enum_item(id_data):
    """Build an (identifier, name, description, icon, number) enum item."""
    # The session pointer doubles as both the unique identifier string and,
    # truncated to 32 bits, the numeric enum value.
    ptr = id_data.as_pointer()
    return (str(ptr), id_data.name, "", UILayout.icon(id_data), ptr % 0xFFFFFFFF)
class OperatorCallContext():
    """Context manager that snapshots the scene's active/selected objects and
    disables global undo around internal operator calls, restoring both on
    exit."""

    def __enter__(self):
        scene = bpy.context.scene
        prefs = bpy.context.user_preferences

        # store active/selected state to restore it after operator execution
        self.curact = scene.objects.active
        self.cursel = {ob: ob.select for ob in scene.objects}

        # undo can store files a lot when running operators internally;
        # disable since we only need one undo step after main operators anyway
        self.use_global_undo = prefs.edit.use_global_undo
        prefs.edit.use_global_undo = False

        return (self.curact, self.cursel)

    def __exit__(self, exc_type, exc_value, traceback):
        scene = bpy.context.scene
        prefs = bpy.context.user_preferences

        # restore active/selected state
        scene.objects.active = self.curact
        for ob in scene.objects:
            ob.select = self.cursel.get(ob, False)

        prefs.edit.use_global_undo = self.use_global_undo
def select_single_object(ob):
    """Make ob the active object and the only selected object in the scene."""
    scene = bpy.context.scene
    scene.objects.active = ob
    for candidate in scene.objects:
        candidate.select = (candidate == ob)
|
jboissard/mathExperiments
|
toiletRoll.py
|
Python
|
apache-2.0
| 357
| 0.016807
|
"""
rel
|
ation between the length of material coiled around cylinder and its width (toilet paper)
http://math.stackexchange.com/questions/1633704/the-length-of-toilet-roll
"""
import numpy as np
x = 1 # width of one sheet
w = 80 #partial radius (total radius - minus radius of paper tube)
r = 30 # radius of paper tube
L = (np.pi/x)*w*(w+x+2*
|
r)
print L
|
PetePriority/home-assistant
|
homeassistant/components/velbus/binary_sensor.py
|
Python
|
apache-2.0
| 1,110
| 0
|
"""
Support for Velbus Binary Sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.velbus/
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.velbus import (
DOMAIN as VELBUS_DOMAIN, VelbusEntity)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['velbus']
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up Velbus binary sensors."""
    if discovery_info is None:
        return
    controller = hass.data[VELBUS_DOMAIN]
    # Each discovery entry is a (module address, channel) pair.
    sensors = [
        VelbusBinarySensor(controller.get_module(entry[0]), entry[1])
        for entry in discovery_info
    ]
    async_add_entities(sensors)
class VelbusBinarySensor(VelbusEntity, BinarySensorDevice):
    """Representation of a Velbus Binary Sensor."""
    @property
    def is_on(self):
        """Return true if the sensor is on."""
        # A closed input channel on the Velbus module reads as "on".
        return self._module.is_closed(self._channel)
|
MarsBighead/mustang
|
Python/somescript.py
|
Python
|
mit
| 167
| 0.023952
|
#!/usr/bin/python
import sys
t
|
ext = sys.stdin.read()
print 'Text:',text
words = text.split()
print 'Words:
|
',words
wordcount = len(words)
print 'Wordcount:',wordcount
|
sailuh/perceive
|
Websites/Experiments/loginapp/migrations/0002_auto_20170205_1501.py
|
Python
|
gpl-2.0
| 427
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-05 15:01
from __future__ import unicode_literals
from django.db i
|
mport migrations
class Migration(migrations.Migration):
    """Rename Registration.comments to Registration.comment."""

    dependencies = [
        ('loginapp', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='registration',
            old_name='comments',
            new_name='comment',
        ),
    ]
|
popazerty/dvbapp2-gui
|
lib/python/Plugins/SystemPlugins/SoftwareManager/plugin.py
|
Python
|
gpl-2.0
| 81,566
| 0.028676
|
from Plugins.Plugin import PluginDescriptor
from Screens.Console import Console
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.Ipkg import Ipkg
from Screens.SoftwareUpdate import UpdatePlugin
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Input import Input
from Components.Ipkg import IpkgComponent
from Components.Sources.StaticText import StaticText
from Components.ScrollLabel import ScrollLabel
from Components.Pixmap import Pixmap
from Components.MenuList import MenuList
from Components.Sources.List import List
from Components.Slider import Slider
from Components.Harddisk import harddiskmanager
from Components.config import config,getConfigListEntry, ConfigSubsection, ConfigText, ConfigLocations, ConfigYesNo, ConfigSelection
from Components.ConfigList import ConfigListScreen
from Components.Console import Console
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.SelectionList import SelectionList
from Components.PluginComponent import plugins
from Components.About import about
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.AVSwitch import AVSwitch
from Components.Task import job_manager
from Tools.Directories import pathExists, fileExists, resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_PLUGIN, SCOPE_ACTIVE_SKIN, SCOPE_METADIR
from Tools.LoadPixmap import LoadPixmap
from Tools.NumericalTextInput import NumericalTextInput
from enigma import eTimer, RT_HALIGN_LEFT, RT_VALIGN_CENTER, eListboxPythonMultiContent, eListbox, gFont, getDesktop, ePicLoad, eRCInput, getPrevAsciiCode, eEnv, iRecordableService
from cPickle import dump, load
from os import path as os_path, system as os_system, unlink, stat, mkdir, popen, makedirs, listdir, access, rename, remove, W_OK, R_OK, F_OK
from time import time, gmtime, strftime, localtime
from stat import ST_MTIME
from datetime import date
from twisted.web import client
from twisted.internet import reactor
from ImageWizard import ImageWizard
from BackupRestore import BackupSelection, RestoreMenu, BackupScreen, RestoreScreen, getBackupPath, getBackupFilename
from SoftwareTools import iSoftwareTools
config.plugins.configurationbackup = ConfigSubsection()
config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False)
config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname'])
config.plugins.softwaremanager = ConfigSubsection()
config.plugins.softwaremanager.overwriteConfigFiles = ConfigSelection(
[
("Y", _("Yes, always")),
("N", _("No, never")),
("ask", _("Always ask"))
], "Y")
config.plugins.softwaremanager.onSetupMenu = ConfigYesNo(default=False)
config.plugins.softwaremanager.onBlueButton = ConfigYesNo(default=False)
def write_cache(cache_file, cache_data):
	#Does a cPickle dump
	# Ensure the parent directory exists before writing; a failed mkdir is
	# only reported (the path may already exist as a file) and the write is
	# still attempted so the real error surfaces at open() below.
	if not os_path.isdir( os_path.dirname(cache_file) ):
		try:
			mkdir( os_path.dirname(cache_file) )
		except OSError:
			print os_path.dirname(cache_file), 'is a file'
	# Protocol -1 selects the highest pickle protocol available.
	fd = open(cache_file, 'w')
	dump(cache_data, fd, -1)
	fd.close()
def valid_cache(cache_file, cache_ttl):
	"""Return 1 if cache_file exists and is younger than cache_ttl seconds,
	else 0."""
	try:
		mtime = stat(cache_file)[ST_MTIME]
	except OSError:
		# Missing/unreadable file: cache is invalid.  Catching OSError only
		# (instead of the previous bare `except:`) keeps real programming
		# errors visible.
		return 0
	if (time() - mtime) > cache_ttl:
		return 0
	return 1
def load_cache(cache_file):
	"""Return the unpickled (cPickle) contents of cache_file."""
	with open(cache_file) as fd:
		return load(fd)
class UpdatePluginMenu(Screen):
skin = """
<screen name="UpdatePluginMenu" position="center,center" size="610,410" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<ePixmap pixmap="border_menu_350.png" position="5,50" zPosition="1" size="350,300" transparent="1" alphatest="on" />
<widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (2, 2), size = (330, 24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText,
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 25
}
</convert>
</widget>
<widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (2, 2), size = (240, 300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description,
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 300
}
</convert>
</widget>
<widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
Screen.setTitle(self, _("Software management"))
self.skin_path = plugin_path
self.menu = args
self.list = []
self.oktext = _("\nPress OK on your remote control to continue.")
self.menutext = _("Press MENU on your remote control for additional options.")
self.infotext = _("Press INFO on your remote control for additional information.")
self.text = ""
self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.getValue() )
if self.menu == 0:
print "building menu entries"
self.list.append(("install-extensions", _("Manage extensions"), _("\nManage extensions or plugins for your STB_BOX" ) + self.oktext, None))
self.list.append(("software-update", _("Software update"), _("\nOnline update of your STB_BOX software." ) + self.oktext, None))
self.list.append(("software-restore", _("Software restore"), _("\nRestore your STB_BOX with a new firmware." ) + self.oktext, None))
self.list.append(("system-backup", _("Backup system settings"), _("\nBackup your STB_BOX settings." ) + self.oktext + "\n\n" + self.infotext, None))
self.list.append(("system-restore",_("Restore system settings"), _("\nRestore your STB_BOX settings." ) + self.oktext, None))
self.list.append(("ipkg-install", _("Install local extension"), _("\nS
|
can for local extensions and install them." ) + self.oktext, None))
for p in plugins.getPlugin
|
s(PluginDescriptor.WHERE_SOFTWAREMANAGER):
if p.__call__.has_key("SoftwareSupported"):
callFnc = p.__call__["SoftwareSupported"](None)
if callFnc is not None:
if p.__call__.has_key("menuEntryName"):
menuEntryName = p.__call__["menuEntryName"](None)
else:
menuEntryName = _('Extended Software')
if p.__call__.has_key("menuEntryDescription"):
menuEntryDescription = p.__call__["menuEntryDescription"](None)
else:
menuEntryDescription = _('Extended Software Plugin')
self.list.append(('default-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(("advanced", _("Advanced options"), _("\nAdvanced options and settings." ) + self.oktext, None))
elif self.menu == 1:
self.list.append(("advancedrestore", _("Advanced restore"), _("\nRestore your backups by date." ) + self.oktext, None))
self.list.append(("backuplocation", _("Select backup location"), _("\nSelect your backup device.\nCurrent device: " ) + config.plugins.configurationbackup.backuplocation.getValue() + self.oktext, None))
self.list.append(("backupfiles", _("Select backup files"), _("Select files for backup.") + self.oktext + "\n\n" + self.infotext, None))
if confi
|
vortex-ape/scikit-learn
|
sklearn/model_selection/tests/test_split.py
|
Python
|
bsd-3-clause
| 57,882
| 0.000017
|
"""Test the split module"""
from __future__ import division
import warnings
import pytest
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from itertools import combinations
from itertools import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.model_selection._split import CV_WARNING
from sklearn.model_selection._split import NSPLIT_WARNING
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.utils.fixes import comb
from sklearn.svm import SVC
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))
test_groups = (
np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'])
digits = load_digits()
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
    def __init__(self, a=0, allow_nd=False):
        """Store dummy parameter `a` and whether >2-D inputs are allowed."""
        self.a = a
        self.allow_nd = allow_nd
|
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample
|
_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
    def predict(self, T):
        """Return the first feature column of T as the "prediction"."""
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]
    def score(self, X=None, Y=None):
        """Score that decreases as |a| grows; the inputs are ignored."""
        return 1. / (1 + np.abs(self.a))
    def get_params(self, deep=False):
        """Expose constructor params so clone()/grid search can re-create us."""
        return {'a': self.a, 'allow_nd': self.allow_nd}
@ignore_warnings
def test_cross_validator_with_default_params():
n_samples = 4
n_unique_groups = 4
n_splits = 2
p = 2
n_shuffle_splits = 10 # (the default value)
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
X_1d = np.array([1, 2, 3, 4])
y = np.array([1, 1, 2, 2])
groups = np.array([1, 2, 3, 4])
loo = LeaveOneOut()
lpo = LeavePOut(p)
kf = KFold(n_splits)
skf = StratifiedKFold(n_splits)
lolo = LeaveOneGroupOut()
lopo = LeavePGroupsOut(p)
ss = ShuffleSplit(random_state=0)
ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = np of unique folds = 2
loo_repr = "LeaveOneOut()"
lpo_repr = "LeavePOut(p=2)"
kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
lolo_repr = "LeaveOneGroupOut()"
lopo_repr = "LeavePGroupsOut(n_groups=2)"
ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, "
"test_size='default',\n train_size=None)")
ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
n_unique_groups, comb(n_unique_groups, p),
n_shuffle_splits, 2]
for i, (cv, cv_repr) in enumerate(zip(
[loo, lpo, kf, skf, lolo, lopo, ss, ps],
[loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
ss_repr, ps_repr])):
# Test if get_n_splits works correctly
assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))
# Test if the cross-validator works as expected even if
# the data is 1d
np.testing.assert_equal(list(cv.split(X, y, groups)),
list(cv.split(X_1d, y, groups)))
# Test that train, test indices returned are integers
for train, test in cv.split(X, y, groups):
assert_equal(np.asarray(train).dtype.kind, 'i')
assert_equal(np.asarray(train).dtype.kind, 'i')
# Test if the repr works without any errors
assert_equal(cv_repr, repr(cv))
# ValueError for get_n_splits methods
msg = "The 'X' parameter should not be None."
assert_raise_message(ValueError, msg,
loo.get_n_splits, None, y, groups
|
lukeexton96/Robotics
|
catkin_ws/build/commanding_velocity/catkin_generated/pkg.installspace.context.pc.py
|
Python
|
gpl-3.0
| 386
| 0
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Values are substituted by CMake at configure time; empty substitutions
# collapse to empty strings/lists below.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "commanding_velocity"
PROJECT_SPACE_DIR = "/home/computing/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
|
FrozenPigs/Taigabot
|
core/reload.py
|
Python
|
gpl-3.0
| 5,824
| 0.001545
|
import collections
import glob
import os
import re
import sys
import traceback
# Module-level state that must survive re-execution of this file by reload():
# mtimes maps filename -> last seen modification time; lastfiles is the set
# of plugin files seen on the previous scan.
if 'mtimes' not in globals():
    mtimes = {}
if 'lastfiles' not in globals():
    lastfiles = set()
def make_signature(f):
    """Return a (filename, function name, first line number) triple for f.

    Uses the Python 2 `func_code`/`func_name` attributes.
    """
    code = f.func_code
    return code.co_filename, f.func_name, code.co_firstlineno
def format_plug(plug, kind='', lpad=0):
    """Render one plugin hook as a single aligned line for console listings."""
    out = ' ' * lpad + '{}:{}:{}'.format(*make_signature(plug[0]))
    # Column 50 carries the kind-specific detail; the kinds are mutually
    # exclusive, so at most one branch appends.
    pad = ' ' * (50 - len(out))
    if kind == 'command':
        out += pad + plug[1]['name']
    if kind == 'event':
        out += pad + ', '.join(plug[1]['events'])
    if kind == 'regex':
        out += pad + plug[1]['regex']
    return out
def reload(init=False):
changed = False
if init:
bot.plugs = collections.defaultdict(list)
bot.threads = {}
core_fileset = set(glob.glob(os.path.join("core", "*.py")))
for filename in core_fileset:
mtime = os.stat(filename).st_mtime
if mtime != mtimes.get(filename):
mtimes[filename] = mtime
changed = True
try:
eval(compile(open(filename, 'U').read(), filename, 'exec'),
globals())
except Exception:
traceback.print_exc()
if init: # stop if there's an error (syntax?) in a core
sys.exit() # script on startup
continue
if filename == os.path.join('core', 'reload.py'):
reload(init=init)
return
fileset = set(glob.glob(os.path.join('plugins', '*.py')))
# remove deleted/moved plugins
for name, data in bot.plugs.iteritems():
bot.plugs[name] = [x for x in data if x[0]._filename in fileset]
for filename in list(mtimes):
if filename not in fileset and filename not in core_fileset:
mtimes.pop(filename)
for func, handler in list(bot.threads.iteritems()):
if func._filename not in fileset:
handler.stop()
del bot.threads[func]
# compile new plugins
for filename in fileset:
output = ''
mtime = os.stat(filename).st_mtime
if mtime != mtimes.get(filename):
mtimes[filename] = mtime
changed = True
try:
code = compile(open(filename, 'U').read(), filename, 'exec')
namespace = {}
eval(code, namespace)
except Exception:
traceback.print_exc()
continue
# output = '<module class="module" name="{}">\n\t<info>{}</info>\n\t'.format(filename.replace(".py",""), filename.replace(".py","<span>.py</span>"))
# remove plugins already loaded from this filename
for name, data in bot.plugs.iteritems():
bot.plugs[name] = [x for x in data
if x[0]._filename != filename]
for func, handler in list(bot.threads.iteritems()):
if func._filename == filename:
handler.stop()
del bot.threads[func]
for obj in namespace.itervalues():
if hasattr(obj, '_hook'): # check for magic
if obj._thread:
bot.threads[obj] = Handler(obj)
for type, data in obj._hook:
bot.plugs[type] += [data]
if not init:
# output+='<div class="command">{}</div>'.format(format_plug(data).replace('[','<opt>').replace(']','</opt>').replace('<','<req>').replace('>','</req>'))
print '### new plugin (type: %s) loaded:' % type, format_plug(data)
# output += '</module>'
# with open('index.txt', 'a') as file:
# file.write(u'{}\n'.format(output).encode('utf-8'))
if changed:
bot.commands = {}
for plug in bot.plugs['command']:
name = plug[1]['name'].lower()
if not re.match(r'^\w+$', name):
print '### ERROR: invalid command name "{}" ({})'.format(name, format_plug(plug))
continue
if name in bot.commands:
print "### ERROR: command '{}' already registered ({}, {})".format(name,
format_plug(bot.commands[name]),
format_plug(plug))
continue
bot.commands[name] = plug
bot.events = collections.defaultdict(list)
for func, args in bot.plugs['event']:
for event in args['events']:
bot.events[event].append((func, args))
if init:
print ' plugin listing:'
if bot.commands:
# hack to make commands with multiple aliases
# print nicely
print ' command:'
commands = collections.defaultdict(list)
for name, (func, args) in bot.commands.iteritems():
command
|
s[make_signature(func)].append(name)
for sig, names in sorted(commands.iteritems()):
names.sort(key=lambda x: (-len(x), x)) # long names first
out = ' ' * 6 + '%s:%s:%s' % sig
out += ' ' * (50 - len(out)) + ', '.join(names)
print out
for kind, plugs in sorted(bot.plugs.iteritems()):
if kind == 'command':
|
continue
print ' {}:'.format(kind)
for plug in plugs:
try:
print format_plug(plug, kind=kind, lpad=6)
except UnicodeEncodeError:
pass
print
|
jamesturk/oyster
|
oyster/core.py
|
Python
|
bsd-3-clause
| 10,992
| 0.000455
|
import
|
datetime
i
|
mport logging
import hashlib
import random
import sys
import pymongo
import scrapelib
from .mongolog import MongoHandler
from .storage import engines
from celery.execute import send_task
class Kernel(object):
""" oyster's workhorse, handles tracking """
    def __init__(self, mongo_host='localhost', mongo_port=27017,
                 mongo_db='oyster', mongo_log_maxsize=100000000,
                 user_agent='oyster', rpm=60, timeout=300,
                 retry_attempts=3, retry_wait_minutes=60,
                 doc_classes=None, default_storage_engine='dummy',
                ):
        """
        configurable for ease of testing, only one should be instantiated

        mongo_host / mongo_port / mongo_db
            MongoDB connection settings
        mongo_log_maxsize
            capped_size handed to MongoHandler for the log collection
        user_agent / rpm / timeout
            passed through to the scrapelib.Scraper used for fetching
        retry_attempts / retry_wait_minutes
            error back-off policy consumed by update()
        doc_classes
            mapping of doc_class name -> properties; each entry must contain
            'update_mins' and 'onchanged' (ValueError otherwise)
        default_storage_engine
            storage engine name assigned to doc classes that don't set one
        """
        # set up the log
        self.db = pymongo.Connection(mongo_host, mongo_port)[mongo_db]
        self.log = logging.getLogger('oyster')
        self.log.setLevel(logging.DEBUG)
        # log records go to a capped mongo collection via MongoHandler
        self.log.addHandler(MongoHandler(mongo_db, host=mongo_host,
                                         port=mongo_port,
                                         capped_size=mongo_log_maxsize))
        # create status document if it doesn't exist
        if self.db.status.count() == 0:
            self.db.status.insert({'update_queue': 0})
        # ensure indexes: _random (value assigned in track_url) and url
        self.db.tracked.ensure_index('_random')
        self.db.tracked.ensure_index('url')
        self.scraper = scrapelib.Scraper(user_agent=user_agent,
                                         requests_per_minute=rpm,
                                         follow_robots=False,
                                         raise_errors=True,
                                         timeout=timeout)
        self.retry_attempts = retry_attempts
        self.retry_wait_minutes = retry_wait_minutes
        # load engines: one storage instance per registered engine class
        self.storage = {}
        for name, StorageCls in engines.iteritems():
            self.storage[name] = StorageCls(self)
        # set document classes, validating required fields up front
        _doc_class_fields = ('update_mins', 'onchanged')
        self.doc_classes = doc_classes or {}
        for dc_name, dc_props in self.doc_classes.iteritems():
            for key in _doc_class_fields:
                if key not in dc_props:
                    raise ValueError('doc_class %s missing key %s' %
                                     (dc_name, key))
            # set a default storage engine
            if 'storage_engine' not in dc_props:
                dc_props['storage_engine'] = default_storage_engine
def _wipe(self):
""" exists primarily for debug use, wipes entire db """
self.db.drop_collection('tracked')
self.db.drop_collection('logs')
self.db.drop_collection('status')
def _add_doc_class(self, doc_class, **properties):
self.doc_classes[doc_class] = properties
    def track_url(self, url, doc_class, id=None, **kwargs):
        """
        Add a URL to the set of tracked URLs, accessible via a given filename.

        url
            URL to start tracking
        doc_class
            document type, can be any arbitrary string (must be registered
            in self.doc_classes)
        id
            optional explicit _id for the tracked document; when omitted the
            existing document is looked up by URL instead
        **kwargs
            any keyword args will be added to the document's metadata

        Returns the _id of the (new or existing) tracked document.
        Raises ValueError for an unregistered doc_class, or when *id* is
        already tracked with a different URL or doc_class.
        """
        if doc_class not in self.doc_classes:
            error = 'error tracking %s, unregistered doc_class %s'
            self.log.error(error, url, doc_class)
            raise ValueError(error % (url, doc_class))
        # try and find an existing version of this document
        tracked = None
        if id:
            tracked = self.db.tracked.find_one({'_id': id})
        else:
            tracked = self.db.tracked.find_one({'url': url})
        # if id exists, ensure that URL and doc_class haven't changed
        # then return existing data (possibly with refreshed metadata)
        if tracked:
            if (tracked['url'] == url and
                tracked['doc_class'] == doc_class):
                # only write back when the metadata actually changed
                if kwargs != tracked['metadata']:
                    tracked['metadata'] = kwargs
                    self.db.tracked.save(tracked, safe=True)
                return tracked['_id']
            else:
                # id existed but with different URL
                message = ('%s already exists with different data (tracked: '
                           '%s, %s) (new: %s, %s)')
                args = (tracked['_id'], tracked['url'], tracked['doc_class'],
                        url, doc_class)
                self.log.error(message, *args)
                raise ValueError(message % args)
        self.log.info('tracked %s [%s]', url, id)
        # _random supports the index created in __init__
        newdoc = dict(url=url, doc_class=doc_class,
                      _random=random.randint(0, sys.maxint),
                      versions=[], metadata=kwargs)
        if id:
            newdoc['_id'] = id
        return self.db.tracked.insert(newdoc, safe=True)
def md5_versioning(self, olddata, newdata):
""" return True if md5 changed or if file is new """
old_md5 = hashlib.md5(olddata).hexdigest()
new_md5 = hashlib.md5(newdata).hexdigest()
return old_md5 != new_md5
def update(self, doc):
"""
perform update upon a given document
:param:`doc` must be a document from the `tracked` collection
* download latest document
* check if document has changed using versioning func
* if a change has occurred save the file
* if error occured, log & keep track of how many errors in a row
* update last_update/next_update timestamp
"""
new_version = True
error = False
now = datetime.datetime.utcnow()
try:
doc_class = self.doc_classes[doc['doc_class']]
except KeyError:
raise ValueError('unregistered doc_class %s' % doc['doc_class'])
update_mins = doc_class['update_mins']
storage = self.storage[doc_class['storage_engine']]
# fetch strategies could be implemented here as well
try:
url = doc['url'].replace(' ', '%20')
newdata = self.scraper.urlopen(url)
content_type = newdata.response.headers['content-type']
except Exception as e:
new_version = False
error = str(e)
# only do versioning check if at least one version exists
if new_version and doc['versions']:
# room here for different versioning schemes
olddata = storage.get(doc['versions'][-1]['storage_key'])
new_version = self.md5_versioning(olddata, newdata)
if new_version:
storage_id = storage.put(doc, newdata, content_type)
doc['versions'].append({'timestamp': now,
'storage_key': storage_id,
'storage_type': storage.storage_type,
})
# fire off onchanged functions
for onchanged in doc_class.get('onchanged', []):
send_task(onchanged, (doc['_id'],))
if error:
# if there's been an error, increment the consecutive_errors count
# and back off a bit until we've reached our retry limit
c_errors = doc.get('consecutive_errors', 0)
doc['consecutive_errors'] = c_errors + 1
if c_errors <= self.retry_attempts:
update_mins = self.retry_wait_minutes * (2 ** c_errors)
else:
# reset error count if all was ok
doc['consecutive_errors'] = 0
# last_update/next_update are separate from question of versioning
doc['last_update'] = now
if update_mins:
doc['next_update'] = now + datetime.timedelta(minutes=update_mins)
else:
doc['next_update'] = None
if error:
self.log.warning('error updating %s [%s]', url, doc['_id'])
else:
new_version = ' (new)'
self.log.info('updated %s [%s]%s', url, doc['_id'], new_version)
self.db.tracked.save(doc, safe=True)
def get_update_queue(self):
"""
Get a list of what needs to be updated.
Documents that have never been updated take priority, follow
|
slozier/ironpython2
|
Tests/test_complex.py
|
Python
|
apache-2.0
| 2,104
| 0.008555
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import unittest
from iptest.type_util import *
from iptest import run_test
class ComplexTest(unittest.TestCase):
    def test_from_string(self):
        """complex() string parsing must reject whitespace around the sign of
        the imaginary part and a doubled 'j' suffix, while leading/trailing
        whitespace around the whole literal is ignored."""
        # complex from string: negative
        # - space related
        l = ['1.2', '.3', '4e3', '.3e-4', "0.031"]
        for x in l:
            for y in l:
                # space before or after the sign separating real/imag parts
                self.assertRaises(ValueError, complex, "%s +%sj" % (x, y))
                self.assertRaises(ValueError, complex, "%s+ %sj" % (x, y))
                self.assertRaises(ValueError, complex, "%s - %sj" % (x, y))
                self.assertRaises(ValueError, complex, "%s- %sj" % (x, y))
                self.assertRaises(ValueError, complex, "%s-\t%sj" % (x, y))
                # both parts carrying a 'j' suffix is invalid
                self.assertRaises(ValueError, complex, "%sj+%sj" % (x, y))
                # surrounding whitespace is ignored, so both spellings agree
                self.assertEqual(complex("  %s+%sj" % (x, y)), complex("  %s+%sj  " % (x, y)))
def test_misc(self):
self.assertEqual(mycomplex(), complex())
a = mycomplex(1)
b = mycomplex(1,0)
c = complex(1)
d = complex(1,0)
for x in [a,b,c,d]:
for y in [a,b,c,d]:
self.assertEqual(x,y)
self.assertEqual(a ** 2, a)
self.ass
|
ertEqual(a-complex(), a)
self.assertEqual(a+complex(), a)
self.assertEqual(complex()/a, complex())
self.assertEqual(complex()*a, complex())
self.assertEqual(complex()%a, complex())
self.assertEqual(complex() // a, complex())
self.assertEqual(complex(2), complex(2, 0))
def test_inherit(self):
class mycomplex(complex): pass
a = mycomplex(2+1j)
|
self.assertEqual(a.real, 2)
self.assertEqual(a.imag, 1)
def test_repr(self):
self.assertEqual(repr(1-6j), '(1-6j)')
def test_infinite(self):
self.assertEqual(repr(1.0e340j), 'infj')
self.assertEqual(repr(-1.0e340j),'-infj')
run_test(__name__)
|
ionelmc/python-redis-lock
|
tests/helper.py
|
Python
|
bsd-2-clause
| 2,211
| 0.000905
|
from __future__ import division
from __future__ import print_function
import logging
import os
import sys
import time
from redis import StrictRedis
from redis_lock import Lock
from conf import TIMEOUT
from conf import UDS_PATH
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s',
datefmt="%x~%X"
)
test_name = sys.argv[1]
if test_name == 'test_simple':
conn = StrictRedis(unix_socket_path=UDS_PATH)
with Lock(conn, "foobar"):
time.sleep(0.1)
elif test_name == 'test_no_block':
conn = StrictRedis(unix_socket_path=UDS_PATH)
lock = Lock(conn, "foobar")
res = lock.acquire(blocking=False)
logging.info("acquire=>%s", res)
elif test_name == 'test_timeout':
conn = StrictRedis(unix_socket_path=UDS_PATH)
with Lock(conn, "foobar"):
time.sleep(1)
elif test_name == 'test_expire':
conn = StrictRedis(unix_socket_path=UDS_PATH)
with Lock(conn, "foobar", expire=TIMEOUT/4):
time.sleep(0.1)
with Lock(conn, "foobar", expire=TIMEOUT/4):
time.sleep(0.1)
elif test_name == 'test_no_overlap':
from sched import scheduler
sched = scheduler(time.time, time.sleep)
start = time.time() + TIMEOUT/2
# the idea is to start all the lock at the same time - we use the scheduler to start everything in TIMEOUT/2 seconds, by
# that time all the forks should be ready
def cb_no_overlap():
with Lock(conn, "foobar"):
time.sleep(0.001)
sched.enterabs(start, 0, cb_no_overl
|
ap, ())
pids = []
for
|
_ in range(125):
pid = os.fork()
if pid:
pids.append(pid)
else:
try:
conn = StrictRedis(unix_socket_path=UDS_PATH)
sched.run()
finally:
os._exit(0)
for pid in pids:
os.waitpid(pid, 0)
else:
raise RuntimeError('Invalid test spec %r.' % test_name)
logging.info('DIED.')
|
sfu-fas/coursys
|
oldcode/planning/views/view_capabilities.py
|
Python
|
gpl-3.0
| 1,166
| 0.002573
|
from planning.models import TeachingCapability, PlanningCourse
from courselib.auth import requires_role
from coredata.models import Person
from django.shortcuts import render_to_response
from django.template import RequestContext
@requires_role('PLAN')
def view_capabilities(request):
    """List teaching capabilities per instructor and per course.

    Builds two parallel listings restricted to the units the requesting
    planner administers: the capabilities of each FAC/SESS/COOP instructor,
    and the capable instructors for each planning course.
    """
    instructors = Person.objects.filter(role__role__in=["FAC", "SESS", "COOP"],
                                        role__unit__in=request.units)
    capabilities = []
    for i in instructors:
        capabilities.append(TeachingCapability.objects.filter(instructor=i))
    capabilities_list = list(zip(instructors, capabilities))

    courses = PlanningCourse.objects.filter(owner__in=request.units)
    capabilities = []
    for c in courses:
        capabilities.append(TeachingCapability.objects.filter(course=c))
    course_capabilities_list = list(zip(courses, capabilities))

    # BUG FIX: this called render() -- which is never imported (the module
    # imports render_to_response) and does not accept context_instance --
    # so the view raised at request time. Use render_to_response to match
    # the import and the context_instance keyword.
    return render_to_response("planning/view_capabilities.html",
                              {'capabilities_list': capabilities_list,
                               'course_capabilities_list': course_capabilities_list},
                              context_instance=RequestContext(request))
|
hagenw/ltfat
|
mat2doc/mat/release_keep_tests.py
|
Python
|
gpl-3.0
| 654
| 0.010703
|
print "Creating downloadable package"
# Remove unwanted files
s=os.path.join(conf.t.dir,'timing')
rmrf(s)
os.rmdir(s)
# R
|
ecursively remove the .git files
for root, dirs, files in os.walk(conf.t.dir, topdown=False):
for name in files:
if name in ['.gitattributes','.gitignore','desktop.ini']:
os.remove(os.path.join(root, nam
|
e))
# "bootstrap" the configure files
os.system("cd "+conf.t.dir+"/src; ./bootstrap")
s=os.path.join(conf.t.dir,'src','autom4te.cache')
rmrf(s)
os.rmdir(s)
# Compile the Java classes
os.system("cd "+conf.t.dir+"/blockproc/java; make")
os.system("cd "+conf.t.dir+"/blockproc/java; make classclean")
|
Distrotech/bzr
|
bzrlib/tests/per_workingtree/test_check.py
|
Python
|
gpl-2.0
| 2,606
| 0.000767
|
# Copyright (C) 2009 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for checking of trees."""
from bzrlib.tests.per_workingtree import TestCaseWithWorkingTree
from bzrlib.workingtree import InventoryWorkingTree
from bzrlib.tests import TestNotApplicable
class TestCheck(TestCaseWithWorkingTree):
def test__get_check_refs_new(self):
tree = self.make_branch_and_tree('tree')
if not isinstance(tree, InventoryWorkingTree):
raise TestNotApplicable(
"_get_check_refs only relevant for inventory working trees")
self.assertEqual(set([('trees', 'null:')]),
set(tree._get_check_refs()))
def test__get_check_refs_basis(self):
# with a basis, all current bzr trees cache it and so need the
# inventory to cross-check.
tree = self.make_branch_and_tree('tree')
if not isinstance(tree, InventoryWorkingTree):
raise TestNotApplicable(
"_get_check_refs only relevant for inventory working trees")
revid = tree.commit('first post')
self.assertEqual(set([('trees', revi
|
d)]),
set(tree._get_check_refs()))
def test__check_with_refs(self):
# _check can be called with a dict of the things required.
tree
|
= self.make_branch_and_tree('tree')
if not isinstance(tree, InventoryWorkingTree):
raise TestNotApplicable(
"_get_check_refs only relevant for inventory working trees")
tree.lock_write()
self.addCleanup(tree.unlock)
revid = tree.commit('first post')
needed_refs = tree._get_check_refs()
repo = tree.branch.repository
for ref in needed_refs:
kind, revid = ref
refs = {}
if kind == 'trees':
refs[ref] = repo.revision_tree(revid)
else:
self.fail('unknown ref kind')
tree._check(refs)
|
ewbankkit/cloud-custodian
|
tools/c7n_mailer/c7n_mailer/azure_mailer/__init__.py
|
Python
|
apache-2.0
| 101
| 0.009901
|
i
|
mport logging
logging.ge
|
tLogger('azure.storage.common.storageclient').setLevel(logging.WARNING)
|
sourcepole/qgis-wps-client
|
processingwps/WpsAlgorithm.py
|
Python
|
gpl-2.0
| 11,916
| 0.005874
|
from __future__ import absolute_import
from builtins import str
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.Processing import Processing
from processing.core.ProcessingLog import ProcessingLog
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.parameters.ParameterCrs import ParameterCrs
from processing.parameters.ParameterExtent import ParameterExtent
from processing.parameters.ParameterMultipleInput import ParameterMultipleInput
from processing.parameters.ParameterNumber import ParameterNumber
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterSelection import ParameterSelection
from processing.parameters.ParameterString import ParameterString
from processing.parameters.ParameterTable import ParameterTable
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterFile import ParameterFile
from processing.outputs.OutputRaster import OutputRaster
from processing.outputs.OutputVector import OutputVector
from processing.outputs.OutputString import OutputString
from processing.outputs.OutputFactory import OutputFactory
from processing.tools import dataobjects
from wps.wpslib.wpsserver import WpsServer
from wps.wpslib.processdescription import ProcessDescription
from wps.wpslib.processdescription import getFileExtension,isMimeTypeVector,isMimeTypeRaster,isMimeTypeText,isMimeTypeFile
from wps.wpslib.processdescription import StringInput, TextInput, SelectionInput, VectorInput, MultipleVectorInput, RasterInput, MultipleRasterInput, FileInput, MultipleFileInput, ExtentInput, CrsInput, VectorOutput, RasterOutput, StringOutput
from wps.wpslib.executionrequest import ExecutionRequest
from wps.wpslib.executionrequest import createTmpGML
from wps.wpslib.executionresult import ExecutionResult
from qgis.PyQt import QtGui
from PyQt4.QtCore import *
from qgis.PyQt.QtWidgets import qApp, QApplication, QMessageBox
import os
class WpsAlgorithm(GeoAlgorithm):
    def __init__(self, process, bookmark = False):
        # process: wpslib process descriptor for the remote WPS process.
        # bookmark: True when the algorithm belongs to the "Bookmarks" group
        # rather than a server listing (see defineCharacteristics).
        self.process = process
        self.bookmark = bookmark
        # NOTE: the attributes above must be assigned *before* the base
        # constructor runs -- it triggers defineCharacteristics(), which
        # reads self.process and self.bookmark.
        GeoAlgorithm.__init__(self) #calls defineCharacteristics
def defineCharacteristics(self):
self.name = str(self.process.identifier)
if self.bookmark:
self.group = "Bookmarks"
else:
self.group = WpsAlgorithm.groupName(self.process.server)
self.loadProcessDescription()
self.buildParametersDialog()
def getIcon(self):
return QtGui.QIcon(os.path.dirname(__file__) + "/../images/wps.png")
@staticmethod
def groupName(server):
return "WPS %s" % server.connectionName
    def loadProcessDescription(self):
        """Ensure the process description is cached on disk, then load it.

        The DescribeProcess response is fetched from the server only when no
        cached file exists yet; a failed fetch is saved as an empty
        description so the request is not retried on every startup.
        """
        #retrieve and save if not saved before
        if not os.path.exists(self.process.processDescriptionFile(self.wpsDescriptionFolder())):
            self.getProcessDescription()
            if self.process.identifier == None or self.process.identifier == "":
                #Error reading description
                self.process.processXML = '' #Save empty description to prevent retry at next startup
            self.process.saveDescription(self.wpsDescriptionFolder())
        #load from file
        self.process.loadDescription(self.wpsDescriptionFolder())
    def wpsDescriptionFolder(self):
        """Return the folder where cached process descriptions are stored."""
        # NOTE(review): local import presumably avoids a circular dependency
        # with WpsAlgorithmProvider -- confirm before hoisting to module level.
        from .WpsAlgorithmProvider import WpsAlgorithmProvider
        return WpsAlgorithmProvider.WpsDescriptionFolder()
    def getProcessDescription(self):
        """Request DescribeProcess from the server and block until it arrives.

        NOTE(review): busy-waits by pumping the Qt event loop with no
        timeout -- an unresponsive server keeps this loop spinning.
        """
        self.process.requestDescribeProcess()
        #Wait for answer
        while not self.process.loaded():
            qApp.processEvents()
def buildParametersDialog(self):
for input in self.process.inputs:
inputType = type(input)
if inputType == VectorInput:
self.addParameter(ParameterVector(str(input.identifier), str(input.title), ParameterVector.VECTOR_TYPE_ANY, input.minOccurs == 0))
elif inputType == MultipleVectorInput:
self.addParameter(ParameterMultipleInput(str(input.identifier), str(input.title), ParameterVector.VECTOR_TYPE_ANY, input.minOccurs == 0))
elif inputType == StringInput:
self.addParameter(ParameterString(str(input.identifier), str(input.title)))
elif inputType == TextInput:
self.addParameter(ParameterString(str(input.identifier), str(input.title)))
elif inputType == RasterInput:
self.addParameter(ParameterRaster(str(input.identifier), str(input.title), input.minOccurs == 0))
elif inputType == MultipleRasterInput:
self.addParameter(ParameterMultipleInput(str(input.identifier), str(input.title), ParameterMultipleInput.TYPE_RASTER, input.minOccurs == 0))
elif inputType == FileInput:
#self.addParameter(ParameterFile(str(input.identifier), str(input.title), False, input.minOccurs == 0))
self.addParameter(ParameterFile(str(input.identifier), str(input.title)))
elif inputType == MultipleFileInput:
pass #Not supported
elif inputType == SelectionInput:
self.addParameter(ParameterSelection(str(input.identifier), str(input.title), input.valList))
elif inputType == ExtentInput:
self.addParameter(ParameterExtent(str(input.identifier), str(input.title)))
elif inputType == CrsInput:
self.addParameter(ParameterCrs(str(input.identifier), "Projection", None))
for output in self.process.outputs:
outputType = type(output)
if outputType == VectorOutput:
self.addOutput(OutputVector(str(output.identifier), str(output.title)))
elif outputType == RasterOutput:
self.addOutput(OutputRaster(str(output.identifier), str(output.title))
|
)
elif outputType == StringOutput:
self.addOutput(OutputString(str(output.identifier), str(output.title)))
def defineProcess(self):
"""Create the execute request"""
request = ExecutionRequest(self.process)
request.addExecuteRequestHeader()
|
# inputs
useSelected = False
request.addDataInputsStart()
for input in self.process.inputs:
inputType = type(input)
value = self.getParameterValue(input.identifier)
if inputType == VectorInput:
layer = dataobjects.getObjectFromUri(value, False)
if layer is None:
raise Exception("Couldn't extract layer for parameter '%s' from '%s'" % (input.identifier, value))
mimeType = input.dataFormat["MimeType"]
data = createTmpGML(layer, useSelected, mimeType)
request.addGeometryInput(input.identifier, mimeType, input.dataFormat["Schema"], input.dataFormat["Encoding"], data, useSelected)
elif inputType == MultipleVectorInput:
#ParameterMultipleInput(input.identifier, input.title, ParameterVector.VECTOR_TYPE_ANY, input.minOccurs == 0))
pass
elif inputType == StringInput:
request.addLiteralDataInput(input.identifier, str(value))
elif inputType == TextInput:
request.addLiteralDataInput(input.identifier, str(value))
elif inputType == RasterInput:
layer = dataobjects.getObjectFromUri(value, False)
mimeType = input.dataFormat["MimeType"]
request.addGeometryBase64Input(input.identifier, mimeType, layer)
elif inputType == MultipleRasterInput:
#ParameterMultipleInput(input.identifier, input.title, ParameterVector.TYPE_RASTER, input.minOccurs == 0))
pass
elif inputType == FileInput:
mimeType = input.dataFormat["MimeType"]
request.addFileBase64Input(input.identifier, mimeType, value)
elif inputType == SelectionInput:
#Value is dropdown
|
tst-mswartz/earthenterprise
|
earth_enterprise/src/server/wsgi/search/plugin/coordinate_search_handler.py
|
Python
|
apache-2.0
| 10,743
| 0.003165
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for implementing the Coordinate search."""
import os
from string import Template
import sys
from search.common import exceptions
from search.common import geconstants
from search.common import utils
from search.plugin import coordinate_transform
class CoordinateSearch(object):
"""Class for performing the Coordinate search.
Coordinate search supports the following formats:
1. Decimal Degrees (DD)
2. Degrees Minutes Seconds (DMS)
3. Degrees Decimal Minutes (DDM)
4. Military Grid Reference System (MGRS)
5. Universal Transverse Mercator (UTM)
Coordinate search transforms coordinates from DMS, DDM, UTM, MGRS formats to
DD, validates the coordinates and sends the response back to the client.
Depending on the client type, KML or JSONP formats are supported.
"""
NUM_OF_COORDS_IN_LAT_LNG_FORMAT = 2
NUM_OF_COORDS_IN_MGRS_FORMAT = 1
  def __init__(self):
    """Inits CoordinateSearch.

    Initializes the logger "ge_search".
    Initializes templates for kml, placemark templates for KML/JSONP outputs.
    """
    self.utils = utils.SearchUtils()
    self._transform = coordinate_transform.CoordinateTransform()
    configs = self.utils.GetConfigs(
        os.path.join(geconstants.SEARCH_CONFIGS_DIR, "CoordinateSearch.conf"))
    self._jsonp_call = self.utils.jsonp_functioncall
    # Placemark geometry template; %s placeholders are filled as
    # (name, styleUrl, longitude, latitude) in ConstructKMLResponse.
    self._geom = """
            <name>%s</name>
            <styleUrl>%s</styleUrl>
            <Point>
              <coordinates>%s,%s</coordinates>
            </Point>\
    """
    # JSON equivalent of the geometry; placeholders are (longitude, latitude).
    self._json_geom = """
     {
      "Point": {
        "coordinates": "%s,%s"
      }
     }
    """
    # Outer KML document template; ${style} and ${placemark} are substituted
    # via string.Template below.
    self._kml = """
    <kml xmlns="http://www.opengis.net/kml/2.2"
     xmlns:gx="http://www.google.com/kml/ext/2.2"
     xmlns:kml="http://www.opengis.net/kml/2.2"
     xmlns:atom="http://www.w3.org/2005/Atom">
    <Folder>
         <name>Coordinate Search Results</name>
         <open>1</open>
         <Style id="placemark_label">\
            ${style}
         </Style>\
            ${placemark}
    </Folder>
    </kml>
    """
    self._kml_template = Template(self._kml)
    self._placemark_template = self.utils.placemark_template
    self._json_template = self.utils.json_template
    self._json_placemark_template = self.utils.json_placemark_template
    style_template = self.utils.style_template
    # Formats whose input is a (latitude, longitude) pair, as opposed to the
    # single-token MGRS/UTM forms (see DoSearch).
    self.coordinates_in_lat_lng_format_ = ["DD", "DMS", "DDM"]
    self.logger = self.utils.logger
    # Shared KML <Style> block, populated from CoordinateSearch.conf.
    self._style = style_template.substitute(
        balloonBgColor=configs.get("balloonstyle.bgcolor"),
        balloonTextColor=configs.get("balloonstyle.textcolor"),
        balloonText=configs.get("balloonstyle.text"),
        iconStyleScale=configs.get("iconstyle.scale"),
        iconStyleHref=configs.get("iconstyle.href"),
        lineStyleColor=configs.get("linestyle.color"),
        lineStyleWidth=configs.get("linestyle.width"),
        polyStyleColor=configs.get("polystyle.color"),
        polyStyleColorMode=configs.get("polystyle.colormode"),
        polyStyleFill=configs.get("polystyle.fill"),
        polyStyleOutline=configs.get("polystyle.outline"),
        listStyleHref=configs.get("iconstyle.href"))
def HandleSearchRequest(self, environ):
"""Fetches the search tokens from form and performs the coordinate search.
Args:
environ: A list of environment variables as supplied by the
WSGI interface to the coordinate search application interface.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
Raises:
BadQueryException: if the search query is invalid.
"""
search_results = ""
# Fetch all the attributes provided by the user.
parameters = self.utils.GetParameters(environ)
response_type = self.utils.GetResponseType(environ)
# Retrieve the function call back name for JSONP response.
self.f_callback = self.utils.GetCallback(parameters)
original_query = self.utils.GetValue(parameters, "q")
if not original_query:
msg = "Empty search query received."
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
search_status, search_results = self.DoSearch(original_query, response_type)
if not search_status:
folder_name = "Search returned no results."
search_results = self.utils.NoSearchResults(
folder_name, self._style, response_type, self.f_callback)
return (search_results, response_type)
def DoSearch(self, search_query, response_type):
"""Performs the coordinate search.
Args:
search_query: A string containing the search coordinates as
entered by the user.
response_type: Response type can be KML or JSONP, depending on the client.
Returns:
search_results: A KML/JSONP formatted string which contains search results.
Raises:
BadQueryException: if the search query is invalid.
"""
coordinate_type = ""
search_results = ""
input_coordinates = []
decimal_degrees_coordinates = []
search_tokens = self.utils.SearchTokensFromString(search_query)
self.logger.debug("coordinates: %s", ",".join(search_tokens))
input_coordinates = self._transform.GetInputCoordinates(
",".join(search_tokens))
number_of_coordinates = len(input_coordinates)
if number_of_coordinates == 0:
msg = "Incomplete search query %s submitted" % search_query
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
coordinate_type = self._transform.GetInputType(input_coordinates)
self.logger.debug("Coordinate type is %s.", coordinate_type)
if coordinate_type in self.coordinates_in_lat_lng_format_:
reqd_num_of_coordinates = CoordinateSearch.NUM_OF_COORDS_IN_LAT_LNG_FORMAT
else:
reqd_num_of_coordinates = CoordinateSearch.NUM_OF_COORDS_IN_MGRS_FORMAT
if number_of_coordinates > reqd_num_of_coordinates:
self.logger.warning(
"extra search parameters ignored: %s", ",".join(
input_coordinates[reqd_num_of_coordinates:]))
input_coordinates = input_coordinates[:reqd_num_of_coordinates]
elif number_of_coordinates < reqd_num_of_coordinates:
msg = "Incomplete search query %s submitted" % search_query
self.logger.error(msg)
raise exceptions.BadQueryException(msg)
decimal_degrees_coordinates = self._transform.TransformToDecimalDegrees(
coordinate_type, input_coordinates)
search_results = self.ConstructResponse(
response_type, decimal_degrees_coordinates)
search_status = True if search_results else False
|
return search_status,
|
search_results
def ConstructKMLResponse(self, latitude, longitude):
"""Prepares KML response.
KML response has the below format:
<kml>
<Folder>
<name/>
<StyleURL>
---
</StyleURL>
<Point>
<coordinates/>
</Point>
</Folder>
</kml>
Args:
latitude: latitude in Decimal Degress format.
longitude: longitude in Decimal Degress format.
Returns:
kml_response: KML formatted response.
"""
placemark = ""
kml_response = ""
name = "%s, %s" % (latitude, longitude)
style_url = "#placemark_label"
geom = self._geom % (name, style_url, str(longitude), str(latitude))
placemark = self._placemark_template.substitute(geom=geom)
kml_response = self._kml_template.substitute(
style=self._style, placemark=placemark)
self.logger.info("KML response successfully formatted")
|
wesabe/fixofx
|
lib/ofxtools/csv_converter.py
|
Python
|
apache-2.0
| 14,873
| 0.004034
|
# Copyright 2005-2010 Wesabe, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND
|
, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ofxtools.CsvConverter - translate CSV files into OFX files.
#
import datetime
import dateutil.parser
import ofx
import ofxtools
import re
import sys
import xml.sax.saxutils as sax
from decimal import *
from ofx.builder import *
class CsvConverter:
|
    def __init__(self, qif, colspec=None, fid="UNKNOWN", org="UNKNOWN",
                 bankid="UNKNOWN", accttype="UNKNOWN", acctid="UNKNOWN",
                 balance="UNKNOWN", curdef=None, lang="ENG", dayfirst=False,
                 debug=False):
        """Parse the source document and prepare it for OFX generation.

        qif      -- QIF document text (despite the class name, the visible
                    input here is QIF, fed to ofxtools.QifParser).
        colspec  -- column specification; stored but unused in this code.
        fid, org, bankid, accttype, acctid, balance -- OFX account/header
                    fields, defaulting to the "UNKNOWN" placeholder.
        curdef   -- currency definition, or None.
        lang     -- OFX language code.
        dayfirst -- treat ambiguous dates as DD/MM; may also be switched
                    on later by _guess_formats when a date forces it.
        debug    -- write progress messages to stderr.
        """
        self.qif = qif
        self.colspec = colspec
        self.fid = fid
        self.org = org
        self.bankid = bankid
        self.accttype = accttype
        self.acctid = acctid
        self.balance = balance
        self.curdef = curdef
        self.lang = lang
        self.debug = debug
        self.dayfirst = dayfirst
        self.parsed_csv = None
        # FIXME: Move this to one of the OFX generation classes (Document or Response).
        # Cleaned transactions keyed by YYYYMMDD date string.
        self.txns_by_date = {}
        if self.debug: sys.stderr.write("Parsing document.\n")
        parser = ofxtools.QifParser() # debug=debug)
        self.parsed_qif = parser.parse(self.qif)
        if self.debug: sys.stderr.write("Cleaning transactions.\n")
        # We do a two-pass conversion in order to check the dates of all
        # transactions in the statement, and convert all the dates using
        # the same date format. The first pass does nothing but look
        # at dates; the second actually applies the date conversion and
        # all other conversions, and extracts information needed for
        # the final output (like date range).
        txn_list = self._extract_txn_list(self.parsed_qif)
        self._guess_formats(txn_list)
        self._clean_txn_list(txn_list)
def _extract_txn_list(self, qif):
stmt_obj = qif.asDict()["QifStatement"]
if self.accttype == "UNKNOWN":
if "BankTransactions" in stmt_obj:
self.accttype = "CHECKING"
elif "CreditCardTransactions" in stmt_obj:
self.accttype = "CREDITCARD"
txn_list = []
for stmt in stmt_obj:
for txn in stmt:
txn_list.append(txn)
if len(txn_list) == 0:
raise ValueError("Found no transactions to convert " +
"in the QIF source.")
else:
return txn_list
#
# Date methods
#
def _guess_formats(self, txn_list):
# Go through the transactions one at a time, and try to parse the date
# field and currency format. If we check the date format and find a
# transaction where the first number must be the day (that is, the first
# number is in the range 13..31), then set the state of the converter to
# use dayfirst for all transaction cleanups. This is a guess because the
# method will only work for UK dates if the statement contains a day in
# the 13..31 range. (We could also test whether a date appears out of
# order, or whether the jumps between transactions are especially long,
# if this guessing method doesn't work reliably.)
for txn_obj in txn_list:
txn = txn_obj.asDict()
txn_date = txn.get("Date", "UNKNOWN")
txn_currency = txn.get("Currency", "UNKNOWN")
# Look for date format.
parsed_date = self._parse_date(txn_date)
self._check_date_format(parsed_date)
def _parse_date(self, txn_date, dayfirst=False):
    def _check_date_format(self, parsed_date):
        # If we *ever* find a date that parses as dayfirst, treat
        # *all* transactions in this statement as dayfirst.
        # NOTE(review): microsecond == 3 looks like a sentinel planted by
        # _parse_date (whose body is missing here) to flag dates that
        # only parse day-first -- confirm against the parser before
        # relying on this check.
        if parsed_date is not None and parsed_date != "UNKNOWN" and parsed_date.microsecond == 3:
            self.dayfirst = True
#
# Cleanup methods
#
def _clean_txn_list(self, txn_list):
for txn_obj in txn_list:
try:
txn = self._clean_txn(txn_obj)
txn_date = txn["Date"]
txn_date_list = self.txns_by_date.get(txn_date, [])
txn_date_list.append(txn)
self.txns_by_date[txn_date] = txn_date_list
except ValueError:
# The _clean_txn method will sometimes find transactions
# that are inherently unclean and are unable to be purified.
# In these cases it will reject the transaction by throwing
# a ValueError, which signals us not to store the transaction.
if self.debug: sys.stderr.write("Skipping transaction '%s'." %
str(txn_obj.asDict()))
# Sort the dates (in YYYYMMDD format) and choose the lowest
# date as our start date, and the highest date as our end
# date.
date_list = self.txns_by_date.keys()
date_list.sort()
self.start_date = date_list[0]
self.end_date = date_list[-1]
    def _clean_txn(self, txn_obj):
        """Normalize one parsed transaction into OFX-ready field values.

        This is sort of the brute-force method of the converter. It looks
        at the data we get from the bank and tries as hard as possible to
        make best-effort guesses about what the OFX 2.0 standard values
        for the transaction should be. There's a reasonable amount of
        guesswork in here -- some of it wise, maybe some of it not.

        NOTE(review): the original comment claimed this "will return
        None" for bad transactions, but the visible helpers signal
        rejection by raising ValueError instead (handled by
        _clean_txn_list); this method itself always returns the dict.
        """
        txn = txn_obj.asDict()
        self._clean_txn_date(txn)
        self._clean_txn_amount(txn)
        self._clean_txn_number(txn)
        self._clean_txn_type(txn)
        self._clean_txn_payee(txn)
        return txn
def _clean_txn_date(self, txn):
txn_date = txn.get("Date", "UNKNOWN").strip()
if txn_date != "UNKNOWN":
parsed_date = self._parse_date(txn_date, dayfirst=self.dayfirst)
txn["Date"] = parsed_date.strftime("%Y%m%d")
else:
txn["Date"] = "UNKNOWN"
def _clean_txn_amount(self, txn):
txn_amount = txn.get("Amount", "00.00")
txn_amount2 = txn.get("Amount2", "00.00")
# Home Depot Credit Card seems to send two transaction records for each
# transaction. They're out of order (that is, the second record is not
# directly after the first, nor even necessarily after it at all), and
# the second one *sometimes* appears to be a memo field on the first one
# (e.g., a credit card payment will show up with an amount and date, and
# then the next transaction will have the same date and a payee that
# reads, "Thank you for your payment!"), and *sometimes* is the real
# payee (e.g., the first will say "Home Depot" and the second will say
# "Seasonal/Garden"). One of the two transaction records will have a
# transaction amount of "-", and the other will have the real
# transaction amount. Ideally, we would pull out the memo and attach it
# to the right transaction, but unless the two transactions are the only
# transactions on that date, there doesn't seem to be a good clue (order
# in statement, amount, etc.) as to how to associate them. So, instead,
# we're returning None, which means this transaction should be removed
# from the statement and not displayed to the user. The re
|
zwChan/VATEC
|
~/eb-virt/Lib/encodings/iso2022_jp_1.py
|
Python
|
apache-2.0
| 1,100
| 0.006364
|
#
# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_1')
class Codec(codecs.Codec):
    # Stateless encode/decode entry points, delegated to the C codec
    # object created above.
    encode = codec.encode
    decode = codec.decode
class Incre
|
mentalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder)
|
:
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream reader backed by the multibyte C codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream writer backed by the multibyte C codec.
    codec = codec
def getregentry():
    """Return the CodecInfo entry registering the 'iso2022_jp_1' codec.

    Called by the encodings package search function when this codec is
    first looked up.
    """
    return codecs.CodecInfo(
        name='iso2022_jp_1',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
geographika/mappyfile
|
docs/examples/geometry/geometry.py
|
Python
|
mit
| 1,786
| 0.004479
|
import os
from copy import deepcopy
from shapely.geometry import LineString
import mappyfile
import sys, os
sys.path.append(os.path.abspath("./docs/examples"))
from helper import create_image
def dilation(mapfile):
    """Buffer a zig-zag line outward and store both shapes in the Mapfile.

    Writes the line's WKT into the "line" layer, the dilated polygon into
    the "polygon" layer, grows the map extent to fit, and returns the
    dilated shapely geometry for later erosion.
    """
    zigzag = LineString([(0, 0), (1, 1), (0, 2), (2, 2), (3, 1), (1, 0)])
    line_layer = mappyfile.find(mapfile["layers"], "name", "line")
    line_layer["features"][0]["wkt"] = zigzag.wkt
    dilated = zigzag.buffer(0.5, cap_style=3)
    polygon_layer = mappyfile.find(mapfile["layers"], "name", "polygon")
    polygon_layer["features"][0]["wkt"] = dilated.wkt
    mapfile["extent"] = " ".join(map(str, dilated.buffer(0.8).bounds))
    return dilated
def erosion(mapfile, dilated):
    """
    We will continue to work with the modified Mapfile.
    If we wanted to start from scratch we could simply reread it.

    Hides the line layer, clones the polygon layer (so changes stay
    local to the clone), shrinks the dilated shape by 0.3, and recolors
    the original polygon layer's first style.
    """
    ll = mappyfile.find(mapfile["layers"], "name", "line")
    ll["status"] = "OFF"
    pl = mappyfile.find(mapfile["layers"], "name", "polygon")
    # make a deep copy of the polygon layer in the Map
    # so any modifications are made to this layer only
    pl2 = deepcopy(pl)
    pl2["name"] = "newpolygon"
    mapfile["layers"].append(pl2)
    dilated = dilated.buffer(-0.3)
    pl2["features"][0]["wkt"] = dilated.wkt
    style = pl["classes"][0]["styles"][0]
    # Color literal reconstructed: the source line was split mid-string.
    style["color"] = "#999999"
    style["outlinecolor"] = "#b2b2b2"
def main():
    # Render the dilation example, then the erosion example, writing
    # "dilated" and "erosion" images into docs/images.
    mf = "./docs/examples/geometry/geometry.map"
    mapfile = mappyfile.open(mf)
    mapfile["size"] = [600, 600]
    output_folder = os.path.join(os.getcwd(), "docs/images")
    dilated = dilation(mapfile)
    create_image("dilated", mapfile, output_folder=output_folder)
    erosion(mapfile, dilated)
    create_image("erosion", mapfile, output_folder=output_folder)
# Script entry point (the example is meant to be run from the repo root).
if __name__ == "__main__":
    main()
    print("Done!")
|
chenhh/PyMOGEP
|
src/PyMOGEP/decorator.py
|
Python
|
gpl-2.0
| 2,384
| 0.005872
|
#-*-coding:utf-8-*-
'''
@author: Hung-Hsin Chen
@mail: chenhh@par.cse.nsysu.edu.tw
@license: GPLv2
'''
import functools
def symbol(sym):
    '''
    Decorator factory that tags a function with a symbol.
    The symbol is stored in the function.symbol attribute.
    @param sym: symbol to attach to the decorated function
    '''
    def _attach(func):
        '''
        Store *sym* on *func* as its 'symbol' attribute and return
        the function unchanged.
        @param func: function to decorate
        '''
        func.symbol = sym
        return func
    return _attach
def cache(func):
    '''
    Cache the result of a class member method that takes no arguments.
    The return value is cached on self._{method}_cache where
    {method} is the name of the method.
    usage:
        @cache
        def _get_something(self):
            ...
            return 'something'
    '''
    # func.__name__ works on both Python 2 and 3; the original used
    # func.func_name, an attribute that was removed in Python 3.
    cache_name = '_%s_cache' % (func.__name__,)
    @functools.wraps(func)
    def decorator(self):
        '''Compute once, then serve the value stored on self.'''
        try:
            return getattr(self, cache_name)
        except AttributeError:
            # Haven't cached anything yet
            setattr(self, cache_name, func(self))
            return getattr(self, cache_name)
    return decorator
#
# def memory(func):
# '''
# cache result of the class member method which has exact one argument.
# self._{method}_memory where {method} is the name of the method.
#
# Note that the arg must be hashable, thus lists can't be memoized.
# The name of the memoized attribute is stored on the method
# itself as func.memory.
# usage:
# @memoize
# def _compute_something(self, arg):
# ...
# return 'something'
# '''
# func.memory = memory_name = '_%s_memory' %( func.func_name)
#
# @functools.wraps(func)
# def decorator(self, key):
# '''Assigns a memo hash to self on demand'''
# try:
# memo = getattr
|
(self, memory_name)
# except AttributeError:
# # Haven't memoized anything yet
# memo = {}
# setattr(self, memo
|
ry_name, memo)
#
# try:
# return memo[key]
# except KeyError:
# # Haven't seen this key yet
# memo[key] = results = func(self, key)
# return results
# return decorator
|
alingse/panshell
|
panshell/core.py
|
Python
|
apache-2.0
| 2,543
| 0
|
# coding=utf-8
from __future__ import print_function
import cmd
import inspect
import sys
from panshell.base import FS
class Shell(cmd.Cmd):
    def __init__(self):
        cmd.Cmd.__init__(self)
        self.stack = []    # previously-active FS instances, popped by do_exit
        self.fsmap = {}    # fs name -> (fs class, setting kwargs, fs instance)
        self.fs = None     # currently active FS, if any
        self._funcs = []   # do_* action names borrowed from the active FS
        self._keywords = ['use', 'exit']  # shell-level commands, never delegated
@property
def
|
prompt(self):
if self.fs:
return self.fs.prompt
return 'pansh$>'
def plugin(self, fscls, **setting):
if not issubclass(fscls, FS):
raise Exception('must inherit `panshell.base.FS`')
name = fscls.name
if name in self.fsmap:
raise Exception('FS <{}> has already plugin in '.format(name))
fs = fscls(**setting)
self.fsmap[name] = (fscls, setti
|
ng, fs)
    def get_names(self):
        """
        rewrite cmd.Cmd `dir(self.__class__)`

        Using dir(self) instead of dir(self.__class__) means the do_*
        handlers copied onto the *instance* by _plugin_in are visible to
        cmd's help/completion machinery.
        """
        return dir(self)
def __getattr__(self, name):
if name.startswith('do_'):
action = name[3:]
if action not in self._keywords:
return getattr(self.fs, name)
if name in self.__dict__:
return self.__dict__[name]
return cmd.Cmd.__getattr__(name)
    def _plugin_in(self, fs):
        # Copy every do_<action> bound method from the FS onto this shell
        # instance (skipping shell-level keywords), recording each action
        # name so _plugin_out can remove the handler later.
        for name in dir(fs):
            action = name[3:]
            if name.startswith('do_') and action not in self._keywords:
                attr = getattr(fs, name)
                if inspect.ismethod(attr):
                    self._funcs.append(action)
                    setattr(self, name, attr)
def _plugin_out(self):
for action in self._funcs:
name = 'do_' + action
delattr(self, name)
self._funcs = []
    def set_fs(self, fs):
        # Swap the active FS: unhook the old one's commands first, then
        # hook in the new one's. `fs` may be None (back at the bare
        # shell); _plugin_in on None simply finds no do_* attributes.
        if self.fs is not None:
            self._plugin_out()
        self.fs = fs
        self._plugin_in(fs)
def do_use(self, name):
"""use <fs> 选择使用某个fs
use baidu
use local
"""
if name not in self.fsmap:
raise Exception('not plugin in this FS with name %s', name)
fscls, setting, _ = self.fsmap[name]
fs = fscls(**setting)
self.stack.append(self.fs)
self.set_fs(fs)
    def do_exit(self, line):
        """
        退出 shell 或 当前 fs
        """
        # (Docstring above is cmd's runtime help text; kept verbatim.)
        # With no active FS, exit the whole process; otherwise let the
        # FS clean up and fall back to the previous one on the stack.
        if self.fs is None:
            print('exit-shell', file=sys.stdout)
            sys.exit(0)
        self.fs.do_exit(line)
        self.set_fs(self.stack.pop())
    def run(self):
        # Enter the interactive read-eval loop; blocks until do_exit.
        self.cmdloop()
|
kisom/crypto_intro
|
src/secretkey.py
|
Python
|
isc
| 2,939
| 0
|
# secretkey.py: secret-key cryptographic functions
"""
Secret-key functions from chapter 1 of "A Working Introduction to
Cryptography with Python".
"""
import Crypto.Cipher.AES as AES
import Crypto.Hash.HMAC as HMAC
import Crypto.Hash.SHA384 as SHA384
import Crypto.Random.OSRNG.posix as RNG
import pbkdf2
import streql
__AES_KEYLEN = 32
__TAG_KEYLEN = 48
__TAG_LEN = __TAG_KEYLEN
KEYSIZE = __AES_KEYLEN + __TAG_KEYLEN
def pad_data(data):
    """pad_data pads out the data to an AES block length.

    Uses 0x80-then-zeros padding. NOTE(review): data that is already
    block-aligned is returned unchanged (no 0x80 marker added), so a
    plaintext legitimately ending in 0x80 + zeros would be truncated by
    unpad_data -- confirm this is acceptable for the protocol before
    reuse. (The comment lines below were reconstructed; the source was
    split by a stray artifact line.)
    """
    # return data if no padding is required
    if len(data) % 16 == 0:
        return data
    # subtract one byte that should be the 0x80;
    # if 0 bytes of padding are required, it means only
    # a single \x80 is required.
    padding_required = 15 - (len(data) % 16)
    data = '%s\x80' % data
    data = '%s%s' % (data, '\x00' * padding_required)
    return data
def unpad_data(data):
    """unpad_data removes 0x80+zeros padding from the data.

    (The rstrip call was reconstructed; the source line was split
    mid-identifier by a stray artifact line.)
    """
    if not data:
        return data
    data = data.rstrip('\x00')
    # Guard against data consisting entirely of zero bytes: after the
    # rstrip nothing is left and the original code raised IndexError on
    # data[-1]. Such input is not valid padding output, so return the
    # stripped (empty) value instead of crashing.
    if data and data[-1] == '\x80':
        return data[:-1]
    return data
def generate_nonce():
    """Generate a random number used once: one AES block (16 bytes)
    of OS randomness, used as the CBC IV by encrypt()."""
    return RNG.new().read(AES.block_size)
def new_tag(ciphertext, key):
    """Compute a new message tag using HMAC-SHA-384 (48-byte digest,
    matching __TAG_LEN)."""
    return HMAC.new(key, msg=ciphertext, digestmod=SHA384).digest()
def verify_tag(ciphertext, key):
    """Verify the trailing HMAC tag on a ciphertext in constant time."""
    body = ciphertext[:-__TAG_LEN]
    claimed_tag = ciphertext[-__TAG_LEN:]
    expected_tag = new_tag(body, key)
    return streql.equals(expected_tag, claimed_tag)
def decrypt(ciphertext, key):
    """
    Decrypt a ciphertext encrypted with AES in CBC mode; assumes the IV
    has been prepended to the ciphertext.

    `key` is KEYSIZE bytes: the AES key (first __AES_KEYLEN bytes)
    followed by the HMAC tag key. Returns (plaintext, True) on success,
    or (None, False) when the message is too short or the tag fails.
    Encrypt-then-MAC: the tag is verified before any decryption.
    """
    if len(ciphertext) <= AES.block_size:
        return None, False
    tag_start = len(ciphertext) - __TAG_LEN
    ivec = ciphertext[:AES.block_size]
    data = ciphertext[AES.block_size:tag_start]
    # Tag covers ivec + ctxt (everything before the tag itself).
    if not verify_tag(ciphertext, key[__AES_KEYLEN:]):
        return None, False
    aes = AES.new(key[:__AES_KEYLEN], AES.MODE_CBC, ivec)
    data = aes.decrypt(data)
    return unpad_data(data), True
def encrypt(data, key):
    """
    Encrypt data using AES in CBC mode. The IV is prepended to the
    ciphertext.

    `key` is KEYSIZE bytes: AES key followed by HMAC tag key.
    Output layout: IV || ciphertext || HMAC-SHA-384 tag, where the tag
    covers IV || ciphertext (encrypt-then-MAC).
    """
    data = pad_data(data)
    ivec = generate_nonce()
    aes = AES.new(key[:__AES_KEYLEN], AES.MODE_CBC, ivec)
    ctxt = aes.encrypt(data)
    tag = new_tag(ivec+ctxt, key[__AES_KEYLEN:])
    return ivec + ctxt + tag
def generate_salt(salt_len):
    """Generate a salt of `salt_len` random bytes for use with PBKDF2."""
    return RNG.new().read(salt_len)
def password_key(passphrase, salt=None):
    """Generate a key from a passphrase. Returns the tuple (salt, key).

    Derives KEYSIZE bytes (AES key + HMAC key) via PBKDF2 with 16384
    iterations; a fresh random 16-byte salt is created when none is
    supplied, and is returned so the key can be re-derived later.
    """
    if salt is None:
        salt = generate_salt(16)
    passkey = pbkdf2.PBKDF2(passphrase, salt, iterations=16384).read(KEYSIZE)
    return salt, passkey
|
jucimarjr/IPC_2017-1
|
lista02/lista02_exercicio01_questao05.py
|
Python
|
apache-2.0
| 883
| 0.002291
|
# ----------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# Edson de Lima Barros 1715310043
# Gabriel Nascimento de Oliveira 1715310052
# Luiz Daniel Raposo Nunes de Mello 1715310049
# Renan de Almeida Campos                    0825060036
# Tiago Ferreira Aranha 1715310047
# Wilbert Luís Evangelista Marins 1715310055
# Mackson Garcez Moreno de Oliveira júnior 1215090300
#
# 1.5. Faça um Programa que converta metros para centímetros.
# ----------------------------------------------------------
# Read the length in metres and report it in centimetres (1 m = 100 cm).
length_in_meters = int(input('Digite a medida (em metros): '))
length_in_centimeters = length_in_meters * 100
print ('%d metros são %d centímetros' % (length_in_meters, length_in_centimeters))
|
oblique-labs/pyVM
|
rpython/rtyper/rstr.py
|
Python
|
mit
| 35,786
| 0.000643
|
from rpython.annotator import model as annmodel
from rpython.rlib import jit
from rpython.rtyper import rint
from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem.lltype import Signed, Bool, Void, UniChar
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.rmodel import IteratorRepr, inputconst, Repr
from rpython.rtyper.rint import IntegerRepr
from rpython.rtyper.rfloat import FloatRepr
from rpython.tool.pairtype import pairtype, pair
from rpython.tool.sourcetools import func_with_new_name
class AbstractStringRepr(Repr):
    @jit.elidable
    def ll_decode_utf8(self, llvalue):
        """Decode a low-level string as strict UTF-8 into low-level unicode.

        Elidable for the JIT: the result depends only on the argument.
        """
        from rpython.rtyper.annlowlevel import hlstr
        from rpython.rlib import runicode
        value = hlstr(llvalue)
        assert value is not None
        errorhandler = runicode.default_unicode_error_decode
        # NB. keep the arguments in sync with annotator/unaryop.py
        u, pos = runicode.str_decode_utf_8_elidable(
            value, len(value), 'strict', True, errorhandler, True)
        # XXX maybe the whole ''.decode('utf-8') should be not RPython.
        return self.ll.llunicode(u)
    def _str_reprs(self, hop):
        # Convenience: the string reprs of the two operands of `hop`.
        return hop.args_r[0].repr, hop.args_r[1].repr
    def get_ll_eq_function(self):
        # Low-level equality helper used when strings are dict keys etc.
        return self.ll.ll_streq
    def get_ll_hash_function(self):
        # Low-level hash helper for string-keyed dicts.
        return self.ll.ll_strhash
    def get_ll_fasthash_function(self):
        # Fast-path variant of the hash helper.
        return self.ll.ll_strfasthash
    def rtype_len(self, hop):
        # len(s): direct call to the low-level strlen helper.
        string_repr = self.repr
        v_str, = hop.inputargs(string_repr)
        return hop.gendirectcall(self.ll.ll_strlen, v_str)
    def rtype_bool(self, hop):
        # bool(s): when the annotation allows None we need the helper
        # that treats None as false; otherwise the generic length-based
        # truth test from the superclass suffices.
        s_str = hop.args_s[0]
        if s_str.can_be_None:
            string_repr = hop.args_r[0].repr
            v_str, = hop.inputargs(string_repr)
            return hop.gendirectcall(self.ll.ll_str_is_true, v_str)
        else:
            # defaults to checking the length
            return super(AbstractStringRepr, self).rtype_bool(hop)
    def rtype_method_startswith(self, hop):
        # s.startswith(prefix): pick the single-char or whole-string
        # helper depending on the repr of the argument.
        str1_repr = hop.args_r[0].repr
        str2_repr = hop.args_r[1]
        v_str = hop.inputarg(str1_repr, arg=0)
        if str2_repr == str2_repr.char_repr:
            v_value = hop.inputarg(str2_repr.char_repr, arg=1)
            fn = self.ll.ll_startswith_char
        else:
            v_value = hop.inputarg(str2_repr, arg=1)
            fn = self.ll.ll_startswith
        hop.exception_cannot_occur()
        return hop.gendirectcall(fn, v_str, v_value)
    def rtype_method_endswith(self, hop):
        # s.endswith(suffix): mirror image of rtype_method_startswith.
        str1_repr = hop.args_r[0].repr
        str2_repr = hop.args_r[1]
        v_str = hop.inputarg(str1_repr, arg=0)
        if str2_repr == str2_repr.char_repr:
            v_value = hop.inputarg(str2_repr.char_repr, arg=1)
            fn = self.ll.ll_endswith_char
        else:
            v_value = hop.inputarg(str2_repr, arg=1)
            fn = self.ll.ll_endswith
        hop.exception_cannot_occur()
        return hop.gendirectcall(fn, v_str, v_value)
def rtype_method_find(self, hop, reverse=False):
# XXX binaryop
string_repr = hop.args_r[0].repr
char_repr = hop.args_r[0].char_repr
v_str = hop.inputarg(string_repr, arg=0)
if hop.args_r[1] == char_repr:
v_value = hop.inputarg(char_repr, arg=1)
llfn = reverse and self.ll.ll_rfind_char or self.ll.ll_find_char
else:
v_value = hop.inputarg(string_repr, arg=1)
llfn = reverse and self.ll.ll_rfind or self.ll.ll_find
if hop.nb_args > 2:
v_start = hop.inputarg(Signed, arg=2)
if not hop.args_s[2].nonneg:
raise TyperError("str.%s() start must be proven non-negative"
% (reverse and 'rfind' or 'find',))
else:
v_start = hop.inputconst(Signed, 0)
if hop.nb_args > 3:
v_end = hop.inputarg(Signed, arg=3)
if not hop.args_s[3].nonneg:
raise TyperError("str.%s() end must be proven non-negative"
% (reverse and 'rfind' or 'find',))
else:
v_end = hop.gendirectcall(self.ll.ll_strlen, v_str)
hop.exception_cannot_occur()
return hop.gendirectcall(llfn, v_str, v_value, v_start, v_end)
    def rtype_method_rfind(self, hop):
        # s.rfind(...): same typing as find, searching from the right.
        return self.rtype_method_find(hop, reverse=True)
    def rtype_method_count(self, hop):
        # s.count(sub[, start[, end]]): char vs string helper chosen from
        # the argument repr; start/end default to 0 and len(s) and must
        # be proven non-negative at annotation time.
        rstr = hop.args_r[0].repr
        v_str = hop.inputarg(rstr.repr, arg=0)
        if hop.args_r[1] == rstr.char_repr:
            v_value = hop.inputarg(rstr.char_repr, arg=1)
            llfn = self.ll.ll_count_char
        else:
            v_value = hop.inputarg(rstr.repr, arg=1)
            llfn = self.ll.ll_count
        if hop.nb_args > 2:
            v_start = hop.inputarg(Signed, arg=2)
            if not hop.args_s[2].nonneg:
                raise TyperError("str.count() start must be proven non-negative")
        else:
            v_start = hop.inputconst(Signed, 0)
        if hop.nb_args > 3:
            v_end = hop.inputarg(Signed, arg=3)
            if not hop.args_s[3].nonneg:
                raise TyperError("str.count() end must be proven non-negative")
        else:
            v_end = hop.gendirectcall(self.ll.ll_strlen, v_str)
        hop.exception_cannot_occur()
        return hop.gendirectcall(llfn, v_str, v_value, v_start, v_end)
    def rtype_method_strip(self, hop, left=True, right=True):
        # s.strip([chars]): dispatch on the optional argument --
        # string arg -> strip any of its characters; char arg -> strip
        # that one character; no arg -> strip whitespace (default).
        # left/right select lstrip/rstrip behaviour (see below).
        rstr = hop.args_r[0].repr
        v_str = hop.inputarg(rstr.repr, arg=0)
        args_v = [v_str]
        if len(hop.args_s) == 2:
            if isinstance(hop.args_s[1], annmodel.SomeString):
                v_stripstr = hop.inputarg(rstr.repr, arg=1)
                args_v.append(v_stripstr)
                func = self.ll.ll_strip_multiple
            else:
                v_char = hop.inputarg(rstr.char_repr, arg=1)
                args_v.append(v_char)
                func = self.ll.ll_strip
        else:
            func = self.ll.ll_strip_default
        args_v.append(hop.inputconst(Bool, left))
        args_v.append(hop.inputconst(Bool, right))
        hop.exception_is_here()
        return hop.gendirectcall(func, *args_v)
    def rtype_method_lstrip(self, hop):
        # s.lstrip(...): strip from the left side only.
        return self.rtype_method_strip(hop, left=True, right=False)
    def rtype_method_rstrip(self, hop):
        # s.rstrip(...): strip from the right side only.
        return self.rtype_method_strip(hop, left=False, right=True)
    def rtype_method_upper(self, hop):
        # s.upper(): direct call to the low-level helper.
        string_repr = hop.args_r[0].repr
        v_str, = hop.inputargs(string_repr)
        hop.exception_cannot_occur()
        return hop.gendirectcall(self.ll.ll_upper, v_str)
    def rtype_method_lower(self, hop):
        # s.lower(): direct call to the low-level helper.
        string_repr = hop.args_r[0].repr
        v_str, = hop.inputargs(string_repr)
        hop.exception_cannot_occur()
        return hop.gendirectcall(self.ll.ll_lower, v_str)
def rtype_metho
|
d_isdigit(self, hop):
string_repr = hop.args_r[0].repr
[v_str] = hop.inputargs(string_repr)
hop.exception_cannot_occur()
return hop.gendirectcall(self.ll.ll_isdigit, v_str)
    def rtype_method_isalpha(self, hop):
        # s.isalpha(): direct call to the low-level predicate.
        string_repr = hop.args_r[0].repr
        [v_str] = hop.inputargs(string_repr)
        hop.exception_cannot_occur()
        return hop.gendirectcall(self.ll.ll_isalpha, v_str)
def rtype_method_isalnum(self, hop):
|
string_repr = hop.args_r[0].repr
[v_str] = hop.inputargs(string_repr)
hop.exception_cannot_occur()
return hop.gendirectcall(self.ll.ll_isalnum, v_str)
    def _list_length_items(self, hop, v_lst, LIST):
        """Return two Variables containing the length and items of a
        list. Needs to be overridden because it is typesystem-specific."""
        raise NotImplementedError
def rtype_method_join(self, hop):
from rpython.rtyper.lltypesystem.rlist import BaseListRepr
from rpython.rtyper.lltypesystem.rstr import char_repr, unichar_repr
hop.exception_cannot_occur()
rstr = hop.args_r[0]
if hop.s_result.is_constant():
return inputconst(rstr.repr, hop.s_result.const)
r_lst = hop.args_r[1]
if not i
|
8l/beri
|
cheritest/trunk/tests/branch/test_raw_bltzall_lt_back.py
|
Python
|
apache-2.0
| 1,853
| 0.003238
|
#-
# Copyright (c) 2011 Steven J. Murdoch
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
#   http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
class test_raw_bltzall_lt_back(BaseBERITestCase):
    """Register-level checks for a taken backwards BLTZALL branch.

    The accompanying assembly test sets a0..a4; each method asserts one
    aspect of branch / branch-delay / link-register behaviour.
    (Class name reconstructed: the source header line was split
    mid-token by a stray artifact line.)
    """
    def test_before_bltzall(self):
        self.assertRegisterNotEqual(self.MIPS.a0, 0, "instruction before bltzall missed")
    def test_bltzall_branch_delay(self):
        # Message typo fixed ("brach" -> "branch").
        self.assertRegisterEqual(self.MIPS.a1, 2, "instruction in branch-delay slot missed")
    def test_bltzall_skipped(self):
        self.assertRegisterNotEqual(self.MIPS.a2, 3, "bltzall didn't branch")
    def test_bltzall_target(self):
        self.assertRegisterEqual(self.MIPS.a3, 4, "instruction at branch target didn't run")
    def test_bltzall_ra(self):
        self.assertRegisterEqual(self.MIPS.a4, self.MIPS.ra, "bltzall ra incorrect")
bcrochet/eve
|
eve/io/mongo/geo.py
|
Python
|
bsd-3-clause
| 3,352
| 0
|
# -*- coding: utf-8 -*-
"""
eve.io.mongo.geo
~~~~~~~~~~~~~~~~~~~
Geospatial functions and classes for mongo IO layer
    :copyright: (c) 2017 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
class GeoJSON(dict):
    """Base dict wrapper for GeoJSON objects.

    Requires a 'type' member and exactly two members in total (e.g.
    'type' plus 'coordinates', or 'type' plus 'geometries').
    (Reconstructed: a stray artifact line sat between the two methods
    in the source; the error-message typo "compilant" is also fixed.)
    """
    def __init__(self, json):
        try:
            self['type'] = json['type']
        except KeyError:
            # 'type' is mandatory for every GeoJSON object.
            raise TypeError("Not compliant to GeoJSON")
        self.update(json)
        if len(self.keys()) != 2:
            raise TypeError("Not compliant to GeoJSON")

    def _correct_position(self, position):
        """True if *position* is a list of ints/floats (a GeoJSON position)."""
        return isinstance(position, list) and \
            all(isinstance(pos, int) or isinstance(pos, float)
                for pos in position)
class Geometry(GeoJSON):
    """A concrete geometry: requires a list 'coordinates' member and a
    'type' equal to the subclass name. ("compilant" typo fixed in the
    error message, consistently with the other validators.)"""
    def __init__(self, json):
        super(Geometry, self).__init__(json)
        try:
            if not isinstance(self['coordinates'], list) or \
                    self['type'] != self.__class__.__name__:
                raise TypeError
        except (KeyError, TypeError):
            raise TypeError("Geometry not compliant to GeoJSON")
class GeometryCollection(GeoJSON):
    """A collection of geometries: validates each entry of 'geometries'
    through the factory registered for its 'type'. ("compilant" typo
    fixed in the error message, consistently with the other
    validators.)"""
    def __init__(self, json):
        super(GeometryCollection, self).__init__(json)
        try:
            if not isinstance(self['geometries'], list):
                raise TypeError
            for geometry in self['geometries']:
                # Re-validate every nested geometry via its factory.
                factory = factories[geometry["type"]]
                factory(geometry)
        except (KeyError, TypeError, AttributeError):
            raise TypeError("Geometry not compliant to GeoJSON")
class Point(Geometry):
    """GeoJSON Point: 'coordinates' is a single position."""
    def __init__(self, json):
        super(Point, self).__init__(json)
        if not self._correct_position(self['coordinates']):
            # NOTE(review): raised without a message, unlike the other
            # validators in this module -- confirm whether intentional.
            raise TypeError
class MultiPoint(GeoJSON):
    """GeoJSON MultiPoint: 'coordinates' is a list of positions."""
    def __init__(self, json):
        super(MultiPoint, self).__init__(json)
        for position in self["coordinates"]:
            if not self._correct_position(position):
                raise TypeError
class LineString(GeoJSON):
    """GeoJSON LineString: 'coordinates' is a list of positions."""
    def __init__(self, json):
        super(LineString, self).__init__(json)
        for position in self["coordinates"]:
            if not self._correct_position(position):
                raise TypeError
class MultiLineString(GeoJSON):
    """GeoJSON MultiLineString: a list of linestrings (lists of positions)."""
    def __init__(self, json):
        super(MultiLineString, self).__init__(json)
        for linestring in self["coordinates"]:
            for position in linestring:
                if not self._correct_position(position):
                    raise TypeError
class Polygon(GeoJSON):
    """GeoJSON Polygon: a list of rings (lists of positions)."""
    def __init__(self, json):
        super(Polygon, self).__init__(json)
        for linestring in self["coordinates"]:
            for position in linestring:
                if not self._correct_position(position):
                    raise TypeError
class MultiPolygon(GeoJSON):
    """GeoJSON MultiPolygon: a list of polygons (lists of rings)."""
    def __init__(self, json):
        super(MultiPolygon, self).__init__(json)
        for polygon in self["coordinates"]:
            for linestring in polygon:
                for position in linestring:
                    if not self._correct_position(position):
                        raise TypeError
# Maps GeoJSON "type" strings to their validating classes; used by
# GeometryCollection to validate each nested geometry. A dict
# comprehension replaces the original dict([(k, v) for ...]) wrapper.
factories = {_type.__name__: _type
             for _type in
             [GeometryCollection, Point, MultiPoint, LineString,
              MultiLineString, Polygon, MultiPolygon]}
|
ssh1/stbgui
|
lib/python/Screens/InfoBarGenerics.py
|
Python
|
gpl-2.0
| 119,400
| 0.029414
|
from ChannelSelection import ChannelSelection, BouquetSelect
|
or, SilentBouquetSelector
from Components.ActionMap
|
import ActionMap, HelpableActionMap
from Components.ActionMap import NumberActionMap
from Components.Harddisk import harddiskmanager
from Components.Input import Input
from Components.Label import Label
from Components.MovieList import AUDIO_EXTENSIONS, MOVIE_EXTENSIONS, DVD_EXTENSIONS
from Components.PluginComponent import plugins
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Sources.Boolean import Boolean
from Components.config import config, ConfigBoolean, ConfigClock, ConfigText
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import preferredInstantRecordPath, defaultMoviePath, ConfigSelection
from Components.VolumeControl import VolumeControl
from Components.Sources.StaticText import StaticText
from EpgSelection import EPGSelection
from Plugins.Plugin import PluginDescriptor
from Screen import Screen
from Screens import ScreenSaver
from Screens import Standby
from Screens.ChoiceBox import ChoiceBox
from Screens.Dish import Dish
from Screens.EventView import EventViewEPGSelect, EventViewSimple
from Screens.InputBox import InputBox
from Screens.MessageBox import MessageBox
from Screens.MinuteInput import MinuteInput
from Screens.TimerSelection import TimerSelection
from Screens.PictureInPicture import PictureInPicture
import Screens.Standby
from Screens.SubtitleDisplay import SubtitleDisplay
from Screens.RdsDisplay import RdsInfoDisplay, RassInteractive
from Screens.TimeDateInput import TimeDateInput
from Screens.UnhandledKey import UnhandledKey
from ServiceReference import ServiceReference, isPlayableForCur
from Tools import Notifications, ASCIItranslit
from Tools.Directories import fileExists, getRecordingFilename, moveFiles
from enigma import eTimer, eServiceCenter, eDVBServicePMTHandler, iServiceInformation, \
iPlayableService, eServiceReference, eEPGCache, eActionMap
from time import time, localtime, strftime
import os
from bisect import insort
from sys import maxint
####key debug
# from keyids import KEYIDS
# from datetime import datetime
from RecordTimer import RecordTimerEntry, RecordTimer, findSafeRecordPath
# hack alert!
from Menu import MainMenu, mdom
def isStandardInfoBar(self):
    # True only for the plain InfoBar screen itself; subclasses with
    # other names (e.g. MoviePlayer) do not count.
    screen_class_name = self.__class__.__name__
    return screen_class_name == "InfoBar"
def setResumePoint(session):
    """Remember the current play position for the playing service.

    Stores [lru_timestamp, position, length] in the module-level
    resumePointCache keyed by the service reference string, caps the
    cache at 50 entries, and flushes to disk at most once an hour.
    """
    global resumePointCache, resumePointCacheLast
    service = session.nav.getCurrentService()
    ref = session.nav.getCurrentlyPlayingServiceOrGroup()
    if (service is not None) and (ref is not None): # and (ref.type != 1):
        # ref type 1 has its own memory...
        seek = service.seek()
        if seek:
            pos = seek.getPlayPosition()
            if not pos[0]:
                key = ref.toString()
                lru = int(time())
                l = seek.getLength()
                if l:
                    l = l[1]
                else:
                    l = None
                resumePointCache[key] = [lru, pos[1], l]
                if len(resumePointCache) > 50:
                    # NOTE(review): this evicts the *last* iterated entry
                    # older than `lru` (i.e. an arbitrary stale entry),
                    # not the true least-recently-used one -- confirm
                    # whether approximate eviction is intended.
                    candidate = key
                    for k,v in resumePointCache.items():
                        if v[0] < lru:
                            candidate = k
                    del resumePointCache[candidate]
                if lru - resumePointCacheLast > 3600:
                    saveResumePoints()
def delResumePoint(ref):
    """Forget the stored resume position for *ref*, flushing if stale."""
    global resumePointCache, resumePointCacheLast
    # pop with a default is the no-raise equivalent of try/del/except.
    resumePointCache.pop(ref.toString(), None)
    if int(time()) - resumePointCacheLast > 3600:
        saveResumePoints()
def getResumePoint(session):
    """Return the stored resume position for the playing service, or None.

    Side effect: refreshes the entry's LRU timestamp. Falls through to
    an implicit None when nothing is playing or ref.type == 1.
    """
    global resumePointCache
    ref = session.nav.getCurrentlyPlayingServiceOrGroup()
    if (ref is not None) and (ref.type != 1):
        try:
            entry = resumePointCache[ref.toString()]
            entry[0] = int(time()) # update LRU timestamp
            return entry[1]
        except KeyError:
            return None
def saveResumePoints():
global resumePointCache, resumePointCacheLast
import cPickle
try:
f = open('/home/root/resumepoints.pkl', 'wb')
cPickle.dump(resumePointCache, f, cPickle.HIGHEST_PROTOCOL)
except Exception, ex:
print "[InfoBar] Failed to write resumepoints:", ex
resumePointCacheLast = int(time())
def loadResumePoints():
import cPickle
try:
return cPickle.load(open('/home/root/resumepoints.pkl', 'rb'))
except Exception, ex:
print "[InfoBar] Failed to load resumepoints:", ex
return {}
# Populate the cache at import time and remember when it was last synced.
resumePointCache = loadResumePoints()
resumePointCacheLast = int(time())
class InfoBarDish:
    # Mixin: instantiates the dish-movement dialog (shown while the
    # positioner rotates).
    def __init__(self):
        self.dishDialog = self.session.instantiateDialog(Dish)
class InfoBarUnhandledKey:
    """Mixin that flashes the 'unhandled key' symbol when a keypress is
    consumed by no action.

    Works by binding actionA at the highest priority (sees every key and
    records its flags) and actionB at the lowest (only reached when
    nothing else handled the key). If both saw the same flags, the key
    was unhandled.
    """
    def __init__(self):
        self.unhandledKeyDialog = self.session.instantiateDialog(UnhandledKey)
        self.hideUnhandledKeySymbolTimer = eTimer()
        self.hideUnhandledKeySymbolTimer.callback.append(self.unhandledKeyDialog.hide)
        self.checkUnusedTimer = eTimer()
        self.checkUnusedTimer.callback.append(self.checkUnused)
        self.onLayoutFinish.append(self.unhandledKeyDialog.hide)
        eActionMap.getInstance().bindAction('', -maxint -1, self.actionA) #highest prio
        eActionMap.getInstance().bindAction('', maxint, self.actionB) #lowest prio
        self.flags = (1<<1)
        self.uflags = 0
    #this function is called on every keypress!
    def actionA(self, key, flag):
        ####key debug
        #try:
        #	print 'KEY: %s %s %s' % (key,(key_name for key_name,value in KEYIDS.items() if value==key).next(),getKeyDescription(key)[0])
        #except:
        #	try:
        #		print 'KEY: %s %s' % (key,(key_name for key_name,value in KEYIDS.items() if value==key).next()) # inverse dictionary lookup in KEYIDS
        #	except:
        #		print 'KEY: %s' % (key)
        self.unhandledKeyDialog.hide()
        if flag != 4:
            # flag 4 is ignored; a fresh key sequence resets both flag sets.
            if self.flags & (1<<1):
                self.flags = self.uflags = 0
            self.flags |= (1<<flag)
            if flag == 1: # break
                self.checkUnusedTimer.start(0, True)
        return 0
    #this function is only called when no other action has handled this key
    def actionB(self, key, flag):
        if flag != 4:
            self.uflags |= (1<<flag)
    def checkUnused(self):
        # Same flags at highest and lowest priority means nothing in
        # between consumed the key: show the symbol for two seconds.
        if self.flags == self.uflags:
            self.unhandledKeyDialog.show()
            self.hideUnhandledKeySymbolTimer.start(2000, True)
class InfoBarScreenSaver:
	"""Mixin that shows a screensaver after a configurable idle timeout.

	The timeout comes from config.usage.screen_saver.  The timer is armed
	whenever this screen starts executing and disarmed on exit; any key press
	while the saver is shown hides it and re-arms the timer.
	"""
	def __init__(self):
		self.onExecBegin.append(self.__onExecBegin)
		self.onExecEnd.append(self.__onExecEnd)
		self.screenSaverTimer = eTimer()
		self.screenSaverTimer.callback.append(self.screensaverTimeout)
		self.screensaver = self.session.instantiateDialog(ScreenSaver.Screensaver)
		self.onLayoutFinish.append(self.__layoutFinished)
	def __layoutFinished(self):
		self.screensaver.hide()
	def __onExecBegin(self):
		self.ScreenSaverTimerStart()
	def __onExecEnd(self):
		if self.screensaver.shown:
			self.screensaver.hide()
			eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
		self.screenSaverTimer.stop()
	def ScreenSaverTimerStart(self):
		# Renamed local from 'time' to 'timeout': the old name shadowed the
		# imported time() function inside this method.
		timeout = int(config.usage.screen_saver.value)
		flag = self.seekstate[0]
		if not flag:
			ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
			if ref and not (hasattr(self.session, "pipshown") and self.session.pipshown):
				ref = ref.toString().split(":")
				# Service type "2" or an audio file extension: no video on
				# screen, so the saver may run (presumably radio -- confirm).
				flag = ref[2] == "2" or os.path.splitext(ref[10])[1].lower() in AUDIO_EXTENSIONS
		if timeout and flag:
			self.screenSaverTimer.startLongTimer(timeout)
		else:
			self.screenSaverTimer.stop()
	def screensaverTimeout(self):
		if self.execing and not Standby.inStandby and not Standby.inTryQuitMainloop:
			self.hide()
			if hasattr(self, "pvrStateDialog"):
				self.pvrStateDialog.hide()
			self.screensaver.show()
			# Highest-priority catch-all so any key wakes the screen again.
			eActionMap.getInstance().bindAction('', -maxint - 1, self.keypressScreenSaver)
	def keypressScreenSaver(self, key, flag):
		if flag:
			self.screensaver.hide()
			self.show()
			self.ScreenSaverTimerStart()
			eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
class SecondInfoBar(Screen):
	"""Secondary info bar screen with no inline skin definition.

	self.skin = None presumably lets the skin engine look the layout up by
	screen name instead -- confirm against the Screen/skin resolution code.
	"""
	def __init__(self, session):
		Screen.__init__(self, session)
		self.skin = None
class InfoBarShowHide(InfoBarScreenSaver):
""" InfoBar show/hide control, accepts toggleShow and hide actions, might start
fancy animations. """
STATE_HIDDEN = 0
STATE_HIDING = 1
STATE_SHOWING = 2
STATE_SHOWN = 3
def __init__(self):
self["ShowHideActions"] = ActionMap( ["InfobarShowHideActions"] ,
{
"toggleShow": self.okButtonCheck,
"hide": self.keyHide,
}, 1) # lower prio to make it possible to override ok and cancel..
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPla
|
ysh329/wordsDB
|
mydef/def_get_ngram_2_db.py
|
Python
|
apache-2.0
| 6,600
| 0.01
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: def_get_ngram_2_db.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2015-8-17 22:17:26
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import MySQLdb
import logging
################################### PART2 CLASS && FUNCTION ###########################
def get_one_bi_tri_gram(raw_string):
    """Split *raw_string* into overlapping 1-, 2- and 3-character grams.

    Args:
        raw_string (str): the text to split.

    Returns:
        tuple: (one_gram_list, bi_gram_list, tri_gram_list) where each element
        is a list of all substrings of the corresponding length, in order.
    """
    # range() (not the Python-2-only xrange) keeps the function runnable on
    # both Python 2 and 3 with identical iteration behavior.
    n = len(raw_string)
    one_gram_list = [raw_string[i] for i in range(n)]
    # A k-gram starting at i needs i + k <= n, hence the shortened ranges
    # (range of a negative bound is simply empty for short inputs).
    bi_gram_list = [raw_string[i:i + 2] for i in range(n - 1)]
    tri_gram_list = [raw_string[i:i + 3] for i in range(n - 2)]
    return (one_gram_list, bi_gram_list, tri_gram_list)
def insert_ngram_2_db(word, showtimes, database_name, table_name):
    """Insert an ngram row, or add to its show count if it already exists.

    Args:
        word (str): ngram word.
        showtimes (int): how many times the ngram appeared in the corpus.
        database_name (str): target database name (trusted configuration;
            interpolated into SQL unescaped -- identifiers cannot be bound).
        table_name (str): target table name (trusted configuration).

    Returns:
        None
    """
    try:
        con = MySQLdb.connect(host="localhost", user="root", passwd="931209",
                              db=database_name, charset="utf8")
    except MySQLdb.Error as e:
        logging.error("Fail in connecting MySQL.")
        logging.error("MySQL Error %d: %s." % (e.args[0], e.args[1]))
        # Without a connection nothing below can work; the original fell
        # through and crashed on an undefined 'con'.
        return None
    cursor = con.cursor()
    table = "%s.%s" % (database_name, table_name)
    # All VALUES go through driver-side parameter binding: the old %-format
    # SQL broke (and was injectable) for any word containing a quote.
    try:
        cursor.execute("SELECT id FROM " + table + " WHERE word=%s", (word,))
        id_tuple = cursor.fetchone()
        if id_tuple is None:  # word not stored yet -> insert a fresh row
            try:
                cursor.execute(
                    "INSERT INTO " + table +
                    " (word, pinyin, showtimes, weight, cixing, type1, type2, source, gram, meaning)"
                    " VALUES(%s, '', %s, 0.0, 'cx', 't1', 't2', 'stock-newspaper-essence', %s, 'ex')",
                    (word, showtimes, len(word)))
                con.commit()
            except MySQLdb.Error as e:
                con.rollback()
                logging.error("Failed in inserting %s gram word %s, which is existed."
                              % (len(word), word))
                logging.error("MySQL Error %d: %s." % (e.args[0], e.args[1]))
        else:  # word already stored -> accumulate show count
            id = id_tuple[0]
            try:
                cursor.execute(
                    "UPDATE " + table +
                    " SET showtimes=showtimes+%s, gram=%s WHERE id=%s",
                    (showtimes, len(word), id))
                con.commit()
            except MySQLdb.Error as e:
                con.rollback()
                logging.error("Failed in updating %s gram word %s, which is existed."
                              % (len(word), word))
                logging.error("MySQL Error %d: %s." % (e.args[0], e.args[1]))
    except MySQLdb.Error as e:
        con.rollback()
        logging.error("Fail in selecting %s gram word %s in table %s of database %s."
                      % (len(word), word, table_name, database_name))
        logging.error("MySQL Error %d: %s." % (e.args[0], e.args[1]))
    finally:
        con.close()
    return None
def computation_corpus_scale_and_weight_2_db(database_name, table_name):
    """Recompute corpus scale and weight for every ngram row.

    The corpus scale of a word is the summed show count of all words with the
    same gram length (1, 2 or 3); the weight is showtimes / corpus_scale.

    Args:
        database_name (str): database to update (trusted configuration;
            interpolated into SQL unescaped).
        table_name (str): table to update (trusted configuration).

    Returns:
        None
    """
    try:
        con = MySQLdb.connect(host="localhost",
                              user="root",
                              passwd="931209",
                              db=database_name,
                              charset="utf8")
        logging.info("Success in connecting MySQL.")
    except MySQLdb.Error as e:
        logging.error("Fail in connecting MySQL.")
        logging.error("MySQL Error %d: %s." % (e.args[0], e.args[1]))
        # Without a connection the statements below cannot run; the original
        # fell through and crashed on an undefined 'con'.
        return None
    cursor = con.cursor()
    table = "%s.%s" % (database_name, table_name)
    # Session variables keep the per-gram totals so the UPDATEs can reuse them.
    sql_list = [
        "SET @onegram_num = (SELECT SUM(showtimes) FROM %s WHERE gram = 1)" % table,
        "SET @bigram_num = (SELECT SUM(showtimes) FROM %s WHERE gram = 2)" % table,
        "SET @trigram_num = (SELECT SUM(showtimes) FROM %s WHERE gram = 3)" % table,
        "UPDATE %s SET corpus_scale = @onegram_num WHERE gram = 1" % table,
        "UPDATE %s SET corpus_scale = @bigram_num WHERE gram = 2" % table,
        "UPDATE %s SET corpus_scale = @trigram_num WHERE gram = 3" % table,
        "UPDATE %s SET weight = (showtimes / corpus_scale)" % table,
    ]
    try:
        # A plain loop replaces map(lambda ...): map is the wrong tool for
        # side effects and silently does nothing under Python 3's lazy map.
        for sql in sql_list:
            cursor.execute(sql)
        con.commit()
        logging.info("Success in updating corpus scale and weight of words.")
    except MySQLdb.Error as e:
        con.rollback()
        logging.error("Fail in selecting gram word in table %s of database %s."
                      % (table_name, database_name))
        logging.error("MySQL Error %d: %s." % (e.args[0], e.args[1]))
    finally:
        con.close()
    return None
################################### PART3 CLASS TEST ##################################
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.