| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
EijiSugiura/sstoraged
|
src/json/unittest/jsontestrunner.py
|
Python
|
gpl-2.0
| 2,199
| 0.032287
|
# Simple implementation of a json test runner to run the test against json-py.
import sys
import os.path
import json
import types
if len(sys.argv) != 2:
print "Usage: %s input-json-file", sys.argv[0]
sys.exit(3)
input_path = sys.argv[1]
base_path = os.path.splitext(input_path)[0]
actual_path = base_path + '.actual'
rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite'
def valueTreeToString( fout, value, path = '.' ):
ty = type(value)
if ty is types.DictType:
fout.write( '%s={}\n' % path )
suffix = path[-1] != '.' and '.' or ''
names = value.keys()
names.sort()
for name in names:
valueTreeToString( fout, value[name], path + suffix + name )
elif ty is types.ListType:
fout.write( '%s=[]\n' % path )
for index, childValue in zip( xrange(0,len(value)), value ):
valueTreeToString( fout, childValue, path + '[%d]' % index )
elif ty is types.StringType:
fout.write( '%s="%s"\n' % (path,value) )
elif ty is types.IntType:
fout.write( '%s=%d\n' % (path,value) )
elif ty is types.FloatType:
fout.write( '
|
%s=%.16g\n' % (path,value) )
elif value is True:
fout.write( '%s=true\n' % path )
elif value is False:
fout.write( '%s=false\
|
n' % path )
elif value is None:
fout.write( '%s=null\n' % path )
else:
assert False, "Unexpected value type"
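# A minimal illustration (hypothetical data, not from the test suite) of the
# flattened "path=value" format that valueTreeToString produces:
#   valueTreeToString(fout, {'a': [1, 2], 'b': 'x'}) writes
#   .={}
#   .a=[]
#   .a[0]=1
#   .a[1]=2
#   .b="x"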
def parseAndSaveValueTree( input, actual_path ):
root = json.read( input )
fout = file( actual_path, 'wt' )
valueTreeToString( fout, root )
fout.close()
return root
def rewriteValueTree( value, rewrite_path ):
rewrite = json.write( value )
rewrite = rewrite[1:-1] # Somehow the string is quoted ! jsonpy bug ?
file( rewrite_path, 'wt').write( rewrite + '\n' )
return rewrite
input = file( input_path, 'rt' ).read()
root = parseAndSaveValueTree( input, actual_path )
rewrite = rewriteValueTree( json.write( root ), rewrite_path )
rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
sys.exit( 0 )
|
fstagni/DIRAC
|
WorkloadManagementSystem/Utilities/QueueUtilities.py
|
Python
|
gpl-3.0
| 8,673
| 0.01153
|
"""Utilities to help Computing Element Queues manipulation
"""
from __future__ import absolute_import
from __future__ import division
import six
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import fromChar
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getDIRACPlatform
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
__RCSID__ = '$Id$'
def getQueuesResolved(siteDict):
|
"""
Get the list of queue descriptions merging site/ce/queue parameters and adding some
derived parameters.
:param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method
:return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g.
|
for SiteDirector
"""
queueFinalDict = {}
for site in siteDict:
for ce, ceDict in siteDict[site].items():
qDict = ceDict.pop('Queues')
for queue in qDict:
queueName = '%s_%s' % (ce, queue)
queueDict = qDict[queue]
queueDict['Queue'] = queue
queueDict['Site'] = site
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in queueDict and "SI00" in queueDict:
maxCPUTime = float(queueDict['maxCPUTime'])
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
si00 = float(queueDict['SI00'])
queueCPUTime = 60 / 250 * maxCPUTime * si00
queueDict['CPUTime'] = int(queueCPUTime)
# Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
# This also converts them from a string to a list if required.
for tagFieldName in ('Tag', 'RequiredTag'):
ceTags = ceDict.get(tagFieldName, [])
if isinstance(ceTags, six.string_types):
ceTags = fromChar(ceTags)
queueTags = queueDict.get(tagFieldName, [])
if isinstance(queueTags, six.string_types):
queueTags = fromChar(queueTags)
queueDict[tagFieldName] = list(set(ceTags + queueTags))
# Some parameters can be defined on the CE level and are inherited by all Queues
for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']:
queueParameter = queueDict.get(parameter, ceDict.get(parameter))
if queueParameter:
queueDict[parameter] = queueParameter
# If we have a multi-core queue add MultiProcessor tag
if queueDict.get('NumberOfProcessors', 1) > 1:
queueDict.setdefault('Tag', []).append('MultiProcessor')
queueDict['CEName'] = ce
queueDict['GridCE'] = ce
queueDict['CEType'] = ceDict['CEType']
queueDict['GridMiddleware'] = ceDict['CEType']
queueDict['QueueName'] = queue
platform = queueDict.get('Platform', ceDict.get('Platform', ''))
if not platform and "OS" in ceDict:
architecture = ceDict.get('architecture', 'x86_64')
platform = '_'.join([architecture, ceDict['OS']])
queueDict['Platform'] = platform
if platform:
result = getDIRACPlatform(platform)
if result['OK']:
queueDict['Platform'] = result['Value'][0]
queueFinalDict[queueName] = queueDict
return S_OK(queueFinalDict)
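# A rough sketch (hypothetical site/CE/queue names) of the flattening performed above:
# Resources.getQueues() output such as
#   {'LCG.Example.org': {'ce01.example.org': {'CEType': 'HTCondorCE',
#                                             'Queues': {'long': {'maxCPUTime': '2880'}}}}}
# would come back keyed per '<CE>_<queue>', e.g.
#   {'ce01.example.org_long': {'Site': 'LCG.Example.org', 'Queue': 'long',
#                              'CEName': 'ce01.example.org', 'CEType': 'HTCondorCE', ...}}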
def matchQueue(jobJDL, queueDict, fullMatch=False):
"""
Match the job description to the queue definition
:param str job: JDL job description
:param bool fullMatch: test matching on all the criteria
:param dict queueDict: queue parameters dictionary
:return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or
S_ERROR with the reason for no match
"""
# Check the job description validity
job = ClassAd(jobJDL)
if not job.isOK():
return S_ERROR('Invalid job description')
noMatchReasons = []
# Check job requirements to resource
# 1. CPUTime
cpuTime = job.getAttributeInt('CPUTime')
if not cpuTime:
cpuTime = 84600
if cpuTime > queueDict.get('CPUTime', 0.):
noMatchReasons.append('Job CPUTime requirement not satisfied')
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Multi-value match requirements
for parameter in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
if parameter in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[parameter]))
if valueSet and queueSet and not valueSet.intersection(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. Banned multi-value match requirements
for par in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
parameter = "Banned%s" % par
if par in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[par]))
if valueSet and queueSet and valueSet.issubset(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 4. Tags
tags = set(job.getListFromExpression('Tag'))
nProc = job.getAttributeInt('NumberOfProcessors')
if nProc and nProc > 1:
tags.add('MultiProcessor')
wholeNode = job.getAttributeString('WholeNode')
if wholeNode:
tags.add('WholeNode')
queueTags = set(queueDict.get('Tags', []))
if not tags.issubset(queueTags):
noMatchReasons.append('Job Tag %s not satisfied' % ','.join(tags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 5. MultiProcessor requirements
if nProc and nProc > int(queueDict.get('NumberOfProcessors', 1)):
noMatchReasons.append('Job NumberOfProcessors %d requirement not satisfied' % nProc)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 6. RAM
ram = job.getAttributeInt('RAM')
# If MaxRAM is not specified in the queue description, assume 2GB
if ram and ram > int(queueDict.get('MaxRAM', 2048)) / 1024:
noMatchReasons.append('Job RAM %d requirement not satisfied' % ram)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# Check resource requirements to job
# 1. OwnerGroup - rare case but still
if "OwnerGroup" in queueDict:
result = getProxyInfo(disableVOMS=True)
if not result['OK']:
return S_ERROR('No valid proxy available')
ownerGroup = result['Value']['group']
if ownerGroup != queueDict['OwnerGroup']:
noMatchReasons.append('Resource OwnerGroup %s requirement not satisfied' % queueDict['OwnerGroup'])
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Required tags
requiredTags = set(queueDict.get('RequiredTags', []))
if not requiredTags.issubset(tags):
noMatchReasons.append('Resource RequiredTags %s not satisfied' % ','.join(requiredTags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. RunningLimit
site = queueDict['Site']
opsHelper = Operations()
result = opsHelper.getSections('JobScheduling/RunningLimit')
if result['OK'] and site in result['Value']:
result = opsHelper.getSections('JobScheduling/RunningLimit/%s' % site)
if
|
JulienMcJay/eclock
|
windows/Python27/Lib/site-packages/pygments/__init__.py
|
Python
|
gpl-2.0
| 2,974
| 0.000336
|
# -*- coding: utf-8 -*-
"""
Pygments
~~~~
|
~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
|
* support for new languages and formats is added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '1.6'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError, err:
if isinstance(err.args[0], str) and \
'unbound method get_tokens' in err.args[0]:
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
def format(tokens, formatter, outfile=None):
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
#print formatter, 'using', formatter.encoding
realoutfile = formatter.encoding and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError, err:
if isinstance(err.args[0], str) and \
'unbound method format' in err.args[0]:
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
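# A minimal usage sketch (assuming the bundled Python lexer and HTML formatter):
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   html_output = highlight('print "hello"', PythonLexer(), HtmlFormatter())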
if __name__ == '__main__':
from pygments.cmdline import main
sys.exit(main(sys.argv))
|
bronydell/VK-GFC-bot
|
saver.py
|
Python
|
mit
| 348
| 0
|
import shelve
shelve_name = "data"
def savePref(user, key, value):
d = shelve.open(shelve_name)
d[str(user) + '.' + str(key)] = value
d.close()
def openPref(user, key, default):
d = shelve.open(shelve_name)
if (str(u
|
ser) + '.' + str(key)) in d:
retur
|
n d[str(user) + '.' + str(key)]
else:
return default
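# Example usage of the shelve-backed preference store (hypothetical user id and keys):
#   savePref(12345, 'lang', 'en')   # stored under the composite key "12345.lang"
#   openPref(12345, 'lang', 'ru')   # -> 'en'
#   openPref(12345, 'tz', 'UTC')    # -> 'UTC', the default, since no value was saved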
|
ajstarna/RicochetRobots
|
Brobot/experimentsFirstSolution.py
|
Python
|
bsd-2-clause
| 6,257
| 0.032603
|
#!/usr/bin/python
''' this file contains functions for experimenting with the different players and for running many trials and averaging results '''
from Player import RandomPlayer
from MCPlayer import MCPlayer, PNGSPlayer, GreedyPlayer
import Board
import sys, traceback
import time
def runRandomPlayerFirstSol(fileName, size, numSamples, depth):
# numSamples and depth are useless here, just makes it more convenient to call an arbitrary function
try:
rr = Board.StandardBoard(size, size, fileName)
rPlayer = RandomPlayer(rr)
rPlayer.setTarget()
moveSequence, numMoves = rPlayer.findFirstSolutionNoTimeLimit()
if rr.validateMoveSequence(moveSequence):
# if the move sequence
#print("valid sequence with {0} moves!".format(numMoves))
return numMoves
else:
print("Invalid sequence with {0} moves!".format(numMoves))
return None
except:
print("exception in runRandomPlayerFirstSol")
traceback.print_exc(file=sys.stdout)
return None
def runMCPlayerFirstSol(fileName, size, numSamples, depth):
try:
rr = Board.StandardBoard(size, size, fileName)
reachableWeight = 4
LBWeight = 1
totalReachableWeight = 3
mcPlayer = MCPlayer(rr, reachableWeight, LBWeight, totalReachableWeight)
mcPlayer.setTarget()
moveSequence, numMoves = mcPlayer.findFirstSolutionNoTimeLimit(numSamples, depth)
if rr.validateMoveSequence(moveSequence):
# if the move sequence
#print("valid sequence with {} moves!".format(numMoves))
return numMoves
else:
print("Invalid sequence with {} moves!".format(numMoves))
return None
except:
print("exception in runMCPlayerFirstSolution")
traceback.print_exc(file=sys.stdout)
return None
def runPNGSPlayerFirstSol(fileName, size, numSamples, depth):
try:
rr = Board.StandardBoard(size, size, fileName)
reachableWeight = 4
LBWeight = 1
totalReachableWeight = 3
pngsPlayer = PNGSPlayer(rr, reachableWeight, LBWeight, totalReachableWeight)
pngsPlayer.setTarget()
moveSequence, numMoves, numMovesBeforePNGS, findTime, pngsTime = pngsPlayer.findFirstSolutionNoTimeLimit(numSamples, depth)
if rr.validateMoveSequence(moveSequence):
# if the move sequence
#print("valid sequence with {} moves!".format(numMoves))
return numMoves, numMovesBeforePNGS, findTime, pngsTime
else:
print("Invalid sequence with {} moves!".format(numMoves))
return None
except:
print("exception in runPNGSPlayerFirstSolution")
traceback.print_exc(file=sys.stdout)
return None
def runGreedyPlayerFirstSol(fileName, size, numSamples, depth):
try:
rr = Board.StandardBoard(size, size, fileName)
reachableWeight = 4
LBWeight = 1
totalReachableWeight = 3
greedyPlayer = GreedyPlayer(rr, reachableWeight, LBWeight, totalReachableWeight)
greedyPlayer.setTarget()
moveSequence, numMoves, numMovesBeforePNGS, findTime, pngsTime = greedyPlayer.findFirstSolutionNoTimeLimit()
if rr.validateMoveSequence(moveSequence):
# if the move sequence
#print("valid sequence with {} moves!".format(numMoves))
return numMoves, numMovesBeforePNGS, findTime, pngsTime
else:
print("Invalid sequence with {} moves!".format(numMoves))
return None
except:
print("exception in runGreedyPlayerFirstSolution")
traceback.print_exc(file=sys.stdout)
return None
def playMultiplePNGSGames(function, numGames, fileName, size, numSamples, depth):
totalPNGSMoves = 0
totalFindMoves = 0
results = []
totalFindTime = 0
totalPNGSTime = 0
for i in xrange(numGames):
print("startGame {0}".format(i))
numMoves, numMovesBeforePNGS, findTime, pngsTime = function(fileName, size, numSamples, depth)
totalFindTime += findTime
totalPNGSTime += pngsTime
if numMoves == None:
print("Problem in function {0}".format(function))
sys.exit(-1)
else:
results.append((numMoves, numMovesBeforePNGS, findTime, pngsTime))
totalPNGSMoves += numMoves
totalFindMoves += numMovesBeforePNGS
return totalPNGSMoves/float(numGames), totalFindMoves/float(numGames), totalFindTime/float(numGames), totalPNGSTime/float(numGames), results
def playMultipleGames(function, numGames, fileName, size, numSamples, depth):
totalMoves = 0
results = []
for i in xrange(numGames):
numMoves = function(fileName, size, numSamples, depth)
if numMoves == None:
print("Problem in function {0}".format(function))
sys.exit(-1)
else:
results.append(numMoves)
totalMoves += numMoves
return totalMoves/float(numGames), results
if __name__ == "__main__":
numGames = 10
numSamples = 10
depth = 4
fileName = "builtin4.txt"
print("Using file = {0}".format(fileName))
for depth in [3,4,5]: #,6,7,8]:
for numSamples in [14, 16, 18]: #8,10,12,14,16]:
print("Running PNGS with numGames = {2}, depth = {0} and numSamples = {1}".format(depth, numSamples, numGames))
PNGSAverage, MCAverage, findTime, pngsTime, PNGSResults = playMultiplePNGSGames(runPNGSPlayerFirstSol, numGames, fileName, 16, numSamples, depth)
#print(PNGSDict)
print("Averag
|
e Number of Moves Per Game = {0}".format(PNGSAverage))
print("Average Number of Moves Per Game Before Improvement = {0}".format(MCAverage))
print("Average findTime per game = {0}".format(findTime))
print("Average pngsTime per game = {0}".format(pngsTime))
print(PNGSResults)
print("")
'''
print("Running Greedy with numGames = {0}".format(numGames))
PNGSAverage, MCAverage, findTime, pngsTime, PNGSResults = playMultiplePNGSGames(runGreedyPla
|
yerFirstSol, numGames, fileName, 16, numSamples, depth)
#print(PNGSDict)
print("Average Number of Moves Per Game = {0}".format(PNGSAverage))
print("Average Number of Moves Per Game Before Improvement = {0}".format(MCAverage))
print("Average findTime per game = {0}".format(findTime))
print("Average pngsTime per game = {0}".format(pngsTime))
print(PNGSResults)
print("")
'''
'''tstart = time.clock()
print("Running Rand with numGames = {0}".format(numGames))
RandAverage, RandDict = playMultipleGames(runRandomPlayerFirstSol, numGames, fileName, 16, numSamples, depth)
#print(RandDict)
print("Average Number of Moves Per Game = {0}".format(RandAverage))
print("Average time per game = {0}\n".format((time.clock() - tstart)/ numGames))
'''
|
bumrush/blackjack
|
blackjack.py
|
Python
|
gpl-2.0
| 8,652
| 0.035483
|
#!/usr/bin/python
# Copyright 2014 Justin Cano
#
# This is a simple Python program of the game Blackjack
# (http://en.wikipedia.org/wiki/Blackjack), developed for
# a coding challenge from the 2014 Insight Data Engineering
# Fellows Program application.
#
# Licensed under the GNU General Public License, version 2.0
# (the "License"), this program is free software; you can
# redistribute it and/or modify it under the terms of the
# License.
#
# You should have received a copy of the License along with this
# program in the file "LICENSE". If not, you may obtain a copy of
# the License at
# http://www.gnu.org/licenses/gpl-2.0.html
#
import random
import time
MAX_DECKS = 8
def shuffleDeck(numDecks):
"""
Builds, shuffles, and returns a deck of 52 * numDecks cards
Deck is represented as a list of cards
Cards are represented as strings labeled as their rank and suit, e.g.
'7H' - 7 Hearts
'TS' - 10 Spades
"""
deck = [r+s for r in '23456789TJQKA'*numDecks for s in 'SHDC']
random.shuffle(deck)
return deck
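# For example, shuffleDeck(2) returns a randomly ordered list of 104 card
# strings (52 per deck), e.g. ['7H', 'TS', 'AC', ...].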
def changeNumDecks():
"""
Prompts user to change the number of decks to use
Returns new number of decks to use
"""
numDecks = 0
while numDecks <= 0 or numDecks > MAX_DECKS:
try:
print "Enter number of decks to use (1-" + str(MAX_DECKS) + "):"
numDecks = int(raw_input("% "))
assert 0 < numDecks <= MAX_DECKS
except (ValueError, AssertionError):
print "Invalid input! Must be integer value greater than 0"
print "and less than 8"
return numDecks
def placeBet(chips):
"""
Prompts user for bet value
User input must be greater than 0 and less than chips
Fixed bet precision to one decimal place
Returns bet, rounded to nearest tenth
"""
bet = 0
while bet < 1 or bet > chips:
try:
print "How much do you wanna bet (1-" + str(chips) + ")?"
# Round bet to the nearest tenth
bet = round(float(raw_input("% ")),1)
assert 1 <= bet <= chips
except (ValueError, AssertionError):
print "Invalid input! Must be integer or float value at least 1"
print "and less than the number of available chips"
return bet
menuChoices = ['', "PLAY", "DECK", "EXIT"]
def menu():
"""
Menu
Prompts the user to choose menu option:
1 - Play
2 - Change # of decks
3 - Exit
Returns user selection
"""
choice = 0
maxChoice = len(menuChoices)-1
while choice <= 0 or choice > maxChoice:
try:
print "Menu"
print "-" * 10
print "[1] Play"
print "[2] Change # Decks"
print "[3] Exit"
choice = int(raw_input("% "))
assert 1 <=choice <= maxChoice
except (ValueError, AssertionError):
print "Invalid choice! Must be [1-" + str(maxChoice) + "]"
return menuChoices[choice]
blackjackChoices = ['', "HIT", "STAND", "DOUBLE"]
def blackjackMenu(playerCards, chips, bet):
"""
Prompts user to choose Blackjack option:
1 - Hit
2 - Stand
3 - Double Down (uses playerCards, chips, and
bet to determine if player can Double Down)
Can be extended for advanced options, i.e. split
Returns user selection
"""
choice = 0
maxChoice = len(blackjackChoices)-2
while choice <= 0
|
or choice > maxChoice:
try:
print "Actions:"
print "-" * 10
print "[1] Hit"
print "[2] Stand"
if len(playerCards) == 2 and chips >= bet:
"Double Down allowed"
print "[3] Double Down"
maxChoice += 1
choice = int(raw_input("% "))
assert 1 <= choice <= maxChoice
|
except (ValueError, AssertionError):
print "Invalid choice! Must be [1-" + str(maxChoice) + "]"
return blackjackChoices[choice]
def deal(deck):
"""
Pops and returns the first card in deck
"""
card = deck[0]
del deck[0]
return card
def rank(hand):
"""
Return the sum of the ranks in a hand
Face cards are of rank 10
Aces are of rank 11 or 1
Example: rank(['7H','AS','JD']) => 18
"""
# Extract all ranks from hand
ranks = [10 if r == 'T' or r == 'J' or r =='Q' or r == 'K' else
11 if r == 'A' else
int(r) for r,s in hand]
# While there are 11-ranked Aces in hand and hand rank is greater than 21,
while 11 in ranks and sum(ranks) > 21:
"""
Change rank of Aces to 1
one Ace at a time
until hand rank is less than 21
or until there are no more 11-ranked Aces
"""
index = ranks.index(11)
ranks[index] = 1
return sum(ranks)
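# Worked example (hypothetical hand): rank(['AS', '9D', 'AC']) first sums to
# 11 + 9 + 11 = 31, then demotes Aces to 1 one at a time while the total
# exceeds 21, so the hand ends up ranked 21.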
def showCards(dealer, player, turn="player"):
"""
Print cards on screen
If player's turn, hide dealer's second card and rank
"""
print "=" * 20
print "Dealer Cards:", rank([dealer[0]]) if turn is "player" else rank(dealer)
for card in dealer:
if card is dealer[1] and turn is "player":
card = "--"
print card,
print
print "Player Cards:", rank(player)
for card in player:
print card,
print
print "=" * 20
def getPayout(dealer, player, chips, bet):
"""
Evaluates and compares dealer and player hands
Calculates winnings and adds to chips
Fixed chips precision to one decimal place
Returns chips rounded to nearest tenth
"""
if rank(player) > 21:
"Player bust"
print "Bust!"
elif rank(dealer) == rank(player):
"Push"
chips += bet
print "Push"
elif rank(player) == 21 and len(player) == 2:
"Player gets Blackjack"
chips += 2.5*bet
print "You got Blackjack!"
elif rank(dealer) > 21 or rank(player) > rank(dealer):
"Dealer bust or player beats dealer"
chips += 2*bet
print "You win!"
else:
"Dealer beats player"
print "You lose!"
return round(chips,1)
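# Worked example (hypothetical round): after betting 10 from a stack of 100,
# chips is 90 when getPayout() is called; a player Blackjack returns
# 90 + 2.5 * 10 = 115.0, a regular win 110.0, a push 100.0 and a loss 90.0.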
def blackjack(deck,chips):
"""
Play a round of (single player) Blackjack
using deck and chips. Player will be ask to
enter a valid bet value. Payout will be added
to available chips.
Return chips after payout.
"""
print "*" * 50
print "Chips:", chips
bet = placeBet(chips)
print "*" * 50
chips = chips - bet
print "Chips:", chips
print "Bet:", bet
dealerCards, playerCards = [], []
dealerRank, playerRank = 0, 0
# Deal starting cards by appending the
# first card from deck to list
playerCards.append(deal(deck))
dealerCards.append(deal(deck))
playerCards.append(deal(deck))
dealerCards.append(deal(deck))
# Player goes first
blackjack.turn = "player"
if rank(dealerCards) == 21:
"Check for dealer Blackjack"
showCards(dealerCards, playerCards, "dealer")
print "\nDealer got blackjack!"
blackjack.turn = None
elif rank(playerCards) == 21:
"Check player for Blackjack"
showCards(dealerCards, playerCards)
blackjack.turn = None
else:
showCards(dealerCards, playerCards)
while blackjack.turn is "player":
"Player's turn"
choice = blackjackMenu(playerCards, chips, bet)
if choice == "HIT":
playerCards.append(deal(deck))
elif choice == "STAND":
blackjack.turn = "dealer"
break
elif choice == "DOUBLE":
print "Double Down! Good luck!"
chips = chips - bet
print "Chips:", chips
bet = 2*bet
print "Bet:", bet
playerCards.append(deal(deck))
showCards(dealerCards, playerCards)
time.sleep(2)
blackjack.turn = "dealer"
if choice != "DOUBLE":
showCards(dealerCards, playerCards)
playerRank = rank(playerCards)
if playerRank > 21:
"Bust"
blackjack.turn = None
elif playerRank == 21:
"Twenty-One"
print "\nYou got 21!"
# Pause so player notices 21
time.sleep(2)
blackjack.turn = "dealer"
print
while blackjack.turn is "dealer":
"Dealer's turn"
showCards(dealerCards, playerCards, blackjack.turn)
dealerRank = rank(dealerCards)
if dealerRank > 21:
print "\nDealer busts!"
blackjack.turn = None
elif dealerRank < 17:
print "\nDealer hits"
dealerCards.append(deal(deck))
else:
blackjack.turn = None
# Pause between dealer moves so player can see dealer's actions
time.sleep(2)
# Compare hands and update available chips
chips = getPayout(dealerCards, playerCards, chips, bet)
time.sleep(1.5)
print
return chips
def main():
chips = 100
numDecks = changeNumDecks()
choice = ''
deck = shuffleDeck(numDecks)
while chips > 0:
"""
While there are still chips available to bet,
give the player the option to keep playing
"""
print "*" * 50
print "Chips:", chips
while choice != "PLAY":
"Display menu"
choice = menu()
if choice == "DECK":
numDecks = changeNumDecks()
print "Changed # of decks to:", numDecks
elif choice == "EXIT":
print "\nCashing out with
|
Punto0/addons-fm
|
website_product_brand/__openerp__.py
|
Python
|
agpl-3.0
| 1,807
| 0.003874
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 Serpent Consulting Services Pvt. Ltd. (<http://www.serpentcs.com>)
# Copyright (C) 2016 FairCoop (<http://fair.coop>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any l
|
ater version.
#
|
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name': 'Product Brand and Country filtering in Website',
'category': 'Website',
'author': 'FairCoop',
'website':'http://fair.coop',
'summary': '',
'version': '1.0',
'description': """
Allows using product brands and countries to filter products on the website.\n
This Module depends on product_brand module -https://github.com/OCA/product-attribute/tree/8.0/product_brand
""",
'depends': ['product_brand_custom','website_sale','web','product_custom'],
'data': [
"data/demands.xml",
"security/ir.model.access.csv",
"views/product_brand.xml",
"views/brand_page.xml",
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
mads-bertelsen/McCode
|
meta-pkgs/windows/Support/gnuplot-py-1.8/.happydoc.setup.py
|
Python
|
gpl-2.0
| 645
| 0.103876
|
(S'9d1d8dee18e2f5e4bae7551057c6c474'
p1
(ihappydoclib.parseinfo.moduleinfo
ModuleInfo
p2
(dp3
S'_namespaces'
p4
((dp5
(dp6
tp7
sS'_import_info'
p8
(ihappydo
|
clib.parseinfo.imports
ImportInfo
p9
(dp10
S'_named_imports'
p11
(dp12
sS'_straight_imports'
p13
(lp14
sbsS'_filename'
p15
S'Gnuplot/setup.py'
p16
sS'_docstring'
p17
S''
sS'_name'
p18
S'setup'
p19
sS'_parent'
p20
NsS'_comment_info'
p21
(dp22
sS'_configuration_values'
p23
(dp24
S'include_comments'
p25
I1
sS'cacheFilePrefix'
p26
S'.happydoc.'
p27
sS'useCa
|
che'
p28
I1
sS'docStringFormat'
p29
S'StructuredText'
p30
ssS'_class_info'
p31
g5
sS'_function_info'
p32
g6
sS'_comments'
p33
S''
sbt.
|
csunny/blog_project
|
source/apps/blog/admin/user.py
|
Python
|
mit
| 863
| 0.002317
|
#!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: magic
"""
from django.contrib import admin
from blog.models import User
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext, ugettext_lazy as _
class BlogUserAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('username', 'email', 'password')}),
(_('P
|
ersonal info'), {'fields': ('email', 'qq', 'phone')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': {'last_login', 'date_joined'}}),
)
add_fieldsets = (
(None, {
'classes': ('wide', ),
'f
|
ields': ('username', 'email', 'password1', 'password2'),
}),
)
admin.site.register(User, BlogUserAdmin)
|
helanan/Panda_Prospecting
|
panda_prospecting/prospecting/insights/high_lows.py
|
Python
|
mit
| 966
| 0
|
import csv
from datetime import datetime
from matplotlib import pyplot as plt
# Get dates, high, and low temperatures from file.
filename = 'sitka_weather_2017.csv'
with open(filename) as f:
reader = csv.reader(f)
header_row = next(reader)
dates, highs, lows = [], [], []
for row in reader:
current_date = datetime.strptime(row[0], "%Y-%m-%d")
dates.append(current_date)
high = int(row[1])
highs.append(high)
low = int(row[3])
lows.append(low)
# Plot data.
fig = plt.figure(dpi=128, figsize=(10, 6))
plt.plot(dates, highs, c='red', alpha=0.5)
plt.plot(dates, lows, c='blue', alpha=0.5)
plt.fill_between(dates, high
|
s, lows, facecolor='blue', alpha=0.1)
# Format plot.
plt.title("Daily high and low temperatures - 2017", fontsize=24)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel("Temperature (F)", fontsize=
|
16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/callback/full_skip.py
|
Python
|
bsd-3-clause
| 2,289
| 0.001747
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: full_skip
type: stdout
short_description: suppresses tasks if all hosts skipped
description:
- Use this plugin when you don't care about any output for tasks that were completely skipped
version_added: "2.4"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
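# To enable this callback (a sketch assuming a standard ansible.cfg), set
#   stdout_callback = full_skip
# in the [defaults] section, matching the "set as stdout in configuration" note above.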
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'full_skip'
def v2_runner_on_skipped(self, result):
self.outlines = []
def v2_playbook_item_on_skipped(self, result):
self.outlines = []
def v2_runner_item_on_skipped(self, result):
sel
|
f.outlines = []
def v2_runner_on_failed(self, result, ignore_errors=False):
self.display()
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
def v2_
|
playbook_on_task_start(self, task, is_conditional):
self.outlines = []
self.outlines.append("TASK [%s]" % task.get_name().strip())
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self.outlines.append("task path: %s" % path)
def v2_playbook_item_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_playbook_item_on_ok(result)
def v2_runner_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_runner_on_ok(result)
def display(self):
if len(self.outlines) == 0:
return
(first, rest) = self.outlines[0], self.outlines[1:]
self._display.banner(first)
for line in rest:
self._display.display(line)
self.outlines = []
|
StratusLab/client
|
cli/user/code/main/python/stratuslab/cmd/stratus_detach_volume.py
|
Python
|
apache-2.0
| 3,765
| 0.003187
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552."
#
# Copyright (c) 2011, Centre National de la Recherche Scientifique (CNRS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from stratuslab.commandbase.AuthnCommand import AuthnCommand
sys.path.append('/var/lib/stratuslab/python')
from stratuslab.CloudConnectorFactory import CloudConnectorFactory
from stratuslab.Util import printError
from stratuslab.commandbase.StorageCommand import StorageCommand
from stratuslab.volume_manager.volume_manager_factory import VolumeManagerFactory
from stratuslab.ConfigHolder import ConfigHolder
from stratuslab.Authn import AuthnFactory
from stratuslab.Exceptions import OneException
# initialize console logging
import stratuslab.api.LogUtil as LogUtil
LogUtil.get_console_logger()
class MainProgram(AuthnCommand, StorageCommand):
"""A command-line program to detach a persistent disk."""
def __init__(self):
super(MainProgram, self).__init__()
def parse(self):
self.parser.usage = '%prog [options] volume-uuid ...'
self.parser.description = '''
Detach one or more persistent volumes (disks) that were dynamically
attached to a running virtual machine. The volume-uuid arguments are
the unique identifiers of volumes to detach.
'''
self.parser.add_option('-i', '--instance', dest='instance',
help='The ID of the instance to which the volume attaches', metavar='VM_ID',
default=0, type='int')
StorageCommand.addPDiskEndpointOptions(self.parser)
AuthnCommand.addCloudEndpointOp
|
tions(self.parser)
super(MainProgram, self).parse()
self.options, self.uuids = self.parser.parse_args()
def checkOptions(self):
super(MainProgram, self).checkOptions()
if not self.uuids:
printError('Please provide at least one persistent disk UUID to detach')
if self.opt
|
ions.instance < 0:
printError('Please provide a VM ID on which to detach disk')
try:
self._retrieveVmNode()
except OneException, e:
printError(e)
def _retrieveVmNode(self):
credentials = AuthnFactory.getCredentials(self.options)
self.options.cloud = CloudConnectorFactory.getCloud(credentials)
self.options.cloud.setEndpoint(self.options.endpoint)
self.node = self.options.cloud.getVmNode(self.options.instance)
def doWork(self):
configHolder = ConfigHolder(self.options.__dict__, self.config or {})
configHolder.pdiskProtocol = "https"
pdisk = VolumeManagerFactory.create(configHolder)
for uuid in self.uuids:
try:
target = pdisk.hotDetach(self.options.instance, uuid)
print 'DETACHED %s from VM %s on /dev/%s' % (uuid, self.options.instance, target)
except Exception, e:
printError('DISK %s: %s' % (uuid, e), exit=False)
def main():
try:
MainProgram()
except KeyboardInterrupt:
print '\n\nExecution interrupted by the user... goodbye!'
return 0
|
luotao1/Paddle
|
python/paddle/fluid/contrib/layers/metric_op.py
|
Python
|
apache-2.0
| 7,067
| 0.000849
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contrib layers just related to metric.
"""
from __future__ import print_function
import warnings
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.initializer import Normal, Constant
from paddle.fluid.framework import Variable
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layers import nn
__all__ = ['ctr_metric_bundle']
def ctr_metric_bundle(input, label):
"""
ctr related metric layer
This function helps compute the CTR-related metrics: RMSE, MAE, predicted_ctr, q_value.
To compute the final values of these metrics, we should do the following computations using
the total instance number:
MAE = local_abserr / instance number
RMSE = sqrt(local_sqrerr / instance number)
predicted_ctr = local_prob / instance number
q = local_q / instance number
Note that if you are running a distributed job, you should all-reduce these metrics and the instance
number first
Args:
input(Variable): A floating-point 2D Variable, values are in the range
[0, 1]. Each row is sorted in descending order. This
input should be the output of topk. Typically, this
Variable indicates the probability of each label.
label(Variable): A 2D int Variable indicating the label of the training
data. The height is batch size and width is always 1.
Returns:
local_sqrerr(Variable): Local sum of squared error
local_abserr(Variable): Local sum of abs error
local_prob(Variable): Local sum of predicted ctr
local_q(Variable): Local sum of q value
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
label = fluid.layers.data(name="label", shape=[1], dtype="int32")
predict = fluid.layers.sigmoid(fluid.layers.fc(input=data, size=1))
auc_out = fluid.contrib.layers.ctr_metric_bundle(input=predict, label=label)
"""
assert input.shape == label.shape
helper = LayerHelper("ctr_metric_bundle", **locals())
local_abserr = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1])
local_sqrerr = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1])
local_prob = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1])
local_q = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1])
local_pos_num = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1])
local_ins_num = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1])
tmp_res_elesub = helper.create_global_variable(
persistable=False, dtype='float32', shape=[-1])
tmp_res_sigmoid = helper.create_global_variable(
persistable=False, dtype='float32', shape=[-1])
tmp_ones = helper.create_global_variable(
persistable=False, dtype='float32', shape=[-1])
batch_prob = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1])
batch_abserr = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1])
batch_sqrerr = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1])
batch_q = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1])
batch_pos_num = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1])
batch_ins_num = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1])
for var in [
local_abserr, batch_abserr, local_sqrerr, batch_sqrerr, local_prob,
batch_prob, local_q, batch_q, batch_pos_num, batch_ins_num,
local_pos_num, local_ins_num
]:
helper.set_variable_initializer(
var, Constant(
value=0.0, force_cpu=True))
helper.append_op(
type="elementwise_sub",
inputs={"X": [input],
"Y": [label]},
outputs={"Out": [tmp_res_elesub]})
helper.append_op(
type="squared_l2_norm",
inputs={"X": [tmp_res_elesub]},
outputs={"Out":
|
[batch_sqrerr]})
helper.appen
|
d_op(
type="elementwise_add",
inputs={"X": [batch_sqrerr],
"Y": [local_sqrerr]},
outputs={"Out": [local_sqrerr]})
helper.append_op(
type="l1_norm",
inputs={"X": [tmp_res_elesub]},
outputs={"Out": [batch_abserr]})
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_abserr],
"Y": [local_abserr]},
outputs={"Out": [local_abserr]})
helper.append_op(
type="reduce_sum", inputs={"X": [input]},
outputs={"Out": [batch_prob]})
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_prob],
"Y": [local_prob]},
outputs={"Out": [local_prob]})
helper.append_op(
type="sigmoid",
inputs={"X": [input]},
outputs={"Out": [tmp_res_sigmoid]})
helper.append_op(
type="reduce_sum",
inputs={"X": [tmp_res_sigmoid]},
outputs={"Out": [batch_q]})
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_q],
"Y": [local_q]},
outputs={"Out": [local_q]})
helper.append_op(
type="reduce_sum",
inputs={"X": [label]},
outputs={"Out": [batch_pos_num]})
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_pos_num],
"Y": [local_pos_num]},
outputs={"Out": [local_pos_num]})
helper.append_op(
type='fill_constant_batch_size_like',
inputs={"Input": label},
outputs={'Out': [tmp_ones]},
attrs={
'shape': [-1, 1],
'dtype': tmp_ones.dtype,
'value': float(1.0),
})
helper.append_op(
type="reduce_sum",
inputs={"X": [tmp_ones]},
outputs={"Out": [batch_ins_num]})
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_ins_num],
"Y": [local_ins_num]},
outputs={"Out": [local_ins_num]})
return local_sqrerr, local_abserr, local_prob, local_q, local_pos_num, local_ins_num
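# Sketch of the final reduction described in the docstring (done by the caller,
# after any cross-worker all-reduce):
#   MAE  = local_abserr / local_ins_num
#   RMSE = sqrt(local_sqrerr / local_ins_num)
#   predicted_ctr = local_prob / local_ins_num
#   q    = local_q / local_ins_num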
|
ComprasTransparentes/api
|
endpoints/proveedor.py
|
Python
|
gpl-3.0
| 22,914
| 0.00358
|
# coding=utf-8
import operator
import json
import falcon
import peewee
import dateutil
from models import models_api
from utils.myjson import JSONEncoderPlus
from utils.mypeewee import ts_match
class ProveedorId(object):
"""Endpoint para un proveedor en particular, identificado por ID"""
@models_api.database.atomic()
def on_get(self, req, resp, proveedor_id):
"""Obtiene la informacion de un proveedor en particular
================ ======= ===============
Parámetro de URL Ejemplo Descripción
================ ======= ===============
``proveedor_id`` 5 ID de proveedor
================ ======= ===============
"""
# Validate that proveedor_id is an int
# TODO Delegate the validation to the DB or raise HTTPBadRequest
try:
proveedor_id = int(proveedor_id)
except ValueError:
raise falcon.HTTPNotFound()
# Fetch the provider
try:
proveedor = models_api.ProveedorStats.get(
models_api.ProveedorStats.empresa == proveedor_id
)
except models_api.ProveedorStats.DoesNotExist:
raise falcon.HTTPNotFound()
# Build the response
response = {
'id': proveedor.empresa,
'nombre': proveedor.nombre_empresa,
'rut': proveedor.rut_sucursal
}
# Encode the response as JSON
resp.body = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)
class Proveedor(object):
"""Endpoint para todos los proveedores"""
MAX_RESULTS = 10
@models_api.database.atomic()
def on_get(self, req, resp):
"""Obtiene informacion de todos los proveedores.
Permite filtrar y paginar los resultados.
El paginamiento es de 10 elementos por pagina y es opcional.
**Nota**: Para usar un mismo filtro con diferentes valores, se debe usar el parametro tantas veces como
sea necesario. e.g.: `?proveedor=6&proveedor=8`. El filtro se aplicara usando la disyuncion de los
valores. i.e: ... `proveedor = 6 OR proveedor = 8`. El filtro ``q`` no puede ser usado de esta forma.
**Nota**: El campo `monto_adjudicado` de la respuesta solo tiene un valor si se ha usado el filtro
``monto_adjudicado`` en el request, si no, es ``null``.
Los parametros aceptados son:
Filtros
============================== ================== ============================================================
Parámetro Ejemplo Descripción
============================== ================== ============================================================
``q`` clavos y martillos Busqueda de texto
``proveedor`` 1 Por ID de proveedor
``fecha_adjudicacion`` 20140101|20141231 Por fecha de adjudicacion de licitaciones
``organismo_adjudicador`` 1 Por ID de organismos que han les concedido licitaciones
``n_licitaciones_adjudicadas`` 10|20 Por cantidad de licitaciones adjudicadas
``monto_adjudicado`` 10000|1000000 Por monto adjudicado en licitaciones
============================== ================== ============================================================
Modificadores
============================== ================ ============================================================
Parámetro Ejemplo Descripción
============================== ================ ============================================================
``orden`` monto_adjudicado Ordenar los resultados
``pagina`` 1 Paginar y entregar la pagina solicitada
============================== ================ ============================================================
"""
# Prepare the variable filters and operations
selects = [
models_api.ProveedorOrganismoCruce.empresa,
models_api.ProveedorOrganismoCruce.nombre_empresa,
models_api.ProveedorOrganismoCruce.rut_sucursal
]
wheres = []
joins = []
order_bys = []
# Free-text search
q_q = req.params.get('q', None)
if q_q:
# TODO Run this query against a single combined index instead of two filters joined by OR
wheres.append(ts_match(models_api.ProveedorOrganismoCruce.nombre_empresa, q_q) | ts_match(models_api.ProveedorOrganismoCruce.rut_sucursal, q_q))
# Filter by provider
q_proveedor = req.params.get('proveedor', None)
if q_proveedor:
if isinstance(q_proveedor, basestring):
q_proveedor = [q_proveedor]
try:
q_proveedor = map(lambda x: int(x), q_proveedor)
except ValueError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "proveedor debe ser un entero")
wheres.append(models_api.ProveedorOrganismoCruce.empresa << q_proveedor)
# Filter by award date
q_fecha_adjudicacion = req.params.get('fecha_adjudicacion', None)
if q_fecha_adjudicacion:
if isinstance(q_fecha_adjudicacion, basestring):
q_fecha_adjudicacion = [q_fecha_adjudicacion]
filter_fecha_adjudicacion = []
for fechas in q_fecha_adjudicacion:
fechas = fechas.split('|')
try:
fecha_adjudicacion_min = dateutil.parser.parse(fechas[0], dayfirst=True, yearfirst=True).date() if fechas[0] else None
fecha_adjudicacion_max = dateutil.parser.parse(fechas[1], dayfirst=True, yearfirst=True).date() if fechas[1] else None
except IndexError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "Los valores en fecha_adjudicacion deben estar separados por un pipe [|]")
except ValueError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "El formato de la fecha en fecha_adjudicacion no es correcto")
if fecha_adjudicacion_min and fecha_adjudicacion_max:
filter_fecha_adjudicacion.append((models_api.ProveedorOrganismoCruce.fecha_adjudicacion >= fecha_adjudicacion_min) & (models_api.ProveedorOrganismoCruce.fecha_adjudicacion <= fecha_adjudicacion_max))
elif f
|
echa_adjudicacion_min:
filter_fecha_adjudicacion.append(models_api.ProveedorOrganismoCruce.fecha_adjudicacion >= fecha_adjudicacion_min)
elif fecha_adjudicacion_max:
filter_fecha_adjudicacion.append(models_api.ProveedorOrganismoCruce.fecha_adjudicacion <= fecha_adjudicacion_max)
if filter_fecha_adjudicacion:
wheres.append(reduce
|
(operator.or_, filter_fecha_adjudicacion))
# Filter by organismo_adjudicador
q_organismo_adjudicador = req.params.get('organismo_adjudicador', None)
if q_organismo_adjudicador:
if isinstance(q_organismo_adjudicador, basestring):
q_organismo_adjudicador = [q_organismo_adjudicador]
try:
q_organismo_adjudicador = map(lambda x: int(x), q_organismo_adjudicador)
except ValueError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "organismo_adjudicador debe ser un entero")
wheres.append(models_api.ProveedorOrganismoCruce.organismo << q_organismo_adjudicador)
# Filter by n_licitaciones_adjudicadas
q_n_licitaciones_adjudicadas = req.params.get('n_licitaciones_adjudicadas')
if q_n_licitaciones_adjudicadas:
if isinstance(q_n_licitaciones_adjudicadas, basestring):
q_n_licitaciones_adjudicadas = [q_n_licitaciones_adjudicadas]
filter_n_licitaciones_adjudicadas = []
|
fjorba/invenio
|
modules/websearch/lib/websearch_templates.py
|
Python
|
gpl-2.0
| 196,591
| 0.005458
|
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
__revision__ = "$Id$"
import time
import cgi
import string
import re
import locale
from urllib import quote, urlencode
from xml.sax.saxutils import escape as xml_escape
from invenio.config import \
CFG_WEBSEARCH_LIGHTSEARCH_PATTERN_BOX_WIDTH, \
CFG_WEBSEARCH_SIMPLESEARCH_PATTERN_BOX_WIDTH, \
CFG_WEBSEARCH_ADVANCEDSEARCH_PATTERN_BOX_WIDTH, \
CFG_WEBSEARCH_AUTHOR_ET_AL_THRESHOLD, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_SPLIT_BY_COLLECTION, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_BIBRANK_SHOW_READING_STATS, \
CFG_BIBRANK_SHOW_DOWNLOAD_STATS, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_BIBRANK_SHOW_CITATION_LINKS, \
CFG_BIBRANK_SHOW_CITATION_STATS, \
CFG_BIBRANK_SHOW_CITATION_GRAPHS, \
CFG_WEBSEARCH_RSS_TTL, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_VERSION, \
CFG_SITE_URL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_ADMIN_EMAIL, \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE, \
CFG_WEBSEARCH_ENABLED_SEARCH_INTERFACES, \
CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS, \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_WEBCOMMENT_ALLOW_COMMENTS, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_WEBSEARCH_SHOW_COMMENT_COUNT, \
CFG_WEBSEARCH_SHOW_REVIEW_COUNT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT
from invenio.search_engine_config import CFG_WEBSEARCH_RESULTS_OVERVIEW_MAX_COLLS_TO_PRINT
from invenio.dbquery import run_sql
from invenio.messages import gettext_set_language
from invenio.urlutils import make_canonical_urlargd, drop_default_urlargd, create_html_link, create_url
from invenio.htmlutils import nmtoken_from_string
from invenio.webinterface_handler import wash_urlargd
from invenio.bibrank_citation_searcher import get_cited_by_count
from invenio.webuser import session_param_get
from invenio.intbitset import intbitset
from invenio.websearch_external_collections import external_collection_get_state, get_external_collection_engine
from invenio.websearch_external_collections_utils import get_collection_id
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
from invenio.search_engine_utils import get_fieldvalues
_RE_PUNCTUATION = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION)
_RE_SPACES = re.compile(r"\s+")
class Template:
# This dictionary maps Invenio language code to locale codes (ISO 639)
tmpl_localemap = {
'bg': 'bg_BG',
'ar': 'ar_AR',
'ca': 'ca_ES',
'de': 'de_DE',
'el': 'el_GR',
'en': 'en_US',
'es': 'es_ES',
'pt': 'pt_BR',
'fr': 'fr_FR',
'it': 'it_IT',
'ka': 'ka_GE',
'lt': 'lt_LT',
'ro': 'ro_RO',
'ru': 'ru_RU',
'rw': 'rw_RW',
'sk': 'sk_SK',
'cs': 'cs_CZ',
'no': 'no_NO',
'sv': 'sv_SE',
'uk': 'uk_UA',
'ja': 'ja_JA',
'pl': 'pl_PL',
'hr': 'hr_HR',
'zh_CN': 'zh_CN',
'zh_TW': 'zh_TW',
'hu': 'hu_HU',
'af': 'af_ZA',
'gl': 'gl_ES'
}
tmpl_default_locale = "en_US" # which locale to use by default, useful in case of failure
# Type of the allowed parameters for the web interface for search results
search_results_default_urlargd = {
'cc': (str, CFG_SITE_NAME),
'c': (list, []),
'p': (str, ""), 'f': (str, ""),
'rg': (int, CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS),
'sf': (str, ""),
'so': (str, "d"),
'sp': (str, ""),
'rm': (str, ""),
'of': (str, "hb"),
'ot': (list, []),
'em': (str,""),
'aas': (int, CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE),
'as': (int, CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE),
'p1': (str, ""), 'f1': (str, ""), 'm1': (str, ""), 'op1':(str, ""),
'p2': (str, ""), 'f2': (str, ""), 'm2': (str, ""), 'op2':(str, ""),
'p3': (str, ""), 'f3': (str, ""), 'm3': (str, ""),
'sc': (int, 0),
'jrec': (int, 0),
'recid': (int, -1), 'recidb': (int, -1), 'sysno': (str, ""),
'id': (int, -1), 'idb': (int, -1), 'sysnb': (str, ""),
'action': (str, "search"),
'action_search': (str, ""),
'action_browse': (str, ""),
'd1': (str, ""),
'd1y': (int, 0), 'd1m': (int, 0), 'd1d': (int, 0),
'd2': (str, ""),
'd2y': (int, 0), 'd2m': (int, 0), 'd2d': (int, 0),
'dt': (str, ""),
'ap': (int, 1),
'verbose': (int, 0),
'ec': (list, []),
'wl': (int, CFG_WEBSEARCH_WILDCARD_LIMIT),
}
# ...and for search interfaces
search_interface_default_urlargd = {
'aas': (int, CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE),
'as': (int, CFG_WEBSEARCH_DEFAULT_SEARCH_INTERFACE),
'verbose': (int, 0),
'em' : (str, "")}
# ...and for RSS feeds
rss_default_urlargd = {'c' : (list, []),
'cc' : (str, ""),
'p' : (str, ""),
'f' : (str, ""),
'p1' : (str, ""),
'f1' : (str, ""),
'm1' : (str, ""),
'op1': (str, ""),
'p2' : (str, ""),
'f2' : (str, ""),
'm2' : (str, ""),
'op2': (str, ""),
'p3' : (str, ""),
'f3' : (str, ""),
'm3' : (str, ""),
'wl' : (int, CFG_WEBSEARCH_WILDCARD_LIMIT)}
tmpl_openurl_accepted_args = {
'id' : (list, []),
'genre' : (str, ''),
'aulast' : (str, ''),
'aufirst' : (str, ''),
'auinit' : (str, ''),
'auinit1' : (str, ''),
'auinitm' : (str, ''),
'issn' : (str, ''),
'eissn' : (str, ''),
'coden' : (str, ''),
'isbn' : (str, ''),
'sici' : (str, ''),
'bici' : (str, ''),
'title' : (str, ''),
'stitle' : (str, ''),
'atitle' : (str, ''),
'volume' : (str, ''),
'part' : (str, ''),
'issue' : (str, ''),
'spage' : (str, ''),
'epage' : (s
|
tr, ''),
'pages' : (str, ''),
'artnum' : (str, ''),
'date' : (str, ''),
'ssn' : (str, ''),
'quarter' : (str, ''),
'url_ver' : (str, ''),
'ctx_ver' : (str, ''),
'rft_val_fmt' : (str, ''),
'rft_id' : (list, []),
'rft.atitle' : (str, ''),
'rft.title' : (str, ''),
'rft.jtitle' : (str, ''),
'rft.stitle' : (str, ''),
|
'rft.date' : (str, ''),
'rft.volume' : (str, ''),
'rft.issue' : (str, ''),
'rft.spage' : (str, ''),
'rft.epage' : (str, ''),
'rft.pages' : (str, ''),
'rft.artnumber' : (str, ''),
'rft.issn' : (str, ''),
'rft.eissn' : (str, ''),
'rft.aulast' : (str, ''),
'rft.aufirs
|
botswana-harvard/edc-describe
|
edc_describe/forms/select_model_form.py
|
Python
|
gpl-2.0
| 232
| 0
|
from django import forms
class SelectModelForm(forms.Form):
app_label = forms.CharField(
l
|
abel="App label",
required=Tr
|
ue)
model_name = forms.CharField(
label="Model name",
required=True)
|
candlepin/virt-who
|
virtwho/daemon/daemon.py
|
Python
|
gpl-2.0
| 25,080
| 0
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# daemon/daemon.py
# Part of python-daemon, an implementation of PEP 3143.
#
# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
# Copyright © 2004–2005 Chad J. Schroeder
# Copyright © 2003 Clark Evans
# Copyright © 2002 Noah Spurrier
# Copyright © 2001 Jürgen Hermann
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Daemon process behaviour.
"""
import os
import sys
import resource
import errno
import signal
import socket
import atexit
class DaemonError(Exception):
""" Base exception class for errors from this module. """
class DaemonOSEnvironmentError(DaemonError, OSError):
""" Exception raised when daemon OS environment setup receives error. """
class DaemonProcessDetachError(DaemonError, OSError):
""" Exception raised when process detach fails. """
class DaemonContext(object):
""" Context for turning the current program into a daemon process.
A `DaemonContext` instance represents the behaviour settings and
    process context for the program when it becomes a daemon. The
behaviour and environment is customised by setting options on the
instance, before calling the `open` method.
Each option can be passed as a keyword argument to the `DaemonContext`
constructor, or subsequently altered by assigning to an attribute on
the instance at any time prior to calling `open`. That is, for
options named `wibble` and `wubble`, the following invocation::
foo = daemon.DaemonContext(wibble=bar, wubble=baz)
foo.open()
is equivalent to::
foo = daemon.DaemonContext()
foo.wibble = bar
foo.wubble = baz
foo.open()
The following options are defined.
`files_preserve`
:Default: ``None``
List of files that should *not* be closed when starting the
daemon. If ``None``, all open file descriptors will be closed.
Elements of the list are file descriptors (as returned by a file
object's `fileno()` method) or Python `file` objects. Each
specifies a file that is not to be closed during daemon start.
`chroot_directory`
:Default: ``None``
Full path to a directory to set as the effective root directory of
the process. If ``None``, specifies that the root directory is not
to be changed.
`working_directory`
:Default: ``'/'``
Full path of the working directory to which the process should
change on daemon start.
Since a filesystem cannot be unmounted if a process has its
current working directory on that filesystem, this should either
be left at default or set to a directory that is a sensible “home
directory” for the daemon while it is running.
`umask`
:Default: ``0``
File access creation mask (“umask”) to set for the process on
daemon start.
Since a process inherits its umask from its parent process,
starting the daemon will reset the umask to this value so that
files are created by the daemon with access modes as it expects.
`pidfile`
:Default: ``None``
Context manager for a PID lock file. When the daemon context opens
and closes, it enters and exits the `pidfile` context manager.
`detach_process`
:Default: ``None``
If ``True``, detach the process context when opening the daemon
context; if ``False``, do not detach.
If unspecified (``None``) during initialisation of the instance,
this will be set to ``True`` by default, and ``False`` only if
detaching the process is determined to be redundant; for example,
in the case when the process was started by `init`, by `initd`, or
by `inetd`.
`signal_map`
:Default: system-dependent
Mapping from operating system signals to callback actions.
The mapping is used when the daemon context opens, and determines
the action for each signal's signal handler:
* A value of ``None`` will ignore the signal (by setting the
signal action to ``signal.SIG_IGN``).
* A string value will be used as the name of an attribute on the
``DaemonContext`` instance. The attribute's value will be used
as the action for the signal handler.
* Any other value will be used as the action for the
signal handler. See the ``signal.signal`` documentation
for details of the signal handler interface.
The default value depends on which signals are defined on the
running system. Each item from the list below whose signal is
actually defined in the ``signal`` module will appear in the
default map:
* ``signal.SIGTTIN``: ``None``
* ``signal.SIGTTOU``: ``None``
* ``signal.SIGTSTP``: ``None``
* ``signal.SIGTERM``: ``'terminate'``
Depending on how the program will interact with its child
processes, it may need to specify a signal map that
includes the ``signal.SIGCHLD`` signal (received when a
child process exits). See the specific operating system's
documentation for more detail on how to determine what
circumstances dictate the need for signal handlers.
`uid`
:Default: ``os.getuid()``
`gid`
:Default: ``os.getgid()``
The user ID (“UID”) value and group ID (“GID”) value to switch
the process to on daemon start.
The default values, the real UID and GID of the process, will
relinquish any effective privilege elevation inherited by the
process.
`prevent_core`
:Default: ``True``
If true, prevents the generation of core files, in order to avoid
leaking sensitive information from daemons run as `root`.
`stdin`
:Default: ``None``
`stdout`
:Default: ``None``
`stderr`
:Default: ``None``
Each of `stdin`, `stdout`, and `stderr` is a file-like object
which will be used as the new file for the standard I/O stream
`sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
should therefore be open, with a minimum of mode 'r' in the case
of `stdin`, and mode 'w+' in the case of `stdout` and `stderr`.
If the object has a `fileno()` method that returns a file
descriptor, the corresponding file will be excluded from being
closed during daemon start (that is, it will be treated as though
it were listed in `files_preserve`).
If ``None``, the corresponding system stream is re-bound to the
file named by `os.devnull`.
"""
def __init__(
self,
chroot_directory=None,
working_directory='/',
umask=0,
uid=None,
gid=None,
prevent_core=True,
detach_process=None,
files_preserve=None,
pidfile=None,
stdin=None,
stdout=None,
stderr=None,
signal_map=None):
""" Set up a new instance. """
self.chroot_directory = chroot_directory
self.working_directory = working_directory
self.umask = umask
self.prevent_core = prevent_core
self.files_preserve = files_preserve
self.pidfile = pidfile
|
zentralopensource/zentral
|
zentral/conf/config.py
|
Python
|
apache-2.0
| 9,328
| 0.000858
|
import base64
import itertools
import json
import logging
import os
import re
import time
from .buckets import get_bucket_client
from .params import get_param_client
from .secrets import get_secret_client
logger = logging.getLogger("zentral.conf.config")
class Proxy:
pass
class EnvProxy(Proxy):
def __init__(self, name):
self._name = name
def get(self):
return os.environ[self._name]
class ResolverMethodProxy(Proxy):
def __init__(self, resolver, proxy_type, key):
if proxy_type == "file":
self._method = resolver.get_file_content
elif proxy_type == "param":
self._method = resolver.get_parameter_value
elif proxy_type == "secret":
self._method = resolver.get_secret_value
elif proxy_type == "bucket_file":
self._method = resolver.get_bucket_file
else:
raise ValueError("Unknown proxy type %s", proxy_type)
self._key = key
def get(self):
return self._method(self._key)
class JSONDecodeFilter(Proxy):
def __init__(self, child_proxy):
        self._child_proxy = child_proxy
def get(self):
return json.loads(self._child_proxy.get())
class Base64DecodeFilter(Proxy):
def __init__(self, child_proxy):
self._child_proxy = child_proxy
def get(self):
return base64.b64decode(self._child_proxy.get())
class ElementFilter(Proxy):
def __init__(self, key, child_proxy):
try:
self._key = int(key)
except ValueError:
self._key = key
        self._child_proxy = child_proxy
def get(self):
return self._child_proxy.get()[self._key]
class Resolver:
def __init__(self):
self._cache = {}
self._bucket_client = None
self._param_client = None
self._secret_client = None
def _get_or_create_cached_value(self, key, getter, ttl=None):
# happy path
try:
expiry, value = self._cache[key]
except KeyError:
pass
else:
if expiry is None or time.time() < expiry:
logger.debug("Key %s from cache", key)
return value
logger.debug("Cache for key %s has expired", key)
# get value
value = getter()
if ttl:
expiry = time.time() + ttl
else:
expiry = None
self._cache[key] = (expiry, value)
logger.debug("Set cache for key %s", key)
return value
def get_file_content(self, filepath):
cache_key = ("FILE", filepath)
def getter():
with open(filepath, "r") as f:
return f.read()
return self._get_or_create_cached_value(cache_key, getter)
def get_secret_value(self, name):
cache_key = ("SECRET", name)
if not self._secret_client:
self._secret_client = get_secret_client()
def getter():
return self._secret_client.get(name)
return self._get_or_create_cached_value(cache_key, getter, ttl=600)
def get_bucket_file(self, key):
cache_key = ("BUCKET_FILE", key)
if not self._bucket_client:
self._bucket_client = get_bucket_client()
def getter():
return self._bucket_client.download_to_tmpfile(key)
return self._get_or_create_cached_value(cache_key, getter)
def get_parameter_value(self, key):
cache_key = ("PARAM", key)
if not self._param_client:
self._param_client = get_param_client()
def getter():
return self._param_client.get(key)
return self._get_or_create_cached_value(cache_key, getter, ttl=600)
class BaseConfig:
PROXY_VAR_RE = re.compile(
r"^\{\{\s*"
r"(?P<type>bucket_file|env|file|param|secret)\:(?P<key>[^\}\|]+)"
r"(?P<filters>(\s*\|\s*(jsondecode|base64decode|element:[a-zA-Z_\-/0-9]+))*)"
r"\s*\}\}$"
)
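    # Illustrative strings matched by the pattern above (the keys are hypothetical,
    # shown only to document the substitution syntax):
    #   "{{ env:ZENTRAL_DB_PASSWORD }}"
    #   "{{ file:/zentral/conf/tls.crt }}"
    #   "{{ secret:api-token | jsondecode | element:key }}"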
custom_classes = {}
def __init__(self, path=None, resolver=None):
self._path = path or ()
if not resolver:
resolver = Resolver()
self._resolver = resolver
def _make_proxy(self, key, match):
proxy_type = match.group("type")
key = match.group("key").strip()
if proxy_type == "env":
proxy = EnvProxy(key)
else:
proxy = ResolverMethodProxy(self._resolver, proxy_type, key)
filters = [f for f in [rf.strip() for rf in match.group("filters").split("|")] if f]
for filter_name in filters:
if filter_name == "jsondecode":
proxy = JSONDecodeFilter(proxy)
elif filter_name == "base64decode":
proxy = Base64DecodeFilter(proxy)
elif filter_name.startswith("element:"):
key = filter_name.split(":", 1)[-1]
proxy = ElementFilter(key, proxy)
else:
raise ValueError("Unknown filter %s", filter_name)
return proxy
def _from_python(self, key, value):
new_path = self._path + (key,)
if isinstance(value, dict):
value = self.custom_classes.get(new_path, ConfigDict)(value, new_path)
elif isinstance(value, list):
value = self.custom_classes.get(new_path, ConfigList)(value, new_path)
elif isinstance(value, str):
match = self.PROXY_VAR_RE.match(value)
if match:
value = self._make_proxy(key, match)
return value
def _to_python(self, value):
if isinstance(value, Proxy):
return value.get()
else:
return value
def __len__(self):
return len(self._collection)
def __delitem__(self, key):
del self._collection[key]
def __setitem__(self, key, value):
self._collection[key] = self._from_python(key, value)
def pop(self, key, default=None):
value = self._collection.pop(key, default)
if isinstance(value, Proxy):
value = value.get()
return value
class ConfigList(BaseConfig):
def __init__(self, config_l, path=None, resolver=None):
super().__init__(path=path, resolver=resolver)
self._collection = []
for key, value in enumerate(config_l):
self._collection.append(self._from_python(str(key), value))
def __getitem__(self, key):
value = self._collection[key]
if isinstance(key, slice):
slice_repr = ":".join(str("" if i is None else i) for i in (key.start, key.stop, key.step))
logger.debug("Get /%s[%s] config key", "/".join(self._path), slice_repr)
return [self._to_python(item) for item in value]
else:
logger.debug("Get /%s[%s] config key", "/".join(self._path), key)
return self._to_python(value)
def __iter__(self):
for element in self._collection:
yield self._to_python(element)
def serialize(self):
s = []
for v in self:
if isinstance(v, BaseConfig):
v = v.serialize()
s.append(v)
return s
class ConfigDict(BaseConfig):
def __init__(self, config_d, path=None, resolver=None):
super().__init__(path=path, resolver=resolver)
self._collection = {}
for key, value in config_d.items():
self._collection[key] = self._from_python(key, value)
def __getitem__(self, key):
logger.debug("Get /%s config key", "/".join(self._path + (key,)))
value = self._collection[key]
return self._to_python(value)
def get(self, key, default=None):
try:
value = self[key]
except KeyError:
value = self._to_python(default)
return value
def __iter__(self):
yield from self._collection
def keys(self):
return self._collection.keys()
def values(self):
for value in self._collection.values():
yield self._to_python(value)
def items(self):
for key, value in self._collection.items():
yield key, self._to_python(value)
def clear(self):
|
gpciceri/milagathos
|
SixWeeksPreparationForReadingCaesar/pgm/generateSintagmaProblem_6WP.py
|
Python
|
lgpl-3.0
| 3,153
| 0.024738
|
#! /use/bin/env python
# -*- coding: utf-8 -*-
'''/* generateSintagmaProblem_6WP.py
*
* Copyright (C) 2016 Gian Paolo Ciceri
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Author:
* gian paolo ciceri <gp.ciceri@gmail.com>
*
*
* Release:
* 2016.08.24 - initial release.
*
*/
'''
import random
import time
import locale
import re
from sixWeeksExercises import *
from latinGrammarRules import *
locale.setlocale(locale.LC_ALL, '')
NUMEX = (48*6)+12
NUMTRY = 10000
ID = "6WP"
PERIOD = "PAG11"
PRE = ID + '-' + PERIOD + "_esercizi_"
PRE_NAME = 'EX_' + ID + '_' + PERIOD
PREAMBLE = '''#
# -*- coding: utf-8 -*-'''
RUNCODE = '''if __name__ == "__main__":
import sys
for item in %s:
for word in item[3:6]:
print(word.encode(sys.stdin.encoding).decode(sys.stdout.encoding), end=" ")
print()
''' % (PRE_NAME,)
NOW = time.strftime('%Y%m%d-%H00')
TODAY = time.strftime('%Y.%m.%d')
PROBLEMI = PRE + NOW + ".py"
GENERE = LATIN_GENDERS
PERSONE = LATIN_PERSONS
CASI = LATIN_CASES_VOCDECL
REGOLA = (CASI,)
LESSICO = SIXWEEKS_PAG11_VOC_EX1
NUMLEX = 1
if __name__ == "__main__":
random.seed()
item = 0
num = 0
problemDict = dict()
exDict = dict()
fields = '("<LAT>", "<ITA>", "<ING>", "%s", "%s", "%s"),'
while (item < NUMEX and num < NUMTRY):
num += 1
regola = list()
esempio = list()
for rule in REGOLA:
theRule = random.choice(rule)
regola.append(theRule[0])
esempio.append(theRule[1])
lessico = list()
while 1:
word = random.choice(LESSICO)[0]
if word not in lessico and len(lessico) < NUMLEX:
lessico.append(word)
if len(lessico) == NUMLEX:
break
regola_string = ', '.join(regola)
esempio_string = ', '.join(esempio)
lessico_string = ', '.join(lessico)
voceX = fields % (regola_string, esempio_string, lessico_string)
        # collapse double spaces into a single one
voce = voceX.replace(" ", " ")
idItem = "%s.%d" % (ID, item)
try:
p = problemDict[(voce,)]
print(item, num, "DOPPIO:", voce)
except KeyError:
            problemDict[(voce,)] = (voce,)
exDict[item] = (voce, idItem)
item += 1
pf = open(PROBLEMI, "wb")
pf.write(bytes(PREAMBLE + "\n\n", 'UTF-8'))
pf.write(bytes(PRE_NAME + " = [\n", 'UTF-8'))
for pitem in sorted(exDict.keys()):
problema = exDict[pitem][0]
pf.write(bytes(problema + "\n", 'UTF-8'))
pf.write(bytes("]\n", 'UTF-8'))
pf.write(bytes("\n\n" + RUNCODE + "\n\n", 'UTF-8'))
pf.close()
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/__init__.py
|
Python
|
apache-2.0
| 29,125
| 0.001373
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/bandwidth/auto-bandwidth/underflow/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information for MPLS underflow bandwidth
adjustment
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__enabled",
"__underflow_threshold",
"__trigger_event_count",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__underflow_threshold = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..100"]},
),
is_leaf=True,
yang_name="underflow-threshold",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-types:percentage",
is_config=False,
)
self.__trigger_event_count = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="trigger-event-count",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"mpls",
"lsps",
"constrained-path",
"tunnels",
"tunnel",
"bandwidth",
"auto-bandwidth",
"underflow",
"state",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/enabled (boolean)
YANG Description: enables bandwidth underflow
adjustment on the lsp
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: enables bandwidth underflow
adjustment on the lsp
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_underflow_threshold(self):
"""
Getter method for underflow_threshold, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/underflow_threshold (oc-types:percentage)
YANG Description: bandwidth percentage change to trigger
and underflow event
"""
return self.__underflow_threshold
def _set_underflow_threshold(self, v, load=False):
"""
Setter method for underflow_threshold, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/b
|
h3/django-webcore
|
webcore/urls/robots.py
|
Python
|
bsd-3-clause
| 172
| 0.011628
|
from django.conf.urls.defaults import *
urlpatterns = patterns('',
    (r'^robots.txt', 'django.views.generic.simple.direct_to_template', {'template': 'robots.txt'}),
)
|
shurain/archiver
|
archiver/sink.py
|
Python
|
mit
| 4,439
| 0.003379
|
# -*- coding: utf-8 -*-
import hashlib
import binascii
from thrift.transport.THttpClient import THttpClient
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from evernote.edam.userstore import UserStore
from evernote.edam.notestore import NoteStore
import evernote.edam.type.ttypes as Types
import evernote.edam.error.ttypes as Errors
from evernote.api.client import EvernoteClient
from .settings import EVERNOTE_NOTEBOOK
import logging
class Sink(object):
pass
class EvernoteSink(Sink):
def __init__(self, token, sandbox=False):
"""Initialize evernote connection.
Client connection handle is assigned to the client property.
Two properties user_store and note_store are provided for the convenience.
"""
self.token = token
self.client = EvernoteClient(token=self.token, sandbox=sandbox)
self.user_store = self.client.get_user_store()
self.note_store = self.client.get_note_store()
def image_resource(self, item):
#FIXME create pdf resource
md5 = hashlib.md5()
md5.update(item.content)
hashvalue = md5.digest()
data = Types.Data()
data.size = len(item.content) #FIXME better ways of doing this calculation?
data.bodyHash = hashvalue
data.body = item.content
resource = Types.Resource()
resource.mime = item.content_type
resource.data = data
return resource
def pdf_resource(self, item):
#FIXME create pdf resource
md5 = hashlib.md5()
md5.update(item.content)
hashvalue = md5.digest()
data = Types.Data()
data.size = len(item.content) #FIXME better ways of doing this calculation?
data.bodyHash = hashvalue
data.body = item.content
resource = Types.Resource()
resource.mime = 'application/pdf'
resource.data = data
return resource
def note_attribute(self, source_url=''):
attributes = Types.NoteAttributes()
attributes.sourceURL = source_url
return attributes
def create_note(self, title, content, notebook_name='', tags='', attributes=None, resources=None):
note = Types.Note()
note.title = title
if attributes:
note.attributes = attributes
if tags:
note.tagNames = [t.encode('utf-8', 'xmlcharrefreplace') for t in tags.split()] # Assuming no spaces in tags
logging.debug(note.tagNames)
if notebook_name:
notebooks = self.note_store.listNotebooks(self.token)
for notebook in notebooks:
if notebook.name == notebook_name:
note.notebookGuid = notebook.guid
break
else:
pass # create a note in default notebook
note.content = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">
<en-note>{}""".format(content.encode('utf-8', 'xmlcharrefreplace'))
if resources:
note.resources = resources
for r in resources:
note.content += """<en-media type="{}" hash="{}"/>""".format(r.mime, binascii.hexlify(r.data.bodyHash))
note.content += "</en-note>"
logging.debug(note.content)
created_note = self.note_store.createNote(self.token, note)
return created_note
def push(self, item):
kwargs = {
'title': item.title.encode('utf-8', 'xmlcharrefreplace'),
'content': item.body,
'tags': item.tags,
'notebook_name': EVERNOTE_NOTEBOOK,
'attributes': self.note_attribute(item.url),
}
if item.itemtype == 'PDF':
resource = self.pdf_resource(item)
kwargs['resources'] = [resource]
elif item.itemtype == 'image':
resource = self.image_resource(item)
kwargs['resources'] = [resource]
elif item.itemtype == 'HTML':
#FIXME check for image inside and create image resources
kwargs['content'] = item.content
elif item.itemtype == 'text':
kwargs['content'] = item.content
else:
# XXX Assuming plaintext type
# Should I raise exception for unknown items?
item.itemtype = 'text'
self.create_note(**kwargs)
class Database(Sink):
pass
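# Minimal usage sketch (illustrative only; `token` and `item` are hypothetical and
# not defined in this module -- `item` needs the attributes push() reads:
# title, body, content, tags, url and itemtype):
#
#   sink = EvernoteSink(token, sandbox=True)
#   sink.push(item)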
|
yingxuanxuan/fabric_script
|
shadowsocks.py
|
Python
|
apache-2.0
| 2,157
| 0.001391
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from fabric.api import reboot, sudo, settings
logging.basicConfig(level=logging.INFO)
def ssserver(port, password, method):
try:
sudo('hash yum')
sudo('hash python')
sudo('yum -y update 1>/dev/null')
sudo('yum -y install python-setuptools 1>/dev/null')
sudo('yum -y install m2crypto 1>/dev/null')
sudo('easy_install pip 1>/dev/null')
sudo('pip install shadowsocks 1>/dev/null')
sudo('hash ssserver')
sudo("sed -i '/ssserver/d' /etc/rc.d/rc.local")
cmd = '/usr/bin/python /usr/bin/ssserver -p %s -k %s -m %s --user nobody -d start' % \
(port, password, method)
sudo("sed -i '$a %s' /etc/rc.d/rc.local" % cmd)
sudo('chmod +x /etc/rc.d/rc.local')
sudo('firewall-cmd --zone=public --add-port=%s/tcp --permanent' % port)
with settings(warn_only=True):
reboot()
sudo('ps -ef | grep ssserver')
return True
except BaseException as e:
logging.error(e)
return False
def sslocal(server_addr, server_port, server_password, method, local_port):
try:
sudo('hash yum')
sudo('hash python')
sudo('yum -y update 1>/dev/null')
sudo('yum -y install python-setuptools 1>/dev/null')
sudo('yum -y install m2crypto 1>/dev/null')
sudo('easy_install pip 1>/dev/null')
sudo('pip install shadowsocks 1>/dev/null')
sudo('hash sslocal')
sudo("sed -i '/sslocal /d' /etc/rc.d/rc.local")
        cmd = '/usr/bin/python /usr/bin/sslocal -s %s -p %s -k %s -m %s -b 0.0.0.0 -l %s --user nobody -d start' % \
(server_addr, server_port, server_password, method, local_port)
sudo("sed -i '$a %s' /etc/rc.d/rc.local" % cmd)
sudo('chmod +x /etc/rc.d/rc.local')
sudo('firewall-cmd --zone=public --add-port=%s/tcp --permanent' % local_port)
with settings(warn_only=True):
reboot()
sudo('ps -ef | grep sslocal')
return True
except BaseException as e:
logging.error(e)
return False
|
pkuwwt/pydec
|
Examples/HodgeDecomposition/driver.py
|
Python
|
bsd-3-clause
| 2,724
| 0.020558
|
"""
Compute a harmonic 1-cochain basis for a square with 4 holes.
"""
from numpy import asarray, eye, outer, inner, dot, vstack
from numpy.random import seed, rand
from numpy.linalg import norm
from scipy.sparse.linalg import cg
from pydec import d, delta, simplicial_complex, read_mesh
def hodge_decomposition(omega):
"""
For a given p-cochain \omega there is a unique decomposition
\omega = d(\alpha) + \delta(\beta) (+) h
for p-1 cochain \alpha, p+1 cochain \beta, and harmonic p-cochain h.
    This function returns (non-unique) representatives \beta, \gamma, and h
which satisfy the equation above.
Example:
#decompose a random 1-cochain
sc = SimplicialComplex(...)
omega = sc.get_cochain(1)
    omega.v[:] = rand(*omega.v.shape)
(alpha,beta,h) = hodge_decomposition(omega)
"""
sc = omega.complex
p = omega.k
alpha = sc.get_cochain(p - 1)
beta = sc.get_cochain(p + 1)
# Solve for alpha
A = delta(d(sc.get_cochain_basis(p - 1))).v
b = delta(omega).v
alpha.v = cg( A, b, tol=1e-8 )[0]
# Solve for beta
A = d(delta(sc.get_cochain_basis(p + 1))).v
b = d(omega).v
beta.v = cg( A, b, tol=1e-8 )[0]
# Solve for h
h = omega - d(alpha) - delta(beta)
return (alpha,beta,h)
def ortho(A):
"""Separates the harmonic forms stored in the rows of A using a heuristic
"""
A = asarray(A)
for i in range(A.shape[0]):
j = abs(A[i]).argmax()
v = A[:,j].copy()
if A[i,j] > 0:
v[i] += norm(v)
else:
v[i] -= norm(v)
Q = eye(A.shape[0]) - 2 * outer(v,v) / inner(v,v)
A = dot(Q,A)
return A
seed(0) # make results consistent
# Read in mesh data from file
mesh = read_mesh('mesh_example.xml')
vertices = mesh.vertices
triangles = mesh.elements
# remove some triangle from the mesh
triangles = triangles[list(set(range(len(triangles))) - set([30,320,21,198])),:]
sc = simplicial_complex((vertices,triangles))
H = [] # harmonic forms
# decompose 4 random 1-cochains
for i in range(4):
omega = sc.get_cochain(1)
omega.v[:] = rand(*omega.v.shape)
(beta,gamma,h) = hodge_decomposition(omega)
h = h.v
for v in H:
h -= inner(v,h) * v
h /= norm(h)
H.append(h)
H = ortho(vstack(H))
# plot the results
from pylab import figure, title, quiver, axis, show
from pydec import triplot, simplex_quivers
for n,h in enumerate(H):
figure()
title('Harmonic 1-cochain #%d' % n)
triplot(vertices,triangles)
bases,dirs = simplex_quivers(sc,h)
quiver(bases[:,0],bases[:,1],dirs[:,0],dirs[:,1])
axis('equal')
show()
|
angr/angr
|
angr/procedures/definitions/win32_aclui.py
|
Python
|
bsd-2-clause
| 1,451
| 0.004824
|
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("aclui.dll")
prototypes = \
{
#
    'CreateSecurityPage': SimTypeFunction([SimTypeBottom(label="ISecurityInformation")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["psi"]),
#
    'EditSecurity': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeBottom(label="ISecurityInformation")], SimTypeInt(signed=True, label="Int32"), arg_names=["hwndOwner", "psi"]),
#
'EditSecurityAdvanced': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeBottom(label="ISecurityInformation"), SimTypeInt(signed=False, label="SI_PAGE_TYPE")], SimTypeInt(signed=True, label="Int32"), arg_names=["hwndOwner", "psi", "uSIPage"]),
}
lib.set_prototypes(prototypes)
|
Donkyhotay/MoonPy
|
zope/configuration/tests/test_conditions.py
|
Python
|
gpl-3.0
| 3,562
| 0.000842
|
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
r'''How to conditionalize specific directives
There is a "condition" attribute in the
"http://namespaces.zope.org/zcml" namespace which is honored on all
elements in ZCML. The value of the attribute is an expression
which is used to determine if that element and its descendents are
used. If the condition is true, processing continues normally,
otherwise that element and its descendents are ignored.
Currently the expression is always of the form "have featurename", and it
checks for the presence of a <meta:provides feature="featurename" />.
Our demonstration uses a trivial registry; each registration consists
of a simple id inserted in the global `registry` in this module. We
can checked that a registration was made by checking whether the id is
present in `registry`.
We start by loading the example ZCML file, *conditions.zcml*::
>>> import zope.configuration.tests
>>> import zope.configuration.xmlconfig
>>> context = zope.configuration.xmlconfig.file("conditions.zcml",
... zope.configuration.tests)
To show that our sample directive works, we see that the unqualified
registration was successful::
>>> "unqualified.registration" in registry
True
When the expression specified with ``zcml:condition`` evaluates to
true, the element it is attached to and all contained elements (not
otherwise conditioned) should be processed normally::
>>> "direct.true.condition" in registry
True
>>> "nested.true.condition" in registry
True
However, when the expression evaluates to false, the conditioned
element and all contained elements should be ignored::
>>> "direct.false.condition" in registry
False
>>> "nested.false.condition" in registry
False
Conditions on container elements affect the conditions in nested
elements in a reasonable way. If an "outer" condition is true, nested
conditions are processed normally::
>>> "true.condition.nested.in.true" in registry
True
>>> "false.condition.nested.in.true" in registry
False
If the outer condition is false, inner conditions are not even
evaluated, and the nested elements are ignored::
>>> "true.condition.nested.in.false" in registry
False
>>> "false.condition.nested.in.false" in registry
False
Now we need to clean up after ourselves::
>>> del registry[:]
'''
__docformat__ = "reStructuredText"
import zope.interface
import zope.schema
import zope.testing.doctest
class IRegister(zope.interface.Interface):
"""Trivial sample registry."""
id = zope.schema.Id(
title=u"Identifier",
description=u"Some identifier that can be checked.",
required=True,
)
registry = []
def register(context, id):
context.action(discriminator=('Register', id),
callable=registry.append,
args=(id,)
)
def test_suite():
return zope.testing.doctest.DocTestSuite()
|
akrawchyk/amweekly
|
amweekly/slack/tests/test_models.py
|
Python
|
mit
| 313
| 0
|
from django.core.exceptions import ValidationError
from amweekly.slack.tests.factories import SlashCommandFactory
import pytest
@pytest.mark.unit
def test_slash_command_raises_with_invalid_token(settings):
settings.SLACK_TOKENS = ''
    with pytest.raises(ValidationError):
SlashCommandFactory()
|
EduPepperPDTesting/pepper2013-testing
|
lms/djangoapps/polls/views.py
|
Python
|
agpl-3.0
| 3,557
| 0.001125
|
from mitxmako.shortcuts import render_to_response
from django.http import HttpResponse
from .models import poll_store
from datetime import datetime
from django.utils import timezone
import json
import urllib2
from math import floor
def poll_form_view(request, poll_type=None):
if poll_type:
return render_to_response('polls/' + poll_type + '_form.html')
def poll_form_submit(request, poll_type):
try:
poll_id = request.POST.get('poll_id')
question = request.POST.get('question')
answers = get_post_array(request.POST, 'answers')
expiration = request.POST.get('expiration', '')
expiration_object = None
if expiration:
expiration_object = datetime.strptime(expiration, '%m/%d/%Y')
poll_connect = poll_store()
poll_connect.set_poll(poll_type, poll_id, question, answers, expiration_object)
response = {'Success': True}
except Exception as e:
response = {'Success': False, 'Error': 'Error: {0}'.format(e)}
return HttpResponse(json.dumps(response), content_type='application/json')
def vote_calc(poll_dict, poll_type, poll_id):
poll_connect = poll_store()
votes = dict()
total = 0
for idx, answer in poll_dict['answers'].iteritems():
votes.update({idx: {'count': poll_connect.get_answers(poll_type, poll_id, idx).count()}})
total += votes[idx]['count']
for key, vote in votes.iteritems():
vote.update({'percent': floor((float(vote['count']) / total) * 100) if total else 0})
return votes
def poll_data(poll_type, poll_id, user_id):
poll_connect = poll_store()
poll_dict = poll_connect.get_poll(poll_type, poll_id)
user_answered = poll_connect.user_answered(poll_type, poll_id, user_id)
votes = vote_calc(poll_dict, poll_type, poll_id)
    expired = False
if poll_dict['expiration'] is not None:
        if poll_dict['expiration'] <= timezone.now():
expired = True
data = {'question': poll_dict['question'],
'answers': poll_dict['answers'],
'expiration': poll_dict['expiration'],
'expired': expired,
'user_answered': user_answered,
'votes': votes,
'poll_type': poll_dict['type'],
'poll_id': poll_dict['identifier'],
}
return data
def poll_view(request, poll_type, poll_id):
data = poll_data(poll_type, poll_id, request.user.id)
return render_to_response('polls/' + poll_type + '_poll.html', data)
def poll_vote(request):
try:
poll_type = request.POST.get('poll_type')
poll_id = request.POST.get('poll_id')
vote = request.POST.get('vote')
poll_connect = poll_store()
poll_connect.set_answer(poll_type, poll_id, request.user.id, vote)
poll_dict = poll_connect.get_poll(poll_type, poll_id)
votes = vote_calc(poll_dict, poll_type, poll_id)
response = {'Success': True, 'Votes': votes, 'Answers': poll_dict['answers']}
except Exception as e:
response = {'Success': False, 'Error': 'Error: {0}'.format(e)}
return HttpResponse(json.dumps(response), content_type='application/json')
def get_post_array(post, name):
"""
Gets array values from a POST.
"""
output = dict()
for key in post.keys():
value = urllib2.unquote(post.get(key))
if key.startswith(name + '[') and not value == 'undefined':
start = key.find('[')
i = key[start + 1:-1]
output.update({i: value})
return output
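# Example (illustrative): for POST data {'answers[0]': 'Yes', 'answers[1]': 'No'},
# get_post_array(request.POST, 'answers') returns {'0': 'Yes', '1': 'No'}.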
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/keras/optimizers.py
|
Python
|
mit
| 25,887
| 0.000579
|
from __future__ import absolute_import
import six
import copy
from six.moves import zip
from . import backend as K
from .utils.generic_utils import serialize_keras_object
from .utils.generic_utils import deserialize_keras_object
if K.backend() == 'tensorflow':
import tensorflow as tf
def clip_norm(g, c, n):
if c <= 0: # if clipnorm == 0 no need to add ops to the graph
return g
# tf require using a special op to multiply IndexedSliced by scalar
if K.backend() == 'tensorflow':
condition = n >= c
then_expression = tf.scalar_mul(c / n, g)
else_expression = g
        # saving the shape to avoid converting sparse tensor to dense
if isinstance(then_expression, tf.Tensor):
g_shape = copy.copy(then_expression.get_shape())
elif isinstance(then_expression, tf.IndexedSlices):
g_shape = copy.copy(then_expression.dense_shape)
        if condition.dtype != tf.bool:
condition = tf.cast(condition, 'bool')
g = tf.cond(condition,
lambda: then_expression,
lambda: else_expression)
if isinstance(then_expression, tf.Tensor):
g.set_shape(g_shape)
elif isinstance(then_expression, tf.IndexedSlices):
g._dense_shape = g_shape
else:
g = K.switch(K.greater_equal(n, c), g * c / n, g)
return g
class Optimizer(object):
"""Abstract optimizer base class.
Note: this is the parent class of all optimizers, not an actual optimizer
that can be used for training models.
All Keras optimizers support the following keyword arguments:
clipnorm: float >= 0. Gradients will be clipped
when their L2 norm exceeds this value.
clipvalue: float >= 0. Gradients will be clipped
when their absolute value exceeds this value.
"""
def __init__(self, **kwargs):
allowed_kwargs = {'clipnorm', 'clipvalue'}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
def get_updates(self, params, constraints, loss):
raise NotImplementedError
def get_gradients(self, loss, params):
grads = K.gradients(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
return grads
def set_weights(self, weights):
"""Sets the weights of the optimizer, from Numpy arrays.
Should only be called after computing the gradients
(otherwise the optimizer has no weights).
# Arguments
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the optimizer (i.e. it should match the
output of `get_weights`).
# Raises
ValueError: in case of incompatible weight shapes.
"""
params = self.weights
weight_value_tuples = []
param_values = K.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError('Optimizer weight shape ' +
str(pv.shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
K.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current value of the weights of the optimizer.
# Returns
A list of numpy arrays.
"""
return K.batch_get_value(self.weights)
def get_config(self):
config = {}
if hasattr(self, 'clipnorm'):
config['clipnorm'] = self.clipnorm
if hasattr(self, 'clipvalue'):
config['clipvalue'] = self.clipvalue
return config
@classmethod
def from_config(cls, config):
return cls(**config)
class SGD(Optimizer):
"""Stochastic gradient descent optimizer.
Includes support for momentum,
learning rate decay, and Nesterov momentum.
# Arguments
lr: float >= 0. Learning rate.
momentum: float >= 0. Parameter updates momentum.
decay: float >= 0. Learning rate decay over each update.
nesterov: boolean. Whether to apply Nesterov momentum.
"""
def __init__(self, lr=0.01, momentum=0., decay=0.,
nesterov=False, **kwargs):
super(SGD, self).__init__(**kwargs)
self.iterations = K.variable(0., name='iterations')
self.lr = K.variable(lr, name='lr')
self.momentum = K.variable(momentum, name='momentum')
self.decay = K.variable(decay, name='decay')
self.initial_decay = decay
self.nesterov = nesterov
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * self.iterations))
        self.updates.append(K.update_add(self.iterations, 1))
# momentum
shapes = [K.get_variable_shape(p) for p in params]
moments = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + moments
for p, g, m in zip(params, grads, moments):
v = self.momentum * m - lr * g # velocity
self.updates.append(K.update(m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
# apply constraints
if p in constraints:
c = constraints[p]
new_p = c(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'momentum': float(K.get_value(self.momentum)),
'decay': float(K.get_value(self.decay)),
'nesterov': self.nesterov}
base_config = super(SGD, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
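# Minimal usage sketch (illustrative only; `model` is a hypothetical Keras model
# defined elsewhere):
#
#   sgd = SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True, clipnorm=1.0)
#   model.compile(optimizer=sgd, loss='categorical_crossentropy')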
class RMSprop(Optimizer):
"""RMSProp optimizer.
It is recommended to leave the parameters of this optimizer
at their default values
(except the learning rate, which can be freely tuned).
This optimizer is usually a good choice for recurrent
neural networks.
# Arguments
lr: float >= 0. Learning rate.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor.
decay: float >= 0. Learning rate decay over each update.
# References
- [rmsprop: Divide the gradient by a running average of its recent magnitude](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
"""
def __init__(self, lr=0.001, rho=0.9, epsilon=1e-8, decay=0.,
**kwargs):
super(RMSprop, self).__init__(**kwargs)
self.lr = K.variable(lr, name='lr')
self.rho = K.variable(rho, name='rho')
self.epsilon = epsilon
self.decay = K.variable(decay, name='decay')
self.initial_decay = decay
self.iterations = K.variable(0., name='iterations')
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
accumulators = [K.zeros(K.get_variable_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = accumulators
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * self.iterations))
self.updates.append(K.update_add(self.iterations, 1))
|
BobStevens/micropython
|
BMP085/BMP085.py
|
Python
|
mit
| 8,779
| 0.015947
|
#!/usr/bin/python
from pyb import I2C
import time
# ===========================================================================
# BMP085 Class
# Based mostly on Adafruit_BMP085.py
# For use with a Micro Python pyboard http://micropython.org
# and a BMP180 Barometric Pressure/Temperature/Altitude Sensor
# tested with http://www.adafruit.com/products/1603
# ===========================================================================
class BMP085 :
i2c = None
# I guess these can be public
# I2C address
BMP085_ADDRESS = 0x77
# Operating Modes
BMP085_ULTRALOWPOWER = 0
BMP085_STANDARD = 1
BMP085_HIGHRES = 2
BMP085_ULTRAHIGHRES = 3
    # BMP085 Registers
__BMP085_CAL_AC1 = 0xAA # R Calibration data (16 bits)
__BMP085_CAL_AC2 = 0xAC # R Calibration data (16 bits)
__BMP085_CAL_AC3 = 0xAE # R Calibration data (16 bits)
__BMP085_CAL_AC4 = 0xB0 # R Calibration data (16 bits)
__BMP085_CAL_AC5 = 0xB2 # R Calibration data (16 bits)
    __BMP085_CAL_AC6 = 0xB4 # R Calibration data (16 bits)
__BMP085_CAL_B1 = 0xB6 # R Calibration data (16 bits)
__BMP085_CAL_B2 = 0xB8 # R Calibration data (16 bits)
__BMP085_CAL_MB = 0xBA # R Calibration data (16 bits)
__BMP085_CAL_MC = 0xBC # R Calibration data (16 bits)
__BMP085_CAL_MD = 0xBE # R Calibration data (16 bits)
__BMP085_CONTROL = 0xF4
__BMP085_TEMPDATA = 0xF6
__BMP085_PRESSUREDATA = 0xF6
__BMP085_READTEMPCMD = 0x2E
__BMP085_READPRESSURECMD = 0x34
# Private Fields
_cal_AC1 = 0
_cal_AC2 = 0
_cal_AC3 = 0
_cal_AC4 = 0
_cal_AC5 = 0
_cal_AC6 = 0
_cal_B1 = 0
_cal_B2 = 0
_cal_MB = 0
_cal_MC = 0
_cal_MD = 0
# Constructor
def __init__(self, port=1, address=BMP085_ADDRESS, mode=BMP085_STANDARD, debug=False):
self.i2c = I2C(port, I2C.MASTER)
self.address = address
self.debug = debug
# Make sure the specified mode is in the appropriate range
if ((mode < 0) | (mode > 3)):
if (self.debug):
print("Invalid Mode: Using STANDARD by default")
self.mode = self.BMP085_STANDARD
else:
self.mode = mode
# Read the calibration data
self.readCalibrationData()
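    # Minimal usage sketch (illustrative only; assumes the sensor is wired to
    # I2C bus 1 of the pyboard):
    #   bmp = BMP085(port=1, mode=BMP085.BMP085_STANDARD)
    #   print(bmp.readTemperature(), bmp.readPressure())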
def readS16(self, register):
"Reads a signed 16-bit value"
hi = ord(self.i2c.mem_read(1, self.address, register))
if hi > 127: hi -= 256
lo = ord(self.i2c.mem_read(1, self.address, register+1))
return (hi << 8) + lo
def readU16(self, register):
"Reads an unsigned 16-bit value"
hi = ord(self.i2c.mem_read(1, self.address, register))
lo = ord(self.i2c.mem_read(1, self.address, register+1))
return (hi << 8) + lo
def readCalibrationData(self):
"Reads the calibration data from the IC"
self._cal_AC1 = self.readS16(self.__BMP085_CAL_AC1) # INT16
self._cal_AC2 = self.readS16(self.__BMP085_CAL_AC2) # INT16
self._cal_AC3 = self.readS16(self.__BMP085_CAL_AC3) # INT16
self._cal_AC4 = self.readU16(self.__BMP085_CAL_AC4) # UINT16
self._cal_AC5 = self.readU16(self.__BMP085_CAL_AC5) # UINT16
self._cal_AC6 = self.readU16(self.__BMP085_CAL_AC6) # UINT16
self._cal_B1 = self.readS16(self.__BMP085_CAL_B1) # INT16
self._cal_B2 = self.readS16(self.__BMP085_CAL_B2) # INT16
self._cal_MB = self.readS16(self.__BMP085_CAL_MB) # INT16
self._cal_MC = self.readS16(self.__BMP085_CAL_MC) # INT16
self._cal_MD = self.readS16(self.__BMP085_CAL_MD) # INT16
if (self.debug):
self.showCalibrationData()
def showCalibrationData(self):
"Displays the calibration values for debugging purposes"
print("DBG: AC1 = %6d" % (self._cal_AC1))
print("DBG: AC2 = %6d" % (self._cal_AC2))
print("DBG: AC3 = %6d" % (self._cal_AC3))
print("DBG: AC4 = %6d" % (self._cal_AC4))
print("DBG: AC5 = %6d" % (self._cal_AC5))
print("DBG: AC6 = %6d" % (self._cal_AC6))
print("DBG: B1 = %6d" % (self._cal_B1))
print("DBG: B2 = %6d" % (self._cal_B2))
print("DBG: MB = %6d" % (self._cal_MB))
print("DBG: MC = %6d" % (self._cal_MC))
print("DBG: MD = %6d" % (self._cal_MD))
def readRawTemp(self):
"Reads the raw (uncompensated) temperature from the sensor"
self.i2c.mem_write(self.__BMP085_READTEMPCMD, self.address, self.__BMP085_CONTROL)
time.sleep(0.005) # Wait 5ms
raw = self.readU16(self.__BMP085_TEMPDATA)
if (self.debug):
print("DBG: Raw Temp: 0x%04X (%d)" % (raw & 0xFFFF, raw))
return raw
def readRawPressure(self):
"Reads the raw (uncompensated) pressure level from the sensor"
self.i2c.mem_write(self.__BMP085_READPRESSURECMD + (self.mode << 6), self.address, self.__BMP085_CONTROL)
if (self.mode == self.BMP085_ULTRALOWPOWER):
time.sleep(0.005)
elif (self.mode == self.BMP085_HIGHRES):
time.sleep(0.014)
elif (self.mode == self.BMP085_ULTRAHIGHRES):
time.sleep(0.026)
else:
time.sleep(0.008)
msb = ord(self.i2c.mem_read(1, self.address, self.__BMP085_PRESSUREDATA))
lsb = ord(self.i2c.mem_read(1, self.address, self.__BMP085_PRESSUREDATA+1))
xlsb = ord(self.i2c.mem_read(1, self.address, self.__BMP085_PRESSUREDATA+2))
raw = ((msb << 16) + (lsb << 8) + xlsb) >> (8 - self.mode)
if (self.debug):
print("DBG: Raw Pressure: 0x%04X (%d)" % (raw & 0xFFFF, raw))
return raw
def readTemperature(self):
"Gets the compensated temperature in degrees celcius"
UT = 0
X1 = 0
X2 = 0
B5 = 0
temp = 0.0
# Read raw temp before aligning it with the calibration values
UT = self.readRawTemp()
X1 = ((UT - self._cal_AC6) * self._cal_AC5) >> 15
X2 = (self._cal_MC << 11) / (X1 + self._cal_MD)
B5 = int(X1 + X2)
temp = ((B5 + 8) >> 4) / 10.0
if (self.debug):
print("DBG: Calibrated temperature = %f C" % temp)
return temp
def readPressure(self):
"Gets the compensated pressure in pascal"
UT = 0
UP = 0
B3 = 0
B5 = 0
B6 = 0
X1 = 0
X2 = 0
X3 = 0
p = 0
B4 = 0
B7 = 0
UT = self.readRawTemp()
UP = self.readRawPressure()
# You can use the datasheet values to test the conversion results
# dsValues = True
dsValues = False
if (dsValues):
UT = 27898
UP = 23843
self._cal_AC6 = 23153
self._cal_AC5 = 32757
            self._cal_MB = -32768
self._cal_MC = -8711
self._cal_MD = 2868
self._cal_B1 = 6190
self._cal_B2 = 4
self._cal_AC3 = -14383
self._cal_AC2 = -72
self._cal_AC1 = 408
self._cal_AC4 = 32741
self.mode = self.BMP085_ULTRALOWPOWER
if (self.debug):
self.showCalibrationData()
# True Temperature Calculations
X1 = ((UT - self._cal_AC6) * self._cal_AC5) >> 15
X2 = (self._cal_MC << 11) / (X1 + self._cal_MD)
B5 = int(X1 + X2)
if (self.debug):
print("DBG: X1 = %d" % (X1))
print("DBG: X2 = %d" % (X2))
print("DBG: B5 = %d" % (B5))
print("DBG: True Temperature = %.2f C" % (((B5 + 8) >> 4) / 10.0))
# Pressure Calculations
B6 = B5 - 4000
X1 = (self._cal_B2 * (B6 * B6) >> 12) >> 11
X2 = (self._cal_AC2 * B6) >> 11
X3 = X1 + X2
B3 = (((self._cal_AC1 * 4 + X3) << self.mode) + 2) / 4
if (self.debug):
print("DBG: B6 = %d" % (B6))
print("DBG: X1 = %d" % (X1))
print("DBG: X2 = %d" % (X2))
print("DBG: X3 = %d" % (X3))
X1 = (self._cal_AC3 * B6) >> 13
X2 = (self._cal_B1 * ((B6 * B6) >> 12)) >> 16
X3 = ((X1 + X2) + 2) >> 2
B4 = (self._cal_AC4 * (X3 + 32768)) >> 15
B7 = (UP - B3) * (50000 >> self.mode)
if (self.debug):
print("DBG: X1 = %d" % (X1))
print("DBG: X2 = %d" % (X2))
print("DBG: X3 = %d" % (X3))
print("DBG: B4 = %d" % (B4))
print("DBG: B7 = %d" % (B7))
if (B7 < 0x80000000):
p = int((B7 * 2) / B4)
else:
p = int((B7 / B4) * 2)
if (self.debug):
print("DBG: X1 = %d" % (X1))
X1 = (p >> 8) * (p >> 8)
X1 = (X1 * 3038) >>
|
BrendanLeber/adventofcode
|
2020/20-jurassic_jigsaw/code.py
|
Python
|
mit
| 5,544
| 0.002345
|
import math
import re
from collections import defaultdict
def matches(t1, t2):
t1r = "".join([t[-1] for t in t1])
t2r = "".join([t[-1] for t in t2])
t1l = "".join([t[0] for t in t1])
t2l = "".join([t[0] for t in t2])
t1_edges = [t1[0], t1[-1], t1r, t1l]
t2_edges = [t2[0], t2[-1], t2[0][::-1], t2[-1][::-1], t2l, t2l[::-1], t2r, t2r[::-1]]
for et1 in t1_edges:
for et2 in t2_edges:
if et1 == et2:
return True
return False
def flip(t):
return [l[::-1] for l in t]
# https://stackoverflow.com/a/34347121
def rotate(t):
return [*map("".join, zip(*reversed(t)))]
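# e.g. (illustrative) rotate(["ab", "cd"]) == ["ca", "db"] -- a 90-degree clockwise rotation.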
def set_corner(cor, right, down):
rr = "".join([t[-1] for t in right])
dr = "".join([t[-1] for t in down])
rl = "".join([t[0] for t in right])
dl = "".join([t[0] for t in down])
r_edges = [right[0], right[-1], right[0][::-1], right[-1][::-1], rr, rr[::-1], rl, rl[::-1]]
d_edges = [down[0], down[-1], down[0][::-1], down[-1][::-1], dr, dr[::-1], dl, dl[::-1]]
for _ in range(2):
cor = flip(cor)
for _ in range(4):
cor = rotate(cor)
if cor[-1] in d_edges and "".join([t[-1] for t in cor]) in r_edges:
return cor
return None
def remove_border(t):
return [x[1:-1] for x in t[1:-1]]
def set_left_edge(t1, t2):
ref = "".join([t[-1] for t in t1])
for _ in range(2):
t2 = flip(t2)
for _ in range(4):
t2 = rotate(t2)
if "".join([t[0] for t in t2]) == ref:
return t2
return None
def set_upper_edge(t1, t2):
ref = t1[-1]
for _ in range(2):
t2 = flip(t2)
for _ in range(4):
t2 = rotate(t2)
if t2[0] == ref:
return t2
return None
def assemble_image(img, tiles):
whole_image = []
for l in img:
slice = [""] * len(tiles[l[0]])
for t in l:
for i, s in enumerate(tiles[t]):
slice[i] += s
for s in slice:
whole_image.append(s)
return whole_image
def part1():
tiles = defaultdict(list)
for l in open("input.txt"):
if "Tile" in l:
tile = int(re.findall(r"\d+", l)[0])
elif "." in l or "#" in l:
tiles[tile].append(l.strip())
connected = defaultdict(set)
for i in tiles:
for t in tiles:
if i == t:
continue
if matches(tiles[i], tiles[t]):
connected[i].add(t)
connected[t].add(i)
prod = 1
for i in connected:
if len(connected[i]) == 2:
prod *= i
print(prod)
def part2():
tiles = defaultdict(list)
for l in open("input.txt"):
if "Tile" in l:
tile = int(re.findall(r"\d+", l)[0])
elif "." in l or "#" in l:
tiles[tile].append(l.strip())
connected = defaultdict(set)
for i in tiles:
for t in tiles:
if i == t:
continue
if matches(tiles[i], tiles[t]):
connected[i].add(t)
connected[t].add(i)
sz = int(math.sqrt(len(connected)))
image = [[0 for _ in range(sz)] for _ in range(sz)]
for i in connected:
if len(connected[i]) == 2:
corner = i
break
image[0][0] = corner
added = {corner}
for y in range(1, sz):
pos = connected[image[0][y - 1]]
for cand in pos:
if cand not in added and len(connected[cand]) < 4:
image[0][y] = cand
added.add(cand)
break
for x in range(1, sz):
for y in range(sz):
pos = connected[image[x - 1][y]]
for cand in pos:
if cand not in added:
image[x][y] = cand
added.add(cand)
break
tiles[image[0][0]] = set_corner(tiles[image[0][0]], tiles[image[0][1]], tiles[image[1][0]])
for y, l in enumerate(image):
if y != 0:
prv = image[y - 1][0]
tiles[l[0]] = set_upper_edge(tiles[prv], tiles[l[0]])
for x, tile in enumerate(l):
if x != 0:
prv = image[y][x - 1]
tiles[tile] = set_left_edge(tiles[prv], tiles[tile])
for t in tiles:
tiles[t] = remove_border(tiles[t])
image = assemble_image(image, tiles)
ky = 0
    monster = set()
for l in open("monster.txt").read().split("\n"):
kx = len(l)
for i, ch in enumerate(l):
if ch == "#":
monster.add((i, ky))
ky += 1
for _ in range(2):
image = flip(image)
for _ in range(4):
image = rotate(image)
for x in range(0, len(image) - kx):
            for y in range(0, len(image) - ky):
parts = []
for i, p in enumerate(monster):
dx = x + p[0]
dy = y + p[1]
parts.append(image[dy][dx] == "#")
if all(parts):
for p in monster:
dx = x + p[0]
dy = y + p[1]
image[dy] = image[dy][:dx] + "O" + image[dy][dx + 1 :]
with open("output.txt", "w+") as f:
for l in rotate(rotate(rotate(image))):
f.write(l + "\n")
print(sum([l.count("#") for l in image]))
if __name__ == "__main__":
part1()
part2()
|
rpavlik/chromium
|
grabcmake.py
|
Python
|
bsd-3-clause
| 3,469
| 0.025368
|
import subprocess
import os
pathToGrabber = os.path.abspath("grab.mk")
def getVariable(var):
proc = subprocess.Popen(["make", "--silent", "-f", pathToGrabber, "GETVAR", "VARNAME=%s" % var], stdout = subprocess.PIPE)
return proc.communicate()[0].strip()
def getVariableList(var):
return [item.strip() for item in getVariable(var).split()]
#print getTargetOutput("LIBRARY")
#pri
|
nt getTargetOutput("FILES")
#print getTargetOutput("PROGRAM")
def getSources():
return [
|
"%s.c" % fn.strip() for fn in getVariableList("FILES")]
def getTarget():
ret = {}
prog = getVariable("PROGRAM")
lib = getVariable("LIBRARY")
if len(prog) > 0:
return prog, "add_executable", ""
elif len(lib) > 0:
if getVariable("SHARED") == "1":
return lib, "add_library", "SHARED"
else:
return lib, "add_library", "STATIC"
else:
return None, "", ""
def doDirectory(sourcedir):
print sourcedir
os.chdir(sourcedir)
with open("CMakeLists.txt","w") as cmake:
cmake.write("include_directories(.)\n")
target, targetcommand, targettype = getTarget()
if target is not None:
generated = getVariableList("PRECOMP")
if len(generated) > 0:
cmake.write("set(GENERATED\n")
cmake.writelines(["\t%s\n" % fn for fn in generated])
cmake.write(")\n")
cmake.write("# TODO: generate these files!\n\n\n")
sources = getSources()
cmake.write("set(SOURCES\n")
cmake.writelines(["\t%s\n" % fn for fn in sources if fn not in generated])
cmake.writelines(["\t${CMAKE_CURRENT_BINARY_DIR}/%s\n" % fn for fn in generated])
cmake.writelines(["\t%s\n" % fn for fn in getVariableList("LIB_DEFS") if fn not in generated and fn not in sources])
cmake.write(")\n")
libs = [ lib.replace("-l", "") for lib in getVariableList("LIBRARIES") ]
if getVariable("OPENGL"):
libs.append("${OPENGL_LIBRARIES}")
cmake.write("include_directories(${OPENGL_INCLUDE_DIR})\n")
if getVariable("GLUT"):
libs.append("${GLUT_LIBRARIES}")
cmake.write("include_directories(${GLUT_INCLUDE_DIR})\n")
cmake.write("%s(%s %s ${SOURCES})\n" % (targetcommand, target, targettype))
if len(libs) > 0:
cmake.write("target_link_libraries(%s %s)\n" % (target, " ".join(libs)))
cmake.write("target_link_libraries(%s ${EXTRA_LIBS})\n" % target)
if "-lX11" in getVariableList("LDFLAGS"):
cmake.write("if(X11_PLATFORM)\n")
cmake.write("\ttarget_link_libraries(%s ${X11_LIBRARIES})\n" % target)
cmake.write("endif()\n")
cmake.write("""install(TARGETS %s
LIBRARY DESTINATION lib COMPONENT runtime
ARCHIVE DESTINATION lib COMPONENT dev
RUNTIME DESTINATION bin COMPONENT runtime)\n""" % target)
for copy in getVariableList("LIB_COPIES"):
copytarget = "%s_%s_copy" % (copy, target)
cmake.write("%s(%s %s ${SOURCES})\n" % (targetcommand, copytarget, targettype))
if len(libs) > 0:
cmake.write("target_link_libraries(%s %s)\n" % (copytarget, " ".join(libs)))
cmake.write("target_link_libraries(%s ${EXTRA_LIBS})\n" % copytarget)
cmake.write("""install(TARGETS %s
LIBRARY DESTINATION lib COMPONENT runtime
ARCHIVE DESTINATION lib COMPONENT dev
RUNTIME DESTINATION bin COMPONENT runtime)\n""" % copytarget)
dirs = getVariableList("SUBDIRS")
if len(dirs) > 0:
cmake.writelines(["add_subdirectory(%s)\n" % dirname for dirname in dirs])
sourcedirs = [ dirpath for (dirpath, dirnames, filenames) in os.walk(os.path.abspath(".")) if "Makefile" in filenames]
for sourcedir in sourcedirs:
doDirectory(sourcedir)
|
kexiaojiu/alien_invasion
|
game_functions.py
|
Python
|
gpl-3.0
| 11,663
| 0.009822
|
#coding=utf-8
import sys
import pygame
from bullet import Bullet
from alien import Alien
from time import sleep
def check_key_down_events(event, ai_settings, screen, stats, score, play_button,
        ship, aliens, bullets):
"""响应按键"""
if event.key == pygame.K_RIGHT:
        # Move the ship to the right
ship.moving_right = True
elif event.key == pygame.K_LEFT:
        # Move the ship to the left
ship.moving_left = True
elif event.key == pygame.K_SPACE:
        # Create a new bullet and add it to the bullets group
fire_bullet(ai_settings, screen, ship, bullets)
elif event.key == pygame.K_p:
|
        # Press p to start the game
        start_game(ai_settings, screen, stats, score, play_button, ship, aliens,
            bullets)
elif event.
|
key == pygame.K_q:
        # Press q to quit the game
save_high_score(stats)
sys.exit()
def save_high_score(stats):
"""保存最高分到high_score.txt"""
file_name = stats.store_high_score_file_name
high_score_str = str(stats.high_score)
with open(file_name, 'w') as f_obj:
f_obj.write(high_score_str)
def check_key_up_events(event, ship):
if event.key == pygame.K_RIGHT:
        # Stop moving the ship
ship.moving_right = False
elif event.key == pygame.K_LEFT:
        # Stop moving the ship
ship.moving_left = False
def start_game(ai_settings, screen, stats, score, play_button, ship, aliens,
bullets):
"""开始游戏"""
if not stats.game_active:
        # Reset the game settings
ai_settings.initialize_dynamic_settings()
        # Hide the mouse cursor
pygame.mouse.set_visible(False)
        # Reset the game statistics
stats.rest_stats()
stats.game_active = True
        # Reset the scoreboard images
score.prep_score()
score.prep_high_score()
score.prep_level()
        # Reset the display of remaining ships
score.prep_ships()
        # Empty the lists of aliens and bullets
aliens.empty()
bullets.empty()
        # Create a new fleet and center the ship at the bottom of the screen
create_fleet(ai_settings, screen, ship, aliens)
ship.center_ship()
def check_play_button(ai_settings, screen, stats, score, play_button, ship,
aliens, bullets, mouse_x, mouse_y):
"""在玩家点击Play按钮时候开始游戏"""
button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
if button_clicked:
start_game(ai_settings, screen, stats, score, play_button, ship, aliens,
bullets)
def check_events(ai_settings, screen, stats,score, play_button, ship, aliens,
bullets):
"""监视键盘和鼠标事件"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
save_high_score(stats)
sys.exit()
elif event.type == pygame.KEYDOWN:
            check_key_down_events(event, ai_settings, screen, stats, score,
                play_button, ship, aliens, bullets)
elif event.type == pygame.KEYUP:
check_key_up_events(event, ship)
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
check_play_button(ai_settings, screen, stats, score, play_button,
ship, aliens, bullets, mouse_x, mouse_y)
def check_high_score(stats, score):
"""检查是否诞生了最高分"""
if stats.score > stats.high_score:
stats.high_score = stats.score
score.prep_high_score()
def update_bullets(ai_settings, screen, stats, ship, aliens, bullets, score):
"""更新子弹位置,删除已经消失的子弹"""
# 更新子弹位置
bullets.update()
    # Get rid of bullets that have disappeared
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
check_bullet_alien_collisions(ai_settings, screen, stats, ship, aliens,
bullets, score)
def check_bullet_alien_collisions(ai_settings, screen, stats, ship, aliens,
bullets, score):
    # Check whether any bullets have hit aliens; if so, remove both the bullet and the alien
collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    # Award points for each alien that is shot down
if collisions:
        # Play the sound effect for hitting an alien
play_sound_effect_bomp(ai_settings)
        for hit_aliens in collisions.values():
            stats.score += ai_settings.alien_points * len(hit_aliens)
score.prep_score()
            # Check whether a new high score has been set
check_high_score(stats, score)
    # Level up
start_new_level(ai_settings, screen, stats, ship, aliens, bullets, score)
def start_new_level(ai_settings, screen, stats, ship, aliens, bullets, score):
if len(aliens) == 0:
        # Destroy existing bullets, advance a level, speed up the game, and create a new fleet
bullets.empty()
ai_settings.increase_speed()
stats.level += 1
score.prep_level()
create_fleet(ai_settings, screen, ship, aliens)
def play_sound_effect_shot(ai_settings):
    # Play the sound effect for firing a bullet
file_sound_shot = ai_settings.file_sound_shot
try:
sound_effect_shot = pygame.mixer.Sound(file_sound_shot)
sound_effect_shot.play()
except pygame.error:
print("The file " + file_sound_shot + " does not exist!")
def play_sound_effect_bomp(ai_settings):
    # Play the sound effect for hitting an alien
file_sound_bomp = ai_settings.file_sound_bomp
try:
sound_effect_bomp = pygame.mixer.Sound(file_sound_bomp)
sound_effect_bomp.play()
except pygame.error:
print("The file " + file_sound_bomp + " does not exist!")
def play_sound_effect_game_over(ai_settings):
    # Play the game-over sound effect
file_sound_game_over = ai_settings.file_sound_game_over
try:
sound_effect_game_over = pygame.mixer.Sound(file_sound_game_over)
sound_effect_game_over.play()
except pygame.error:
print("The file " + file_sound_game_over + " does not exist!")
def fire_bullet(ai_settings, screen, ship, bullets):
"""如果没有超过子弹数上限,就发射一颗子弹"""
if len(bullets) < ai_settings.bullets_allowed:
        # Play the sound effect for firing a bullet
play_sound_effect_shot(ai_settings)
        # Create a new bullet and add it to the bullets group
net_bullet = Bullet(ai_settings, screen, ship)
bullets.add(net_bullet)
def get_number_aliens_x(ai_settings, alien_width):
"""计算每行可以容纳多少外星人"""
available_space_x = ai_settings.screen_width - 2 * alien_width
number_aliens_x = int(available_space_x / (2 * alien_width))
return number_aliens_x
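# Editor's note (worked example with assumed values): for a 1200-pixel-wide screen
# and 60-pixel-wide aliens, available_space_x = 1200 - 2 * 60 = 1080, so
# number_aliens_x = int(1080 / (2 * 60)) = 9 aliens per row.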
def get_number_rows(ai_settings, ship_height, alien_height):
"""计算屏幕可容纳多少行外星人"""
available_space_y = ai_settings.screen_height - 3 * alien_height - ship_height
number_rows = int(available_space_y / (2 * alien_height))
return number_rows
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
"""创建一个外星人并把它加入当前行"""
alien = Alien(ai_settings, screen)
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
aliens.add(alien)
def create_fleet(ai_settings, screen, ship, aliens):
"""创建外星人群"""
# 创建一个外星人并计算一行可以容纳多少外星人
# 外星人间距为外星人宽度
alien = Alien(ai_settings, screen)
number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)
for row_number in range(number_rows):
        # Create a row of aliens
for alien_number in range(number_aliens_x):
            # Create an alien and add it to the current row
create_alien(ai_settings, screen, aliens, alien_number, row_number)
def check_fleet_edges(ai_settings, aliens):
"""有外星人到达屏幕边缘时采取相应措施"""
for alien in aliens:
if alien.check_edges():
change_fleet_direction(ai_settings, aliens)
break
def change_fleet_direction(ai_settings, aliens):
"""将外星人下移并改变他们的方向"""
for alien in aliens.sprites():
alien.rect.y += ai_settings.fleet_drop_speed
ai_settings.fleet_direction *= -1
def update_aliens(ai_settings, stats, score, screen, ship, aliens, bullets):
"""检查是否有外星人到达屏幕边缘,然后更新外星人位置"""
check_fleet_edges(ai_settings, aliens)
aliens.update()
    # Check for collisions between aliens and the ship
if pygame.sprite.spritecollideany(ship, aliens):
ship_hit(ai_settings, stats, score, screen, ship, aliens, bullets)
|
jinmingda/MicroorganismSearchEngine
|
mse/release.py
|
Python
|
mit
| 449
| 0
|
# -*- co
|
ding: utf-8 -*-
# Release information about mse
version = '1.0'
# description = "Your plan to rule the world"
# long_description = "More description about your plan"
# author = "Your Name Here"
# email = "YourEmail@YourDomain"
# copyright = "Copyright 2011 - the year of the Rabbit"
# If it's open source, you might want to
|
specify these:
# url = 'http://yourcool.site/'
# download_url = 'http://yourcool.site/download'
# license = 'MIT'
|
ArmchairArmada/COS125Project01
|
src/tests/test_assets.py
|
Python
|
mit
| 239
| 0.004184
|
#!/usr/bin/env python
|
"""
Unit tests for the assets module
"""
# TODO: Write actual tests.
import unittest
import assets
class TestAssets(unittest.TestCase):
def setUp(self):
|
pass
def test_getImage(self):
pass
|
t3dev/odoo
|
addons/project/__manifest__.py
|
Python
|
gpl-3.0
| 1,315
| 0
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Project',
'version': '1.1',
'website': 'https://www.odoo.com/page/project-management',
'category': 'Operations/Project',
'sequence': 10,
'summary': 'Organize and schedule your projects ',
'depends': [
'base_setup',
'mail',
'portal',
'rating',
'resource',
'web',
'web_tour',
'digest',
],
'description': "",
'data': [
'security/project_security.xml',
'security/ir.model.access.csv',
'report/project_report_views.xml',
'views/digest_views.xml',
'views/rating_views.xml',
'views/project_views.xml',
'views/res_partner_vie
|
ws.xml',
'views/res_config_settings_views.xml',
'views/mail_activity_views.xml',
'views/project_assets.xml',
'views/project_portal_templates.xml',
'views/project_rating_templates.xml',
'data/digest_data.xml',
'data/project_mail_template_data.xml',
'data/project_data.xml',
],
'qweb': ['static/src/xml/project.xml'],
'demo': ['data/project_demo.xml'],
|
'test': [
],
'installable': True,
'auto_install': False,
'application': True,
}
|
ThomasSweijen/yadesolute2
|
examples/test/vtk-exporter/vtkExporter.py
|
Python
|
gpl-2.0
| 976
| 0.067623
|
from yade import export,polyhedra_utils
mat = PolyhedraMat()
O.bodies.append([
sphere((0,0,0),1),
sphere((0,3,0),1),
sphere((0,2,4),2),
sphere((0,5,2),1.5),
facet([Vector3(0,-3,-1),Vector3(0,-2,5),Vector3(5,4,0)]),
facet([Vector3(0,-3,-1),Vector3(0,-2,5),Vector3(-5,4,0)]),
polyhedra_utils.polyhedra(mat,(1,2,3),0),
polyhedra_utils.polyhedralBall(2,20,mat,(-2,-2,4)),
])
O.bodies[-1].state.pos = (-2,-2,-2)
O.bodies[-1].state.ori = Quaternion((1,1,2),1)
O.bodies[-2].state.pos = (-2,-2,3)
O.bodies[-2].state.ori = Quaternion((1,2,0),1)
createInteraction(0,1)
createInteraction(0,2)
createInteraction(0,3)
createInteraction(1,2)
createInteraction(1,3)
createInteraction(2,3)
O.step()
vtkExporter = export.VTKExporter('vtkExporterTesting')
vtkExporter.exportSpheres
|
(what=[('dist','b.state.pos.norm()')])
vtkExporter.exportFacets(what=[('pos','b.state.pos')])
vtkExporter.expor
|
tInteractions(what=[('kn','i.phys.kn')])
vtkExporter.exportPolyhedra(what=[('n','b.id')])
|
AntonKhorev/spb-budget-db
|
4-xls/main.py
|
Python
|
bsd-2-clause
| 21,223
| 0.05248
|
#!/usr/bin/env python3
inputFilename='../3-db.out/db.sql'
outputDirectory='../4-xls.out'
import os
if not os.path.exists(outputDirectory):
os.makedirs(outputDirectory)
from decimal import Decimal
import collections
import copy
import sqlite3
import xlwt3 as xlwt
import xlsxwriter
class LevelTable:
def __init__(self,levelColLists,levelNames,fakeYearCols,fakeYearNameFns,yearsInAppendices,rows,nHeaderRows=None):
if nHeaderRows is None:
nHeaderRows=1+len(fakeYearCols)
self.fakeYearNameFns=fakeYearNameFns
# years below are not really years - they are spreadsheet columns
def rowFakeYear(row):
return tuple(row[k] for k in fakeYearCols)
# levelCols are spreadsheet rows, db col
|
s
self.levelColLists=levelColLists
if type(yearsInAppendices) is dict:
self.yearsInAppendices=yearsInAppendices
self.years=[year for appendix,years in sorted(yearsInAppendices.items()) for year in years]
else:
self.yearsInAppendices={} # don't use 'numbers in appendices'
self.years=yearsInAppendices
self.nHeaderRows=nHeaderRows
self.outRows=[[None]*len(self.yearsInAppendic
|
es)+['Итого']+[None for levelColList in self.levelColLists for col in levelColList]+[None]*len(self.years)]
self.levels=[-1]
self.formulaValues=collections.defaultdict(lambda: collections.defaultdict(lambda: Decimal(0)))
def outputLevelRow(row,level):
outRow=[None]*len(self.yearsInAppendices)
outRow.append(row[levelNames[level]])
for l,levelColList in enumerate(self.levelColLists):
for levelCol in levelColList:
if l<=level:
outRow.append(row[levelCol])
else:
outRow.append(None)
outRow+=[None]*len(self.years)
return outRow
nLevels=len(self.levelColLists)
nFirstAmountCol=len(self.outRows[0])-len(self.years)
outRow=None
insides=[None]*nLevels
summands=[[]]+[None]*nLevels
sums=[0]+[None]*nLevels
nRow=0
def makeClearSumsForLevel(level):
levelSummands=copy.deepcopy(summands[level+1])
levelSums=copy.deepcopy(sums[level+1])
def getColChar(n):
a=ord('A')
radix=ord('Z')-a+1
if n<radix:
return chr(a+n)
else:
return chr(a+n//radix-1)+chr(a+n%radix)
def fn(clearRow):
if level<nLevels and levelSummands:
for y in range(len(self.years)):
colChar=getColChar(nFirstAmountCol+y)
# +
# self.outRows[levelSums][nFirstAmountCol+y]='='+'+'.join(
# colChar+str(1+self.nHeaderRows+summand) for summand in levelSummands
# )
# SUBTOTAL
self.outRows[levelSums][nFirstAmountCol+y]='=SUBTOTAL(9,'+colChar+str(1+self.nHeaderRows+levelSums)+':'+colChar+str(1+self.nHeaderRows+clearRow-1)+')'
#
self.formulaValues[levelSums][nFirstAmountCol+y]=sum(
self.formulaValues[summand][nFirstAmountCol+y] for summand in levelSummands
)
return fn
def putAppendixNumber(nCol):
if self.outRows[-1][nCol]:
return
nRow=len(self.outRows)-1
level=self.levels[nRow]
putForLevels=[(level,nRow)]
while True:
nRow-=1
if self.levels[nRow]<0:
break
if self.levels[nRow]>level or (self.levels[nRow]==level and not self.outRows[nRow][nCol]):
continue
if self.outRows[nRow][nCol]:
break
assert self.levels[nRow]==level-1, 'on row,col '+str(nRow)+','+str(nCol)+' met level '+str(self.levels[nRow])+' while on level '+str(level)
level-=1
putForLevels.insert(0,(level,nRow))
nRow0=nRow
for level,nRow in putForLevels:
if self.levels[nRow0]==level-1:
if self.outRows[nRow0][nCol] is None:
self.outRows[nRow][nCol]='1.'
else:
self.outRows[nRow][nCol]=self.outRows[nRow0][nCol]+'1.'
else:
assert self.levels[nRow0]==level
a=str(self.outRows[nRow0][nCol]).split('.')
a[-2]=str(int(a[-2])+1)
self.outRows[nRow][nCol]='.'.join(a)
nRow0=nRow
for row in rows:
clearRow=0
clearStack=[]
for level,levelColList in enumerate(self.levelColLists):
nextInside=tuple(row[col] for col in levelColList)
if clearRow or insides[level]!=nextInside:
nRow+=1
clearStack.append(makeClearSumsForLevel(level))
summands[level+1]=[]
sums[level+1]=nRow
summands[level].append(nRow)
outRow=outputLevelRow(row,level)
self.outRows.append(outRow)
self.levels.append(level)
insides[level]=nextInside
if not clearRow:
clearRow=nRow
for fn in reversed(clearStack):
fn(clearRow)
nCol=nFirstAmountCol+self.years.index(rowFakeYear(row))
self.formulaValues[nRow][nCol]=outRow[nCol]=Decimal(row['amount'])/1000
for nCol,(appendix,yearsInAppendix) in enumerate(self.yearsInAppendices.items()):
if rowFakeYear(row) in yearsInAppendix:
putAppendixNumber(nCol)
clearStack=[]
for level in range(-1,nLevels):
clearStack.append(makeClearSumsForLevel(level))
for fn in reversed(clearStack):
fn(nRow+1)
def makeSheetHeader(self,columns,setCellWidth,writeCellText,writeMergedCellText):
skip={}
nRow=self.nHeaderRows-len(self.fakeYearNameFns)
for nCol,col in enumerate(columns):
setCellWidth(nCol,col['width'])
if type(col['text']) is list:
for nRow2,textLine in enumerate(col['text']):
if nRow2 in skip and nCol<skip[nRow2]:
continue
nCol2=nCol
while nCol2<len(columns):
if type(columns[nCol2]['text']) is not list or columns[nCol2]['text'][nRow2]!=textLine:
break
nCol2+=1
if nCol2>nCol+1:
skip[nRow2]=nCol2
writeMergedCellText(nRow+nRow2,nCol,nRow+nRow2,nCol2-1,textLine,col['headerStyle'])
else:
writeCellText(nRow+nRow2,nCol,textLine,col['headerStyle'])
else:
if len(self.fakeYearNameFns)>1:
writeMergedCellText(nRow,nCol,self.nHeaderRows-1,nCol,col['text'],col['headerStyle'])
else:
writeCellText(nRow,nCol,col['text'],col['headerStyle'])
def makeXls(self,tableTitle,outputFilename,tableSubtitle=None):
nLevels=len(self.levelColLists)
wb=xlwt.Workbook()
ws=wb.add_sheet('expenditures')
styleTableTitle=xlwt.easyxf('font: bold on, height 240')
styleHeader=xlwt.easyxf('font: bold on; align: wrap on')
styleThinHeader=xlwt.easyxf('font: bold on, height 180; align: wrap on')
styleVeryThinHeader=xlwt.easyxf('font: height 140; align: wrap on')
styleStandard=xlwt.easyxf('')
styleShallowStandard=xlwt.easyxf('font: bold on')
styleAmount=xlwt.easyxf(num_format_str='#,##0.0;-#,##0.0;""')
styleShallowAmount=xlwt.easyxf('font: bold on',num_format_str='#,##0.0;-#,##0.0;""')
codeColumnsData={
'departmentCode': {'text':'Код ведомства', 'width':4, 'headerStyle':styleVeryThinHeader, 'cellStyle':styleStandard,'shallowCellStyle':styleShallowStandard},
'superSectionCode': {'text':'Код надраздела', 'width':5, 'headerStyle':styleThinHeader, 'cellStyle':styleStandard,'shallowCellStyle':styleShallowStandard},
'sectionCode': {'text':'Код раздела', 'width':5, 'headerStyle':styleThinHeader, 'cellStyle':styleStandard,'shallowCellStyle':styleShallowStandard},
'categoryCode': {'text':'Код целевой статьи', 'width':8, 'headerStyle':styleThinHeader, 'cellStyle':styleStandard,'shallowCellStyle':styleShallowStandard},
'typeCode': {'text':'Код вида расходов', 'width':4, 'headerStyle':styleVeryThinHeader, 'cellStyle':styleStandard,'shallowCellStyle':styleShallowStandard},
}
columns=[
{'text':'№ в приложении '+str(appendix),'width':10,'headerStyle':styleThinHeader,'cellStyle':styleStandard,'shallowCellStyle':styleShallowStandard} for appendix in self.yearsInAppendices
]+[
{'text':'Наименование','width':100,'headerStyle':styleHeader,'cellStyle':styleStandard,'shallowCellStyle':styleShallowStandard}
]+[
codeColumnsData[col] for cols in self.levelColLists for col in cols
]+[
{'text':[f(v) for f,v in zip(self.fakeYearNameFns,year)],'width':15,'headerStyle':styleHeader,'cellStyle':styleAmount,'shallowCellStyle':styleShallowAmount} for year in self.years
]
ws.set_panes_frozen(True)
ws.set_horz_split_pos(self.nHeaderRows)
ws.row(0).height=400
ws.merge(0,0,0,len(columns)-1)
ws.write(0,0,tableTitle,styleTableTitle)
if tableSubtitle:
ws.merge(1,1,0,len(columns)-1)
ws.write(1,0,tableSubtitle)
for i in range(self.nHeaderRows-len(self.fakeYearNameFns),self.nHeader
|
ktsitsikas/odemis
|
util/launch_xrced.py
|
Python
|
gpl-2.0
| 1,995
| 0.002506
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" This custom XRCED launcher allows a small wx function to be wrapped
so it provides a little extra needed functionality.
XRC sometimes needs to check if a node contains a filename. It does so by
checking node types. This works fine, until we start working with custom
controls, of which XRC knows nothing by default.
The little wrapper added to the pywxrc.XmlResourceCompiler.NodeContainsFilename
method, will return true if it contains a value ending with '.png', indicating
the content is a PNG image.
"""
import os
import sys
if __name__ == '__main__':
try:
# XRCed is sometimes installed standalone
from XRCed.xrced import main
sys.modules['wx.tools.XRCed'] = sys.modules['XRCed']
except ImportError:
try:
from wx.tools.XRCed.xrced import main
from wx.tools.XRCed.
|
globals import set_debug
set_debug(True)
except ImportError:
print >> sys.stderr, 'Check that XRCed is installed and is in PYTHONPATH'
raise
from wx.tools import pywxrc
# The XRCEDPATH environment variable is used to define additional plugin directories
xrced_path = os.getenv('XRCEDPATH')
this_path = os.path.dirname(__file__)
os.environ['XRCEDPATH'] = xrced_path or os.path.join(this_path, "../src/odem
|
is/gui/xmlh")
print "'XRCEDPATH' is set to %s" % os.getenv('XRCEDPATH')
# Move this to a separate launcher so it can be spread with
# odemis
def ncf_decorator(ncf):
def wrapper(self, node):
if node.firstChild and node.firstChild.nodeType == 3:
if node.firstChild.nodeValue.lower().endswith((".png", ".jpg")):
# print node.firstChild.nodeValue
return True
return ncf(self, node)
return wrapper
pywxrc.XmlResourceCompiler.NodeContainsFilename = ncf_decorator(pywxrc.XmlResourceCompiler.NodeContainsFilename)
main()
|
MiniSEC/GRR_clone
|
lib/hunts/standard.py
|
Python
|
apache-2.0
| 36,298
| 0.00697
|
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Some multiclient flows aka hunts."""
import re
import stat
import logging
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import cron
from grr.lib import data_store
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import type_info
from grr.lib import utils
from grr.lib.aff4_objects import aff4_grr
from grr.lib.hunts import implementation
from grr.lib.hunts import output_plugins
class Error(Exception):
pass
class HuntError(Error):
pass
class CreateAndRunGenericHuntFlow(flow.GRRFlow):
"""Create and run GenericHunt with given name, args and rules.
As direct write access to the data store is forbidden, we have to use flows to
perform any kind of modifications. This flow delegates ACL checks to
access control manager.
"""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
# TODO(user): describe proper types for hunt_flow_args and hunt_rules
flow_typeinfo = type_info.TypeDescriptorSet(
type_info.String(
description="Hunt id.",
name="hunt_id"),
type_info.String(
description="Hunt flow name.",
name="hunt_flow_name"),
type_info.Any(
description="A dictionary of hunt flow's arguments.",
name="hunt_flow_args"),
type_info.Any(
description="Foreman rules for the hunt.",
name="hunt_rules"),
type_info.Duration(
description="Expiration time for this hunt in seconds.",
default=rdfvalue.Duration("31d"),
name="expiry_time"),
type_info.Integer(
description="A client limit.",
default=None,
name="client_limit"),
type_info.List(
name="output_plugins",
description="The output plugins to use for this hunt.",
default=[("CollectionPlugin", {})],
validator=type_info.List(validator=type_info.Any()),
),
)
@flow.StateHandler()
def Start(self):
"""Create the hunt, perform permissions check and run it."""
hunt = implementation.GRRHunt.StartHunt(
"GenericHunt",
flow_name=self.state.hunt_flow_name,
args=self.state.hunt_flow_args,
expiry_time=self.state.expiry_time,
client_limit=self.state.client_limit,
output_plugins=self.state.output_plugins,
token=self.token)
hunt.AddRule(rules=self.state.hunt_rules)
hunt.WriteToDataStore()
# We have to create special token here, because within the flow
# token has supervisor access.
check_token = access_control.ACLToken(username=self.token.username,
reason=self.token.reason)
data_store.DB.security_manager.CheckHuntAccess(check_token, hunt.urn)
# General GUI workflow assumes that we're not gonna get here, as there are
# not enough approvals for the newly created hunt.
hunt.Run()
class ScheduleGenericHuntFlow(flow.GRRFlow):
"""Create a cron job that runs given hunt periodically."""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
flow_typeinfo = (
CreateAndRunGenericHuntFlow.flow_typeinfo +
type_info.TypeDescriptorSet(
type_info.Integer(
description="Hunt periodicity.",
default=7,
name="hunt_periodicity"
)
)
)
def CheckCronJobApproval(self, subject, token):
"""Find the approval for for this token and CheckAccess()."""
logging.debug("Checking approval for cron job %s, %s", subject, token)
if not token.username:
raise access_control.UnauthorizedAccess(
"Must specify a username for access.",
subject=subject)
if not token.reason:
raise access_control.UnauthorizedAccess(
"Must specify a reason for access.",
subject=subject)
# Build the approval URN.
approval_urn = aff4.ROOT_URN.Add("ACL").Add(subject.Path()).Add(
token.username).Add(utils.EncodeReasonString(token.reason))
try:
approval_request = aff4.FACTORY.Open(
approval_urn, aff4_type="Approval", mode="r",
token=token, age=aff4.ALL_TIMES)
except IOError:
# No Approval found, reject this request.
raise access_control.UnauthorizedAccess(
"No approval found for hunt %s." % subject, subject=subject)
if approval_request.CheckAccess(token):
return True
else:
raise access_control.UnauthorizedAccess(
"Approval %s was rejected." % approval_urn, subject=subject)
@flow.StateHandler()
def Start(self):
"""Start handler of a flow."""
flow_args = dict(CreateAndRunGenericHuntFlow.flow_typeinfo.ParseArgs(
self.state.AsDict()))
uid = utils.PRNG.GetUShort()
job_name = "Hunt_%s_%s" % (self.state.hunt_flow_name, uid)
# No approval is needed to create a cron job, but approval is required
# to enable it. Therefore first we create a disabled cron job and then
# try to enable it.
cron_job_urn = cron.CRON_MANAGER.ScheduleFlow(
"CreateAndRunGenericHuntFlow", flow_args=flow_args,
frequency=rdfvalue.Duration(str(self.state.hunt_periodicity) + "d"),
token=self.token, disabled=True, job_name=job_name)
# We have to create special token here, because within the flow
# token has supervisor access. We use this token for a CheckCronJobApproval
# check.
check_token = access_control.ACLToken(username=self.token.username,
reason=self.token.reason)
self.CheckCronJobApproval(cron_job_urn, check_token)
cron.CRON_MANAGER.EnableJob(cron_job_urn, token=self.token)
class RunHuntFlow(flow.GRRFlow):
"""Run already created hu
|
nt with given id.
As direct write access to the data store is forbidden, we have to use flows to
perform any kind of modifications. This flow delegates ACL checks to
access control manager.
"""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
flow_typeinfo = type_info.TypeDescriptorSet(
type_info.RDFURNType(
description="The
|
URN of the hunt to execute.",
name="hunt_urn"),
)
@flow.StateHandler()
def Start(self):
"""Find a hunt, perform a permissions check and run it."""
hunt = aff4.FACTORY.Open(self.state.hunt_urn, aff4_type="GRRHunt",
age=aff4.ALL_TIMES, mode="rw", token=self.token)
# We have to create special token here, because within the flow
# token has supervisor access.
check_token = access_control.ACLToken(username=self.token.username,
reason=self.token.reason)
data_store.DB.security_manager.CheckHuntAccess(check_token, hunt.urn)
# Make the hunt token a supervisor so it can be started.
hunt.token.supervisor = True
hunt.Run()
class PauseHuntFlow(flow.GRRFlow):
"""Run already created hunt with given id."""
# This flow can run on any client without ACL enforcement (an SUID flow).
ACL_ENFORCED = False
flow_typeinfo = type_info.TypeDescriptorSet(
type_info.RDFURNType(
description="The URN of the hunt to pause.",
name="hunt_urn"),
)
@flow.StateHandler()
def Start(self):
"""Find a hunt, perform a permissions check and pause it."""
hunt = aff4.FACTORY.Open(self.state.hunt_urn, aff4_type="GRRHunt",
age=aff4.ALL_TIMES, mode="rw", token=self.token)
# We have to create special token here, because within the flow
# token has supervisor access.
check_token = access_control.ACLToken(username=self.token.username,
reason=self.token.reason)
data_store.DB.security_manager.CheckHuntAccess(check_token, hunt.urn)
# Make the hunt token a supervisor so it can be started.
hunt.token.supervisor = True
hunt.Pause()
class ModifyHuntFlow(flow.GRRFlow):
"""Modify already created hunt with given id.
As direct write acc
|
trichter/yam
|
yam/stack.py
|
Python
|
mit
| 3,034
| 0
|
# Copyright 2017-2019 Tom Eulenfeld, MIT license
"""Stack correlations"""
import numpy as np
import obspy
from obspy import UTCDateTime as UTC
from yam.util import _corr_id, _time2sec, IterTime
def stack(stream, length=None, move=None):
"""
Stack traces in stream by correlation id
:param stream: |Stream| object with correlations
:param length: time span of one trace in the stack in seconds
(alternatively a string consisting of a number and a unit
-- ``'d'`` for days and ``'h'`` for hours -- can be specified,
i.e. ``'3d'`` stacks together all traces inside a three days time
window, default: None, which stacks to
|
gether all traces)
:param move: define a moving stack, float or string,
default:
|
None -- no moving stack,
if specified move usually is smaller than length to get an overlap
in the stacked traces
:return: |Stream| object with stacked correlations
"""
stream.sort()
stream_stack = obspy.Stream()
ids = {_corr_id(tr) for tr in stream}
ids.discard(None)
for id_ in ids:
traces = [tr for tr in stream if _corr_id(tr) == id_]
if length is None:
data = np.mean([tr.data for tr in traces], dtype='float16',
axis=0)
tr_stack = obspy.Trace(data, header=traces[0].stats)
tr_stack.stats.key = tr_stack.stats.key + '_s'
if 'num' in traces[0].stats:
tr_stack.stats.num = sum(tr.stats.num for tr in traces)
else:
tr_stack.stats.num = len(traces)
stream_stack.append(tr_stack)
else:
t1 = traces[0].stats.starttime
lensec = _time2sec(length)
movesec = _time2sec(move) if move else lensec
if (lensec % (24 * 3600) == 0 or
isinstance(length, str) and 'd' in length):
t1 = UTC(t1.year, t1.month, t1.day)
elif (lensec % 3600 == 0 or
isinstance(length, str) and 'm' in length):
t1 = UTC(t1.year, t1.month, t1.day, t1.hour)
t2 = max(t1, traces[-1].stats.endtime - lensec)
for t in IterTime(t1, t2, dt=movesec):
sel = [tr for tr in traces
if -0.1 <= tr.stats.starttime - t <= lensec + 0.1]
if len(sel) == 0:
continue
data = np.mean([tr.data for tr in sel], dtype='float16',
axis=0)
tr_stack = obspy.Trace(data, header=sel[0].stats)
key_add = '_s%s' % length + (move is not None) * ('m%s' % move)
tr_stack.stats.key = tr_stack.stats.key + key_add
tr_stack.stats.starttime = t
if 'num' in traces[0].stats:
tr_stack.stats.num = sum(tr.stats.num for tr in sel)
else:
tr_stack.stats.num = len(sel)
stream_stack.append(tr_stack)
return stream_stack
|
lsaffre/lino-cosi
|
lino_cosi/lib/cosi/models.py
|
Python
|
agpl-3.0
| 213
| 0.004695
|
# -*- coding: UTF-8 -*-
# Copyright 2011-2015 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
The
|
:x
|
file:`models.py` module for :ref:`cosi`.
This is empty.
"""
|
theislab/scanpy
|
scanpy/plotting/palettes.py
|
Python
|
bsd-3-clause
| 4,616
| 0.000867
|
"""Color palettes in addition to matplotlib's palettes."""
from typing import Mapping, Sequence
from matplotlib import cm, colors
# Colorblindness adjusted vega_10
# See https://github.com/theislab/scanpy/issues/387
vega_10 = list(map(colors.to_hex, cm.tab10.colors))
vega_10_scanpy = vega_10.copy()
vega_10_scanpy[2] = '#279e68' # green
vega_10_scanpy[4] = '#aa40fc' # purple
vega_10_scanpy[8] = '#b5bd61' # khaki
# default matplotlib 2.0 palette
# see 'category20' on https://github.com/vega/vega/wiki/Scales#scale-range-literals
vega_20 = list(map(colors.to_hex, cm.tab20.colors))
# reordered, some removed, some added
vega_20_scanpy = [
# dark without grey:
*vega_20[0:14:2],
*vega_20[16::2],
# light without grey:
*vega_20[1:15:2],
*vega_20[17::2],
# manual additions:
'#ad494a',
'#8c6d31',
]
vega_20_scanpy[2] = vega_10_scanpy[2]
vega_20_scanpy[4] = vega_10_scanpy[4]
vega_20_scanpy[7] = vega_10_scanpy[8] # khaki shifted by missing grey
# TODO: also replace pale colors if necessary
default_20 = vega_20_scanpy
# https://graphicdesign.stackexchange.com/questions/3682/where-can-i-find-a-large-palette-set-of-contrasting-colors-for-coloring-many-d
# update 1
# orig reference http://epub.wu.ac.at/1692/1/document.pdf
zeileis_28 = [
"#023fa5",
"#7d87b9",
"#bec1d4",
"#d6bcc0",
"#bb7784",
"#8e063b",
"#4a6fe3",
"#8595e1",
"#b5bbe3",
"#e6afb9",
"#e07b91",
"#d33f6a",
"#11c638",
"#8dd593",
"#c6dec7",
"#ead3c6",
"#f0b98d",
"#ef9708",
"#0fcfc0",
"#9cded6",
"#d5eae7",
"#f3e1eb",
"#f6c4e1",
"#f79cd4",
# these last ones were added:
'#7f7f7f',
"#c7c7c7",
"#1CE6FF",
"#336600",
]
default_28 = zeileis_28
# from http://godsnotwheregodsnot.blogspot.de/2012/09/color-distribution-methodology.html
godsnot_102 = [
# "#000000", # remove the black, as often, we have black colored annotation
"#FFFF00",
"#1C
|
E6FF",
"#FF34FF",
"#FF4A46",
"#008941",
"#006FA6",
"#A30059",
"#FFDBE5",
"#7A4900",
"#0000A6",
"#63FFAC",
"#B79762",
"#004D43",
"#8FB0FF",
"#997D87",
"#5A0007",
"#809693",
"#6A3A4C",
|
"#1B4400",
"#4FC601",
"#3B5DFF",
"#4A3B53",
"#FF2F80",
"#61615A",
"#BA0900",
"#6B7900",
"#00C2A0",
"#FFAA92",
"#FF90C9",
"#B903AA",
"#D16100",
"#DDEFFF",
"#000035",
"#7B4F4B",
"#A1C299",
"#300018",
"#0AA6D8",
"#013349",
"#00846F",
"#372101",
"#FFB500",
"#C2FFED",
"#A079BF",
"#CC0744",
"#C0B9B2",
"#C2FF99",
"#001E09",
"#00489C",
"#6F0062",
"#0CBD66",
"#EEC3FF",
"#456D75",
"#B77B68",
"#7A87A1",
"#788D66",
"#885578",
"#FAD09F",
"#FF8A9A",
"#D157A0",
"#BEC459",
"#456648",
"#0086ED",
"#886F4C",
"#34362D",
"#B4A8BD",
"#00A6AA",
"#452C2C",
"#636375",
"#A3C8C9",
"#FF913F",
"#938A81",
"#575329",
"#00FECF",
"#B05B6F",
"#8CD0FF",
"#3B9700",
"#04F757",
"#C8A1A1",
"#1E6E00",
"#7900D7",
"#A77500",
"#6367A9",
"#A05837",
"#6B002C",
"#772600",
"#D790FF",
"#9B9700",
"#549E79",
"#FFF69F",
"#201625",
"#72418F",
"#BC23FF",
"#99ADC0",
"#3A2465",
"#922329",
"#5B4534",
"#FDE8DC",
"#404E55",
"#0089A3",
"#CB7E98",
"#A4E804",
"#324E72",
]
default_102 = godsnot_102
def _plot_color_cycle(clists: Mapping[str, Sequence[str]]):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
fig, axes = plt.subplots(nrows=len(clists)) # type: plt.Figure, plt.Axes
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.3, right=0.99)
axes[0].set_title('Color Maps/Cycles', fontsize=14)
for ax, (name, clist) in zip(axes, clists.items()):
n = len(clist)
ax.imshow(
np.arange(n)[None, :].repeat(2, 0),
aspect='auto',
cmap=ListedColormap(clist),
norm=BoundaryNorm(np.arange(n + 1) - 0.5, n),
)
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3] / 2.0
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
# Turn off all ticks & spines
for ax in axes:
ax.set_axis_off()
fig.show()
if __name__ == '__main__':
_plot_color_cycle(
{name: colors for name, colors in globals().items() if isinstance(colors, list)}
)
|
bukun/TorCMS
|
tester/test_handlers/test_index_handler.py
|
Python
|
mit
| 448
| 0.004464
|
# -*- coding:utf-8 -*-
'''
Test
'''
import sys
sys.path.append('.')
from tornado.testing
|
import AsyncHTTPSTestCase
from application import APP
class TestSomeHandler(AsyncHTTPSTestCase):
'''
Test
'''
|
def get_app(self):
'''
Test
'''
return APP
def test_index(self):
'''
Test index.
'''
response = self.fetch('/')
self.assertEqual(response.code, 200)
|
Buggaboo/gimp-plugin-export-layers
|
export_layers/pygimplib/objectfilter.py
|
Python
|
gpl-3.0
| 11,296
| 0.015935
|
#-------------------------------------------------------------------------------
#
# This file is part of pygimplib.
#
# Copyright (C) 2014, 2015 khalim19 <khalim19@gmail.com>
#
# pygimplib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygimplib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygimplib. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
"""
This module defines a class to filter objects according to specified filter
rules.
"""
#===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
str = unicode
#===============================================================================
import inspect
from contextlib import contextmanager
#===============================================================================
class ObjectFilter(object):
"""
This class is a filter containing a set of rules that determines whether
a given object matches the rules or not (using the `is_match()` method).
Attributes:
* `match_type` (read-only) - Match type. Possible match types:
* MATCH_ALL - For `is_match()` to return True, the object must match
all rules.
* MATCH_ANY - For `is_match()` to return True, the object must match
at least one rule.
For greater flexibility, the filter can also contain nested `ObjectFilter`
objects, called "subfilters", each with their own set of rules and match type.
"""
__MATCH_TYPES = MATCH_ALL, MATCH_ANY = (0, 1)
def __init__(self, match_type):
self._match_type = match_type
# Key: function (rule_func)
# Value: tuple (rule_func_args) or ObjectFilter instance (a subfilter)
self._filter_items = {}
@property
def match_type(self):
return self._match_type
def has_rule(self, rule_func):
return rule_func in self._filter_items
def add_rule(self, rule_func, *rule_func_args):
"""
Add the specified rule as a function to the filter.
If `rule_func` already exists in the filter, nothing happens.
If you need to later remove the rule from the filter (using the
`remove_rule()` method), pass a named function rather than an inline lambda
expression. Alternatively, you can use `add_rule_temp()` for temporary
filters.
Parameters:
* `rule_func` - Function to filter objects by. The function must always have
at least one argument - the object to match (used by the `is_match()`
method).
* `*rule_func_args` - Arguments for the `rule_func` function.
Raises:
* `TypeError` - `rule_func` is not callable.
    * `TypeError` - `rule_func` does not have at least one argument.
"""
if self.has_rule(rule_func):
return
if not callable(rule_func):
raise TypeError("not a function")
if len(inspect.getargspec(rule_func)[0]) < 1:
raise TypeError("function must have at least one argument (the object to match)")
self._filter_items[rule_func] = rule_func_args
def remove_rule(self, rule_func, raise_if_not_found=True):
"""
Remove the rule (`rule_func` function) from the filter.
Parameters:
* `rule_func` - Function to remove from the filter.
* `raise_if_not_found` - If True, raise `ValueError` if `rule_func` is not
found in the filter.
Raises:
* `ValueError` - `rule_func` is not found in the filter and
`raise_if_not_found` is True.
"""
if self.has_rule(rule_func):
del self._filter_items[rule_func]
else:
if raise_if_not_found:
raise ValueError("\"" + str(rule_func) + "\" not found in filter")
@contextmanager
def add_rule_temp(self, rule_func, *rule_func_args):
"""
Temporarily add a rule. Use as a context manager:
with filter.add_rule_temp(rule_func):
# do stuff
If `rule_func` already exists in the filter, the existing rule will not be
overridden and will not be removed.
Parameters:
* `rule_func` - Function to filter objects by. The function must always have
at least one argument - the object to match (used by the `is_match()`
method).
* `*rule_func_args` - Arguments for the `rule_func` function.
Raises:
* `TypeError` - `rule_func` is not callable.
    * `TypeError` - `rule_func` does not have at least one argument.
"""
has_rule_already = self.has_rule(rule_func)
if not has_rule_already:
self.add_rule(rule_func, *rule_func_args)
try:
yield
finally:
if not has_rule_already:
self.remove_rule(rule_func)
@contextmanager
def remove_rule_temp(self, rule_func, raise_if_not_found=True):
"""
Temporarily remove a rule. Use as a context manager:
with filter.remove_rule_temp(rule_func):
# do stuff
Parameters:
* `rule_func` - Function to remove from the filter.
* `raise_if_not_found` - If True, raise `ValueError` if `rule_func` is not
in the filter.
Raises:
* `ValueError` - `rule_func` is not found in the filter and
`raise_if_not_found` is True.
"""
has_rule = self.has_rule(rule_func)
if not has_rule:
if raise_if_not_found:
raise ValueError("\"" + str(rule_func) + "\" not found in filter")
else:
rule_func_args = self._filter_items[rule_func]
self.remove_rule(rule_func)
try:
yield
finally:
if has_rule:
self.add_rule(rule_func, *rule_func_args)
def has_subfilter(self, subfilter_name):
return subfilter_name in self._filter_items
def add_subfilter(self, subfilter_name, subfilter):
"""
Add the specified subfilter (`ObjectFilter` instance) to the filter.
The subfilter can be later accessed by the `get_subfilter` method.
Raises:
* `ValueError` - `subfilter_name` already exists in the filter.
"""
if self.has_subfilter(subfilter_name):
raise ValueError("subfilter named \"" + str(subfilter_name) + "\" already exists in the filter")
if not isinstance(subfilter, ObjectFilter):
raise ValueError("subfilter named \"" + str(subfilter_name) + "\" is not a subfilter")
self._filter_items[subfilter_name] = subfilter
def get_subfilter(self, subfilter_name):
"""
Get the subfilter specified by its name.
Raises:
* `ValueError` - `subfilter_name` does not exist in the filter or the value
associated with `subfilter_name` is not a subfilter.
"""
if not self.has_subfilter(subfilter_name):
raise ValueError("subfilter named \"" + str(subfilter_name) + "\" not found in filter")
item = self._filter_items[subfilter_name]
|
return item
# Provide alias to `get_subfilter` for easier access.
__getitem__ = get_subfilter
def remove_subfilter(self, subfilter_name, raise_if_not_found=True):
"""
Remove the subfilter with the corresponding subfilter name.
Parameters:
* `subfilter name` - Su
|
bfilter name.
* `raise_if_not_found` - If True, raise `ValueError` if `subfilter_name`
is not found in the filter.
Raises:
* `ValueError` - `subfilter_name` is not found in the filter and
`raise_if_not_found` is True.
"""
if self.has_subfilter(subfilter_name):
del self._filter_items[subfilter_name]
else:
if raise_if_not_found:
raise ValueError("subf
|
foxmask/orotangi
|
orotangi/api/serializers.py
|
Python
|
bsd-3-clause
| 597
| 0
|
from orotangi.models import Books, Notes
from rest_framework import seriali
|
zers
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer)
|
:
class Meta:
model = User
fields = '__all__'
class BookSerializer(serializers.ModelSerializer):
class Meta:
model = Books
fields = '__all__'
class NoteSerializer(serializers.ModelSerializer):
class Meta:
model = Notes
fields = ('id', 'user', 'book', 'url', 'title', 'content',
'date_created', 'date_modified', 'date_deleted', 'status')
|
neurodata/ndmg
|
m2g/utils/qa_utils.py
|
Python
|
apache-2.0
| 2,414
| 0.010356
|
#!/usr/bin/env python
"""
m2g.utils.qa_utils
~~~~~~~~~~~~~~~~~~~~
Contains small-scale qa utilities
"""
import numpy as np
def get_min_max(data, minthr=2, maxthr=95):
"""
A function to find min,max values at designated percentile thresholds
Parameters
-----------
data: np array
3-d regmri data to threshold.
minthr: int
lower percentile threshold
maxthr: int
upper percentile threshold
Returns
-----------
min_max: tuple
tuple of minimum and maximum values
"""
min_val = np.percentile(data, minthr)
max_val = np.percentile(data, maxthr)
min_max = (min_val.astype(float), max_val.astype(float))
return min_max
def opaque_colorscale(basemap, reference, vmin=None, vmax=None, alpha=1):
"""
A function to return a colorscale, with opacities
dependent on reference intensities.
Parameters
---------
basemap: matplotlib colormap
the colormap to use for this colorscale.
reference: np array
the reference matrix.
Returns
---------
cmap = matplotlib colormap
"""
    if vmax is not None:
        reference[reference > vmax] = vmax
    if vmin is not None:
        reference[reference < vmin] = vmin
cmap = basemap(reference)
maxval = np.nanmax(reference)
    # all opacity values lie between 0 and 1
opaque_scale = alpha * reference / float(maxval)
# remaps intensities
cmap[:, :, 3] = opaque_scale
return cmap
def pad_im(image,ma
|
x_dim,pad_val=255,rgb=False):
"""
Pads an image to be same dimensions as given max_dim
Parameters
-----------
|
image: np array
        image object; can be multi-dimensional or a single slice.
max_dim: int
dimension to pad up to
pad_val: int
value to pad with. default is 255 (white) background
rgb: boolean
flag to indicate if RGB and last dimension should not be padded
Returns
-----------
padded_image: np array
image with padding
"""
pad_width = []
for i in range(image.ndim):
pad_width.append(((max_dim-image.shape[i])//2,(max_dim-image.shape[i])//2))
if rgb:
pad_width[-1] = (0,0)
pad_width = tuple(pad_width)
padded_image = np.pad(image, pad_width=pad_width, mode='constant', constant_values=pad_val)
return padded_image
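# Editor's sketch (not part of the original m2g source): a minimal demonstration of
# get_min_max and pad_im on synthetic data; the array values are arbitrary.
if __name__ == "__main__":
    demo = np.random.rand(8, 8, 8)
    print(get_min_max(demo))                  # (2nd percentile, 95th percentile) as floats
    print(pad_im(demo[:, :, 0], 16).shape)    # padded from (8, 8) up to (16, 16)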
|
tantexian/sps-2014-12-4
|
sps/openstack/common/db/sqlalchemy/session.py
|
Python
|
apache-2.0
| 35,114
| 0
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this
|
file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by app
|
licable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
`model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
.. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and `reservation_rollback()`.
Examples:
.. code:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())
def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's
`__exit__` handler will take care of calling `flush()` and `commit()` for
you. If using this approach, you should not explicitly call `flush()` or
`commit()`. Any error within the context of the session will cause the
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
raised in `session`'s `__exit__` handler, and any try/except within the
context managed by `session` will not be triggered. And catching other
non-database errors in the session will not trigger the ROLLBACK, so
exception handlers should always be outside the session, unless the
developer wants to do a partial commit on purpose. If the connection is
dropped before this is possible, the database will implicitly roll back the
transaction.
.. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call `model.save()`:
.. code:: python
def create_many_foo(context, foos):
session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
.. note:: `update_bar` is a trivially simple example of using
``with session.begin``. Whereas `create_many_foo` is a good example of
when a transaction is needed, it is always best to use as few queries as
possible.
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:
.. code:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
exception while using ``with session.begin``. Here create two duplicate
instances with same primary key, must catch the exception out of context
managed by a single session:
.. code:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = sessionmaker()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
.. code:: python
def myfunc(foo):
session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:
.. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delet
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
python-packages/mne-python-0.10/examples/simulation/plot_simulate_evoked_data.py
|
Python
|
bsd-3-clause
| 2,787
| 0.000359
|
"""
==============================
Generate simulated evoked data
==============================
"""
# Author: Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Al
|
exandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import (read_proj, read_forward_solution, read_cov, read_label,
pick_types_forward, pick_types)
from mne.i
|
o import Raw, read_info
from mne.datasets import sample
from mne.time_frequency import fit_iir_model_raw
from mne.viz import plot_sparse_source_estimates
from mne.simulation import simulate_sparse_stc, simulate_evoked
print(__doc__)
###############################################################################
# Load real data as templates
data_path = sample.data_path()
raw = Raw(data_path + '/MEG/sample/sample_audvis_raw.fif')
proj = read_proj(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif')
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
fwd = read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = read_cov(cov_fname)
info = read_info(ave_fname)
label_names = ['Aud-lh', 'Aud-rh']
labels = [read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
for ln in label_names]
###############################################################################
# Generate source time courses from 2 dipoles and the corresponding evoked data
times = np.arange(300, dtype=np.float) / raw.info['sfreq'] - 0.1
rng = np.random.RandomState(42)
def data_fun(times):
"""Function to generate random source time courses"""
return (1e-9 * np.sin(30. * times) *
np.exp(- (times - 0.15 + 0.05 * rng.randn(1)) ** 2 / 0.01))
stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
random_state=42, labels=labels, data_fun=data_fun)
###############################################################################
# Generate noisy evoked data
picks = pick_types(raw.info, meg=True, exclude='bads')
iir_filter = fit_iir_model_raw(raw, order=5, picks=picks, tmin=60, tmax=180)[1]
snr = 6. # dB
evoked = simulate_evoked(fwd, stc, info, cov, snr, iir_filter=iir_filter)
###############################################################################
# Plot
plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
opacity=0.5, high_resolution=True)
plt.figure()
plt.psd(evoked.data[0])
evoked.plot()
|
sebastiandev/biyuya
|
biyuya/models/filters.py
|
Python
|
mit
| 2,106
| 0.00095
|
import datetime
import arrow
def arrow_datetime(value, name):
try:
value = arrow.get(value).datetime
except Exception as e:
raise ValueError(e)
return value
class BaseFilter(object):
# TODO: Move this class to be part of API FiltrableResource
# Leaving implementation to be defined by base class
name = None
value_type = None
allow_multiple = None
@classmethod
def condition(cls, *args, **kwargs):
raise NotImplementedError()
@classmethod
def apply(cls, model, *args, **kwargs):
return model.find(cls.condition(*args, **kwargs), limit=kwargs.get('limit', 0))
class DateFilter(BaseFilter):
name = 'date'
value_type = arrow_datetime
allow_multiple = False
@classmethod
def condition(cls, date_value, **kwargs):
return {'date': date_value}
class DateRangeFilter(BaseFilter):
name = 'date_range'
value_type = arrow_datetime
allow_multiple = True
@classmethod
def condition(cls, from_date, to_date, **kwargs):
return {'date': {"$gte": from_date, "$lte": to_date}}
class AccountFilter(BaseFilter):
name = 'account_name'
value_type = str
allow_multiple = False
@classmethod
def condition(cls, account_name):
return {
'account': {
"$regex": '.*?{}.*?'.format(account_name),
"$options": 'si'
}
}
class NameFilter(BaseFilter):
    name = 'name'
    value_type = str
allow_multiple = False
@classmethod
def condition(cls, name):
return {
'name': {
"$regex": '.*?{}.*?'.format(name),
"$options": 'si'
}
}
class TagFilter(BaseFilter):
name = 'tag'
value_type = str
allow_multiple = True
@classmethod
def condition(cls, *tags, **kwargs):
return {
'tags': {
"$elemMatch": {
"$regex": ".*?{}.*?".format('|'.join(tags)),
"$options": "si"
}
}
}
|
knightmare2600/d4rkc0de
|
others/goog-mail.py
|
Python
|
gpl-2.0
| 2,312
| 0.014273
|
#!/usr/bin/python
import sys
import re
import string
import httplib
import urllib2
import re
def StripTags(text):
finished = 0
while not finished:
finished = 1
start = text.find("<")
if start >= 0:
stop = text[start:].find(">")
if stop >= 0:
text = text[:start] + text[start+stop+1:]
finished = 0
return text
if len(sys.argv) != 2:
print "\nExtracts emails from google results.\n"
print "\nUsage : ./goog-mail.py <domain-name>\n"
sys.exit(1)
domain_name=sys.argv[1]
d={}
page_counter = 0
try:
while page_counter < 50 :
results = 'http://groups.google.com/groups?q='+str(domain_name)+'&hl=en&lr=&ie=UTF-8&start=' + repr(page_counter) + '&sa=N'
request = urllib2.Request(results)
request.add_header('User-Agent','Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)')
opener = urllib2.build_opener()
text = opener.open(request).read()
emails = (re.findall('([\w\.\-]+@'+domain_name+')',StripTags(text)))
for email in emails:
d[email]=1
uniq_emails=d.keys()
        page_counter = page_counter +10
except IOError:
print "Can't connect to Google Groups!"+""
page_counter_web=0
try:
print "\n\n+++++++++++++++++++++++++++++++++++++++++++++++++++++"+""
print "+ Google Web & Group Results:"+""
print "+++++++++++++++++
|
++++++++++++++++++++++++++++++++++++\n\n"+""
while page_counter_web < 50 :
results_web = 'http://www.google.com/search?q=%40'+str(domain_name)+'&hl=en&lr=&ie=UTF-8&start=' + repr(page_counter_web) + '&sa=N'
request_web = urllib2.Request(results_web)
request_web.add_header('User-Agent','Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)')
opener_web = urllib2.build_opener()
text = opener_web.open(request_web).read()
emails_web = (re.findall('([\w\.\-]+@'+domain_name+')',StripTags(text)))
for email_web in emails_web:
d[email_web]=1
uniq_emails_web=d.keys()
page_counter_web = page_counter_web +10
except IOError:
print "Can't connect to Google Web!"+""
for uniq_emails_web in d.keys():
print uniq_emails_web+""
|
botswana-harvard/bcpp
|
bcpp/tests/test_views/test_enumeration.py
|
Python
|
gpl-3.0
| 3,020
| 0.001987
|
from django.test import TestCase, tag
from member.tests.test_mixins import MemberMixin
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.urls.base import reverse
from enumeration.views import DashboardView, ListBoardView
class TestEnumeration(MemberMixin, TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = User.objects.create_user(username='erik')
self.household_structure = self.make_household_ready_for_enumeration(make_hoh=False)
def test_dashboard_view(self):
url = reverse('enumeration:dashboard_url', kwargs=dict(
household_identifier=self.household_structure.household.household_identifier,
survey=self.household_structure.survey))
request = self.factory.get(url)
request.user = self.user
        response = DashboardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_dashboard_view2(self):
url = reverse('enumeration:dashboard_url', kwargs=dict(
household_identifier=self.household_structure.household.household_identifier,
            survey=self.household_structure.survey))
self.client.force_login(self.user)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_list_view1(self):
url = reverse('enumeration:listboard_url')
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_list_view2(self):
url = reverse('enumeration:listboard_url', kwargs=dict(page=1))
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_list_view3(self):
url = reverse('enumeration:listboard_url', kwargs=dict(
household_identifier=self.household_structure.household.household_identifier))
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_list_view4(self):
url = reverse('enumeration:listboard_url', kwargs=dict(
household_identifier=self.household_structure.household.household_identifier,
survey=self.household_structure.survey))
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_list_view5(self):
url = reverse('enumeration:listboard_url', kwargs=dict(
plot_identifier=self.household_structure.household.plot.plot_identifier))
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
|
lap00zza/arc
|
api_server/arc_api/snowflake.py
|
Python
|
gpl-3.0
| 2,140
| 0.000935
|
"""
arc - dead simple chat
Copyright (C) 2017 Jewel Mahanta <jewelmahanta@gmail.com>
This file is part of arc.
arc is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
arc is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with arc. If not, see <http://www.gnu.org/licenses/>.
"""
import time
import math
ARC_EPOCH = 1496595546533
class Snowflake:
"""
Arc snowflake has the following structure
+------------------+-----------------+-----------------+
| 41 bit timestamp | 13 bit shard_id | 10 bit sequence |
+------------------+-----------------+-----------------+
    We use our custom epoch. Each component has the following
upper limits:
timestamp (2^41) - 1 = 2199023255551
shard_id (2^13) - 1 = 8191
sequence (2^10) - 1 = 1023
So roughly speaking, we can generate 1024 id's per millisecond
per shard.
Credits:
--------
This id generation technique borrows heavily from instagram's
implementation of twitter snowflake. You can read more about
it here: https://engineering.instagram.com/sharding-ids-at-instagram-1cf5a71e5a5c
"""
def __init__(self, shard_id=0):
self.last_timestamp = 0
self.sequence = 0
self.SHARD_ID = shard_id
def generate(self):
timestamp = (math.floor(time.time()) * 1000) - ARC_EPOCH
if self.last_timestamp == timestamp:
self.sequence += 1
else:
self.sequence = 0
if self.sequence >= 1023:
print("Sleeping")
time.sleep(1/1000)
self.last_timestamp = timestamp
gen_id = (timestamp << 23) + (self.SHARD_ID << 10) + self.sequence
return gen_id
snowflake = Snowflake()
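# Illustrative usage sketch (assumed from the bit layout documented in the
# Snowflake docstring above; the variable names below are illustrative only):
#
#     sf = Snowflake(shard_id=7)
#     new_id = sf.generate()
#     timestamp_ms = (new_id >> 23) + ARC_EPOCH   # 41-bit timestamp, ms since the custom epoch
#     shard        = (new_id >> 10) & 0x1FFF      # 13-bit shard id
#     sequence     = new_id & 0x3FF               # 10-bit sequence counter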
|
crookedreyes/py4e-specialization
|
course-3/chapter-13/json1.py
|
Python
|
lgpl-2.1
| 269
| 0.007435
|
import json
data = '''{
"name" : "Chuck",
"phone": {
"type" : "int1",
"number"
|
: "+1
|
734 303 4456"
},
"email": {
"hide" : "yes"
}
}'''
info = json.loads(data)
print('Name:',info["name"])
print('Hide:',info["email"]["hide"])
|
DxCx/nzbToMedia
|
libs/beets/mediafile.py
|
Python
|
gpl-3.0
| 56,908
| 0.000914
|
# This file is part of beets.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Handles low-level interfacing for files' tags. Wraps Mutagen to
automatically detect file types and provide a unified interface for a
useful subset of music files' tags.
Usage:
>>> f = MediaFile('Lucy.mp3')
>>> f.title
u'Lucy in the Sky with Diamonds'
>>> f.artist = 'The Beatles'
>>> f.save()
A field will always return a reasonable value of the correct type, even
if no tag is present. If no value is available, the value will be false
(e.g., zero or the empty string).
Internally ``MediaFile`` uses ``MediaField`` descriptors to access the
data from the tags. In turn ``MediaField`` uses a number of
``StorageStyle`` strategies to handle format specific logic.
"""
import mutagen
import mutagen.mp3
import mutagen.oggopus
import mutagen.oggvorbis
import mutagen.mp4
import mutagen.flac
import mutagen.monkeysaudio
import mutagen.asf
import datetime
import re
import base64
import math
import struct
import imghdr
import os
import logging
import traceback
from beets.util.enumeration import enum
__all__ = ['UnreadableFileError', 'FileTypeError', 'MediaFile']
# Logger.
log = logging.getLogger('beets')
# Exceptions.
class UnreadableFileError(Exception):
"""Indicates a file that MediaFile can't read.
"""
pass
class FileTypeError(UnreadableFileError):
"""Raised for files that don't seem to have a type MediaFile
supports.
"""
pass
class MutagenError(UnreadableFileError):
"""Raised when Mutagen fails unexpectedly---probably due to a bug.
"""
# Constants.
# Human-readable type names.
TYPES = {
'mp3': 'MP3',
'aac': 'AAC',
'alac': 'ALAC',
'ogg': 'OGG',
'opus': 'Opus',
'flac': 'FLAC',
'ape': 'APE',
'wv': 'WavPack',
'mpc': 'Musepack',
'asf': 'Windows Media',
}
# Utility.
def _safe_cast(out_type, val):
"""Try to covert val to out_type but never raise an exception. If
the value can't be converted, then a sensible default value is
returned. out_type should be bool, int, or unicode; otherwise, the
value is just passed through.
"""
if out_type == int:
if val is None:
return 0
elif isinstance(val, int) or isinstance(val, float):
# Just a number.
return int(val)
else:
# Process any other type as a string.
if not isinstance(val, basestring):
val = unicode(val)
# Get a number from the front of the string.
val = re.match(r'[0-9]*', val.strip()).group(0)
if not val:
return 0
else:
return int(val)
elif out_type == bool:
if val is None:
return False
else:
try:
# Should work for strings, bools, ints:
return bool(int(val))
except ValueError:
return False
elif out_type == unicode:
if val is None:
return u''
else:
if isinstance(val, str):
return val.decode('utf8', 'ignore')
elif isinstance(val, unicode):
return val
else:
return unicode(val)
elif out_type == float:
if val is None:
return 0.0
elif isinstance(val, int) or isinstance(val, float):
return float(val)
else:
if not isinstance(val, basestring):
val = unicode(val)
val = re.match(r'[\+-]?[0-9\.]*', val.strip()).group(0)
if not val:
return 0.0
else:
return float(val)
else:
return val
# Image coding for ASF/WMA.
def _unpack_asf_image(data):
"""Unpack image data from a WM/Picture tag. Return a tuple
containing the MIME type, the raw image data, a type indicator, and
the image's description.
This function is treated as "untrusted" and could throw all manner
of exceptions (out-of-bounds, etc.). We should clean this up
sometime so that the failure modes are well-defined.
"""
type, size = struct.unpack_from("<bi", data)
pos = 5
mime = ""
while data[pos:pos + 2] != "\x00\x00":
mime += data[pos:pos + 2]
pos += 2
pos += 2
description = ""
while data[pos:pos + 2] != "\x00\x00":
description += data[pos:pos + 2]
pos += 2
pos += 2
image_data = data[pos:pos + size]
return (mime.decode("utf-16-le"), image_data, type,
description.decode("utf-16-le"))
def _pack_asf_image(mime, data, type=3, description=""):
"""Pack image data for a WM/Picture tag.
"""
tag_data = struct.pack("<bi", type, len(data))
tag_data += mime.encode("utf-16-le") + "\x00\x00"
tag_data += description.encode("utf-16-le") + "\x00\x00"
tag_data += data
return tag_data
# iTunes Sound Check encoding.
def _sc_decode(soundcheck):
"""Convert a Sound Check string value to a (gain, peak) tuple as
used by ReplayGain.
"""
# SoundCheck tags consist of 10 numbers, each represented by 8
# characters of ASCII hex preceded by a space.
try:
soundcheck = soundcheck.replace(' ', '').decode('hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except (struct.error, TypeError):
# SoundCheck isn't in the format we expect, so return default
# values.
return 0.0, 0.0
# SoundCheck stores absolute calculated/measured RMS value in an
# unknown unit. We need to find the ratio of this measurement
# compared to a reference value of 1000 to get our gain in dB. We
# play it safe by using the larger of the two values (i.e., the most
# attenuation).
maxgain = max(soundcheck[:2])
if maxgain > 0:
gain = math.log10(maxgain / 1000.0) * -10
else:
# Invalid gain value found.
gain = 0.0
# SoundCheck stores peak values as the actual value of the sample,
# and again separately for the left and right channels. We need to
# convert this to a percentage of full scale, which is 32768 for a
# 16 bit sample. Once again, we play it safe by using the larger of
# the two values.
peak = max(soundcheck[6:8]) / 32768.0
return round(gain, 2), round(peak, 6)
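# Worked example of the conversion above (illustrative numbers, not from the
# original source): a stored RMS value of 10000 for max(soundcheck[:2]) gives
# gain = log10(10000 / 1000.0) * -10 = -10.0 dB, and a stored peak sample of
# 16384 gives peak = 16384 / 32768.0 = 0.5 of full scale.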
def _sc_encode(gain, peak):
"""Encode ReplayGain gain/peak values as a Sound Check string.
"""
# SoundCheck stores the peak value as the actual value of the
# sample, rather than the percentage of full scale that RG uses, so
# we do a simple conversion assuming 16 bit samples.
peak *= 32768.0
# SoundCheck stores absolute RMS values in some unknown units rather
# than the dB values RG uses. We can calculate these absolute values
    # from the gain ratio using a reference value of 1000 units. We also
# enforce the maximum value here, which is equivalent to about
# -18.2dB.
g1 = min(round((10 ** (gain / -10)) * 1000), 65534)
# Same as above, except our reference level is 2500 units.
g2 = min(round((10 ** (gain / -10)) * 2500), 65534)
    # The purpose of these values are unknown, but they also seem to be
# unused so we just use zero.
uk = 0
values = (g1, g1, g2, g2, uk, uk, peak, peak, uk, uk)
return (u' %08X' * 10) % values
# Cover art and other images.
def _image_mime_type(data):
"""Return the MIME type of the image data (a bytestring).
"""
kind = imghdr.what(None, h=data)
if kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']:
|
alexryndin/ambari
|
ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
|
Python
|
apache-2.0
| 9,467
| 0.007711
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from ambari_commons.os_check import OSCheck
from resource_management.libraries.script.script import Script
config = Script.get_config()
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
iop_stack_version = format_stack_version(stack_version_unformatted)
# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
hadoop_bin = stack_select.get_hadoop_dir("sbin")
hadoop_home = '/usr'
create_lib_snappy_symlinks = True
# IOP 4.0+ params
if Script.is_stack_greater_or_equal("4.0"):
mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
hadoop_home = stack_select.get_hadoop_dir("home")
create_lib_snappy_symlinks = False
current_service = config['serviceName']
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
#users and groups
has_hadoop_env = 'hadoop-env' in config['configurations']
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
#hosts
hostname = config["hostname"]
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
has_namenode = not len(namenode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_slaves = not len(slave_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_hcat_server_host = not len(hcat_server_hosts) == 0
has_hive_server_host = not len(hive_server_host) == 0
has_hbase_masters = not len(hbase_master_hosts) == 0
has_zk_host = not len(zk_hosts) == 0
has_ganglia_server = not len(ganglia_server_hosts) == 0
has_metric_collector = not len(ams_collector_hosts) == 0
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
ganglia_server_host = ganglia_server_hosts[0]
if has_metric_collector:
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_host' in config['configurations']['cluster-env']:
metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
else:
metric_collector_host = ams_collector_hosts[0]
if 'cluster-env' in config['configurations'] and \
'metrics_collector_vip_port' in config['configurations']['cluster-env']:
metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
else:
metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
if metric_collector_web_address.find(':') != -1:
metric_collector_port = metric_collector_web_address.split(':')[1]
else:
metric_collector_port = '6188'
pass
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
#hadoop params
if has_namenode:
hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hbase_tmp_dir = "/tmp/hbase-hbase"
#db params
server_db_name = config['hostLevelParams']['db_name']
db_driver_filename = config['hostLevelParams']['db_driver_filename']
oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
ambari_server_resources = config['hostLevelParams']['jdk_location']
oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
rca_enabled = config['configurations']['hadoop-env']['rca_enabled']
else:
rca_enabled = False
rca_disabled_prefix = "###"
if rca_enabled == True:
rca_prefix = ""
else:
rca_prefix = rca_disabled_prefix
#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']
jsvc_path = "/usr/lib/bigtop-utils"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
#log4j.properties
yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
#log4j.properties
if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
log4j
|
tiexinliu/odoo_addons
|
smile_log/tools/db_logger.py
|
Python
|
agpl-3.0
| 2,985
| 0.00067
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import logging
from openerp.modules.registry import RegistryManager
from .misc import add_timing, add_trace
class SmileDBLogger:
def __init__(self, dbname, model_name, res_id, uid=0):
assert isinstance(uid, (int, long)), 'uid should be an integer'
self._logger = logging.getLogger('smile_log')
db = RegistryManager.get(dbname)._db
pid = 0
try:
cr = db.cursor()
cr.autocommit(True)
cr.execute("select relname from pg_class where relname='smile_log_seq'")
if not cr.rowcount:
                cr.execute("create sequence smile_log_seq")
cr.execute("select nextval('smile_log_seq')")
res = cr.fetchone()
pid = res and res[0] or 0
finally:
cr.close()
self._logger_start = datetime.datetime.now()
self._logger_args = {'dbname': dbname, 'model_name': model_name, 'res_id': res_id, 'uid': uid, 'pid': pid}
@property
def pid(self):
return self._logger_args['pid']
def setLevel(self, level):
self._logger.setLevel(level)
def getEffectiveLevel(self):
return self._logger.getEffectiveLevel()
def debug(self, msg):
self._logger.debug(msg, self._logger_args)
def info(self, msg):
self._logger.info(msg, self._logger_args)
def warning(self, msg):
self._logger.warning(msg, self._logger_args)
def log(self, msg):
self._logger.log(msg, self._logger_args)
@add_trace
def error(self, msg):
self._logger.error(msg, self._logger_args)
@add_trace
def critical(self, msg):
self._logger.critical(msg, self._logger_args)
@add_trace
def exception(self, msg):
self._logger.exception(msg, self._logger_args)
@add_timing
def time_info(self, msg):
self._logger.info(msg, self._logger_args)
@add_timing
def time_debug(self, msg):
self._logger.debug(msg, self._logger_args)
|
DigitalCampus/django-oppia
|
tests/oppia/management/test_data_retention.py
|
Python
|
gpl-3.0
| 2,111
| 0
|
import datetime
from django.contrib.auth.models import User
from django.core.management import call_command
from django.utils import timezone
from io import StringIO
from oppia.models import Tracker
from oppia.test import OppiaTestCase
class DataRetentionTest(OppiaTestCase):
fixtures = ['tests/test_user.json',
'tests/test_oppia.json',
'default_gamification_events.json',
'tests/test_tracker.json',
'tests/test_permissions.json',
'default_badges.json',
'tests/test_course_permissions.json']
STR_NO_INPUT = '--noinput'
def test_data_retention_no_delete(self):
out = StringIO()
start_user_count = User.objects.all().count()
call_command('data_retention', self.STR_NO_INPUT, stdout=out)
end_user_count = User.objects.all().count()
self.assertEqual(start_user_count, end_user_count)
def test_data_retention_old_user(self):
out = StringIO()
user = User()
user.username = "olduser"
user.last_login = timezone.make_aware(
            datetime.datetime.strptime('2000-01-01', "%Y-%m-%d"),
timezone.get_current_timezone())
user.save()
start_user_count = User.objects.all().count()
call_command('data_retention', self.STR_NO_INPUT, stdout=out)
end_user_count = User.objects.all().count()
self.assertEqual(start_user_count-1, end_user_count)
def test_data_retention_old_user_new_tracker(self):
out = StringIO()
user = User()
user.username = "olduse
|
r"
user.last_login = timezone.make_aware(
datetime.datetime.strptime('2000-01-01', "%Y-%m-%d"),
timezone.get_current_timezone())
user.save()
tracker = Tracker()
tracker.user = user
tracker.save()
start_user_count = User.objects.all().count()
call_command('data_retention', self.STR_NO_INPUT, stdout=out)
end_user_count = User.objects.all().count()
self.assertEqual(start_user_count, end_user_count)
|
bohdan-shramko/learning-python
|
source/chapter09/alien_blaster.py
|
Python
|
mit
| 656
| 0.009146
|
# Alien Blaster
# Demonstrates object interaction
class Player(object):
""" A player in a shooter game. """
def blast(self, enemy):
print("The player blasts an enemy.\n")
enemy.die()
class Alien(object):
""" An alien in a shooter game. """
def die(self):
print("The alien gasps and says, 'Oh, this is it. This is the big one. \n" \
|
"Yes, it's getting d
|
ark now. Tell my 1.6 million larvae that I loved them... \n" \
"Good-bye, cruel universe.'")
# main
print("\t\tDeath of an Alien\n")
hero = Player()
invader = Alien()
hero.blast(invader)
input("\n\nPress the enter key to exit.")
|
stfc/cvmfs-stratum-uploader
|
uploader/projects/admin.py
|
Python
|
apache-2.0
| 642
| 0
|
from django import forms
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from guardian.admin import GuardedModelAdmin
from uploader.projects.models import FileSystem, Project
class FileSystemAdminForm(forms.ModelForm):
class Meta:
model = FileSystem
class ProjectAdmin(GuardedModelAdmin):
list_display = ('__unicode__', 'file_system', 'directory')
class FileSystemAdmin(ModelAdmin):
list_display = ('__unicode__', 'alias', 'mount_point')
form = FileSystemAdminForm
admin.site.register(FileSystem, admin_class=FileSystemAdmin)
admin.site.register(Project, admin_class=ProjectAdmin)
|
ktan2020/legacy-automation
|
win/Lib/distutils/tests/test_build_scripts.py
|
Python
|
mit
| 3,712
| 0.000808
|
"""Tests for distutils.command.build_scripts."""
import os
import unittest
from distutils.command.build_scripts import build_scripts
from distutils.core import Distribution
import sysconfig
from distutils.tests import support
from test.test_support import run_unittest
class BuildScriptsTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_default_settings(self):
cmd = self.get_build_scripts_cmd("/foo/bar", [])
self.assertTrue(not cmd.force)
self.assertTrue(cmd.build_dir is None)
cmd.finalize_options()
self.assertTrue(cmd.force)
self.assertEqual(cmd.build_dir, "/foo/bar")
def test_build(self):
source = self.mkdtemp()
target = self.mkdtemp()
expected = self.write_sample_scripts(source)
cmd = self.get_build_scripts_cmd(target,
[os.path.join(source, fn)
for fn in expected])
cmd.finalize_options()
        cmd.run()
        built = os.listdir(target)
for name in expected:
self.assertTrue(name in built)
def get_build_scripts_cmd(self, target, scripts):
import sys
dist = Distribution()
dist.scripts = scripts
dist.command_obj["build"] = supp
|
ort.DummyCommand(
build_scripts=target,
force=1,
executable=sys.executable
)
return build_scripts(dist)
def write_sample_scripts(self, dir):
expected = []
expected.append("script1.py")
self.write_script(dir, "script1.py",
("#! /usr/bin/env python2.3\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
expected.append("script2.py")
self.write_script(dir, "script2.py",
("#!/usr/bin/python\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
expected.append("shell.sh")
self.write_script(dir, "shell.sh",
("#!/bin/sh\n"
"# bogus shell script w/ sh-bang\n"
"exit 0\n"))
return expected
def write_script(self, dir, name, text):
f = open(os.path.join(dir, name), "w")
try:
f.write(text)
finally:
f.close()
def test_version_int(self):
source = self.mkdtemp()
target = self.mkdtemp()
expected = self.write_sample_scripts(source)
cmd = self.get_build_scripts_cmd(target,
[os.path.join(source, fn)
for fn in expected])
cmd.finalize_options()
# http://bugs.python.org/issue4524
#
# On linux-g++-32 with command line `./configure --enable-ipv6
# --with-suffix=3`, python is compiled okay but the build scripts
# failed when writing the name of the executable
old = sysconfig.get_config_vars().get('VERSION')
sysconfig._CONFIG_VARS['VERSION'] = 4
try:
cmd.run()
finally:
if old is not None:
sysconfig._CONFIG_VARS['VERSION'] = old
built = os.listdir(target)
for name in expected:
self.assertTrue(name in built)
def test_suite():
return unittest.makeSuite(BuildScriptsTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
|
stevetjoa/algorithms
|
kth_smallest_loop.py
|
Python
|
mit
| 470
| 0.002128
|
def kth_smallest(arr, k):
n = len(arr)
a = 0
b = n
while a < b:
piv = a
for i in range(a, b):
            if arr[piv] > arr[i]:
arr[i], arr[piv] = arr[piv], arr[i]
piv = i
if piv == k:
return arr[piv]
elif piv < k:
a = piv+1
else:
b = piv-1
arr = [9, 3, 5, 6, 1, 3, 3]
print arr
for i in range(-1, len(arr)+1):
print i, kth_smallest(arr, i)
|
lyubomir1993/AlohaServer
|
Client.py
|
Python
|
apache-2.0
| 616
| 0.008117
|
#!/usr/bin/python
import json
class Client():
def __init__(self, clientHostName, clientPort, channel):
self.clientHostName = clientHostName
self.clientPort = clientPort
        self.clientType = self.getClientType()
self.channel = channel
# TO DO implement this method properly
def getClientType(self):
try:
self.WebClient = "Web Client"
self.MobileClient = "Mobile Client"
return self.WebClient
except ImportError as e:
            print json.dumps({"status" : "error", "Client.getClientType" : str(e)})
            exit(0)
|
arocks/edge
|
src/accounts/forms.py
|
Python
|
mit
| 3,105
| 0.000966
|
from __future__ import unicode_literals
from django.contrib.auth.forms import AuthenticationForm
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML, Field
from authtools import forms as authtoolsforms
from django.contrib.auth import forms as authforms
from django.urls import reverse
class LoginForm(AuthenticationForm):
remember_me = forms.BooleanField(required=False, initial=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.fields["username"].widget.input_type = "email" # ugly hack
self.helper.layout = Layout(
Field("username", placeholder="Enter Email", autofocus=""),
Field("password", placeholder="Enter Password"),
HTML(
'<a href="{}">Forgot Password?</a>'.format(
reverse("accounts:password-reset")
)
),
Field("remember_me"),
Submit("sign_in", "Log in", css_class="btn btn-lg btn-primary btn-block"),
)
class SignupForm(authtoolsforms.UserCreationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.fields["email"].widget.input_type = "email" # ugly hack
self.helper.layout = Layout(
Field("email", placeholder="Enter Email", autofocus=""),
Field("name", placeholder="Enter Full Name"),
Field("password1", placeholder="Enter Password"),
Field("password2", placeholder="Re-enter Passwor
|
d"),
Submit("sign_up", "Sign up", css_class="btn-warning"),
|
)
class PasswordChangeForm(authforms.PasswordChangeForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Field("old_password", placeholder="Enter old password", autofocus=""),
Field("new_password1", placeholder="Enter new password"),
Field("new_password2", placeholder="Enter new password (again)"),
Submit("pass_change", "Change Password", css_class="btn-warning"),
)
class PasswordResetForm(authtoolsforms.FriendlyPasswordResetForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Field("email", placeholder="Enter email", autofocus=""),
Submit("pass_reset", "Reset Password", css_class="btn-warning"),
)
class SetPasswordForm(authforms.SetPasswordForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(
Field("new_password1", placeholder="Enter new password", autofocus=""),
Field("new_password2", placeholder="Enter new password (again)"),
Submit("pass_change", "Change Password", css_class="btn-warning"),
)
|
effa/flocs
|
blocks/migrations/0005_blockmodel_difficulty.py
|
Python
|
gpl-2.0
| 475
| 0.002105
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blocks', '0004_auto_20160305_2025'),
]
operations = [
migrations.AddField(
            model_name='blockmodel',
name='difficulty',
            field=models.FloatField(default=1.0, help_text='real number between -1 (easiest) and 1 (most difficult)'),
),
]
|
lenovo-network/networking-lenovo
|
networking_lenovo/db/migration/alembic_migrations/__init__.py
|
Python
|
apache-2.0
| 639
| 0
|
# Copyright (c) 2017, Lenovo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION_TABLE = 'lenovo_alembic_version'
|
mcmcplotlib/mcmcplotlib
|
api/generated/arviz-plot_kde-7.py
|
Python
|
apache-2.0
| 43
| 0
|
az.plot_kde(mu_posterior, cumulative=True)
|
pstjohn/pyefm
|
pyefm/ElementaryFluxVectors.py
|
Python
|
bsd-2-clause
| 5,857
| 0.002733
|
import pandas as pd
import numpy as np
import cobra
from pyefm.ElementaryFluxModes import EFMToolWrapper
from tqdm import tqdm
class EFVWrapper(EFMToolWrapper):
def create_matrices(self, extra_g=None, extra_h=None):
""" Initialize the augmented stoichiometric matrix.
extra_g: (n x nr) array
            Extra entries in the constraint matrix. positive values for lower
bounds, negative values for upper bounds
extra_h: (n) array
Corresponding bounds for the extra entries matrix
"""
# Create stoichiometric matrix, get key dimensions
N = cobra.util.create_stoichiometric_matrix(self.model)
nm, nr = N.shape
self.nm = nm
self.nr = nr
# Construct full G and h matrices, then drop homogeneous (or near
# homogeneous) entries
g_full = np.vstack([np.eye(nr), -np.eye(nr)])
h_full = np.array([(r.lower_bound, -r.upper_bound)
for r in self.model.reactions]).T.flatten()
inhomogeneous = ~((h_full <= -1000) | np.isclose(h_full, 0))
h_full = h_full[inhomogeneous]
g_full = g_full[inhomogeneous]
if extra_g is not None:
assert extra_g.shape[1] == nr
assert extra_g.shape[0] == len(extra_h)
g_full = np.vstack([g_full, extra_g])
h_full = np.hstack([h_full, extra_h])
G = g_full
h = h_full
self.nt = nt = len(h)
self.D = np.vstack([
            np.hstack([N, np.zeros((nm, nt)), np.zeros((nm, 1))]),
np.hstack([G, -np.eye(nt), np.atleast_2d(-h).T])
])
def create_model_files(self, temp_dir):
# Stoichiometric Matrix
np.savetxt(temp_dir + '/stoich.txt', self.D, delimiter='\t')
# Reaction reversibilities
np.savetxt(
temp_dir + '/revs.txt', np.hstack([
                np.array([r.lower_bound < 0 for r in self.model.reactions]),
np.zeros((self.nt + 1))]),
delimiter='\t', fmt='%d', newline='\t')
# Reaction Names
r_names = np.hstack([
np.array([r.id for r in self.model.reactions]),
np.array(['s{}'.format(i) for i in range(self.nt)]),
np.array(['lambda'])
])
with open(temp_dir + '/rnames.txt', 'w') as f:
f.write('\t'.join(('"{}"'.format(name) for name in r_names)))
# Metabolite Names
m_names = np.hstack([
np.array([m.id for m in self.model.metabolites]),
np.array(['s{}'.format(i) for i in range(self.nt)]),
])
with open(temp_dir + '/mnames.txt', 'w') as f:
f.write('\t'.join(('"{}"'.format(name) for name in m_names)))
pass
def read_double_out(self, out_file):
with open(out_file, 'rb') as f:
out_arr = np.fromstring(f.read()[13:], dtype='>d').reshape(
(-1, self.nt + self.nr + 1)).T
out_arr = np.asarray(out_arr, dtype=np.float64).T
# Sort by the absolute value of the stoichiometry
sort_inds= np.abs(out_arr[:, :self.nr]).sum(1).argsort()
out_arr = out_arr[sort_inds]
unbounded = out_arr[np.isclose(out_arr[:,-1], 0.)]
bounded = out_arr[~np.isclose(out_arr[:,-1], 0.)]
if bounded.size: # Test if its empty
bounded /= np.atleast_2d(bounded[:,-1]).T
unbounded_df = pd.DataFrame(
unbounded[:, :self.nr],
columns=[r.id for r in self.model.reactions],
index=['UEV{}'.format(i)
for i in range(1, unbounded.shape[0] + 1)])
bounded_df = pd.DataFrame(
bounded[:, :self.nr],
columns=[r.id for r in self.model.reactions],
index=('BEV{}'.format(i)
for i in range(1, bounded.shape[0] + 1)))
return unbounded_df.append(bounded_df)
def calculate_elementary_vectors(cobra_model, opts=None, verbose=True,
java_args=None, extra_g=None, extra_h=None):
"""Calculate elementary flux vectors, which capture arbitrary linear
constraints. Approach as detailed in S. Klamt et al., PLoS Comput Biol. 13,
e1005409–22 (2017).
Augmented constraints as a hacky workaround for implementing more
complicated constraints without using optlang.
java_args: string
Extra command-line options to pass to the java virtual machine.
Eg. '-Xmx1g' will set the heap space to 1 GB.
extra_g: (n x nr) array
        Extra entries in the constraint matrix. positive values for lower
bounds, negative values for upper bounds
extra_h: (n) array
Corresponding bounds for the extra entries matrix
"""
efv_wrap = EFVWrapper(cobra_model, opts, verbose, java_args=java_args)
efv_wrap.create_matrices(extra_g=extra_g, extra_h=extra_h)
return efv_wrap()
def get_support_minimal(efvs):
"""Return only those elementary flux vectors whose support is not a proper
superset of another EFV"""
bool_df = pd.DataFrame(np.isclose(efvs, 0),
columns=efvs.columns, index=efvs.index)
set_df = bool_df.apply(lambda x: set(x.index[~x]), 1)
set_df = set_df[set_df != set()] # Drop the empty set EFV
set_dict = set_df.to_dict()
is_support_minimal = _get_support_minimal_list(set_dict)
return efvs.loc[is_support_minimal]
def _get_support_minimal_list(set_dict):
all_keys = set(set_dict.keys())
is_support_minimal = []
for this_key, val in tqdm(set_dict.items()):
for key in all_keys.difference(set([this_key])):
if val.issuperset(set_dict[key]):
break
else:
is_support_minimal.append(this_key)
return is_support_minimal
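# Minimal usage sketch for the extra_g / extra_h augmentation described in the
# docstrings above (hedged illustration: `model`, the reaction index and the
# 0.1 bound are assumptions; column order must match cobra_model.reactions):
#
#     import numpy as np
#     nr = len(model.reactions)
#     extra_g = np.zeros((1, nr))
#     extra_g[0, 5] = 1.0              # positive entry -> lower bound on reaction 5
#     extra_h = np.array([0.1])        # i.e. flux through reaction 5 >= 0.1
#     efvs = calculate_elementary_vectors(model, extra_g=extra_g, extra_h=extra_h)
#     minimal = get_support_minimal(efvs)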
|
kmichel/po-localization
|
po_localization/po_file.py
|
Python
|
mit
| 6,436
| 0.001709
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import re
from io import StringIO
from .strings import escape
EMBEDDED_NEWLINE_MATCHER = re.compile(r'[^\n]\n+[^\n]')
class PoFile(object):
def __init__(self):
self.header_fields = []
self._header_index = {}
self.entries = {}
def clone(self):
po_file = PoFile()
po_file.header_fields.extend(self.header_fields)
for msgid, entry in self.entries.items():
po_file.entries[msgid] = entry.clone()
return po_file
def add_header_field(self, field, value):
if field in self._header_index:
self.header_fields[self._header_index[field]] = (field, value)
else:
self._header_index[field] = len(self.header_fields)
self.header_fields.append((field, value))
def add_entry(self, message, plural=None, context=None):
msgid = get_msgid(message, context)
if msgid in self.entries:
entry = self.entries[msgid]
# Allow merging a non-plural entry with a plural entry
# If more than one plural entry only keep the first
if entry.plural is None:
entry.plural = plural
else:
entry = TranslationEntry(message, plural, context)
self.entries[msgid] = entry
return entry
def dump(self, fp, include_locations=True, prune_obsoletes=False):
needs_blank_line = False
if len(self.header_fields):
print('msgid ""', file=fp)
print('msgstr ""', file=fp)
for field, value in self.header_fields:
print(r'"{}: {}\n"'.format(field, value), file=fp)
needs_blank_line = True
nplurals = self.get_nplurals()
for entry in sorted(self.entries.values(), key=get_entry_sort_key):
if needs_blank_line:
print('', file=fp)
needs_blank_line = entry.dump(
fp, nplurals, include_locations=include_locations, prune_obsolete=prune_obsoletes)
def dumps(self, include_locations=True, prune_obsoletes=False):
string_file = StringIO()
self.dump(string_file, include_locations, prune_obsoletes)
return string_file.getvalue()
def get_catalog(self):
catalog = {}
for entry in self.entries.values():
entry.fill_catalog(catalog)
return catalog
def get_nplurals(self):
plural_field_index = self._header_index.get('Plural-Forms', -1)
if plural_field_index != -1:
field, value = self.header_fields[plural_field_index]
if field == 'Plural-Forms':
for pair in value.split(';'):
parts = pair.partition('=')
if parts[0].strip() == 'nplurals':
return int(parts[2].strip())
return None
class TranslationEntry(object):
MIN_NPLURALS = 2
def __init__(self, message, plural=None, context=None):
self.message = message
self.plural = plural
self.context = context
self.locations = []
self.translations = {}
def clone(self):
entry = TranslationEntry(self.message, self.plural, self.context)
entry.locations.extend(self.locations)
entry.translations = self.translations.copy()
return entry
def add_location(self, filename, lineno):
self.locations.append((filename, lineno))
def add_translation(self, translation):
self.add_plural_translation(0, translation)
def add_plural_translation(self, index, translation):
self.translations[index] = translation
def fill_catalog(self, catalog):
msgid = get_msgid(self.message, self.context)
if self.plural is not None:
for index, translation in self.translations.items():
if translation:
catalog[(msgid, index)] = translation
else:
translation = self.translations.get(0, '')
if translation:
catalog[msgid] = translation
def dump(self, fp, nplurals=None, include_locations=True, prune_obsolete=False):
"""
If plural, shows exactly 'nplurals' plurals if 'nplurals' is not None, else shows at least min_nplurals.
All plural index are ordered and consecutive, missing entries are displayed with an empty string.
"""
if not len(self.locations):
if prune_obsolete or all(translation == '' for index, translation in self.translations.items()):
return False
else:
print('#. obsolete entry', file=fp)
if include_locations and len(self.locations):
print('#: {}'.format(' '.join('{}:{}'.format(*location) for location in self.locations)), file=fp)
if self.context is not None:
print('msgctxt {}'.format(multiline_escape(self.context)), file=fp)
print('msgid {}'.format(multiline_escape(self.message)), file=fp)
if self.plural is not None:
print('msgid_plural {}'.format(multiline_escape(self.plural)), file=fp)
if nplurals is None:
nplurals = self.get_suggested_nplurals()
for index in range(nplurals):
print('msgstr[{}] {}'.format(index, multiline_escape(self.translations.get(index, ''))), file=fp)
else:
print('msgstr {}'.format(multiline_escape(self.translations.get(0, ''))), file=fp)
return True
def get_suggested_nplurals(self):
        if len(self.translations) > 0:
            return max(max(self.translations.keys()) + 1, self.MIN_NPLURALS)
else:
return self.MIN_NPLURALS
def multiline_escape(string):
if EMBEDDED_NEWLINE_MATCHER.search(string):
lines = string.split('\n')
return (
'""\n'
+ '\n'.join('"{}\\n"'.format(escape(line)) for line in lines[:-1])
+ ('\n"{}"'.format(escape(lines[-1])) if len(lines[-1]) else ""))
else:
return '"{}"'.format(escape(string))
def get_msgid(message, context=None):
if context is not None:
return '{}\x04{}'.format(context, message)
else:
return message
def get_entry_sort_key(entry):
    return entry.locations, entry.context if entry.context else '', entry.message
|
inclement/kivy
|
kivy/base.py
|
Python
|
mit
| 19,012
| 0
|
# pylint: disable=W0611
'''
Kivy Base
=========
This module contains the Kivy core functionality and is not intended for end
users. Feel free to look through it, but bear in mind that calling any of
these methods directly may result in an unpredictable behavior as the calls
access directly the event loop of an application.
'''
__all__ = (
'EventLoop',
'EventLoopBase',
'ExceptionHandler',
'ExceptionManagerBase',
'ExceptionManager',
'runTouchApp',
'async_runTouchApp',
'stopTouchApp',
)
import sys
import os
from kivy.config import Config
from kivy.logger import Logger
from kivy.utils import platform
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.lang import Builder
from kivy.context import register_context
# private vars
EventLoop = None
class ExceptionHandler(object):
'''Base handler that catches exceptions in :func:`runTouchApp`.
You can subclass and extend it as follows::
class E(ExceptionHandler):
def handle_exception(self, inst):
Logger.exception('Exception catched by ExceptionHandler')
return ExceptionManager.PASS
ExceptionManager.add_handler(E())
All exceptions will be set to PASS, and logged to the console!
'''
def __init__(self):
pass
def handle_exception(self, exception):
'''Handle one exception, defaults to returning
`ExceptionManager.RAISE`.
'''
return ExceptionManager.RAISE
class ExceptionManagerBase:
'''ExceptionManager manages exceptions handlers.'''
RAISE = 0
PASS = 1
def __init__(self):
self.handlers = []
self.policy = ExceptionManagerBase.RAISE
def add_handler(self, cls):
'''Add a new exception handler to the stack.'''
if cls not in self.handlers:
self.handlers.append(cls)
def remove_handler(self, cls):
'''Remove a exception handler from the stack.'''
if cls in self.handlers:
self.handlers.remove(cls)
def handle_exception(self, inst):
'''Called when an exception occurred in the :func:`runTouchApp`
main loop.'''
ret = self.policy
for handler in self.handlers:
r = handler.handle_exception(inst)
if r == ExceptionManagerBase.PASS:
ret = r
return ret
#: Instance of a :class:`ExceptionManagerBase` implementation.
ExceptionManager = register_context('ExceptionManager', ExceptionManagerBase)
class EventLoopBase(EventDispatcher):
'''Main event loop. This loop handles the updating of input and
dispatching events.
'''
__events__ = ('on_start', 'on_pause', 'on_stop')
def __init__(self):
super(EventLoopBase, self).__init__()
self.quit = False
self.input_events = []
self.postproc_modules = []
self.status = 'idle'
self.stopping = False
self.input_providers = []
self.input_providers_autoremove = []
self.event_listeners = []
self.window = None
self.me_list = []
@property
def touches(self):
'''Return the list of all touches currently in down or move states.
'''
return self.me_list
def ensure_window(self):
'''Ensure that we have a window.
'''
import kivy.core.window # NOQA
if not self.window:
Logger.critical('App: Unable to get a Window, abort.')
sys.exit(1)
def set_window(self, window):
'''Set the window used for the event loop.
'''
self.window = window
def add_input_provider(self, provider, auto_remove=False):
'''Add a new input provider to listen for touch events.
'''
if provider not in self.input_providers:
self.input_providers.append(provider)
if auto_remove:
self.input_providers_autoremove.append(provider)
def remove_input_provider(self, provider):
'''Remove an input provider.
'''
if provider in self.input_providers:
self.input_providers.remove(provider)
def add_event_listener(self, listener):
'''Add a new event listener for getting touch events.
'''
if listener not in self.event_listeners:
self.event_listeners.append(listener)
def remove_event_listener(self, listener):
'''Remove an event listener from the list.
'''
if listener in self.event_listeners:
self.event_listeners.remove(listener)
def start(self):
'''Must be called only once before :meth:`EventLoopBase.run()`.
This starts all configured input providers.'''
self.status = 'started'
self.quit = False
for provider in self.input_providers:
provider.start()
self.dispatch('on_start')
def close(self):
'''Exit from the main loop and stop all configured
input providers.'''
self.quit = True
self.stop()
self.status = 'closed'
def stop(self):
'''Stop all input providers and call callbacks registered using
`EventLoop.add_stop_callback()`.'''
# XXX stop in reverse order that we started them!! (like push
# pop), very important because e.g. wm_touch and WM_PEN both
# store old window proc and the restore, if order is messed big
# problem happens, crashing badly without error
for provider in reversed(self.input_providers[:]):
provider.stop()
if provider in self.input_providers_autoremove:
self.input_providers_autoremove.remove(provider)
self.input_providers.remove(provider)
# ensure any restart will not break anything later.
self.input_events = []
self.stopping = False
self.status = 'stopped'
self.dispatch('on_stop')
def add_postproc_module(self, mod):
'''Add a postproc input module (DoubleTap, TripleTap, DeJitter
RetainTouch are defaults).'''
if mod not in self.postproc_modules:
self.postproc_modules.append(mod)
def remove_postproc_module(self, mod):
'''Remove a postproc module.'''
if mod in self.postproc_modules:
self.postproc_modules.remove(mod)
def remove_android_splash(self, *args):
'''Remove android presplash in SDL2 bootstrap.'''
try:
from android import remove_presplash
remove_presplash()
except ImportError:
            Logger.warning(
                'Base: Failed to import "android" module. '
'Could not remove android presplash.')
return
def post_dispatch_input(self, etype, me):
'''This function is called by :meth:`EventLoopBase.dispatch_input()`
when we want to dispatch an input event. The event is dispatched to
all listeners and if grabbed, it's dispatched to grabbed widgets.
'''
# update available list
if etype == 'begin':
self.me_list.append(me)
elif etype == 'end':
if me in self.me_list:
self.me_list.remove(me)
# dispatch to listeners
if not me.grab_exclusive_class:
for listener in self.event_listeners:
listener.dispatch('on_motion', etype, me)
# dispatch grabbed touch
me.grab_state = True
for _wid in me.grab_list[:]:
# it's a weakref, call it!
wid = _wid()
if wid is None:
# object is gone, stop.
me.grab_list.remove(_wid)
continue
root_window = wid.get_root_window()
if wid != root_window and root_window is not None:
me.push()
w, h = root_window.system_size
if platform == 'ios' or root_window._density != 1:
w, h = root_window.size
kheight = root_window.keyboard_height
smode = root_window.softinput_mode
me.scale_for_screen(w, h, rotation=root_window.rotation,
|
fxb22/BioGUI
|
plugins/Views/BLASTView.py
|
Python
|
gpl-2.0
| 5,154
| 0.005627
|
import wx
import listControl as lc
import getPlugins as gpi
from decimal import Decimal
import os
class Plugin():
def OnSize(self):
# Respond to size change
self.bPSize = self.bigPanel.GetSize()
self.list.SetSize((self.bPSize[0] - 118, self.bPSize[1] - 40))
self.ButtonShow(False)
self.SetButtons()
self.ButtonShow(True)
def Refresh(self,record):
self.GetExec(record)
def Clear(self):
self.list.Show(False)
self.ButtonShow(False)
def ButtonShow(self,tf):
for b in self.buttons:
b.Show(tf)
def SetButtons(self):
self.views = gpi.GetPlugIns(
self.hd+r"\plugins\Views\BLASTViewPlugins")
xPos = 300
self.buttons = []
for v in self.views.values():
self.buttons.append(wx.Button(self.bigPanel, -1,
str(v.GetName()),
pos = (self.bPSize[0] * xPos / 747,
self.bPSize[1] - 35),
size = (90, 22),
style = wx.NO_BORDER))
self.buttons[-1].SetBackgroundColour(
self.colorList[v.GetColors()]['Back'])
self.buttons[-1].SetForegroundColour(
self.colorList[v.GetColors()]['Fore'])
xPos += 100
            self.bigPanel.Bind(wx.EVT_BUTTON, self.DoView, self.buttons[-1])
def Init(self, parent, bigPanel, colorList):
self.hd = os.getcwd()
self.colorList = colorList
self.bigPanel = bigPanel
self.bPSize = self.bigPanel.GetSize()
self.list = lc.TestListCtrl(self.bigPanel, -1, size = (0,0),
pos = (self.bPSize[0] - 118,
self.bPSize[1] - 40),
style = wx.LC_REPORT|wx.LC_VIRTUAL,
numCols = 7)
self.list.SetBackgroundColour(
self.colorList['ViewPanelList']['Back'])
self.list.SetForegroundColour(
self.colorList['ViewPanelList']['Fore'])
self.SetButtons()
self.ListCntrlFill()
self.list.Show(True)
self.ButtonShow(False)
def GetExec(self, Rec):
self.SetButtons()
self.list.Show(True)
self.ButtonShow(True)
self.BlastRec = Rec[0]
self.OnSelect(wx.EVT_IDLE)
def ListRefill(self):
listData = dict()
j = 0
for alignment in self.BlastRec.alignments:
for hsp in alignment.hsps:
listData[j] = (str(alignment.title), alignment.length,
hsp.score,
Decimal(hsp.expect).quantize(Decimal(10) ** -5),
hsp.identities, hsp.positives, hsp.gaps)
j += 1
self.list.Refill(listData)
def ListCntrlFill(self):
cols = ['Title', 'Length', 'Score', 'E Values',
'Idents.', 'Posits.', 'Gaps']
colWidths = [318, 50, 50, 59, 48, 48, 40]
self.list.Fill(cols, colWidths)
def OnSelect(self, event):
self.ListCntrlFill()
self.ListRefill()
def RecInfo(self):
pos = self.list.GetSelected()
matches = ['']
seqs = []
titles = []
alignment = self.BlastRec.alignments[self.list.itemIndexMap[0]]
titles.append('query')
for p in pos:
alignment = self.BlastRec.alignments[self.list.itemIndexMap[p]]
for hsp in alignment.hsps:
query = ''
i = 1
strtblnk = ''
while i < hsp.query_start:
strtblnk += '-'
query += self.BlastRec.alignments[0].hsps[0].query[i-1]
i += 1
query += hsp.query
i = 0
endblnk = ''
j = len(strtblnk)+len(hsp.query)
while i + j < len(self.BlastRec.alignments[0].hsps[0].query):
endblnk += '-'
query += self.BlastRec.alignments[0].hsps[0].query[j+i]
i += 1
t = str(alignment.title).split('|')
titles.append(str(t[0] + '|' + t[1]))
matches.append(strtblnk + str(hsp.match) + endblnk)
seqs.append(query)
seqs.append(strtblnk + str(hsp.sbjct) + endblnk)
return [matches,seqs,titles]
def DoView(self,event):
for v in self.views.values():
if v.GetName() == event.GetEventObject().GetLabelText():
v.Plugin().GetExec(self.RecInfo(), self.bigPanel, self.hd,
self.BlastRec.alignments,
self.BlastRec.application)
def GetType(self):
return "Blast Results"
def GetName(self):
return "BLASTView"
def GetType():
return "Blast Results"
def GetName():
return "BLASTView"
|
skosukhin/spack
|
var/spack/repos/builtin/packages/xcb-util-wm/package.py
|
Python
|
lgpl-2.1
| 1,996
| 0.000501
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class XcbUtilWm(AutotoolsPackage):
"""The XCB util modules provides a number of libraries which sit on top
of libxcb, the core X protocol library, and some of the extension
libraries. These experimental libraries provide convenience functions
and interfaces which make the raw X protocol more usable. Some of the
libraries also provide client-side code which is not strictly part of
    the X protocol but which have traditionally been provided by Xlib."""
homepage = "https://xcb.freedesktop.org/"
url = "https://xcb.freedesktop.org/dist/xcb-util-wm-0.4.1.tar.gz"
version('0.4.1', '0831399918359bf82930124fa9fd6a9b')
depends_on('libxcb@1.4:')
depends_on('pkg-config@0.9.0:', type='build')
|
johnbellone/gtkworkbook
|
etc/socketTest.py
|
Python
|
gpl-2.0
| 626
| 0.01278
|
#!/usr/bin/env python
import sys
import optparse
import socket
def main():
p = optparse.OptionParser()
p.add_option("--port", "-p", default=8888)
p.add_option("--input", "-i", default="test.txt")
    options, arguments = p.parse_args()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", options.port))
fp = open(options.input, "r")
ii = 0
sock.sendall ("^0^1^sheet1^1000000^3\n")
while ii < 1000000:
sock.sendall ("^%d^0^sheet1^%d^0^^0\n" %(ii, ii))
ii = ii + 1
sock.close()
if __name__ == '__main__':
main()
|
splone/splonebox-client
|
test/functional/test_complete_call.py
|
Python
|
lgpl-3.0
| 4,205
| 0
|
"""
This file is part of the splonebox python client library.
The splonebox python client library is free software: you can
redistribute it and/or modify it under the terms of the GNU Lesser
General Public License as published by the Free Software Foundation,
either version 3 of the License or any later version.
It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this splonebox python client library. If not,
see <http://www.gnu.org/licenses/>.
"""
import ctypes
import unittest
import msgpack
from splonebox.api.plugin import Plugin
from splonebox.api.remoteplugin import RemotePlugin
from splonebox.api.core import Core
from splonebox.api.remotefunction import RemoteFunction
from splonebox.rpc.message import MRequest, MResponse
from threading import Lock
from test import mocks
class CompleteCall(unittest.TestCase):
def setUp(self):
# cleanup remote_functions
RemoteFunction.remote_functions = []
def test_complete_run(self):
# In this test a plugin is created and is calling itself.
# The called_lock is used to ensure that we receive
# the response before the result
called_lock = Lock()
called_lock.acquire()
def add(a: ctypes.c_int64, b: ctypes.c_int64):
"add two ints"
called_lock.acquire()
return a + b
RemoteFunction(add)
core = Core()
plug = Plugin("foo", "bar", "bob", "alice", core)
rplug = RemotePlugin("plugin_id", "foo", "bar", "bob", "alice", core)
mock_send = mocks.rpc_connection_send(core._rpc)
result = rplug.run("add", [7, 8])
# receive request
msg = MRequest.from_unpacked(msgpack.unpackb(mock_send.call_args[0][
0]))
msg.arguments[0][0] = None # remove plugin id
msg.arguments[0][1] = 123 # set call id
core._rpc._message_callback(msg.pack())
# receive response
data = mock_send.call_args_list[1][0][0]
core._rpc._message_callback(data)
# start execution
called_lock.release()
# wait for execution to finish
plug._active_threads[123].join()
# receive result
data = mock_send.call_args_list[2][0][0]
core._rpc._message_callback(data)
self.assertEqual(result.get_result(blocking=True), 15)
self.assertEqual(result.get_status(), 2)
self.assertEqual(result._error, None)
self.assertEqual(result.get_id(), 123)
def test_complete_register(self):
def fun():
pass
RemoteFunction(fun)
core = Core()
plug = Plugin("foo", "bar", "bob", "alice", core)
mock_send = mocks.rpc_connection_send(core._rpc)
result = plug.register(blocking=False)
outgoing = msgpack.unpackb(mock_send.call_args_list[0][0][0])
# validate outgoing
self.assertEqual(0, outgoing[0])
self.assertEqual(b'register', outgoing[2])
self.assertEqual(
[b"foo", b"bar", b"bob", b"alice"], outgoing[3][0])
self.assertIn([b'fun', b'', []], outgoing[3][1])
# test response handling
self.assertEqual(result.get_status(), 0) # no response yet
response = MResponse(outgoing[1])
# send invalid response (Second field is set to None)
core._rpc._handle_response(response)
        self.assertEqual(result.get_status(), -1)
        # make sure response is only handled once
with self.assertRaises(KeyError):
core._rpc._handle_response(response)
# test valid response
result = plug.register(blocking=False)
outgoing = msgpack.unpackb(mock_send.call_args_list[1][0][0])
response = MResponse(outgoing[1])
response.response = []
core._rpc._handle_response(response)
self.assertEqual(result.get_status(), 2)
# cleanup remote_functions
RemoteFunction.remote_functions = {}
|
gepatino/github-indicator
|
ghindicator/options.py
|
Python
|
gpl-3.0
| 1,950
| 0.001539
|
# -*- coding: utf-8 -*-
"""
github-indicator options
Author: Gabriel Patiño <gepatino@gmail.com>
License: Do whatever you want
"""
import optparse
import os
import xdg.BaseDirectory
from ghindicator import language
__version__ = (0, 0, 4)
# Hack to fix a missing function in my version of xdg
if not hasattr(xdg.BaseDirectory, 'save_cache_path'):
def save_cache_path(resource):
path = os.path.join('/', xdg.BaseDirectory.xdg_cache_home, resource)
if not os.path.exists(path):
os.makedirs(path)
return path
xdg.BaseDirectory.save_cache_path = save_cache_path
APPNAME = 'github-indicator'
ICON_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), 'icons'))
DATA_DIR = xdg.BaseDirectory.save_data_path(APPNAME)
CONFIG_DIR = xdg.BaseDirectory.save_config_path(APPNAME)
CACHE_DIR = xdg.BaseDirectory.save_cache_path(APPNAME)
parser = optparse.OptionParser(version='%prog ' + '.'.join(map(str, __version__)))
parser.add_option('-s', '--status-icon', action='store_true',
dest='status_icon', default=False,
help=_('Use a gtk status icon instead of appindicator'))
parser.add_option('-u', '--username', action='store',
dest='username', default=None,
help=_('GitHub username'))
parser.add_option('-p', '--password', action='store',
dest='password', default=None,
help=_('GitHub password (won\'t be saved)'))
parser.add_option('-t', '--update-time', action='store',
dest='update_time', default=60, type='int',
help=_('Checks for status updates after the specified amount of time [in seconds].'))
parser.add_option('-l', '--log-level', action='store',
dest='log_level', default='error',
help=_('Sets logging level to one of [debug|info|warning|error|critical]'))
def get_options():
return parser.parse_args()
|
trezor/micropython
|
tests/extmod/vfs_blockdev.py
|
Python
|
mit
| 1,588
| 0.003778
|
# Test for behaviour of combined standard and extended block device
try:
import uos
uos.VfsFat
uos.VfsLfs2
except (ImportError, AttributeError):
print("S
|
KIP")
raise SystemExit
class RAMBlockDevice:
ERASE_BLOCK_SIZE = 512
def __init__(self, blocks):
self.data = bytearray(blocks * self.ERASE_BLOCK_SIZE)
def readblocks(self, block, buf, off=0):
addr = block * self.ERASE_BLOCK_SIZE + off
for i in range(len(buf)):
            buf[i] = self.data[addr + i]
def writeblocks(self, block, buf, off=None):
if off is None:
# erase, then write
off = 0
addr = block * self.ERASE_BLOCK_SIZE + off
for i in range(len(buf)):
self.data[addr + i] = buf[i]
def ioctl(self, op, arg):
if op == 4: # block count
return len(self.data) // self.ERASE_BLOCK_SIZE
if op == 5: # block size
return self.ERASE_BLOCK_SIZE
if op == 6: # erase block
return 0
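    # Illustrative note (not part of the original test): the extra `off` argument is
    # what makes this an *extended* block device -- littlefs (VfsLfs2) can do partial,
    # byte-offset reads and writes, while FAT works on whole erase blocks, e.g.
    #
    #     bdev = RAMBlockDevice(4)
    #     bdev.writeblocks(0, b"abc", 5)   # write 3 bytes at offset 5 of block 0
    #     buf = bytearray(3)
    #     bdev.readblocks(0, buf, 5)       # buf == bytearray(b'abc')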
def test(bdev, vfs_class):
print('test', vfs_class)
# mkfs
vfs_class.mkfs(bdev)
# construction
vfs = vfs_class(bdev)
# statvfs
print(vfs.statvfs('/'))
# open, write close
f = vfs.open('test', 'w')
for i in range(10):
f.write('some data')
f.close()
# ilistdir
print(list(vfs.ilistdir()))
# read
with vfs.open('test', 'r') as f:
print(f.read())
try:
bdev = RAMBlockDevice(50)
except MemoryError:
print("SKIP")
raise SystemExit
test(bdev, uos.VfsFat)
test(bdev, uos.VfsLfs2)
|
hschilling/pyOpt
|
examples/history.py
|
Python
|
gpl-3.0
| 1,731
| 0.023108
|
#!/usr/bin/env python
'''
Solves Constrained Toy Problem Storing Optimization History.
min x1^2 + x2^2
s.t.: 3 - x1 <= 0
2 - x2 <= 0
-10 <= x1 <= 10
-10 <= x2 <= 10
'''
# =============================================================================
# Standard Python modules
# =============================================================================
import os, sys, time
import pdb
# =============================================================================
# Extension modules
# =============================================================================
from pyOpt import Optimization
from pyOpt import SLSQP
# =============================================================================
#
# =============================================================================
def objfunc(x):
f = x[0]**2 + x[1]**2
g = [0.0]*2
g[0] = 3 - x[0]
g[1] = 2 - x[1]
fail = 0
return f,g,fail
# =============================================================================
#
# =============================================================================
# Instantiate Optimization Problem
opt_prob = Optimization('TOY Constraint Problem',objfunc)
opt_prob.addVar('x1','c',value=1.0,lower=0.0,upper=10.0)
opt_prob.addVar('x2','c',value=1.0,lower=0.0,upper=10.0)
opt_prob.addObj('f')
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')
print opt_prob
# Instantiate Optimizer (SLSQP) & Solve Problem Storing History
slsqp = SLSQP()
slsqp.setOption('IFILE','slsqp1.out')
slsqp(opt_prob,store_hst=True)
print opt_prob.solution(0)
# Solve Problem Using Stored History (Warm Start)
slsqp.setOption('IFILE','slsqp2.out')
slsqp(opt_prob, store_hst=True, hot_start='slsqp1')
print opt_prob.solution(1)
|
stackforge/ec2-api
|
ec2api/cmd/api_metadata.py
|
Python
|
apache-2.0
| 1,007
| 0
|
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EC2api API Metadata Server
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from ec2api import config
from ec2api import service
CONF = cfg.CONF
def main():
config.parse_args(sys.argv)
logging.setup(CONF, "ec2api")
server = service.WSGIService('metadata')
service.serve(server, workers=server.workers)
service.wait()
if __name__ == '__main__':
main()
|
ULHPC/easybuild-easyblocks
|
easybuild/easyblocks/m/metavelvet.py
|
Python
|
gpl-2.0
| 2,000
| 0.0025
|
##
# This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild
#
# Copyright:: Copyright 2012-2017 Uni.Lu/LCSB, NTUA
# Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Fotis Georgatos <fotis@cern.ch>, Kenneth Hoste
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html
##
"""
EasyBuild support for building and installing MetaVelvet, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
class EB_MetaVelvet(ConfigureMake):
"""
Support for building MetaVelvet
"""
def configure_step(self):
"""
No configure
"""
pass
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = self.cfg['start_dir']
destdir = os.path.join(self.installdir, 'bin')
        srcfile = None
        # Get executable files: for i in $(find . -maxdepth 1 -type f -perm +111 -print | sed -e 's/\.\///g' | awk '{print "\""$0"\""}' | grep -vE "\.sh|\.html"); do echo -ne "$i, "; done && echo
try:
os.makedirs(destdir)
for filename in ["meta-velvetg"]:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except OSError, err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)
def sanity_check_step(self):
"""Custom sanity check for MetaVelvet."""
custom_paths = {
'files': ['bin/meta-velvetg'],
'dirs': []
}
super(EB_MetaVelvet, self).sanity_check_step(custom_paths=custom_paths)
|
Tassie-Tux/funtoo-overlay
|
funtoo/scripts/gentoo-compare-json.py
|
Python
|
gpl-2.0
| 4,518
| 0.035414
|
#!/usr/bin/python3
# This script will compare the versions of ebuilds in the funtoo portage tree against
# the versions of ebuilds in the target portage tree. Any higher versions in the
# target Portage tree will be printed to stdout.
import portage.versions
import os,sys
import subprocess
import json
from merge_utils import *
dirpath = os.path.dirname(os.path.realpath(__file__))
print("List of differences between funtoo and gentoo")
print("=============================================")
def getKeywords(portdir, ebuild, warn):
a = subprocess.getstatusoutput(dirpath + "/keywords.sh %s %s" % ( portdir, ebuild ) )
if a[0] == 0:
my_set = set(a[1].split())
return (0, my_set)
else:
return a
if len(sys.argv) != 3:
print("Please specify funtoo tree as first argument, gentoo tree as second argument.")
sys.exit(1)
gportdir=sys.argv[2]
portdir=sys.argv[1]
def filterOnKeywords(portdir, ebuilds, keywords, warn=False):
"""
This function accepts a path to a portage tree, a list of ebuilds, and a list of
keywords. It will iteratively find the "best" version in the ebuild list (the most
recent), and then manually extract this ebuild's KEYWORDS using the getKeywords()
function. If at least one of the keywords in "keywords" cannot be found in the
ebuild's KEYWORDS, then the ebuild is removed from the return list.
Think of this function as "skimming the masked cream off the top" of a particular
set of ebuilds. This way our list has been filtered somewhat and we don't have
	gcc-6.0 in our list just because someone added it masked to the tree. It makes
comparisons fairer.
"""
filtered = ebuilds[:]
if len(ebuilds) == 0:
return []
cps = portage.versions.catpkgsplit(filtered[0])
cat = cps[0]
	pkg = cps[1]
keywords = set(keywords)
while True:
fbest = portage.versions.best(filtered)
if fbest == "":
break
retval, fkeywords = getKeywords(portdir, "%s/%s/%s.ebuild" % (cat, pkg, fbest.split("/")[1] ), warn)
if len(keywords & fkeywords) == 0:
filtered.remove(fbest)
else:
break
return filtered
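# Illustrative sketch (not part of the original script): the same "skim the masked
# cream off the top" idea on hand-written data, with a plain dict standing in for the
# real getKeywords()/keywords.sh lookup, which needs an actual portage tree on disk.
def _skim_example():
	ebuilds = ["sys-devel/gcc-5.4.0", "sys-devel/gcc-6.0.0"]
	keywords_by_ebuild = {"sys-devel/gcc-5.4.0": {"amd64", "x86"},
	                      "sys-devel/gcc-6.0.0": set()}  # fully masked top version
	wanted = {"amd64"}
	filtered = ebuilds[:]
	while filtered:
		best = portage.versions.best(filtered)
		if wanted & keywords_by_ebuild[best]:
			break
		filtered.remove(best)  # drop the masked top version and retry
	return filtered  # -> ["sys-devel/gcc-5.4.0"]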
def get_cpv_in_portdir(portdir,cat,pkg):
if not os.path.exists("%s/%s/%s" % (portdir, cat, pkg)):
return []
if not os.path.isdir("%s/%s/%s" % (portdir, cat, pkg)):
return []
files = os.listdir("%s/%s/%s" % (portdir, cat, pkg))
ebuilds = []
for file in files:
if file[-7:] == ".ebuild":
ebuilds.append("%s/%s" % (cat, file[:-7]))
return ebuilds
def version_compare(portdir,gportdir,keywords,label):
	print()
print("Package comparison for %s" % keywords)
print("============================================")
print("(note that package.{un}mask(s) are ignored - looking at ebuilds only)")
	print()
for cat in os.listdir(portdir):
if cat == ".git":
continue
if not os.path.exists(gportdir+"/"+cat):
continue
if not os.path.isdir(gportdir+"/"+cat):
continue
for pkg in os.listdir(os.path.join(portdir,cat)):
ebuilds = get_cpv_in_portdir(portdir,cat,pkg)
gebuilds =get_cpv_in_portdir(gportdir,cat,pkg)
ebuilds = filterOnKeywords(portdir, ebuilds, keywords, warn=True)
if len(ebuilds) == 0:
continue
fbest = portage.versions.best(ebuilds)
gebuilds = filterOnKeywords(gportdir, gebuilds, keywords, warn=False)
if len(gebuilds) == 0:
continue
gbest = portage.versions.best(gebuilds)
if fbest == gbest:
continue
# a little trickery to ignore rev differences:
fps = list(portage.versions.catpkgsplit(fbest))[1:]
gps = list(portage.versions.catpkgsplit(gbest))[1:]
gps[-1] = "r0"
fps[-1] = "r0"
if gps[-2] in [ "9999", "99999", "999999", "9999999", "99999999"]:
continue
mycmp = portage.versions.pkgcmp(fps, gps)
if mycmp == -1:
json_out[label].append("%s/%s %s %s" % (cat, pkg, gbest[len(cat)+len(pkg)+2:], fbest[len(cat)+len(pkg)+2:]))
print("%s (vs. %s in funtoo)" % ( gbest, fbest ))
json_out={}
for keyw in [ "~amd64" ]:
if keyw == "~x86":
label = "fcx8632"
elif keyw == "~amd64":
label = "fcx8664"
json_out[label] = []
if keyw[0] == "~":
# for unstable, add stable arch and ~* and * keywords too
keyw = [ keyw, keyw[1:], "~*", "*"]
else:
# for stable, also consider the * keyword
keyw = [ keyw, "*"]
version_compare(portdir,gportdir,keyw,label)
for key in json_out:
json_out[key].sort()
json_out[key] = ",".join(json_out[key])
jsonfile = "/home/ports/public_html/my.json"
a = open(jsonfile, 'w')
json.dump(json_out, a, sort_keys=True, indent=4, separators=(',',": "))
a.close()
print("Wrote output to %s" % jsonfile)
|
bad-ants-fleet/ribolands
|
docs/conf.py
|
Python
|
mit
| 8,549
| 0.005966
|
# -*- coding: utf-8 -*-
#
# ribolands documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 18 15:59:48 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import alabaster
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ribolands'
copyright = u'2016, Stefan Badelt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ribolandsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ribolands.tex', u'ribolands Documentation',
u'Stefan Badelt', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ribolands', u'ribolands Documentation',
[u'Stefan Badelt'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ribolands', u'ribolands Documentation',
u'Stefan Badelt', 'ribolands', 'One line description of project.',
|
jensenbox/singnet
|
agent/adapters/tensorflow/__init__.py
|
Python
|
mit
| 57
| 0
|
# adapters/tensorflow module initialization goes here...
|
floemker/django-wiki
|
src/wiki/plugins/links/__init__.py
|
Python
|
gpl-3.0
| 59
| 0
|
default_app_config = 'wiki.plugins.links.apps.LinksConfig'
|
sparkslabs/kamaelia
|
Sketches/MPS/Old/SoC/simplecube.py
|
Python
|
apache-2.0
| 4,689
| 0.025592
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import Axon
class angleIncrement(Axon.Component.component):
def main(self):
angle = 0
while 1:
self.send(angle, "outbox")
angle += 0.1
if angle > 360:
angle -= 360
yield 1
class bounce3D(Axon.Component.component):
def main(self):
position = [ 0.0,0.0,-5.0 ]
dz = 0.01
while 1:
if abs(position[2]+10)>5: dz = -dz
position[2] += dz
self.send(position, "outbox")
yield 1
class rotatingCube(Axon.Component.component):
Inboxes = {
"inbox": "not used",
"control": "ignored",
"angle" : "We expect to recieve messages telling us the angle of rotation",
"position" : "We expect to receive messages telling us the new position",
}
def main(self):
pygame.init()
screen = pygame.display.set_mode((300,300),OPENGL|DOUBLEBUF)
pygame.display.set_caption('Simple cube')
# background
glClearColor(0.0,0.0,0.0,0.0)
# enable depth tests
glClearDepth(1.0)
glEnable(GL_DEPTH_TEST)
# projection matrix
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(300)/float(300), 0.1, 100.0)
# model matrix
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
pygame.display.flip()
angle=0
position = (0.0,0.0,-15.0)
while 1:
yield 1
for event in pygame.event.get():
if event.type == QUIT:
return
# clear screen
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
while self.dataReady("angle"):
# Use a while loop to ensure we clear the inbox to avoid messages piling up.
angle = self.recv("angle")
while self.dataReady("position"):
position = self.recv("position")
# translation and rotation
glPushMatrix()
glTranslate(*position)
glRotate(angle, 1.0,1.0,1.0)
# draw faces
glBegin(GL_QUADS)
glColor3f(1.0,0.0,0.0)
glVertex3f(1.0,1.0,1.0)
glVertex3f(1.0,-1.0,1.0)
glVertex3f(-1.0,-1.0,1.0)
glVertex3f(-1.0,1.0,1.0)
glColor3f(0.0,1.0,0.0)
glVertex3f(1.0,1.0,-1.0)
glVertex3f(1.0,-1.0,-1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0,1.0,-1.0)
glColor3f(0.0,0.0,1.0)
glVertex3f(1.0,1.0,1.0)
glVertex3f(1.0,-1.0,1.0)
glVertex3f(1.0,-1.0,-1.0)
glVertex3f(1.0,1.0,-1.0)
glColor3f(1.0,0.0,1.0)
glVertex3f(-1.0,1.0,1.0)
glVertex3f(-1.0,-1.0,1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(-1.0,1.0,-1.0)
glColor3f(0.0,1.0,1.0)
glVertex3f(1.0,1.0,1.0)
            glVertex3f(-1.0,1.0,1.0)
glVertex3f(-1.0,1.0,-1.0)
glVertex3f(1.0,1.0,-1.0)
glColor3f(1.0,1.0,0.0)
glVertex3f(1.0,-1.0,1.0)
glVertex3f(-1.0,-1.0,1.0)
glVertex3f(-1.0,-1.0,-1.0)
glVertex3f(1.0,-1.0,-1.0)
glEnd()
glPopMatrix()
glFlush()
            pygame.display.flip()
if __name__=='__main__':
from Kamaelia.Util.Graphline import Graphline
Graphline(
TRANSLATION = bounce3D(),
ROTATION = angleIncrement(),
CUBE = rotatingCube(),
linkages = {
("ROTATION", "outbox") : ("CUBE", "angle"),
("TRANSLATION", "outbox") : ("CUBE", "position"),
}
).run()
|
emburse/python-quickbooks
|
setup.py
|
Python
|
mit
| 1,374
| 0
|
import codecs
import os
from setuptools import setup, find_packages
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
VERSION = (0, 3, 9)
version = '.'.join(map(str, VERSION))
setup(
name='python-quickbooks',
version=version,
author='Edward Emanuel Jr.',
author_email='edward@sidecarsinc.com',
description='A Python library for accessing the Quickbooks API.',
url='https://github.com/sidecars/python-quickbooks',
license='MIT',
keywords=['quickbooks', 'qbo', 'accounting'],
long_description=read('README.rst'),
install_requires=[
'setuptools',
'rauth>=0.7.1',
'requests>=2.7.0',
'simplejson>=2.2.0',
'six>=1.4.0',
'python-dateutil',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
packages=find_packages(),
)
|
vstconsulting/polemarch
|
polemarch/main/migrations/0033_auto_20171211_0732.py
|
Python
|
agpl-3.0
| 628
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-11 07:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
('main', '0032_history_json_args'),
]
operations = [
migrations.AddField(
model_name='history',
name='revision',
field=models.CharField(blank=True, max_length=256, null=True),
),
migrations.AlterField(
model_name='history',
name='json_args',
field=models.TextField(default='{}'),
),
]
|
igryski/Indices_icclim_ClipC
|
src/PRECIP/get_put_invar_tracking_id_python_PRECIP.py
|
Python
|
gpl-3.0
| 11,422
| 0.036508
|
# Import the old tracking id from the RCM file for period 19660101-19701231,
# as it is the first used file to create historical indices:
# (e.g. tasmin_EUR-44_IPSL-IPSL-CM5A-MR_historical_r1i1p1_SMHI-RCA4_v1_day_19660101-19701231.nc)
#track_GCM_indice=$(
import netCDF4
from netCDF4 import Dataset
import ctypes
import icclim
import datetime
import icclim.util.callback as callback
#cb = callback.defaultCallback
import fnmatch
import os
print
#print '<<Loaded python modules>>'
print
# =====================================================================================================
# Define some paths
experiments_list = ['rcp45','rcp85']
for experiment in experiments_list:
# RCM output data and output of calculated indices
nobackup='/net/pc150394/nobackup/users/stepanov/'
# Precip (bias corrected)
in_path_RCM_pr_nbc_50km=nobackup+"CLIPC/Model_data/pr/"+experiment+"/50km/daily/SMHI_DBS43_2006_2100/"
out_path_RCM_pr_nbc_50km=nobackup+"icclim_indices_v4.2.3_seapoint_fixed/EUR-44/"+experiment+"/pr/"
# output path still for test only
# =====================================================================================================
# Every RCM output file has predictable root name (specific to resolution!)
# ==> Construct data file names
#8/10 models. 2 more below in separate FOR loops.
models_list_50km = ['CCCma-CanESM2','CNRM-CERFACS-CNRM-CM5','NCC-NorESM1-M',
'MPI-M-MPI-ESM-LR','IPSL-IPSL-CM5A-MR','MIROC-MIROC5',
'NOAA-GFDL-GFDL-ESM2M','CSIRO-QCCCE-CSIRO-Mk3-6-0']
#models_list_50km = ['CCCma-CanESM2']
#models_list_50km = ['CNRM-CERFACS-CNRM-CM5']
for model in models_list_50km:
# CONSTRUCT RCM FILE NAMES
# New root for non-bias corrected (!nbc!) files:
pr_nbc_file_root_hist = "prAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
pr_nbc_file_root_proj = "prAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
# Explicit list
files_pr_nbc_50km_hist = in_path_RCM_pr_nbc_50km+pr_nbc_file_root_hist+"19660101-19701231.nc"
files_pr_nbc_50km_proj = in_path_RCM_pr_nbc_50km+pr_nbc_file_root_proj+"20060101-20101231.nc"
# Tell me which files you imported
print 'Historical input Model files:', files_pr_nbc_50km_hist # sep='\n'
print 'Projection input Model files:', files_pr_nbc_50km_proj # sep='\n'
# CONSTRUCT INDICES FILE NAMES
# Create datasets from netCDF files
nc_in_hist = Dataset(files_pr_nbc_50km_hist,'r')
nc_in_proj = Dataset(files_pr_nbc_50km_proj,'r')
# Print current GCM tracking id
# Historical
print
print
print "For historical model:", model
print "Historical tracking id", nc_in_hist.tracking_id
print
for file_hist in os.listdir(out_path_RCM_pr_nbc_50km):
# ----------------------------------------------------------------
# Pre-change of
# model name in output file for models:
# indice into r1m when writing output file:
#
# NCC-NorESM1-M --> NorESM1-M
# MIROC-MIROC5 --> MIROC5
model_fout=model
#print "input model_fout is: ",model
if model == 'NCC-NorESM1-M': model_fout='NorESM1-M'
elif model == 'MIROC-MIROC5': model_fout='MIROC5'
elif model == 'CNRM-CERFACS-CNRM-CM5': model_fout='CNRM-CM5'
elif model == 'MPI-M-MPI-ESM-LR': model_fout='MPI-ESM-LR'
elif model == 'IPSL-IPSL-CM5A-MR': model_fout='IPSL-CM5A-MR'
elif model == 'NOAA-GFDL-GFDL-ESM2M': model_fout='GFDL-ESM2M'
elif model == 'CSIRO-QCCCE-CSIRO-Mk3-6-0': model_fout='CSIRO-Mk3-6-0'
else: model_fout=model
#print "new model_fout is: ",model_fout
        #if fnmatch.fnmatch(file_hist, '*CCCma-CanESM2_historical*'):
if fnmatch.fnmatch(file_hist, "*"+model_fout+"_historical*"):
#if fnmatch.fnmatch(file_hist, "*historical*"):
print "Indice where new historical invar_tracking_id goes is:", file_hist
            #print
#print '%s' % (model)
# Create Dataset from these files
nc_indice_pr_hist = Dataset(out_path_RCM_pr_nbc_50km+file_hist,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_pr_hist.comment='fun'
nc_indice_pr_hist.invar_tracking_id=nc_in_hist.tracking_id
#nc_in_hist.comment = 'test'
#nc_in_hist.invar_tracking_id_test = 'test'
# Projections
print
print
print "For projections model:", model
print "Projection tracking id", nc_in_proj.tracking_id
print
print
for file_proj in os.listdir(out_path_RCM_pr_nbc_50km):
# ----------------------------------------------------------------
# Pre-change of
# model name in output file for models:
# indice into r1m when writing output file:
#
# NCC-NorESM1-M --> NorESM1-M
# MIROC-MIROC5 --> MIROC5
model_fout=model
#print "input model_fout is: ",model
if model == 'NCC-NorESM1-M': model_fout='NorESM1-M'
elif model == 'MIROC-MIROC5': model_fout='MIROC5'
elif model == 'CNRM-CERFACS-CNRM-CM5': model_fout='CNRM-CM5'
elif model == 'MPI-M-MPI-ESM-LR': model_fout='MPI-ESM-LR'
elif model == 'IPSL-IPSL-CM5A-MR': model_fout='IPSL-CM5A-MR'
elif model == 'NOAA-GFDL-GFDL-ESM2M': model_fout='GFDL-ESM2M'
elif model == 'CSIRO-QCCCE-CSIRO-Mk3-6-0': model_fout='CSIRO-Mk3-6-0'
else: model_fout=model
#print "new model_fout is: ",model_fout
if fnmatch.fnmatch(file_proj, "*"+model_fout+"_"+experiment+"*"):
print "Indice where new projection invar_tracking_id goes is:", file_proj
print
# Create Dataset from these files
nc_indice_pr_proj = Dataset(out_path_RCM_pr_nbc_50km+file_proj,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_pr_hist.comment='fun'
nc_indice_pr_proj.invar_tracking_id=nc_in_proj.tracking_id
# Had-GEM
models_list_50km_HadGEM = ['MOHC-HadGEM2-ES']
for model in models_list_50km_HadGEM:
# CONSTRUCT RCM FILE NAMES
# New root for non-bias corrected (!nbc!) files:
pr_nbc_file_root_hist = "prAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
pr_nbc_file_root_proj = "prAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
# Explicit list
files_pr_nbc_50km_hist = in_path_RCM_pr_nbc_50km+pr_nbc_file_root_hist+"19660101-19701230.nc"
files_pr_nbc_50km_proj = in_path_RCM_pr_nbc_50km+pr_nbc_file_root_proj+"20060101-20101230.nc"
# Tell me which files you imported
print 'Historical input Model files:', files_pr_nbc_50km_hist # sep='\n'
print 'Projection input Model files:', files_pr_nbc_50km_proj # sep='\n'
# CONSTRUCT INDICES FILE NAMES
# Create datasets from netCDF files
nc_in_hist = Dataset(files_pr_nbc_50km_hist,'r')
nc_in_proj = Dataset(files_pr_nbc_50km_proj,'r')
# Print current GCM tracking id
# Historical
print
print
print "For historical model:", model
print "Historical tracking id", nc_in_hist.tracking_id
print
for file_hist in os.listdir(out_path_RCM_pr_nbc_50km):
#if fnmatch.fnmatch(file_hist, '*CCCma-CanESM2_historical*'):
if fnmatch.fnmatch(file_hist, "*"+model[5:15]+"_historical*"):
#if fnmatch.fnmatch(file_hist, "*historical*"):
print "Indice where new historical invar_tracking_id goes is:", file_hist
#print
#print '%s' % (model)
# Create Dataset from these files
nc_indice_pr_hist = Dataset(out_path_RCM_pr_nbc_50km+file_hist,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_pr_hist.comment='fun'
nc_indice_pr_hist.invar_tracking_id=nc_in_hist.tracking_id
#nc_in_hist.comment = 'test'
#nc_in_hist.invar_tracking_id_test = 'test'
# Projections
print
print
print "For projections model:", model
print "Projection tracking id", nc_in_proj.tracking_id
print
print
for
|
kochetov-a/python_training
|
fixture/session.py
|
Python
|
apache-2.0
| 2,654
| 0.001881
|
# Helper class for working with the session
class SessionHelper:
def __init__(self, app):
self.app = app
    # Logs in to the site
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
    # Logs out of the site
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
    # Destroys the fixture after the test run finishes
def destroy(self):
self.app.wd.quit()
    # Makes sure the user is logged out
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
    # Checks whether the user is logged in
def is_logged_in(self):
wd = self.app.wd
        # If the page contains an element with the text "Logout", the user is logged in
return len(wd.find_elements_by_link_text("Logout")) > 0
    # Checks which username the current session is logged in as
def is_logged_in_as(self, username):
wd = self.app.wd
        # If the page contains an element whose text matches the username, that user is logged in
return wd.find_element_by_xpath("//div/div[1]/form/b").text == "("+username+")"
    # Ensures the expected user is logged in while the tests are running
    def ensure_login(self, username, password):
        wd = self.app.wd
        # If the user is logged in
if self.is_logged_in():
            # And if they are logged in under the expected username
if self.is_logged_in_as(username):
                # Then there is nothing to do
return
else:
                # Otherwise log out so we can log in with the right user
self.logout()
self.login(username, password)
|
tensorflow/tensorflow
|
tensorflow/python/eager/lift_to_graph_test.py
|
Python
|
apache-2.0
| 3,531
| 0.004815
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lift_to_graph."""
from tensorflow.python.eager import def_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import compat
class LiftToGraphTest(test.TestCase):
def testCaptureOrdering(self):
v1 = resource_variable_ops.ResourceVariable(1.0)
v2 = resource_variable_ops.ResourceVariable(2.0)
v3 = resource_variable_ops.ResourceVariable(3.0)
@def_function.function
def fn():
return v1 + v2 + v3
concrete_fn = fn.get_concrete_function()
original_captures = concrete_fn.graph.internal_captures
outputs = concrete_fn.graph.outputs
for _ in range(100):
g = func_graph.FuncGraph('lifted')
lift_to_graph.lift_to_graph(
outputs, g, add_sources=True, handle_captures=True)
lifted_captures = g.internal_captures
self.assertLen(lifted_captures, 3)
for original, lifted in zip(original_captures, lifted_captures):
self.assertEqual(original.name, lifted.name)
def testClassAttrsRemoved(self):
"""Tests that _class attrs (from colocate_with()) are removed."""
@def_function.function
def fn():
two = constant_op.constant(2.0, name='two')
ten = constant_op.constant(10.0, name='ten')
twenty = math_ops.multiply(two, ten, name='twenty')
three = constant_op.constant(3.0, name='three')
with framework_ops.colocate_with(twenty):
thirty = math_ops.multiply(three, ten, name='thirty')
return ten, twenty, thirty
concrete_fn = fn.get_concrete_function()
self.assertItemsEqual( # Before lifting, 'fn' has colocation attrs.
concrete_fn.graph.get_operation_by_name('thirty').colocation_groups(),
[compat.as_bytes('loc:@twenty')])
thirty_out = concrete_fn.graph.outputs[2]
g = func_graph.FuncGraph('lifted')
lift_to_graph.lift_to_graph([thirty_out], g)
# After lifting, colocation attrs are gone.
ops = g.get_operations()
self.assertItemsEqual([op.name for op in ops],
['three', 'ten', 'thirty', # Lifted from `fn` body.
thirty_out.op.name]) # Wrapper for output.
for op in ops:
with self.assertRaises(ValueError):
class_attr = op.get_attr('_class') # Expected not to exist.
print('Unexpected class_attr', class_attr, 'on', op.name)
self.assertItemsEqual(op.colocation_groups(), # Expect default self-ref.
[compat.as_bytes('loc:@%s' % op.name)])
if __name__ == '__main__':
test.main()
|
sayan801/indivo_server
|
indivo/urls/carenet.py
|
Python
|
gpl-3.0
| 1,997
| 0.022534
|
from django.conf.urls.defaults import *
from indivo.views import *
from indivo.lib.utils import MethodDispatcher
urlpatterns = patterns('',
(r'^$', MethodDispatcher({
'DELETE' : carenet_delete})),
(r'^/rename$', MethodDispatcher({
'POST' : carenet_rename})),
(r'^/record$', MethodDispatcher({'GET':carenet_record})),
# Manage documents
(r'^/documents/', include('indivo.urls.carenet_documents')),
# Manage accounts
(r'^/accounts/$',
MethodDispatcher({
'GET' : carenet_account_list,
'POST' : carenet_account_create
})),
(r'^/accounts/(?P<account_id>[^/]+)$',
MethodDispatcher({ 'DELETE' : carenet_account_delete })),
# Manage apps
(r'^/apps/$',
MethodDispatcher({ 'GET' : carenet_apps_list})),
(r'^/apps/(?P<pha_email>[^/]+)$',
     MethodDispatcher({ 'PUT' : carenet_apps_create,
'DELETE': carenet_apps_delete})),
# Permissions Calls
  (r'^/accounts/(?P<account_id>[^/]+)/permissions$',
MethodDispatcher({ 'GET' : carenet_account_permissions })),
(r'^/apps/(?P<pha_email>[^/]+)/permissions$',
MethodDispatcher({ 'GET' : carenet_app_permissions })),
# Reporting Calls
(r'^/reports/minimal/procedures/$',
MethodDispatcher({'GET':carenet_procedure_list})),
(r'^/reports/minimal/simple-clinical-notes/$',
MethodDispatcher({'GET':carenet_simple_clinical_notes_list})),
(r'^/reports/minimal/equipment/$',
MethodDispatcher({'GET':carenet_equipment_list})),
(r'^/reports/minimal/measurements/(?P<lab_code>[^/]+)/$',
MethodDispatcher({'GET':carenet_measurement_list})),
(r'^/reports/(?P<data_model>[^/]+)/$',
MethodDispatcher({'GET':carenet_generic_list})),
# Demographics
(r'^/demographics$', MethodDispatcher({'GET': read_demographics_carenet})),
)
|
sixty-north/cosmic-ray
|
tools/inspector.py
|
Python
|
mit
| 491
| 0.002037
|
# This is just a simple example of how to inspect ASTs visually.
#
# This can be useful for developing new operators, etc.
import ast
from cosmic_ray.mutating import MutatingCore
from cosmic_ray.operators.comparison_operator_replacement import MutateComparisonOperator
code = "((x is not y) ^ (x is y))"
node = ast.parse(code)
print()
print(ast.dump(node))
core = MutatingCore(0)
operator = MutateComparisonOperator(core)
new_node = operator.visit(node)
print()
print(ast.dump(new_node))
|
knuu/competitive-programming
|
atcoder/abc/abc003_c.py
|
Python
|
mit
| 137
| 0
|
N, K = map(int, input().split())
R = sorted(map(int, input().split()))
ans = 0
for r in R[len(R)-K:]:
ans = (ans + r) / 2
print(ans)
|
eroicaleo/ThePythonStandardLibraryByExample
|
ch01Text/1.3re/ConstrainSearch.py
|
Python
|
mit
| 508
| 0
|
#!/usr/bin/env python3
import re
text = 'This is some text -- with punctuation.'
pattern = 'is'
print('Text :', text)
print('Pattern:', pattern)
m = re.match(pattern, text)
print('Match :', m)
s = re.search(pattern, text)
print('Search :', s)
pattern = re.compile(r'\b\w*is\w*\b')
print('Text:', text)
pos = 0
while True:
match = pattern.search(text, pos)
if not match:
break
s = match.start()
e = match.end()
print(' %2d : %2d = "%s"' % (s, e-1, text[s:e]))
pos = e
|
imownbey/dygraphs
|
plotkit_v091/doc/generate.py
|
Python
|
mit
| 867
| 0.00692
|
#!/usr/bin/python
import sys
import os
import re
sys.path.append('/home/al/sites')
os.environ['DJANGO_SETTINGS_MODULE'] = '__main__'
DEFAULT_CHARSET = "utf-8"
TEMPLATE_DEBUG = False
LANGUAGE_CODE = "en"
INSTALLED_APPS = (
'django.contrib.markup',
)
TEMPLATE_DIRS = (
'/home/al/sites/liquidx/templates',
'.'
)
from django.template import Template, Context, loader
def make(src, dst):
print '%s -> %s' % (src, dst)
c = Context({})
    filled = loader.render_to_string(src, {})
open(dst, 'w').write(filled)
if __name__ == "__main__":
for dirname, dirs, files in os.walk('.'):
if re.search('/\.svn', dirname):
continue
for f in files:
if f[-4:] == ".txt":
newname = f.replace('.txt', '.html')
make(os.path.join(dirname, f), os.path.join(dirname, newname))
|
allenai/document-qa
|
docqa/configurable.py
|
Python
|
apache-2.0
| 6,024
| 0.001992
|
import json
from collections import OrderedDict
from inspect import signature
from warnings import warn
import numpy as np
from sklearn.base import BaseEstimator
class Configuration(object):
def __init__(self, name, version, params):
if not isinstance(name, str):
raise ValueError()
if not isinstance(params, dict):
raise ValueError()
self.name = name
self.version = version
self.params = params
def __str__(self):
if len(self.params) == 0:
return "%s-v%s" % (self.name, self.version)
json_params = config_to_json(self.params)
if len(json_params) < 200:
return "%s-v%s: %s" % (self.name, self.version, json_params)
else:
return "%s-v%s {...}" % (self.name, self.version)
def __eq__(self, other):
return isinstance(other, Configuration) and \
self.name == other.name and \
self.version == other.version and \
self.params == other.params
class Configurable(object):
"""
Configurable classes have names, versions, and a set of parameters that are either "simple" aka JSON serializable
types or other Configurable objects. Configurable objects should also be serializable via pickle.
Configurable classes are defined mainly to give us a human-readable way of reading of the `parameters`
set for different objects and to attach version numbers to them.
By default we follow the format sklearn uses for its `BaseEstimator` class, where parameters are automatically
derived based on the constructor parameters.
"""
@classmethod
def _get_param_names(cls):
        # fetch the constructor or the original constructor before
init = cls.__init__
if init is object.__init__:
# No explicit constructor to introspect
return []
init_signature = signature(init)
parameters = [p for p in init_signature.parameters
|
.values()
if p.name != 'self']
if any(p.kind == p.VAR_POSITIONAL for p in parameters):
raise RuntimeError()
return sorted([p.name for p in parameters])
@property
def name(self):
return self.__class__.__name__
@property
def version(self):
return 0
def get_params(self):
out = {}
for key in self._get_param_names():
v = getattr(self, key, None)
if isinstance(v, Configurable):
out[key] = v.get_config()
elif hasattr(v, "get_config"): # for keras objects
out[key] = {"name": v.__class__.__name__, "config": v.get_config()}
else:
out[key] = v
return out
def get_config(self) -> Configuration:
params = {k: describe(v) for k,v in self.get_params().items()}
return Configuration(self.name, self.version, params)
def __getstate__(self):
state = dict(self.__dict__)
if "version" in state:
if state["version"] != self.version:
raise RuntimeError()
else:
state["version"] = self.version
return state
def __setstate__(self, state):
if "version" not in state:
raise RuntimeError("Version should be in state (%s)" % self.__class__.__name__)
if state["version"] != self.version:
warn(("%s loaded with version %s, but class " +
"version is %s") % (self.__class__.__name__, state["version"], self.version))
if "state" in state:
self.__dict__ = state["state"]
else:
del state["version"]
self.__dict__ = state
def describe(obj):
if isinstance(obj, Configurable):
return obj.get_config()
else:
obj_type = type(obj)
if obj_type in (list, set, frozenset, tuple):
return obj_type([describe(e) for e in obj])
elif isinstance(obj, tuple):
# Name tuple, convert to tuple
return tuple(describe(e) for e in obj)
elif obj_type in (dict, OrderedDict):
output = OrderedDict()
for k, v in obj.items():
if isinstance(k, Configurable):
raise ValueError()
output[k] = describe(v)
return output
else:
return obj
class EncodeDescription(json.JSONEncoder):
""" Json encoder that encodes 'Configurable' objects as dictionaries and handles
some numpy types. Note decoding this output will not reproduce the original input,
for these types, this is only intended to be used to produce human readable output.
'"""
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.dtype):
return str(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.bool_):
return bool(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, BaseEstimator): # handle sklearn estimators
return Configuration(obj.__class__.__name__, 0, obj.get_params())
elif isinstance(obj, Configuration):
if "version" in obj.params or "name" in obj.params:
raise ValueError()
out = OrderedDict()
out["name"] = obj.name
if obj.version != 0:
out["version"] = obj.version
out.update(obj.params)
return out
elif isinstance(obj, Configurable):
return obj.get_config()
elif isinstance(obj, set):
return sorted(obj) # Ensure deterministic order
else:
try:
return super().default(obj)
except TypeError:
return str(obj)
def config_to_json(data, indent=None):
return json.dumps(data, sort_keys=False, cls=EncodeDescription, indent=indent)
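# Minimal usage sketch (not part of the original module): parameters are picked up
# automatically from the constructor signature, so get_config()/config_to_json()
# need no extra bookkeeping. The class name below is purely illustrative.
class _ExampleEncoder(Configurable):
    def __init__(self, n_units=128, activation="relu"):
        self.n_units = n_units
        self.activation = activation

# enc = _ExampleEncoder(n_units=256)
# print(config_to_json(enc.get_config()))
# -> {"name": "_ExampleEncoder", "activation": "relu", "n_units": 256}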
|
madsbk/bohrium
|
bridge/py_api/bohrium_api/messaging.py
|
Python
|
apache-2.0
| 934
| 0.001071
|
"""
Send and receive pre-defined messages through the Bohrium component stack
=========================================================================
"""
from ._bh_api import message as msg
def statistic_enable_and_reset():
"""Reset and enable the Bohrium statistic"""
return msg("statistic_enable_and_reset")
def statistic():
"""Return a YAML string of Bohrium statistic"""
return msg("statistic")
def gpu_disable():
"""Disable the GPU backend in the current runtime stack"""
return msg("GPU: disable")
def gpu_enable():
"""Enable the GPU backend in the current runtime stack"""
return msg("GPU: enable")
def runtime_info():
"""Return a YAML string describing the current Bohrium runtime"""
return msg("in
|
fo")
def cuda_use_current_context():
"""Tell the CUDA backend to use the current CUDA context (useful for PyCUDA interop)"""
return msg("CUDA: use current context")
|
kassoulet/soundconverter
|
setup.py
|
Python
|
gpl-3.0
| 2,521
| 0.000793
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# SoundConverter - GNOME application for converting between audio formats.
# Copyright 2004 Lars Wirzenius
# Copyright 2005-2020 Gautier Portet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import sys
try:
import DistUtilsExtra.auto
except ImportError:
sys.stderr.write('You need python-distutils-extra\n')
sys.exit(1)
import os
import DistUtilsExtra.auto
# This will automatically do the following, assuming that the prefix is /usr:
# - Compile and install po files to /usr/share/locale*.mo,
# - Install .desktop files to /usr/share/applications
# - Install all the py files to /usr/lib/python3.8/site-packages/soundconverter
# - Copy bin to /usr/bin
# - Copy the rest to /usr/share/soundconverter, like the .glade file
# Thanks to DistUtilsExtra (https://salsa.debian.org/python-team/modules/python-distutils-extra/-/tree/master/doc) # noqa
class Install(DistUtilsExtra.auto.install_auto):
def run(self):
DistUtilsExtra.auto.install_auto.run(self)
# After DistUtilsExtra automatically copies data/org.soundconverter.gschema.xml
# to /usr/share/glib-2.0/schemas/, it does not compile the schemas, so do that here.
glib_schema_path = os.path.join(self.install_data, 'share/glib-2.0/schemas/')
cmd = 'glib-compile-schemas {}'.format(glib_schema_path)
print('running {}'.format(cmd))
os.system(cmd)
DistUtilsExtra.auto.setup(
name='soundconverter',
version='4.0.2',
description=(
'A simple sound converter application for the GNOME environment. '
'It writes WAV, FLAC, MP3, and Ogg Vorbis files.'
),
license='GPL-3.0',
data_files=[
('share/metainfo/', ['data/soundconverter.appdata.xml']),
('share/pixmaps/', ['data/soundconverter.png']),
('share/icons/hicolor/scalable/apps/', ['data/soundconverter.svg'])
],
cmdclass={
'install': Install
}
)
|
sander76/home-assistant
|
tests/components/geonetnz_quakes/test_sensor.py
|
Python
|
apache-2.0
| 4,517
| 0.001328
|
"""The tests for the GeoNet NZ Quakes Feed integration."""
import datetime
from unittest.mock import patch
from homeassistant.components import geonetnz_quakes
from homeassistant.components.geonetnz_quakes import DEFAULT_SCAN_INTERVAL
from homeassistant.components.geonetnz_quakes.sensor import (
ATTR_CREATED,
ATTR_LAST_UPDATE,
ATTR_LAST_UPDATE_SUCCESSFUL,
ATTR_REMOVED,
ATTR_STATUS,
ATTR_UPDATED,
)
from homeassistant.const import (
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONF_RADIUS,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.geonetnz_quakes import _generate_mock_feed_entry
CONFIG = {geonetnz_quakes.DOMAIN: {CONF_RADIUS: 200}}
async def test_setup(hass, legacy_patchable_time):
"""Test the general setup of the integration."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234",
"Title 1",
15.5,
(38.0, -3.0),
locality="Locality 1",
attribution="Attribution 1",
time=datetime.datetime(2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc),
magnitude=5.7,
mmi=5,
depth=10.5,
quality="best",
)
mock_entry_2 = _generate_mock_feed_entry(
"2345", "Title 2", 20.5, (38.1, -3.1), magnitude=4.6
)
mock_entry_3 = _generate_mock_feed_entry(
"3456", "Title 3", 25.5, (38.2, -3.2), locality="Locality 3"
)
mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (38.3, -3.3))
# Patching 'utcnow' to gain more control over the timed update.
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"aio_geojson_client.feed.GeoJsonFeed.update"
) as mock_feed_update:
mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_2, mock_entry_3]
assert await async_setup_component(hass, geonetnz_quakes.DOMAIN, CONFIG)
# Artificially trigger update and collect events.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
all_states = hass.states.async_all()
# 3 geolocation and 1 sensor entities
assert len(all_states) == 4
state = hass.states.get("sensor.geonet_nz_quakes_32_87336_117_22743")
assert state is not None
assert int(state.state) == 3
assert state.name == "GeoNet NZ Quakes (32.87336, -117.22743)"
attributes = state.attributes
assert attributes[ATTR_STATUS] == "OK"
assert attributes[ATTR_CREATED] == 3
assert attributes[ATTR_LAST_UPDATE].tzinfo == dt_util.UTC
assert attributes[ATTR_LAST_UPDATE_SUCCESSFUL].tzinfo == dt_util.UTC
assert attributes[ATTR_LAST_UPDATE] == attributes[ATTR_LAST_UPDATE_SUCCESSFUL]
assert attributes[ATTR_UNIT_OF_MEASUREMENT] == "quakes"
assert attributes[ATTR_ICON] == "mdi:pulse"
# Simulate an update - two existing, one new entry, one outdated entry
mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_4, moc
|
k_entry_3]
async_fire_time_changed(hass, utcnow + DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
|
all_states = hass.states.async_all()
assert len(all_states) == 4
state = hass.states.get("sensor.geonet_nz_quakes_32_87336_117_22743")
attributes = state.attributes
assert attributes[ATTR_CREATED] == 1
assert attributes[ATTR_UPDATED] == 2
assert attributes[ATTR_REMOVED] == 1
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed_update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + 2 * DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 4
# Simulate an update - error status, removes all geolocation entities
mock_feed_update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 3 * DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
state = hass.states.get("sensor.geonet_nz_quakes_32_87336_117_22743")
attributes = state.attributes
assert attributes[ATTR_REMOVED] == 3
|
blackball/an-test6
|
util/tstimg.py
|
Python
|
gpl-2.0
| 249
| 0.052209
|
import pyfits
from numpy import *
if __name__ == '__main__':
W,H = 10,10
sigma = 1.
X,Y = meshgrid(range(W), range(H))
img = 50 + 200 * exp(-0.5 * ((X - W/2)**2 + (Y - H/2)**2)/(sigma**2))
pyfits.writeto('tstimg.fits', img, clobber=True)
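    # --- Hedged verification sketch (editor's addition, not in the original script;
    # these lines belong inside the __main__ block above). ---
    # Reads the image back with pyfits and checks the peak: background (50) plus
    # amplitude (200) at the central pixel (W/2, H/2), i.e. 250.0 for the grid above.
    check = pyfits.getdata('tstimg.fits')
    assert abs(check.max() - 250.0) < 1e-6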
|
googledatalab/pydatalab
|
datalab/bigquery/commands/_bigquery.py
|
Python
|
apache-2.0
| 43,171
| 0.012161
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Google Cloud Platform library - BigQuery IPython Functionality."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from builtins import zip
from builtins import str
from past.builtins import basestring
try:
import IPython
import IPython.core.display
import IPython.core.magic
except ImportError:
raise Exception('This module can only be loaded in ipython.')
import fnmatch
import json
import re
import datalab.bigquery
import datalab.data
import datalab.utils
import datalab.utils.commands
def _create_create_subparser(parser):
create_parser = parser.subcommand('create', 'Create a dataset or table.')
sub_commands = create_parser.add_subparsers(dest='command')
create_dataset_parser = sub_commands.add_parser('dataset', help='Create a dataset.')
create_dataset_parser.add_argument('-n', '--name', help='The name of the dataset to create.',
required=True)
create_dataset_parser.add_argument('-f', '--friendly', help='The friendly name of the dataset.')
create_table_parser = sub_commands.add_parser('table', help='Create a table.')
create_table_parser.add_argument('-n', '--name', help='The name of the table to create.',
required=True)
create_table_parser.add_argument('-o', '--overwrite', help='Overwrite table if it exists.',
action='store_true')
return create_parser
def _create_delete_subparser(parser):
delete_parser = parser.subcommand('delete', 'Delete a dataset or table.')
sub_commands = delete_parser.add_subparsers(dest='command')
delete_dataset_parser = sub_commands.add_parser('dataset', help='Delete a dataset.')
delete_dataset_parser.add_argument('-n', '--name', help='The name of the dataset to delete.',
required=True)
delete_table_parser = sub_commands.add_parser('table', help='Delete a table.')
delete_table_parser.add_argument('-n', '--name', help='The name of the table to delete.',
required=True)
required=True)
return delete_parser
def _create_sample_subparser(parser):
sample_parser = parser.subcommand('sample',
'Display a sample of the results of a BigQuery SQL query.\nThe '
'cell can optionally contain arguments for expanding variables '
'in the query,\nif -q/--query was used, or it can contain SQL '
'for a query.')
group = sample_parser.add_mutually_exclusive_group()
group.add_argument('-q', '--query', help='the name of the query to sample')
group.add_argument('-t', '--table', help='the name of the table to sample')
group.add_argument('-v', '--view', help='the name of the view to sample')
sample_parser.add_argument('-d', '--dialect', help='BigQuery SQL dialect',
choices=['legacy', 'standard'])
sample_parser.add_argument('-b', '--billing', type=int, help='BigQuery billing tier')
sample_parser.add_argument('-c', '--count', type=int, default=10,
help='The number of rows to limit to, if sampling')
sample_parser.add_argument('-m', '--method', help='The type of sampling to use',
choices=['limit', 'random', 'hashed', 'sorted'], default='limit')
sample_parser.add_argument('-p', '--percent', type=int, default=1,
help='For random or hashed sampling, what percentage to sample from')
sample_parser.add_argument('-f', '--field',
help='The field to use for sorted or hashed sampling')
sample_parser.add_argument('-o', '--order', choices=['ascending', 'descending'],
default='ascending', help='The sort order to use for sorted sampling')
sample_parser.add_argument('-P', '--profile', action='store_true',
default=False, help='Generate an interactive profile of the data')
sample_parser.add_argument('--verbose',
help='Show the expanded SQL that is being executed',
action='store_true')
return sample_parser
def _create_udf_subparser(parser):
udf_parser = parser.subcommand('udf', 'Create a named Javascript BigQuery UDF')
udf_parser.add_argument('-m', '--module', help='The name for this UDF')
return udf_parser
def _create_dry_run_subparser(parser):
dry_run_parser = parser.subcommand('dryrun',
'Execute a dry run of a BigQuery query and display '
'approximate usage statistics')
dry_run_parser.add_argument('-q', '--query',
help='The name of the query to be dry run')
dry_run_parser.add_argument('-d', '--dialect', help='BigQuery SQL dialect',
choices=['legacy', 'standard'])
dry_run_parser.add_argument('-b', '--billing', type=int, help='BigQuery billing tier')
dry_run_parser.add_argument('-v', '--verbose',
help='Show the expanded SQL that is being executed',
action='store_true')
return dry_run_parser
def _create_execute_subparser(parser):
execute_parser = parser.subcommand('execute',
'Execute a BigQuery SQL query and optionally send the results '
'to a named table.\nThe cell can optionally contain arguments '
'for expanding variables in the query.')
execute_parser.add_argument('-nc', '--nocache', help='Don\'t use previously cached results',
action='store_true')
execute_parser.add_argument('-d', '--dialect', help='BigQuery SQL dialect',
choices=['legacy', 'standard'])
execute_parser.add_argument('-b', '--billing', type=int, help='BigQuery billing tier')
execute_parser.add_argument('-m', '--mode', help='The table creation mode', default='create',
choices=['create', 'append', 'overwrite'])
execute_parser.add_argument('-l', '--large', help='Whether to allow large results',
action='store_true')
execute_parser.add_argument('-q', '--query', help='The name of query to run')
execute_parser.add_argument('-t', '--target', help='target table name')
execute_parser.add_argument('-v', '--verbose',
help='Show the expanded SQL that is being executed',
action='store_true')
return execute_parser
def _create_pipeline_subparser(parser):
pipeline_parser = parser.subcommand('pipeline',
'Define a deployable pipeline based on a BigQuery query.\n'
'The cell can optionally contain arguments for expanding '
'variables in the query.')
pipeline_parser.add_argument('-n', '--name', help='The pipeline name')
pipeline_parser.add_argument('-nc', '--nocache', help='Don\'t use previously cached results',
action='store_true')
pipeline_parser.add_argument('-d', '--dialect', help='BigQuery SQL dialect',
choices=['legacy', 'standard'])
pipeline_parser.add_argument('-b', '--billing', type=int, help='BigQuery billing tier')
pipeline_parser.add_argument('-m', '--mode', help='The table creation mode', default='cre
|